Class: GridFSBucket

new GridFSBucket(db, options) → {GridFSBucket}

Constructor for a streaming GridFS interface.

Name     Type    Default  Description
db       Db               A db handle
options  object  null     Optional settings (see below)

Options (all optional):

  Name            Type    Default     Description
  bucketName      string  "fs"        The 'files' and 'chunks' collections will be prefixed with the bucket name followed by a dot
  chunkSizeBytes  number  255 * 1024  Number of bytes stored in each chunk (255 KB by default)
  writeConcern    object  null        Write concern to be passed to write operations, for instance { w: 1 }
  readPreference  object  null        Read preference to be passed to read operations

Fires:
  • GridFSBucketWriteStream#event:index
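
As a minimal sketch (assuming a mongod running on localhost), a bucket that stores files in 'photos.files' and 'photos.chunks' with 1 MB chunks could be constructed like this:

var MongoClient = require('mongodb').MongoClient,
  GridFSBucket = require('mongodb').GridFSBucket;

MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
  // Collections become 'photos.files' and 'photos.chunks' instead of 'fs.*'
  var bucket = new GridFSBucket(db, {
    bucketName: 'photos',
    chunkSizeBytes: 1024 * 1024,
    writeConcern: { w: 1 }
  });
  // ... use bucket.openUploadStream(), bucket.find(), etc.
});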

Methods

delete(id, callback)

Deletes a file with the given id

Name      Type                        Description
id        ObjectId                    The id of the file doc
callback  GridFSBucket~errorCallback  Optional error callback
Examples
// Deleting a file from GridFS

var MongoClient = require('mongodb').MongoClient,
  GridFSBucket = require('mongodb').GridFSBucket,
  fs = require('fs'),
  test = require('assert');
MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
  var bucket = new GridFSBucket(db, { bucketName: 'gridfsdownload' });
  var CHUNKS_COLL = 'gridfsdownload.chunks';
  var FILES_COLL = 'gridfsdownload.files';
  var readStream = fs.createReadStream('./LICENSE');

  var uploadStream = bucket.openUploadStream('test.dat');

  var license = fs.readFileSync('./LICENSE');
  var id = uploadStream.id;

  uploadStream.once('finish', function() {
    bucket.delete(id, function(error) {
      test.equal(error, null);

      var chunksQuery = db.collection(CHUNKS_COLL).find({ files_id: id });
      chunksQuery.toArray(function(error, docs) {
        test.equal(error, null);
        test.equal(docs.length, 0);

        var filesQuery = db.collection(FILES_COLL).find({ _id: id });
        filesQuery.toArray(function(error, docs) {
          test.equal(error, null);
          test.equal(docs.length, 0);

        });
      });
    });
  });

  readStream.pipe(uploadStream);
});
// Deleting a file from GridFS using promises

var MongoClient = require('mongodb').MongoClient,
  GridFSBucket = require('mongodb').GridFSBucket,
  fs = require('fs'),
  test = require('assert');
MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
  var bucket = new GridFSBucket(db, { bucketName: 'gridfsdownload' });
  var CHUNKS_COLL = 'gridfsdownload.chunks';
  var FILES_COLL = 'gridfsdownload.files';
  var readStream = fs.createReadStream('./LICENSE');

  var uploadStream = bucket.openUploadStream('test.dat');

  var license = fs.readFileSync('./LICENSE');
  var id = uploadStream.id;

  uploadStream.once('finish', function() {
    bucket.delete(id).then(function() {
      var chunksQuery = db.collection(CHUNKS_COLL).find({ files_id: id });
      chunksQuery.toArray(function(error, docs) {
        test.equal(error, null);
        test.equal(docs.length, 0);

        var filesQuery = db.collection(FILES_COLL).find({ _id: id });
        filesQuery.toArray(function(error, docs) {
          test.equal(error, null);
          test.equal(docs.length, 0);

        });
      });
    });
  });

  readStream.pipe(uploadStream);
});

drop(callback)

Removes this bucket's files collection, followed by its chunks collection.

Name      Type                        Description
callback  GridFSBucket~errorCallback  Optional error callback
Examples
// Drop an entire bucket's files and chunks

var MongoClient = require('mongodb').MongoClient,
  GridFSBucket = require('mongodb').GridFSBucket,
  fs = require('fs'),
  test = require('assert');
MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
  var bucket = new GridFSBucket(db, { bucketName: 'gridfsdownload' });
  var CHUNKS_COLL = 'gridfsdownload.chunks';
  var FILES_COLL = 'gridfsdownload.files';
  var readStream = fs.createReadStream('./LICENSE');

  var uploadStream = bucket.openUploadStream('test.dat');

  var license = fs.readFileSync('./LICENSE');
  var id = uploadStream.id;

  uploadStream.once('finish', function() {
    bucket.drop(function(error) {
      test.equal(error, null);

      var chunksQuery = db.collection(CHUNKS_COLL).find({ files_id: id });
      chunksQuery.toArray(function(error, docs) {
        test.equal(error, null);
        test.equal(docs.length, 0);

        var filesQuery = db.collection(FILES_COLL).find({ _id: id });
        filesQuery.toArray(function(error, docs) {
          test.equal(error, null);
          test.equal(docs.length, 0);

        });
      });
    });
  });

  readStream.pipe(uploadStream);
});
// Drop an entire bucket's files and chunks using promises

var MongoClient = require('mongodb').MongoClient,
  GridFSBucket = require('mongodb').GridFSBucket,
  fs = require('fs'),
  test = require('assert');
MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
  var bucket = new GridFSBucket(db, { bucketName: 'gridfsdownload' });
  var CHUNKS_COLL = 'gridfsdownload.chunks';
  var FILES_COLL = 'gridfsdownload.files';
  var readStream = fs.createReadStream('./LICENSE');

  var uploadStream = bucket.openUploadStream('test.dat');

  var license = fs.readFileSync('./LICENSE');
  var id = uploadStream.id;

  uploadStream.once('finish', function() {
    bucket.drop().then(function() {
      var chunksQuery = db.collection(CHUNKS_COLL).find({ files_id: id });
      chunksQuery.toArray(function(error, docs) {
        test.equal(error, null);
        test.equal(docs.length, 0);

        var filesQuery = db.collection(FILES_COLL).find({ _id: id });
        filesQuery.toArray(function(error, docs) {
          test.equal(error, null);
          test.equal(docs.length, 0);

        });
      });
    });
  });

  readStream.pipe(uploadStream);
});

find(filter, options) → {Cursor}

Convenience wrapper around find on the files collection

Name     Type    Default  Description
filter   Object           The query filter to run against the files collection
options  Object  null     Optional settings for cursor (see below)

Options (all optional):

  Name             Type     Default  Description
  batchSize        number   null     Batch size for cursor
  limit            number   null     Limit for cursor
  maxTimeMS        number   null     maxTimeMS for cursor
  noCursorTimeout  boolean  null     Set the cursor's noCursorTimeout flag
  skip             number   null     Skip for cursor
  sort             object   null     Sort for cursor
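
Example

A minimal sketch (not from the original docs) that lists the five most recently uploaded files named 'test.dat', assuming a bucket constructed as in the examples above:

// Query the bucket's files collection: newest first, at most five docs
bucket.find({ filename: 'test.dat' }, { sort: { uploadDate: -1 }, limit: 5 })
  .toArray(function(error, files) {
    if (error) throw error;
    files.forEach(function(file) {
      console.log(file._id, file.filename, file.length, file.uploadDate);
    });
  });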

openDownloadStream(id, options) → {GridFSBucketReadStream}

Returns a readable stream (GridFSBucketReadStream) for streaming file
data from GridFS.

Name     Type      Default  Description
id       ObjectId           The id of the file doc
options  Object    null     Optional settings (see below)

Options (all optional):

  Name   Type    Default  Description
  start  Number  null     0-based offset in bytes to start streaming from
  end    Number  null     0-based offset in bytes to stop streaming before

Examples
// Correctly upload a file to GridFS and then retrieve it as a stream

var MongoClient = require('mongodb').MongoClient,
  GridFSBucket = require('mongodb').GridFSBucket,
  fs = require('fs'),
  crypto = require('crypto'),
  test = require('assert');
MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
  var bucket = new GridFSBucket(db, { bucketName: 'gridfsdownload' });
  var CHUNKS_COLL = 'gridfsdownload.chunks';
  var FILES_COLL = 'gridfsdownload.files';
  var readStream = fs.createReadStream('./LICENSE');

  var uploadStream = bucket.openUploadStream('test.dat');

  var license = fs.readFileSync('./LICENSE');
  var id = uploadStream.id;

  uploadStream.once('finish', function() {
    var downloadStream = bucket.openDownloadStream(id);
    uploadStream = bucket.openUploadStream('test2.dat');
    id = uploadStream.id;

    downloadStream.pipe(uploadStream).once('finish', function() {
      var chunksQuery = db.collection(CHUNKS_COLL).find({ files_id: id });
      chunksQuery.toArray(function(error, docs) {
        test.equal(error, null);
        test.equal(docs.length, 1);
        test.equal(docs[0].data.toString('hex'), license.toString('hex'));

        var filesQuery = db.collection(FILES_COLL).find({ _id: id });
        filesQuery.toArray(function(error, docs) {
          test.equal(error, null);
          test.equal(docs.length, 1);

          var hash = crypto.createHash('md5');
          hash.update(license);
          test.equal(docs[0].md5, hash.digest('hex'));
        });
      });
    });
  });

  readStream.pipe(uploadStream);
});
// Provide start and end parameters for file download to skip ahead x bytes and limit the total number of bytes read to n

var MongoClient = require('mongodb').MongoClient,
  GridFSBucket = require('mongodb').GridFSBucket,
  fs = require('fs'),
  test = require('assert');
MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
  var bucket = new GridFSBucket(db, {
    bucketName: 'gridfsdownload',
    chunkSizeBytes: 2
  });
  var CHUNKS_COLL = 'gridfsdownload.chunks';
  var FILES_COLL = 'gridfsdownload.files';
  var readStream = fs.createReadStream('./LICENSE');

  var uploadStream = bucket.openUploadStream('teststart.dat');

  var license = fs.readFileSync('./LICENSE');
  var id = uploadStream.id;

  uploadStream.once('finish', function() {
    var downloadStream = bucket.openDownloadStreamByName('teststart.dat',
      { start: 1 }).end(6);

    downloadStream.on('error', function(error) {
      test.equal(error, null);
    });

    var gotData = 0;
    var str = '';
    downloadStream.on('data', function(data) {
      ++gotData;
      str += data.toString('utf8');
    });

    downloadStream.on('end', function() {
      // Depending on different versions of node, we may get
      // different amounts of 'data' events. node 0.10 gives 2,
      // node >= 0.12 gives 3. Either is correct, but we just
      // care that we got between 1 and 3, and got the right result
      test.ok(gotData >= 1 && gotData <= 3);
      test.equal(str, 'pache');
    });
  });

  readStream.pipe(uploadStream);
});

openDownloadStreamByName(filename, options) → {GridFSBucketReadStream}

Returns a readable stream (GridFSBucketReadStream) for streaming the
file with the given name from GridFS. If there are multiple files with
the same name, this will stream the most recent file with the given name
(as determined by the uploadDate field). You can set the revision
option to change this behavior.

Name      Type    Default  Description
filename  String           The name of the file to stream
options   Object  null     Optional settings (see below)

Options (all optional):

  Name      Type    Default  Description
  revision  number  -1       The revision number relative to the oldest file with the given filename. 0 gets you the oldest file, 1 gets you the 2nd oldest, -1 gets you the newest.
  start     Number  null     0-based offset in bytes to start streaming from
  end       Number  null     0-based offset in bytes to stop streaming before

Example
// Correctly download a GridFS file by name

var MongoClient = require('mongodb').MongoClient,
  GridFSBucket = require('mongodb').GridFSBucket,
  fs = require('fs'),
  test = require('assert');
MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
  var bucket = new GridFSBucket(db, { bucketName: 'gridfsdownload' });
  var CHUNKS_COLL = 'gridfsdownload.chunks';
  var FILES_COLL = 'gridfsdownload.files';
  var readStream = fs.createReadStream('./LICENSE');

  var uploadStream = bucket.openUploadStream('test.dat');

  var license = fs.readFileSync('./LICENSE');
  var id = uploadStream.id;

  uploadStream.once('finish', function() {
    var downloadStream = bucket.openDownloadStreamByName('test.dat');

    var gotData = false;
    downloadStream.on('data', function(data) {
      test.ok(!gotData);
      gotData = true;
      test.ok(data.toString('utf8').indexOf('TERMS AND CONDITIONS') !== -1);
    });

    downloadStream.on('end', function() {
      test.ok(gotData);
    });
  });

  readStream.pipe(uploadStream);
});

openUploadStream(filename, options) → {GridFSBucketWriteStream}

Returns a writable stream (GridFSBucketWriteStream) for writing
buffers to GridFS. The stream's 'id' property contains the resulting
file's id.

Name      Type    Default  Description
filename  string           The value of the 'filename' key in the files doc
options   object  null     Optional settings (see below)

Options (all optional):

  Name            Type    Default  Description
  chunkSizeBytes  number  null     Overrides this bucket's chunkSizeBytes for this file
  metadata        object  null     Object to store in the file document's metadata field
  contentType     string  null     String to store in the file document's contentType field
  aliases         array   null     Array of strings to store in the file document's aliases field

Example
// Correctly stream a file from disk into GridFS using openUploadStream

var MongoClient = require('mongodb').MongoClient,
  GridFSBucket = require('mongodb').GridFSBucket,
  fs = require('fs'),
  crypto = require('crypto'),
  test = require('assert');
MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
  db.dropDatabase(function(error) {
    test.equal(error, null);

    var bucket = new GridFSBucket(db);
    var readStream = fs.createReadStream('./LICENSE');

    var uploadStream = bucket.openUploadStream('test.dat');

    var license = fs.readFileSync('./LICENSE');
    var id = uploadStream.id;

    // Wait for stream to finish
    uploadStream.once('finish', function() {
      var chunksColl = db.collection('fs.chunks');
      var chunksQuery = chunksColl.find({ files_id: id });

      // Get all the chunks
      chunksQuery.toArray(function(error, docs) {
        test.equal(error, null);
        test.equal(docs.length, 1);
        test.equal(docs[0].data.toString('hex'), license.toString('hex'));

        var filesColl = db.collection('fs.files');
        var filesQuery = filesColl.find({ _id: id });
        filesQuery.toArray(function(error, docs) {
          test.equal(error, null);
          test.equal(docs.length, 1);

          var hash = crypto.createHash('md5');
          hash.update(license);
          test.equal(docs[0].md5, hash.digest('hex'));

          // make sure we created indexes
          filesColl.listIndexes().toArray(function(error, indexes) {
            test.equal(error, null);
            test.equal(indexes.length, 2);
            test.equal(indexes[1].name, 'filename_1_uploadDate_1');

            chunksColl.listIndexes().toArray(function(error, indexes) {
              test.equal(error, null);
              test.equal(indexes.length, 2);
              test.equal(indexes[1].name, 'files_id_1_n_1');
            });
          });
        });
      });
    });

    readStream.pipe(uploadStream);
  });
});

openUploadStreamWithId(id, filename, options) → {GridFSBucketWriteStream}

Returns a writable stream (GridFSBucketWriteStream) for writing
buffers to GridFS for a custom file id. The stream's 'id' property contains the resulting
file's id.

Name      Type                       Default  Description
id        string | number | object            A custom id used to identify the file
filename  string                              The value of the 'filename' key in the files doc
options   object                     null     Optional settings (see below)

Options (all optional):

  Name            Type    Default  Description
  chunkSizeBytes  number  null     Overrides this bucket's chunkSizeBytes for this file
  metadata        object  null     Object to store in the file document's metadata field
  contentType     string  null     String to store in the file document's contentType field
  aliases         array   null     Array of strings to store in the file document's aliases field

Example
// Correctly stream a file from disk into GridFS using openUploadStreamWithId

var MongoClient = require('mongodb').MongoClient,
  GridFSBucket = require('mongodb').GridFSBucket,
  fs = require('fs'),
  crypto = require('crypto'),
  test = require('assert');
MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
  db.dropDatabase(function(error) {
    test.equal(error, null);

    var bucket = new GridFSBucket(db);
    var readStream = fs.createReadStream('./LICENSE');

    var uploadStream = bucket.openUploadStreamWithId(1, 'test.dat');

    var license = fs.readFileSync('./LICENSE');
    var id = uploadStream.id;
    test.equal(1, id);

    // Wait for stream to finish
    uploadStream.once('finish', function() {
      var chunksColl = db.collection('fs.chunks');
      var chunksQuery = chunksColl.find({ files_id: id });

      // Get all the chunks
      chunksQuery.toArray(function(error, docs) {
        test.equal(error, null);
        test.equal(docs.length, 1);
        test.equal(docs[0].data.toString('hex'), license.toString('hex'));

        var filesColl = db.collection('fs.files');
        var filesQuery = filesColl.find({ _id: id });

        filesQuery.toArray(function(error, docs) {
          test.equal(error, null);
          test.equal(docs.length, 1);

          var hash = crypto.createHash('md5');
          hash.update(license);
          test.equal(docs[0].md5, hash.digest('hex'));

          // make sure we created indexes
          filesColl.listIndexes().toArray(function(error, indexes) {
            test.equal(error, null);
            test.equal(indexes.length, 2);
            test.equal(indexes[1].name, 'filename_1_uploadDate_1');

            chunksColl.listIndexes().toArray(function(error, indexes) {
              test.equal(error, null);
              test.equal(indexes.length, 2);
              test.equal(indexes[1].name, 'files_id_1_n_1');
            });
          });
        });
      });
    });

    readStream.pipe(uploadStream);
  });
});

rename(id, filename, callback)

Renames the file with the given _id to the given string

Name      Type                        Description
id        ObjectId                    The id of the file to rename
filename  String                      New name for the file
callback  GridFSBucket~errorCallback  Optional error callback
Example
// Rename a file

var MongoClient = require('mongodb').MongoClient,
  GridFSBucket = require('mongodb').GridFSBucket,
  fs = require('fs'),
  test = require('assert');
MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
  var bucket = new GridFSBucket(db, { bucketName: 'gridfsdownload_3' });
  var CHUNKS_COLL = 'gridfsdownload_3.chunks';
  var FILES_COLL = 'gridfsdownload_3.files';
  var readStream = fs.createReadStream('./LICENSE');

  var uploadStream = bucket.openUploadStream('test.dat');

  var license = fs.readFileSync('./LICENSE');
  var id = uploadStream.id;

  uploadStream.once('finish', function() {

    // Rename the file
    bucket.rename(id, 'renamed_it.dat', function(err) {
      test.equal(null, err);
    });
  });

  readStream.pipe(uploadStream);
});

Type Definitions

errorCallback(error)

Callback format for all GridFSBucket methods that can accept a callback.

Name   Type        Description
error  MongoError  An error instance representing any errors that occurred
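
For example, a sketch of the shape every such callback takes (using the rename method from above):

bucket.rename(id, 'renamed_it.dat', function(error) {
  // error is a MongoError on failure and null/undefined on success
  if (error) {
    console.error('rename failed:', error.message);
  }
});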

Events

index

When the first call to openUploadStream is made, the upload stream will
check to see if it needs to create the proper indexes on the chunks and
files collections. This event is fired either when (1) it determines that
no index creation is necessary, or (2) it has successfully created the
necessary indexes.

Type:
  • Error
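
A sketch (not from the original docs) of listening for this event on an upload stream, assuming a bucket constructed as in the examples above:

var uploadStream = bucket.openUploadStream('test.dat');

uploadStream.once('index', function() {
  // Fired once the driver has verified or created the GridFS indexes
  console.log('GridFS indexes are in place for this bucket');
});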