Merge branch 'release-3.0' into release-3.0-tests-accounts-password

# Conflicts:
#	packages/mongo-async/oplog_observe_driver.js
This commit is contained in:
denihs
2022-12-22 14:17:09 -04:00
51 changed files with 3480 additions and 14083 deletions

View File

@@ -5,7 +5,7 @@
#### Breaking Changes
* `email`:
`Email.send` is no longer available. Use `Email.sendAsync` instead.
- `Email.send` is no longer available. Use `Email.sendAsync` instead.
* `accounts-password`:
- `Accounts.sendResetPasswordEmail` is now async
@@ -14,6 +14,9 @@
* `accounts-passwordless`:
- `Accounts.sendLoginTokenEmail` is now async
* `boilerplate-generator`:
- `toHTML` is no longer available (it was already deprecated). Use `toHTMLStream` instead.
#### Internal API changes

View File

@@ -18,8 +18,6 @@ function appendToStream(chunk, stream) {
}
}
let shouldWarnAboutToHTMLDeprecation = ! Meteor.isProduction;
export class Boilerplate {
constructor(arch, manifest, options = {}) {
const { headTemplate, closeTemplate } = getTemplate(arch);
@@ -34,17 +32,10 @@ export class Boilerplate {
}
toHTML(extraData) {
if (shouldWarnAboutToHTMLDeprecation) {
shouldWarnAboutToHTMLDeprecation = false;
console.error(
"The Boilerplate#toHTML method has been deprecated. " +
"Please use Boilerplate#toHTMLStream instead."
);
console.trace();
}
// Calling .await() requires a Fiber.
return this.toHTMLAsync(extraData).await();
throw new Error(
"The Boilerplate#toHTML method has been removed. " +
"Please use Boilerplate#toHTMLStream instead."
);
}
// Returns a Promise that resolves to a string of HTML.

View File

@@ -1,6 +1,6 @@
Package.describe({
summary: "Generates the boilerplate html from program's manifest",
version: '1.7.1'
version: '1.8.0'
});
Npm.depends({

View File

@@ -1,23 +1,22 @@
Tinytest.add('minifier-js - verify how terser handles an empty string', (test) => {
let result = meteorJsMinify('');
Tinytest.addAsync('minifier-js - verify how terser handles an empty string', async (test) => {
let result = await meteorJsMinify('');
test.equal(result.code, '');
test.equal(result.minifier, 'terser');
});
Tinytest.add('minifier-js - verify terser is able to minify valid javascript', (test) => {
let result = meteorJsMinify('function add(first,second){return first + second; }\n');
Tinytest.addAsync('minifier-js - verify terser is able to minify valid javascript', async (test) => {
let result = await meteorJsMinify('function add(first,second){return first + second; }\n');
test.equal(result.code, 'function add(n,d){return n+d}');
test.equal(result.minifier, 'terser');
});
Tinytest.add('minifier-js - verify error handling is done as expected', (test) => {
test.throws( () => meteorJsMinify('let name = {;\n'), undefined );
Tinytest.addAsync('minifier-js - verify error handling is done as expected', async (test) => {
await test.throwsAsync( async () => await meteorJsMinify('let name = {;\n'), undefined );
});
Tinytest.add('minifier-js - verify tersers error object has the fields we use for reporting errors to users', (test) => {
let result;
Tinytest.addAsync('minifier-js - verify tersers error object has the fields we use for reporting errors to users', async (test) => {
try {
result = meteorJsMinify('let name = {;\n');
await meteorJsMinify('let name = {;\n');
}
catch (err) {
test.isNotUndefined(err.name);

View File

@@ -1,18 +1,11 @@
let terser;
const terserMinify = async (source, options, callback) => {
const terserMinify = async (source, options) => {
terser = terser || Npm.require("terser");
try {
const result = await terser.minify(source, options);
callback(null, result);
return result;
} catch (e) {
callback(e);
return e;
}
return await terser.minify(source, options);
};
export const meteorJsMinify = function (source) {
export const meteorJsMinify = async function (source) {
const result = {};
const NODE_ENV = process.env.NODE_ENV || "development";
@@ -33,13 +26,7 @@ export const meteorJsMinify = function (source) {
safari10: true, // set this option to true to work around the Safari 10/11 await bug
};
const terserJsMinify = Meteor.wrapAsync(terserMinify);
let terserResult;
try {
terserResult = terserJsMinify(source, options);
} catch (e) {
throw e;
}
const terserResult = await terserMinify(source, options);
// this is kept to maintain backwards compatibility
result.code = terserResult.code;

View File

@@ -1 +0,0 @@
.build*

View File

@@ -1 +0,0 @@
node_modules

View File

@@ -1,7 +0,0 @@
This directory and the files immediately inside it are automatically generated
when you change this package's NPM dependencies. Commit the files in this
directory (npm-shrinkwrap.json, .gitignore, and this README) to source control
so that others run the same versions of sub-dependencies.
You should NOT check in the node_modules directory that Meteor automatically
creates; if you are using git, the .gitignore file tells git to ignore it.

View File

@@ -1,10 +0,0 @@
{
"lockfileVersion": 1,
"dependencies": {
"mongodb-uri": {
"version": "0.9.7",
"resolved": "https://registry.npmjs.org/mongodb-uri/-/mongodb-uri-0.9.7.tgz",
"integrity": "sha1-D3ca0W9IOuZfQoeWlCjp+8SqYYE="
}
}
}

View File

@@ -1,37 +0,0 @@
# mongo
[Source code of released version](https://github.com/meteor/meteor/tree/master/packages/mongo) | [Source code of development version](https://github.com/meteor/meteor/tree/devel/packages/mongo)
***
The `mongo` package is a [full stack database
driver](https://www.meteor.com/full-stack-db-drivers) that provides
several paramount pieces of functionality to work with MongoDB in
Meteor:
- an efficient [Livequery][livequery] implementation providing real-time
updates from the database by consuming the MongoDB replication log
- a fall-back Livequery implementation for cases when the replication log is not
available, implemented by polling the database
- DDP RPC end-points for updating the data from clients connected over the wire
- Serialization and deserialization of updates to the DDP format
To learn more about Livequery, see the [project page on
www.meteor.com][livequery].
[livequery]: https://www.meteor.com/livequery
## Direct access to npm mongodb API
On the server, the `mongo` package is implemented using the
[npm `mongodb` module](https://www.npmjs.com/package/mongodb). If you'd like
direct access to this module, you can find it at
`MongoInternals.NpmModules.mongodb.module`. Its version can be read at
`MongoInternals.NpmModules.mongodb.version`.
Additionally, you can call `c.rawCollection()` or `c.rawDatabase()` on any
`Mongo.Collection` to get the object from the npm `mongodb` module corresponding
to the collection or database. This is documented at
http://mongodb.github.io/node-mongodb-native/
The version of `mongo` used may change incompatibly from version to version of
Meteor (or we may even replace it with an entirely different implementation);
use at your own risk.

View File

@@ -1,899 +0,0 @@
// Server side of the allow/deny test fixture: publishes a per-test-run set of
// collections (keyed by a client-supplied nonce) and wires up their
// allow/deny validators exactly once.
if (Meteor.isServer) {
  // Set up allow/deny rules for test collections
  var allowCollections = {};
  // We create the collections in the publisher (instead of using a method or
  // something) because if we made them with a method, we'd need to follow the
  // method with some subscribes, and it's possible that the method call would
  // be delayed by a wait method and the subscribe messages would be sent before
  // it and fail due to the collection not yet existing. So we are very hacky
  // and use a publish.
  Meteor.publish("allowTests", function (nonce, idGeneration) {
    check(nonce, String);
    check(idGeneration, String);
    var cursors = [];
    // Tri-state flag: undefined until the first defineCollection call, then
    // true ("this run created the collections, configure them") or false
    // ("all cached, skip configuration"). Mixed states indicate a bug.
    var needToConfigure;
    // helper for defining a collection. we are careful to create just one
    // Mongo.Collection even if the sub body is rerun, by caching them.
    var defineCollection = function(name, insecure, transform) {
      var fullName = name + idGeneration + nonce;
      var collection;
      if (_.has(allowCollections, fullName)) {
        collection = allowCollections[fullName];
        // A cached hit after a fresh creation in the same run is inconsistent.
        if (needToConfigure === true)
          throw new Error("collections inconsistently exist");
        needToConfigure = false;
      } else {
        collection = new Mongo.Collection(
          fullName, {idGeneration: idGeneration, transform: transform});
        allowCollections[fullName] = collection;
        if (needToConfigure === false)
          throw new Error("collections inconsistently don't exist");
        needToConfigure = true;
        collection._insecure = insecure;
        // Register a per-collection method the client uses to reset state
        // between test steps.
        var m = {};
        m["clear-collection-" + fullName] = function() {
          collection.remove({});
        };
        Meteor.methods(m);
      }
      cursors.push(collection.find());
      return collection;
    };
    // totally insecure collection
    var insecureCollection = defineCollection(
      "collection-insecure", true /*insecure*/);
    // totally locked down collection
    var lockedDownCollection = defineCollection(
      "collection-locked-down", false /*insecure*/);
    // restricted collection with same allowed modifications, both with and
    // without the `insecure` package
    var restrictedCollectionDefaultSecure = defineCollection(
      "collection-restrictedDefaultSecure", false /*insecure*/);
    var restrictedCollectionDefaultInsecure = defineCollection(
      "collection-restrictedDefaultInsecure", true /*insecure*/);
    var restrictedCollectionForUpdateOptionsTest = defineCollection(
      "collection-restrictedForUpdateOptionsTest", true /*insecure*/);
    var restrictedCollectionForPartialAllowTest = defineCollection(
      "collection-restrictedForPartialAllowTest", true /*insecure*/);
    var restrictedCollectionForPartialDenyTest = defineCollection(
      "collection-restrictedForPartialDenyTest", true /*insecure*/);
    var restrictedCollectionForFetchTest = defineCollection(
      "collection-restrictedForFetchTest", true /*insecure*/);
    var restrictedCollectionForFetchAllTest = defineCollection(
      "collection-restrictedForFetchAllTest", true /*insecure*/);
    // Transform exposes only the nested 'a' sub-document to validators by
    // default (unless a validator passes transform: null).
    var restrictedCollectionWithTransform = defineCollection(
      "withTransform", false, function (doc) {
        return doc.a;
      });
    var restrictedCollectionForInvalidTransformTest = defineCollection(
      "collection-restrictedForInvalidTransform", false /*insecure*/);
    var restrictedCollectionForClientIdTest = defineCollection(
      "collection-restrictedForClientIdTest", false /*insecure*/);
    // Only register validators on the run that actually created the
    // collections; registering twice would stack duplicate validators.
    if (needToConfigure) {
      restrictedCollectionWithTransform.allow({
        insert: function (userId, doc) {
          return doc.foo === "foo";
        },
        update: function (userId, doc) {
          return doc.foo === "foo";
        },
        remove: function (userId, doc) {
          return doc.bar === "bar";
        }
      });
      restrictedCollectionWithTransform.allow({
        // transform: null means that doc here is the top level, not the 'a'
        // element.
        transform: null,
        insert: function (userId, doc) {
          return !!doc.topLevelField;
        },
        update: function (userId, doc) {
          return !!doc.topLevelField;
        }
      });
      restrictedCollectionForInvalidTransformTest.allow({
        // transform must return an object which is not a mongo id
        transform: function (doc) { return doc._id; },
        insert: function () { return true; }
      });
      restrictedCollectionForClientIdTest.allow({
        // This test just requires the collection to trigger the restricted
        // case.
        insert: function () { return true; }
      });
      // two calls to allow to verify that either validator is sufficient.
      var allows = [{
        insert: function(userId, doc) {
          return doc.canInsert;
        },
        update: function(userId, doc) {
          return doc.canUpdate;
        },
        remove: function (userId, doc) {
          return doc.canRemove;
        }
      }, {
        insert: function(userId, doc) {
          return doc.canInsert2;
        },
        update: function(userId, doc, fields, modifier) {
          return -1 !== _.indexOf(fields, 'canUpdate2');
        },
        remove: function(userId, doc) {
          return doc.canRemove2;
        }
      }];
      // two calls to deny to verify that either one blocks the change.
      var denies = [{
        insert: function(userId, doc) {
          return doc.cantInsert;
        },
        remove: function (userId, doc) {
          return doc.cantRemove;
        }
      }, {
        insert: function(userId, doc) {
          // Don't allow explicit ID to be set by the client.
          return _.has(doc, '_id');
        },
        update: function(userId, doc, fields, modifier) {
          return -1 !== _.indexOf(fields, 'verySecret');
        }
      }];
      _.each([
        restrictedCollectionDefaultSecure,
        restrictedCollectionDefaultInsecure,
        restrictedCollectionForUpdateOptionsTest
      ], function (collection) {
        _.each(allows, function (allow) {
          collection.allow(allow);
        });
        _.each(denies, function (deny) {
          collection.deny(deny);
        });
      });
      // just restrict one operation so that we can verify that others
      // fail
      restrictedCollectionForPartialAllowTest.allow({
        insert: function() {}
      });
      restrictedCollectionForPartialDenyTest.deny({
        insert: function() {}
      });
      // verify that we only fetch the fields specified - we should
      // be fetching just field1, field2, and field3.
      restrictedCollectionForFetchTest.allow({
        insert: function() { return true; },
        update: function(userId, doc) {
          // throw fields in doc so that we can inspect them in test
          throw new Meteor.Error(
            999, "Test: Fields in doc: " + _.keys(doc).sort().join(','));
        },
        remove: function(userId, doc) {
          // throw fields in doc so that we can inspect them in test
          throw new Meteor.Error(
            999, "Test: Fields in doc: " + _.keys(doc).sort().join(','));
        },
        fetch: ['field1']
      });
      restrictedCollectionForFetchTest.allow({
        fetch: ['field2']
      });
      restrictedCollectionForFetchTest.deny({
        fetch: ['field3']
      });
      // verify that not passing fetch to one of the calls to allow
      // causes all fields to be fetched
      restrictedCollectionForFetchAllTest.allow({
        insert: function() { return true; },
        update: function(userId, doc) {
          // throw fields in doc so that we can inspect them in test
          throw new Meteor.Error(
            999, "Test: Fields in doc: " + _.keys(doc).sort().join(','));
        },
        remove: function(userId, doc) {
          // throw fields in doc so that we can inspect them in test
          throw new Meteor.Error(
            999, "Test: Fields in doc: " + _.keys(doc).sort().join(','));
        },
        fetch: ['field1']
      });
      restrictedCollectionForFetchAllTest.allow({
        update: function() { return true; }
      });
    }
    return cursors;
  });
}
if (Meteor.isClient) {
_.each(['STRING', 'MONGO'], function (idGeneration) {
// Set up a bunch of test collections... on the client! They match the ones
// created by setUpAllowTestsCollections.
var nonce = Random.id();
// Tell the server to make, configure, and publish a set of collections unique
// to our test run. Since the method does not unblock, this will complete
// running on the server before anything else happens.
Meteor.subscribe('allowTests', nonce, idGeneration);
// helper for defining a collection, subscribing to it, and defining
// a method to clear it
var defineCollection = function(name, transform) {
var fullName = name + idGeneration + nonce;
var collection = new Mongo.Collection(
fullName, {idGeneration: idGeneration, transform: transform});
collection.callClearMethod = function (callback) {
Meteor.call("clear-collection-" + fullName, callback);
};
collection.unnoncedName = name + idGeneration;
return collection;
};
// totally insecure collection
var insecureCollection = defineCollection("collection-insecure");
// totally locked down collection
var lockedDownCollection = defineCollection("collection-locked-down");
// restricted collection with same allowed modifications, both with and
// without the `insecure` package
var restrictedCollectionDefaultSecure = defineCollection(
"collection-restrictedDefaultSecure");
var restrictedCollectionDefaultInsecure = defineCollection(
"collection-restrictedDefaultInsecure");
var restrictedCollectionForUpdateOptionsTest = defineCollection(
"collection-restrictedForUpdateOptionsTest");
var restrictedCollectionForPartialAllowTest = defineCollection(
"collection-restrictedForPartialAllowTest");
var restrictedCollectionForPartialDenyTest = defineCollection(
"collection-restrictedForPartialDenyTest");
var restrictedCollectionForFetchTest = defineCollection(
"collection-restrictedForFetchTest");
var restrictedCollectionForFetchAllTest = defineCollection(
"collection-restrictedForFetchAllTest");
var restrictedCollectionWithTransform = defineCollection(
"withTransform", function (doc) {
return doc.a;
});
var restrictedCollectionForInvalidTransformTest = defineCollection(
"collection-restrictedForInvalidTransform");
var restrictedCollectionForClientIdTest = defineCollection(
"collection-restrictedForClientIdTest");
// test that if allow is called once then the collection is
// restricted, and that other mutations aren't allowed
testAsyncMulti("collection - partial allow, " + idGeneration, [
function (test, expect) {
restrictedCollectionForPartialAllowTest.update(
'foo', {$set: {updated: true}}, expect(function (err, res) {
test.equal(err.error, 403);
}));
}
]);
// test that if deny is called once then the collection is
// restricted, and that other mutations aren't allowed
testAsyncMulti("collection - partial deny, " + idGeneration, [
function (test, expect) {
restrictedCollectionForPartialDenyTest.update(
'foo', {$set: {updated: true}}, expect(function (err, res) {
test.equal(err.error, 403);
}));
}
]);
// test that we only fetch the fields specified
testAsyncMulti("collection - fetch, " + idGeneration, [
function (test, expect) {
var fetchId = restrictedCollectionForFetchTest.insert(
{field1: 1, field2: 1, field3: 1, field4: 1});
var fetchAllId = restrictedCollectionForFetchAllTest.insert(
{field1: 1, field2: 1, field3: 1, field4: 1});
restrictedCollectionForFetchTest.update(
fetchId, {$set: {updated: true}}, expect(function (err, res) {
test.equal(err.reason,
"Test: Fields in doc: _id,field1,field2,field3");
}));
restrictedCollectionForFetchTest.remove(
fetchId, expect(function (err, res) {
test.equal(err.reason,
"Test: Fields in doc: _id,field1,field2,field3");
}));
restrictedCollectionForFetchAllTest.update(
fetchAllId, {$set: {updated: true}}, expect(function (err, res) {
test.equal(err.reason,
"Test: Fields in doc: _id,field1,field2,field3,field4");
}));
restrictedCollectionForFetchAllTest.remove(
fetchAllId, expect(function (err, res) {
test.equal(err.reason,
"Test: Fields in doc: _id,field1,field2,field3,field4");
}));
}
]);
(function(){
testAsyncMulti("collection - restricted factories " + idGeneration, [
function (test, expect) {
restrictedCollectionWithTransform.callClearMethod(expect(function () {
test.equal(restrictedCollectionWithTransform.find().count(), 0);
}));
},
function (test, expect) {
var self = this;
restrictedCollectionWithTransform.insert({
a: {foo: "foo", bar: "bar", baz: "baz"}
}, expect(function (e, res) {
test.isFalse(e);
test.isTrue(res);
self.item1 = res;
}));
restrictedCollectionWithTransform.insert({
a: {foo: "foo", bar: "quux", baz: "quux"},
b: "potato"
}, expect(function (e, res) {
test.isFalse(e);
test.isTrue(res);
self.item2 = res;
}));
restrictedCollectionWithTransform.insert({
a: {foo: "adsfadf", bar: "quux", baz: "quux"},
b: "potato"
}, expect(function (e, res) {
test.isTrue(e);
}));
restrictedCollectionWithTransform.insert({
a: {foo: "bar"},
topLevelField: true
}, expect(function (e, res) {
test.isFalse(e);
test.isTrue(res);
self.item3 = res;
}));
},
function (test, expect) {
var self = this;
// This should work, because there is an update allow for things with
// topLevelField.
restrictedCollectionWithTransform.update(
self.item3, { $set: { xxx: true } }, expect(function (e, res) {
test.isFalse(e);
test.equal(1, res);
}));
},
function (test, expect) {
var self = this;
test.equal(
restrictedCollectionWithTransform.findOne(self.item1),
{_id: self.item1, foo: "foo", bar: "bar", baz: "baz"});
restrictedCollectionWithTransform.remove(
self.item1, expect(function (e, res) {
test.isFalse(e);
}));
restrictedCollectionWithTransform.remove(
self.item2, expect(function (e, res) {
test.isTrue(e);
}));
}
]);
})();
testAsyncMulti("collection - insecure, " + idGeneration, [
function (test, expect) {
insecureCollection.callClearMethod(expect(function () {
test.equal(insecureCollection.find().count(), 0);
}));
},
function (test, expect) {
var id = insecureCollection.insert({foo: 'bar'}, expect(function(err, res) {
test.equal(res, id);
test.equal(insecureCollection.find(id).count(), 1);
test.equal(insecureCollection.findOne(id).foo, 'bar');
}));
test.equal(insecureCollection.find(id).count(), 1);
test.equal(insecureCollection.findOne(id).foo, 'bar');
}
]);
testAsyncMulti("collection - locked down, " + idGeneration, [
function (test, expect) {
lockedDownCollection.callClearMethod(expect(function() {
test.equal(lockedDownCollection.find().count(), 0);
}));
},
function (test, expect) {
lockedDownCollection.insert({foo: 'bar'}, expect(function (err, res) {
test.equal(err.error, 403);
test.equal(lockedDownCollection.find().count(), 0);
}));
}
]);
(function () {
var collection = restrictedCollectionForUpdateOptionsTest;
var id1, id2;
testAsyncMulti("collection - update options, " + idGeneration, [
// init
function (test, expect) {
collection.callClearMethod(expect(function () {
test.equal(collection.find().count(), 0);
}));
},
// put a few objects
function (test, expect) {
var doc = {canInsert: true, canUpdate: true};
id1 = collection.insert(doc);
id2 = collection.insert(doc);
collection.insert(doc);
collection.insert(doc, expect(function (err, res) {
test.isFalse(err);
test.equal(collection.find().count(), 4);
}));
},
// update by id
function (test, expect) {
collection.update(
id1,
{$set: {updated: true}},
expect(function (err, res) {
test.isFalse(err);
test.equal(res, 1);
test.equal(collection.find({updated: true}).count(), 1);
}));
},
// update by id in an object
function (test, expect) {
collection.update(
{_id: id2},
{$set: {updated: true}},
expect(function (err, res) {
test.isFalse(err);
test.equal(res, 1);
test.equal(collection.find({updated: true}).count(), 2);
}));
},
// update with replacement operator not allowed, and has nice error.
function (test, expect) {
collection.update(
{_id: id2},
{_id: id2, updated: true},
expect(function (err, res) {
test.equal(err.error, 403);
test.matches(err.reason, /In a restricted/);
// unchanged
test.equal(collection.find({updated: true}).count(), 2);
}));
},
// upsert not allowed, and has nice error.
function (test, expect) {
collection.update(
{_id: id2},
{$set: { upserted: true }},
{ upsert: true },
expect(function (err, res) {
test.equal(err.error, 403);
test.matches(err.reason, /in a restricted/);
test.equal(collection.find({ upserted: true }).count(), 0);
}));
},
// update with rename operator not allowed, and has nice error.
function (test, expect) {
collection.update(
{_id: id2},
{$rename: {updated: 'asdf'}},
expect(function (err, res) {
test.equal(err.error, 403);
test.matches(err.reason, /not allowed/);
// unchanged
test.equal(collection.find({updated: true}).count(), 2);
}));
},
// update method with a non-ID selector is not allowed
function (test, expect) {
// We shouldn't even send the method...
test.throws(function () {
collection.update(
{updated: {$exists: false}},
{$set: {updated: true}});
});
// ... but if we did, the server would reject it too.
Meteor.call(
'/' + collection._name + '/update',
{updated: {$exists: false}},
{$set: {updated: true}},
expect(function (err, res) {
test.equal(err.error, 403);
// unchanged
test.equal(collection.find({updated: true}).count(), 2);
}));
},
// make sure it doesn't think that {_id: 'foo', something: else} is ok.
function (test, expect) {
test.throws(function () {
collection.update(
{_id: id1, updated: {$exists: false}},
{$set: {updated: true}});
});
},
// remove method with a non-ID selector is not allowed
function (test, expect) {
// We shouldn't even send the method...
test.throws(function () {
collection.remove({updated: true});
});
// ... but if we did, the server would reject it too.
Meteor.call(
'/' + collection._name + '/remove',
{updated: true},
expect(function (err, res) {
test.equal(err.error, 403);
// unchanged
test.equal(collection.find({updated: true}).count(), 2);
}));
}
]);
}) ();
_.each(
[restrictedCollectionDefaultInsecure, restrictedCollectionDefaultSecure],
function(collection) {
var canUpdateId, canRemoveId;
testAsyncMulti("collection - " + collection.unnoncedName, [
// init
function (test, expect) {
collection.callClearMethod(expect(function () {
test.equal(collection.find().count(), 0);
}));
},
// insert with no allows passing. request is denied.
function (test, expect) {
collection.insert(
{},
expect(function (err, res) {
test.equal(err.error, 403);
test.equal(collection.find().count(), 0);
}));
},
// insert with one allow and one deny. denied.
function (test, expect) {
collection.insert(
{canInsert: true, cantInsert: true},
expect(function (err, res) {
test.equal(err.error, 403);
test.equal(collection.find().count(), 0);
}));
},
// insert with one allow and other deny. denied.
function (test, expect) {
collection.insert(
{canInsert: true, _id: Random.id()},
expect(function (err, res) {
test.equal(err.error, 403);
test.equal(collection.find().count(), 0);
}));
},
// insert one allow passes. allowed.
function (test, expect) {
collection.insert(
{canInsert: true},
expect(function (err, res) {
test.isFalse(err);
test.equal(collection.find().count(), 1);
}));
},
// insert other allow passes. allowed.
// includes canUpdate for later.
function (test, expect) {
canUpdateId = collection.insert(
{canInsert2: true, canUpdate: true},
expect(function (err, res) {
test.isFalse(err);
test.equal(collection.find().count(), 2);
}));
},
// yet a third insert executes. this one has canRemove and
// cantRemove set for later.
function (test, expect) {
canRemoveId = collection.insert(
{canInsert: true, canRemove: true, cantRemove: true},
expect(function (err, res) {
test.isFalse(err);
test.equal(collection.find().count(), 3);
}));
},
// can't update with a non-operator mutation
function (test, expect) {
collection.update(
canUpdateId, {newObject: 1},
expect(function (err, res) {
test.equal(err.error, 403);
test.equal(collection.find().count(), 3);
}));
},
// updating dotted fields works as if we are changing their
// top part
function (test, expect) {
collection.update(
canUpdateId, {$set: {"dotted.field": 1}},
expect(function (err, res) {
test.isFalse(err);
test.equal(res, 1);
test.equal(collection.findOne(canUpdateId).dotted.field, 1);
}));
},
function (test, expect) {
collection.update(
canUpdateId, {$set: {"verySecret.field": 1}},
expect(function (err, res) {
test.equal(err.error, 403);
test.equal(collection.find({verySecret: {$exists: true}}).count(), 0);
}));
},
// update doesn't do anything if no docs match
function (test, expect) {
collection.update(
"doesn't exist",
{$set: {updated: true}},
expect(function (err, res) {
test.isFalse(err);
test.equal(res, 0);
// nothing has changed
test.equal(collection.find().count(), 3);
test.equal(collection.find({updated: true}).count(), 0);
}));
},
// update fails when access is denied trying to set `verySecret`
function (test, expect) {
collection.update(
canUpdateId, {$set: {verySecret: true}},
expect(function (err, res) {
test.equal(err.error, 403);
// nothing has changed
test.equal(collection.find().count(), 3);
test.equal(collection.find({updated: true}).count(), 0);
}));
},
// update fails when trying to set two fields, one of which is
// `verySecret`
function (test, expect) {
collection.update(
canUpdateId, {$set: {updated: true, verySecret: true}},
expect(function (err, res) {
test.equal(err.error, 403);
// nothing has changed
test.equal(collection.find().count(), 3);
test.equal(collection.find({updated: true}).count(), 0);
}));
},
// update fails when trying to modify docs that don't
// have `canUpdate` set
function (test, expect) {
collection.update(
canRemoveId,
{$set: {updated: true}},
expect(function (err, res) {
test.equal(err.error, 403);
// nothing has changed
test.equal(collection.find().count(), 3);
test.equal(collection.find({updated: true}).count(), 0);
}));
},
// update executes when it should
function (test, expect) {
collection.update(
canUpdateId,
{$set: {updated: true}},
expect(function (err, res) {
test.isFalse(err);
test.equal(res, 1);
test.equal(collection.find({updated: true}).count(), 1);
}));
},
// remove fails when trying to modify a doc with no `canRemove` set
function (test, expect) {
collection.remove(canUpdateId,
expect(function (err, res) {
test.equal(err.error, 403);
// nothing has changed
test.equal(collection.find().count(), 3);
}));
},
// remove fails when trying to modify a doc with `cantRemove`
// set
function (test, expect) {
collection.remove(canRemoveId,
expect(function (err, res) {
test.equal(err.error, 403);
// nothing has changed
test.equal(collection.find().count(), 3);
}));
},
// update the doc to remove cantRemove.
function (test, expect) {
collection.update(
canRemoveId,
{$set: {cantRemove: false, canUpdate2: true}},
expect(function (err, res) {
test.isFalse(err);
test.equal(res, 1);
test.equal(collection.find({cantRemove: true}).count(), 0);
}));
},
// now remove can remove it.
function (test, expect) {
collection.remove(canRemoveId,
expect(function (err, res) {
test.isFalse(err);
test.equal(res, 1);
// successfully removed
test.equal(collection.find().count(), 2);
}));
},
// try to remove a doc that doesn't exist. see we remove no docs.
function (test, expect) {
collection.remove('some-random-id-that-never-matches',
expect(function (err, res) {
test.isFalse(err);
test.equal(res, 0);
// nothing removed
test.equal(collection.find().count(), 2);
}));
},
// methods can still bypass restrictions
function (test, expect) {
collection.callClearMethod(
expect(function (err, res) {
test.isFalse(err);
// successfully removed
test.equal(collection.find().count(), 0);
}));
}
]);
});
testAsyncMulti(
"collection - allow/deny transform must return object, " + idGeneration,
[function (test, expect) {
restrictedCollectionForInvalidTransformTest.insert({}, expect(function (err, res) {
test.isTrue(err);
}));
}]);
testAsyncMulti(
"collection - restricted collection allows client-side id, " + idGeneration,
[function (test, expect) {
var self = this;
self.id = Random.id();
restrictedCollectionForClientIdTest.insert({_id: self.id}, expect(function (err, res) {
test.isFalse(err);
test.equal(res, self.id);
test.equal(restrictedCollectionForClientIdTest.findOne(self.id),
{_id: self.id});
}));
}]);
}); // end idGeneration loop
} // end if isClient
// A few simple server-only tests which don't need to coordinate collections
// with the client..
// A few simple server-only tests for Mongo.Collection allow/deny option
// validation and the insecure-package interaction; these need no
// client-side coordination.
if (Meteor.isServer) {
  // allow()/deny() must reject malformed option objects outright.
  Tinytest.add("collection - allow and deny validate options", function (test) {
    // Unnamed (null) collection: local and unmanaged, no server round-trips.
    var collection = new Mongo.Collection(null);
    // Unknown option keys are rejected.
    test.throws(function () {
      collection.allow({invalidOption: true});
    });
    test.throws(function () {
      collection.deny({invalidOption: true});
    });
    // A bare `true` is not a valid validator for any known key.
    _.each(['insert', 'update', 'remove', 'fetch'], function (key) {
      var options = {};
      options[key] = true;
      test.throws(function () {
        collection.allow(options);
      });
      test.throws(function () {
        collection.deny(options);
      });
    });
    // Neither is `false`...
    _.each(['insert', 'update', 'remove'], function (key) {
      var options = {};
      options[key] = false;
      test.throws(function () {
        collection.allow(options);
      });
      test.throws(function () {
        collection.deny(options);
      });
    });
    // ...nor an explicit `undefined`.
    _.each(['insert', 'update', 'remove'], function (key) {
      var options = {};
      options[key] = undefined;
      test.throws(function () {
        collection.allow(options);
      });
      test.throws(function () {
        collection.deny(options);
      });
    });
    _.each(['insert', 'update', 'remove'], function (key) {
      var options = {};
      options[key] = ['an array']; // this should be a function, not an array
      test.throws(function () {
        collection.allow(options);
      });
      test.throws(function () {
        collection.deny(options);
      });
    });
    test.throws(function () {
      collection.allow({fetch: function () {}}); // this should be an array
    });
  });
  // Any call to allow() flips the collection into restricted mode.
  Tinytest.add("collection - calling allow restricts", function (test) {
    var collection = new Mongo.Collection(null);
    test.equal(collection._restricted, false);
    collection.allow({
      insert: function() {}
    });
    test.equal(collection._restricted, true);
  });
  // _isInsecure() should track the global `insecure` package and honor the
  // per-collection `_insecure` override.
  Tinytest.add("collection - global insecure", function (test) {
    // note: This test alters the global insecure status, by sneakily hacking
    // the global Package object!
    var insecurePackage = Package.insecure;
    Package.insecure = {};
    var collection = new Mongo.Collection(null);
    test.equal(collection._isInsecure(), true);
    Package.insecure = undefined;
    test.equal(collection._isInsecure(), false);
    delete Package.insecure;
    test.equal(collection._isInsecure(), false);
    // The per-collection flag wins even with no insecure package present.
    collection._insecure = true;
    test.equal(collection._isInsecure(), true);
    // Restore the original global so other tests see an unmodified Package.
    if (insecurePackage)
      Package.insecure = insecurePackage;
    else
      delete Package.insecure;
  });
}

View File

@@ -1,923 +0,0 @@
// options.connection, if given, is a LivedataClient or LivedataServer
// XXX presently there is no way to destroy/clean up a Collection
import {
ASYNC_COLLECTION_METHODS,
getAsyncMethodName
} from "meteor/minimongo/constants";
import { normalizeProjection } from "./mongo_utils";
/**
 * @summary Namespace for MongoDB-related items
 * @namespace
 */
// Deliberately assigned without `var`/`const`: Meteor's package scoping
// turns this into the package's exported `Mongo` symbol.
// (A leftover debug `console.log('Using package: mongo-async')` that ran on
// every load has been removed.)
Mongo = {};
/**
* @summary Constructor for a Collection
* @locus Anywhere
* @instancename collection
* @class
* @param {String} name The name of the collection. If null, creates an unmanaged (unsynchronized) local collection.
* @param {Object} [options]
* @param {Object} options.connection The server connection that will manage this collection. Uses the default connection if not specified. Pass the return value of calling [`DDP.connect`](#ddp_connect) to specify a different server. Pass `null` to specify no connection. Unmanaged (`name` is null) collections cannot specify a connection.
* @param {String} options.idGeneration The method of generating the `_id` fields of new documents in this collection. Possible values:
- **`'STRING'`**: random strings
- **`'MONGO'`**: random [`Mongo.ObjectID`](#mongo_object_id) values
The default id generation technique is `'STRING'`.
* @param {Function} options.transform An optional transformation function. Documents will be passed through this function before being returned from `fetch` or `findOne`, and before being passed to callbacks of `observe`, `map`, `forEach`, `allow`, and `deny`. Transforms are *not* applied for the callbacks of `observeChanges` or to cursors returned from publish functions.
* @param {Boolean} options.defineMutationMethods Set to `false` to skip setting up the mutation methods that enable insert/update/remove from client code. Default `true`.
*/
Mongo.Collection = function Collection(name, options) {
  // A missing name (other than an explicit null) almost always indicates a
  // programmer error, so warn and fall back to an anonymous local collection.
  if (!name && name !== null) {
    Meteor._debug(
      'Warning: creating anonymous collection. It will not be ' +
        'saved or synchronized over the network. (Pass null for ' +
        'the collection name to turn off this warning.)'
    );
    name = null;
  }
  if (name !== null && typeof name !== 'string') {
    throw new Error(
      'First argument to new Mongo.Collection must be a string or null'
    );
  }
  if (options && options.methods) {
    // Backwards compatibility hack with original signature (which passed
    // "connection" directly instead of in options. (Connections must have a "methods"
    // method.)
    // XXX remove before 1.0
    options = { connection: options };
  }
  // Backwards compatibility: "connection" used to be called "manager".
  if (options && options.manager && !options.connection) {
    options.connection = options.manager;
  }
  // Fill in defaults; caller-supplied options win via the spread.
  options = {
    connection: undefined,
    idGeneration: 'STRING',
    transform: null,
    _driver: undefined,
    _preventAutopublish: false,
    ...options,
  };
  // Pick the _id generator. Named collections draw ids from a DDP random
  // stream keyed by the collection name; unnamed (local) collections use
  // plain insecure randomness.
  switch (options.idGeneration) {
    case 'MONGO':
      this._makeNewID = function() {
        var src = name
          ? DDP.randomStream('/collection/' + name)
          : Random.insecure;
        return new Mongo.ObjectID(src.hexString(24));
      };
      break;
    case 'STRING':
    default:
      this._makeNewID = function() {
        var src = name
          ? DDP.randomStream('/collection/' + name)
          : Random.insecure;
        return src.id();
      };
      break;
  }
  this._transform = LocalCollection.wrapTransform(options.transform);
  if (!name || options.connection === null)
    // note: nameless collections never have a connection
    this._connection = null;
  else if (options.connection) this._connection = options.connection;
  else if (Meteor.isClient) this._connection = Meteor.connection;
  else this._connection = Meteor.server;
  if (!options._driver) {
    // XXX This check assumes that webapp is loaded so that Meteor.server !==
    // null. We should fully support the case of "want to use a Mongo-backed
    // collection from Node code without webapp", but we don't yet.
    // #MeteorServerNull
    if (
      name &&
      this._connection === Meteor.server &&
      typeof MongoInternals !== 'undefined' &&
      MongoInternals.defaultRemoteCollectionDriver
    ) {
      options._driver = MongoInternals.defaultRemoteCollectionDriver();
    } else {
      const { LocalCollectionDriver } = require('./local_collection_driver.js');
      options._driver = LocalCollectionDriver;
    }
  }
  this._collection = options._driver.open(name, this._connection);
  this._name = name;
  this._driver = options._driver;
  this._maybeSetUpReplication(name, options);
  // XXX don't define these until allow or deny is actually used for this
  // collection. Could be hard if the security rules are only defined on the
  // server.
  if (options.defineMutationMethods !== false) {
    try {
      this._defineMutationMethods({
        useExisting: options._suppressSameNameError === true,
      });
    } catch (error) {
      // Throw a more understandable error on the server for same collection name
      if (
        error.message === `A method named '/${name}/insert' is already defined`
      )
        throw new Error(`There is already a collection named "${name}"`);
      throw error;
    }
  }
  // autopublish
  if (
    Package.autopublish &&
    !options._preventAutopublish &&
    this._connection &&
    this._connection.publish
  ) {
    this._connection.publish(null, () => this.find(), {
      is_auto: true,
    });
  }
};
Object.assign(Mongo.Collection.prototype, {
_maybeSetUpReplication(name, { _suppressSameNameError = false }) {
const self = this;
if (!(self._connection && self._connection.registerStore)) {
return;
}
// OK, we're going to be a slave, replicating some remote
// database, except possibly with some temporary divergence while
// we have unacknowledged RPC's.
const ok = self._connection.registerStore(name, {
// Called at the beginning of a batch of updates. batchSize is the number
// of update calls to expect.
//
// XXX This interface is pretty janky. reset probably ought to go back to
// being its own function, and callers shouldn't have to calculate
// batchSize. The optimization of not calling pause/remove should be
// delayed until later: the first call to update() should buffer its
// message, and then we can either directly apply it at endUpdate time if
// it was the only update, or do pauseObservers/apply/apply at the next
// update() if there's another one.
beginUpdate(batchSize, reset) {
// pause observers so users don't see flicker when updating several
// objects at once (including the post-reconnect reset-and-reapply
// stage), and so that a re-sorting of a query can take advantage of the
// full _diffQuery moved calculation instead of applying change one at a
// time.
if (batchSize > 1 || reset) self._collection.pauseObservers();
if (reset) self._collection.remove({});
},
// Apply an update.
// XXX better specify this interface (not in terms of a wire message)?
update(msg) {
var mongoId = MongoID.idParse(msg.id);
var doc = self._collection._docs.get(mongoId);
//When the server's mergebox is disabled for a collection, the client must gracefully handle it when:
// *We receive an added message for a document that is already there. Instead, it will be changed
// *We reeive a change message for a document that is not there. Instead, it will be added
// *We receive a removed messsage for a document that is not there. Instead, noting wil happen.
//Code is derived from client-side code originally in peerlibrary:control-mergebox
//https://github.com/peerlibrary/meteor-control-mergebox/blob/master/client.coffee
//For more information, refer to discussion "Initial support for publication strategies in livedata server":
//https://github.com/meteor/meteor/pull/11151
if (Meteor.isClient) {
if (msg.msg === 'added' && doc) {
msg.msg = 'changed';
} else if (msg.msg === 'removed' && !doc) {
return;
} else if (msg.msg === 'changed' && !doc) {
msg.msg = 'added';
_ref = msg.fields;
for (field in _ref) {
value = _ref[field];
if (value === void 0) {
delete msg.fields[field];
}
}
}
}
// Is this a "replace the whole doc" message coming from the quiescence
// of method writes to an object? (Note that 'undefined' is a valid
// value meaning "remove it".)
if (msg.msg === 'replace') {
var replace = msg.replace;
if (!replace) {
if (doc) self._collection.remove(mongoId);
} else if (!doc) {
self._collection.insert(replace);
} else {
// XXX check that replace has no $ ops
self._collection.update(mongoId, replace);
}
return;
} else if (msg.msg === 'added') {
if (doc) {
throw new Error(
'Expected not to find a document already present for an add'
);
}
self._collection.insert({ _id: mongoId, ...msg.fields });
} else if (msg.msg === 'removed') {
if (!doc)
throw new Error(
'Expected to find a document already present for removed'
);
self._collection.remove(mongoId);
} else if (msg.msg === 'changed') {
if (!doc) throw new Error('Expected to find a document to change');
const keys = Object.keys(msg.fields);
if (keys.length > 0) {
var modifier = {};
keys.forEach(key => {
const value = msg.fields[key];
if (EJSON.equals(doc[key], value)) {
return;
}
if (typeof value === 'undefined') {
if (!modifier.$unset) {
modifier.$unset = {};
}
modifier.$unset[key] = 1;
} else {
if (!modifier.$set) {
modifier.$set = {};
}
modifier.$set[key] = value;
}
});
if (Object.keys(modifier).length > 0) {
self._collection.update(mongoId, modifier);
}
}
} else {
throw new Error("I don't know how to deal with this message");
}
},
// Called at the end of a batch of updates.
endUpdate() {
self._collection.resumeObservers();
},
// Called around method stub invocations to capture the original versions
// of modified documents.
saveOriginals() {
self._collection.saveOriginals();
},
retrieveOriginals() {
return self._collection.retrieveOriginals();
},
// Used to preserve current versions of documents across a store reset.
getDoc(id) {
return self.findOne(id);
},
// To be able to get back to the collection from the store.
_getCollection() {
return self;
},
});
if (!ok) {
const message = `There is already a collection named "${name}"`;
if (_suppressSameNameError === true) {
// XXX In theory we do not have to throw when `ok` is falsy. The
// store is already defined for this collection name, but this
// will simply be another reference to it and everything should
// work. However, we have historically thrown an error here, so
// for now we will skip the error only when _suppressSameNameError
// is `true`, allowing people to opt in and give this some real
// world testing.
console.warn ? console.warn(message) : console.log(message);
} else {
throw new Error(message);
}
}
},
///
/// Main collection API
///
_getFindSelector(args) {
if (args.length == 0) return {};
else return args[0];
},
_getFindOptions(args) {
const [, options] = args || [];
const newOptions = normalizeProjection(options);
var self = this;
if (args.length < 2) {
return { transform: self._transform };
} else {
check(
newOptions,
Match.Optional(
Match.ObjectIncluding({
projection: Match.Optional(Match.OneOf(Object, undefined)),
sort: Match.Optional(
Match.OneOf(Object, Array, Function, undefined)
),
limit: Match.Optional(Match.OneOf(Number, undefined)),
skip: Match.Optional(Match.OneOf(Number, undefined)),
})
)
);
return {
transform: self._transform,
...newOptions,
};
}
},
/**
* @summary Find the documents in a collection that match the selector.
* @locus Anywhere
* @method find
* @memberof Mongo.Collection
* @instance
* @param {MongoSelector} [selector] A query describing the documents to find
* @param {Object} [options]
* @param {MongoSortSpecifier} options.sort Sort order (default: natural order)
* @param {Number} options.skip Number of results to skip at the beginning
* @param {Number} options.limit Maximum number of results to return
* @param {MongoFieldSpecifier} options.fields Dictionary of fields to return or exclude.
* @param {Boolean} options.reactive (Client only) Default `true`; pass `false` to disable reactivity
* @param {Function} options.transform Overrides `transform` on the [`Collection`](#collections) for this cursor. Pass `null` to disable transformation.
* @param {Boolean} options.disableOplog (Server only) Pass true to disable oplog-tailing on this query. This affects the way server processes calls to `observe` on this query. Disabling the oplog can be useful when working with data that updates in large batches.
* @param {Number} options.pollingIntervalMs (Server only) When oplog is disabled (through the use of `disableOplog` or when otherwise not available), the frequency (in milliseconds) of how often to poll this query when observing on the server. Defaults to 10000ms (10 seconds).
* @param {Number} options.pollingThrottleMs (Server only) When oplog is disabled (through the use of `disableOplog` or when otherwise not available), the minimum time (in milliseconds) to allow between re-polling when observing on the server. Increasing this will save CPU and mongo load at the expense of slower updates to users. Decreasing this is not recommended. Defaults to 50ms.
* @param {Number} options.maxTimeMs (Server only) If set, instructs MongoDB to set a time limit for this cursor's operations. If the operation reaches the specified time limit (in milliseconds) without the having been completed, an exception will be thrown. Useful to prevent an (accidental or malicious) unoptimized query from causing a full collection scan that would disrupt other database users, at the expense of needing to handle the resulting error.
* @param {String|Object} options.hint (Server only) Overrides MongoDB's default index selection and query optimization process. Specify an index to force its use, either by its name or index specification. You can also specify `{ $natural : 1 }` to force a forwards collection scan, or `{ $natural : -1 }` for a reverse collection scan. Setting this is only recommended for advanced users.
* @param {String} options.readPreference (Server only) Specifies a custom MongoDB [`readPreference`](https://docs.mongodb.com/manual/core/read-preference) for this particular cursor. Possible values are `primary`, `primaryPreferred`, `secondary`, `secondaryPreferred` and `nearest`.
* @returns {Mongo.Cursor}
*/
find(...args) {
// Collection.find() (return all docs) behaves differently
// from Collection.find(undefined) (return 0 docs). so be
// careful about the length of arguments.
return this._collection.find(
this._getFindSelector(args),
this._getFindOptions(args)
);
},
/**
* @summary Finds the first document that matches the selector, as ordered by sort and skip options. Returns `undefined` if no matching document is found.
* @locus Anywhere
* @method findOne
* @memberof Mongo.Collection
* @instance
* @param {MongoSelector} [selector] A query describing the documents to find
* @param {Object} [options]
* @param {MongoSortSpecifier} options.sort Sort order (default: natural order)
* @param {Number} options.skip Number of results to skip at the beginning
* @param {MongoFieldSpecifier} options.fields Dictionary of fields to return or exclude.
* @param {Boolean} options.reactive (Client only) Default true; pass false to disable reactivity
* @param {Function} options.transform Overrides `transform` on the [`Collection`](#collections) for this cursor. Pass `null` to disable transformation.
* @param {String} options.readPreference (Server only) Specifies a custom MongoDB [`readPreference`](https://docs.mongodb.com/manual/core/read-preference) for fetching the document. Possible values are `primary`, `primaryPreferred`, `secondary`, `secondaryPreferred` and `nearest`.
* @returns {Object}
*/
findOne(...args) {
return this._collection.findOne(
this._getFindSelector(args),
this._getFindOptions(args)
);
},
});
Object.assign(Mongo.Collection, {
async _publishCursor(cursor, sub, collection) {
var observeHandle = await cursor.observeChanges(
{
added: function(id, fields) {
sub.added(collection, id, fields);
},
changed: function(id, fields) {
sub.changed(collection, id, fields);
},
removed: function(id) {
sub.removed(collection, id);
},
},
// Publications don't mutate the documents
// This is tested by the `livedata - publish callbacks clone` test
{ nonMutatingCallbacks: true }
);
// We don't call sub.ready() here: it gets called in livedata_server, after
// possibly calling _publishCursor on multiple returned cursors.
// register stop callback (expects lambda w/ no args).
sub.onStop(function() {
return observeHandle.stop();
});
// return the observeHandle in case it needs to be stopped early
return observeHandle;
},
// protect against dangerous selectors. falsey and {_id: falsey} are both
// likely programmer error, and not what you want, particularly for destructive
// operations. If a falsey _id is sent in, a new string _id will be
// generated and returned; if a fallbackId is provided, it will be returned
// instead.
_rewriteSelector(selector, { fallbackId } = {}) {
// shorthand -- scalars match _id
if (LocalCollection._selectorIsId(selector)) selector = { _id: selector };
if (Array.isArray(selector)) {
// This is consistent with the Mongo console itself; if we don't do this
// check passing an empty array ends up selecting all items
throw new Error("Mongo selector can't be an array.");
}
if (!selector || ('_id' in selector && !selector._id)) {
// can't match anything
return { _id: fallbackId || Random.id() };
}
return selector;
},
});
Object.assign(Mongo.Collection.prototype, {
// 'insert' immediately returns the inserted document's new _id.
// The others return values immediately if you are in a stub, an in-memory
// unmanaged collection, or a mongo-backed collection and you don't pass a
// callback. 'update' and 'remove' return the number of affected
// documents. 'upsert' returns an object with keys 'numberAffected' and, if an
// insert happened, 'insertedId'.
//
// Otherwise, the semantics are exactly like other methods: they take
// a callback as an optional last argument; if no callback is
// provided, they block until the operation is complete, and throw an
// exception if it fails; if a callback is provided, then they don't
// necessarily block, and they call the callback when they finish with error and
// result arguments. (The insert method provides the document ID as its result;
// update and remove provide the number of affected docs as the result; upsert
// provides an object with numberAffected and maybe insertedId.)
//
// On the client, blocking is impossible, so if a callback
// isn't provided, they just return immediately and any error
// information is lost.
//
// There's one more tweak. On the client, if you don't provide a
// callback, then if there is an error, a message will be logged with
// Meteor._debug.
//
// The intent (though this is actually determined by the underlying
// drivers) is that the operations should be done synchronously, not
// generating their result until the database has acknowledged
// them. In the future maybe we should provide a flag to turn this
// off.
_insert(doc, callback) {
  // Make sure we were passed a document to insert
  if (!doc) {
    throw new Error('insert requires an argument');
  }
  // Make a shallow clone of the document, preserving its prototype.
  doc = Object.create(
    Object.getPrototypeOf(doc),
    Object.getOwnPropertyDescriptors(doc)
  );
  if ('_id' in doc) {
    if (
      !doc._id ||
      !(typeof doc._id === 'string' || doc._id instanceof Mongo.ObjectID)
    ) {
      throw new Error(
        'Meteor requires document _id fields to be non-empty strings or ObjectIDs'
      );
    }
  } else {
    let generateId = true;
    // Don't generate the id if we're the client and the 'outermost' call
    // This optimization saves us passing both the randomSeed and the id
    // Passing both is redundant.
    if (this._isRemoteCollection()) {
      const enclosing = DDP._CurrentMethodInvocation.get();
      if (!enclosing) {
        generateId = false;
      }
    }
    if (generateId) {
      doc._id = this._makeNewID();
    }
  }
  // On inserts, always return the id that we generated; on all other
  // operations, just return the result from the collection.
  var chooseReturnValueFromCollectionResult = function(result) {
    // Async results pass through untouched so callers can await them.
    if (Meteor._isPromise(result)) return result;
    if (doc._id) {
      return doc._id;
    }
    // XXX what is this for??
    // It's some iteraction between the callback to _callMutatorMethod and
    // the return value conversion
    doc._id = result;
    return result;
  };
  const wrappedCallback = wrapCallback(
    callback,
    chooseReturnValueFromCollectionResult
  );
  if (this._isRemoteCollection()) {
    // Server-backed collection: the mutation travels over a DDP method call.
    const result = this._callMutatorMethod('insert', [doc], wrappedCallback);
    return chooseReturnValueFromCollectionResult(result);
  }
  // it's my collection. descend into the collection object
  // and propagate any exception.
  try {
    // If the user provided a callback and the collection implements this
    // operation asynchronously, then queryRet will be undefined, and the
    // result will be returned through the callback instead.
    let result;
    if (!!wrappedCallback) {
      this._collection.insert(doc, wrappedCallback);
    } else {
      // If we don't have the callback, we assume the user is using the promise.
      // We can't just pass this._collection.insert to the promisify because it would lose the context.
      result = Meteor.promisify((cb) => this._collection.insert(doc, cb))();
    }
    return chooseReturnValueFromCollectionResult(result);
  } catch (e) {
    if (callback) {
      callback(e);
      return null;
    }
    throw e;
  }
},
/**
* @summary Insert a document in the collection. Returns its unique _id.
* @locus Anywhere
* @method insert
* @memberof Mongo.Collection
* @instance
* @param {Object} doc The document to insert. May not yet have an _id attribute, in which case Meteor will generate one for you.
* @param {Function} [callback] Optional. If present, called with an error object as the first argument and, if no error, the _id as the second.
*/
insert(doc, callback) {
return this._insert(doc, callback);
},
/**
* @summary Modify one or more documents in the collection. Returns the number of matched documents.
* @locus Anywhere
* @method update
* @memberof Mongo.Collection
* @instance
* @param {MongoSelector} selector Specifies which documents to modify
* @param {MongoModifier} modifier Specifies how to modify the documents
* @param {Object} [options]
* @param {Boolean} options.multi True to modify all matching documents; false to only modify one of the matching documents (the default).
* @param {Boolean} options.upsert True to insert a document if no matching documents are found.
* @param {Array} options.arrayFilters Optional. Used in combination with MongoDB [filtered positional operator](https://docs.mongodb.com/manual/reference/operator/update/positional-filtered/) to specify which elements to modify in an array field.
* @param {Function} [callback] Optional. If present, called with an error object as the first argument and, if no error, the number of affected documents as the second.
*/
update(selector, modifier, ...optionsAndCallback) {
  const callback = popCallbackFromArgs(optionsAndCallback);
  // We've already popped off the callback, so we are left with an array
  // of one or zero items
  const options = { ...(optionsAndCallback[0] || null) };
  let insertedId;
  if (options && options.upsert) {
    // set `insertedId` if absent. `insertedId` is a Meteor extension.
    if (options.insertedId) {
      if (
        !(
          typeof options.insertedId === 'string' ||
          options.insertedId instanceof Mongo.ObjectID
        )
      )
        throw new Error('insertedId must be string or ObjectID');
      insertedId = options.insertedId;
    } else if (!selector || !selector._id) {
      // No explicit _id to upsert against, so pre-generate one here and
      // flag it as generated for the server side.
      insertedId = this._makeNewID();
      options.generatedId = true;
      options.insertedId = insertedId;
    }
  }
  // Normalize falsey/scalar selectors; an unmatchable selector falls back
  // to the pre-generated insertedId.
  selector = Mongo.Collection._rewriteSelector(selector, {
    fallbackId: insertedId,
  });
  const wrappedCallback = wrapCallback(callback);
  if (this._isRemoteCollection()) {
    // Server-backed collection: the mutation travels over a DDP method call.
    const args = [selector, modifier, options];
    return this._callMutatorMethod('update', args, wrappedCallback);
  }
  // it's my collection. descend into the collection object
  // and propagate any exception.
  try {
    // If the user provided a callback and the collection implements this
    // operation asynchronously, then queryRet will be undefined, and the
    // result will be returned through the callback instead.
    return this._collection.update(
      selector,
      modifier,
      options,
      wrappedCallback
    );
  } catch (e) {
    if (callback) {
      callback(e);
      return null;
    }
    throw e;
  }
},
/**
* @summary Remove documents from the collection
* @locus Anywhere
* @method remove
* @memberof Mongo.Collection
* @instance
* @param {MongoSelector} selector Specifies which documents to remove
* @param {Function} [callback] Optional. If present, called with an error object as its argument.
*/
remove(selector, callback) {
selector = Mongo.Collection._rewriteSelector(selector);
const wrappedCallback = wrapCallback(callback);
if (this._isRemoteCollection()) {
return this._callMutatorMethod('remove', [selector], wrappedCallback);
}
// it's my collection. descend into the collection1 object
// and propagate any exception.
try {
// If the user provided a callback and the collection implements this
// operation asynchronously, then queryRet will be undefined, and the
// result will be returned through the callback instead.
return this._collection.remove(selector, wrappedCallback);
} catch (e) {
if (callback) {
callback(e);
return null;
}
throw e;
}
},
// Determine if this collection is simply a minimongo representation of a real
// database on another server
_isRemoteCollection() {
// XXX see #MeteorServerNull
return this._connection && this._connection !== Meteor.server;
},
/**
* @summary Modify one or more documents in the collection, or insert one if no matching documents were found. Returns an object with keys `numberAffected` (the number of documents modified) and `insertedId` (the unique _id of the document that was inserted, if any).
* @locus Anywhere
* @method upsert
* @memberof Mongo.Collection
* @instance
* @param {MongoSelector} selector Specifies which documents to modify
* @param {MongoModifier} modifier Specifies how to modify the documents
* @param {Object} [options]
* @param {Boolean} options.multi True to modify all matching documents; false to only modify one of the matching documents (the default).
* @param {Function} [callback] Optional. If present, called with an error object as the first argument and, if no error, the number of affected documents as the second.
*/
upsert(selector, modifier, options, callback) {
if (!callback && typeof options === 'function') {
callback = options;
options = {};
}
return this.update(
selector,
modifier,
{
...options,
_returnObject: true,
upsert: true,
},
callback
);
},
// We'll actually design an index API later. For now, we just pass through to
// Mongo's, but make it synchronous.
/**
* @summary Creates the specified index on the collection.
* @locus server
* @method _ensureIndex
* @deprecated in 3.0
* @memberof Mongo.Collection
* @instance
* @param {Object} index A document that contains the field and value pairs where the field is the index key and the value describes the type of index for that field. For an ascending index on a field, specify a value of `1`; for descending index, specify a value of `-1`. Use `text` for text indexes.
* @param {Object} [options] All options are listed in [MongoDB documentation](https://docs.mongodb.com/manual/reference/method/db.collection.createIndex/#options)
* @param {String} options.name Name of the index
* @param {Boolean} options.unique Define that the index values must be unique, more at [MongoDB documentation](https://docs.mongodb.com/manual/core/index-unique/)
* @param {Boolean} options.sparse Define that the index is sparse, more at [MongoDB documentation](https://docs.mongodb.com/manual/core/index-sparse/)
*/
async _ensureIndex(index, options) {
var self = this;
if (!self._collection._ensureIndex || !self._collection.createIndex)
throw new Error('Can only call createIndex on server collections');
if (self._collection.createIndex) {
await self._collection.createIndex(index, options);
} else {
import { Log } from 'meteor/logging';
Log.debug(`_ensureIndex has been deprecated, please use the new 'createIndex' instead${ options?.name ? `, index name: ${ options.name }` : `, index: ${ JSON.stringify(index) }` }`)
await self._collection._ensureIndex(index, options);
}
},
/**
* @summary Creates the specified index on the collection.
* @locus server
* @method createIndex
* @memberof Mongo.Collection
* @instance
* @param {Object} index A document that contains the field and value pairs where the field is the index key and the value describes the type of index for that field. For an ascending index on a field, specify a value of `1`; for descending index, specify a value of `-1`. Use `text` for text indexes.
* @param {Object} [options] All options are listed in [MongoDB documentation](https://docs.mongodb.com/manual/reference/method/db.collection.createIndex/#options)
* @param {String} options.name Name of the index
* @param {Boolean} options.unique Define that the index values must be unique, more at [MongoDB documentation](https://docs.mongodb.com/manual/core/index-unique/)
* @param {Boolean} options.sparse Define that the index is sparse, more at [MongoDB documentation](https://docs.mongodb.com/manual/core/index-sparse/)
*/
async createIndex(index, options) {
var self = this;
if (!self._collection.createIndex)
throw new Error('Can only call createIndex on server collections');
try {
await self._collection.createIndex(index, options);
} catch (e) {
if (e.message.includes('An equivalent index already exists with the same name but different options.') && Meteor.settings?.packages?.mongo?.reCreateIndexOnOptionMismatch) {
import { Log } from 'meteor/logging';
Log.info(`Re-creating index ${ index } for ${ self._name } due to options mismatch.`);
await self._collection._dropIndex(index);
await self._collection.createIndex(index, options);
} else {
console.error(e);
throw new Meteor.Error(`An error occurred when creating an index for collection "${ self._name }: ${ e.message }`);
}
}
},
async _dropIndex(index) {
var self = this;
if (!self._collection._dropIndex)
throw new Error('Can only call _dropIndex on server collections');
self._collection._dropIndex(index);
},
async _dropCollection() {
var self = this;
if (!self._collection.dropCollection)
throw new Error('Can only call _dropCollection on server collections');
await self._collection.dropCollection();
},
_createCappedCollection(byteSize, maxDocuments) {
var self = this;
if (!self._collection._createCappedCollection)
throw new Error(
'Can only call _createCappedCollection on server collections'
);
self._collection._createCappedCollection(byteSize, maxDocuments);
},
/**
* @summary Returns the [`Collection`](http://mongodb.github.io/node-mongodb-native/3.0/api/Collection.html) object corresponding to this collection from the [npm `mongodb` driver module](https://www.npmjs.com/package/mongodb) which is wrapped by `Mongo.Collection`.
* @locus Server
* @memberof Mongo.Collection
* @instance
*/
rawCollection() {
var self = this;
if (!self._collection.rawCollection) {
throw new Error('Can only call rawCollection on server collections');
}
return self._collection.rawCollection();
},
/**
* @summary Returns the [`Db`](http://mongodb.github.io/node-mongodb-native/3.0/api/Db.html) object corresponding to this collection's database connection from the [npm `mongodb` driver module](https://www.npmjs.com/package/mongodb) which is wrapped by `Mongo.Collection`.
* @locus Server
* @memberof Mongo.Collection
* @instance
*/
rawDatabase() {
var self = this;
if (!(self._driver.mongo && self._driver.mongo.db)) {
throw new Error('Can only call rawDatabase on server collections');
}
return self._driver.mongo.db;
},
});
// Convert the callback to not return a result if there is an error
function wrapCallback(callback, convertResult) {
  // Preserve falsy callbacks (undefined/null) unchanged so callers can
  // test for their absence.
  if (!callback) {
    return callback;
  }
  return function (error, result) {
    if (error) {
      // On error, suppress the result entirely.
      callback(error);
      return;
    }
    if (typeof convertResult === 'function') {
      callback(error, convertResult(result));
    } else {
      callback(error, result);
    }
  };
}
/**
 * @summary Create a Mongo-style `ObjectID`. If you don't specify a `hexString`, the `ObjectID` will be generated randomly (not using MongoDB's ID construction rules).
 * @locus Anywhere
 * @class
 * @param {String} [hexString] Optional. The 24-character hexadecimal contents of the ObjectID to create
 */
Mongo.ObjectID = MongoID.ObjectID;
/**
 * @summary To create a cursor, use find. To access the documents in a cursor, use forEach, map, or fetch.
 * @class
 * @instanceName cursor
 */
Mongo.Cursor = LocalCollection.Cursor;
/**
 * Legacy alias for Mongo.Cursor.
 * @deprecated in 0.9.1
 */
Mongo.Collection.Cursor = Mongo.Cursor;
/**
 * Legacy alias for Mongo.ObjectID.
 * @deprecated in 0.9.1
 */
Mongo.Collection.ObjectID = Mongo.ObjectID;
/**
 * Legacy alias for Mongo.Collection (pre-0.9.1 API name).
 * @deprecated in 0.9.1
 */
Meteor.Collection = Mongo.Collection;
// Allow deny stuff is now in the allow-deny package
Object.assign(Meteor.Collection.prototype, AllowDeny.CollectionPrototype);
function popCallbackFromArgs(args) {
  // Pull off any callback (or perhaps a 'callback' variable that was passed
  // in undefined, like how 'upsert' does it). Mutates `args` in place;
  // returns undefined when the last element is not callback-shaped.
  if (args.length === 0) {
    return;
  }
  const lastArg = args[args.length - 1];
  if (lastArg === undefined || lastArg instanceof Function) {
    return args.pop();
  }
}
// Register a promise-returning *Async alias on the Collection prototype for
// each method listed in ASYNC_COLLECTION_METHODS.
for (const methodName of ASYNC_COLLECTION_METHODS) {
  const methodNameAsync = getAsyncMethodName(methodName);
  Mongo.Collection.prototype[methodNameAsync] = function (...args) {
    // Delegate to the original method, normalizing its result to a Promise.
    return Promise.resolve(this[methodName](...args));
  };
}

View File

@@ -1,21 +0,0 @@
Tinytest.add('async collection - check for methods presence', function (test) {
  // Each mutator/accessor should expose an *Async counterpart on the
  // collection and on its cursors.
  const assertIsFunction = fn => test.equal(typeof fn, 'function');
  const collection = new Mongo.Collection('myAsyncCollection' + test.id);

  [
    'createCappedCollectionAsync',
    'createIndexAsync',
    'dropCollectionAsync',
    'dropIndexAsync',
    'findOneAsync',
    'insertAsync',
    'removeAsync',
    'updateAsync',
    'upsertAsync',
  ].forEach(name => assertIsFunction(collection[name]));

  const cursor = collection.find();
  ['countAsync', 'fetchAsync', 'forEachAsync', 'mapAsync'].forEach(name =>
    assertIsFunction(cursor[name])
  );
  assertIsFunction(cursor[Symbol.asyncIterator]);
});

View File

@@ -1,386 +0,0 @@
var MongoDB = NpmModuleMongodb;
Tinytest.add(
'collection - call Mongo.Collection without new',
function (test) {
test.throws(function () {
Mongo.Collection(null);
});
}
);
Tinytest.add('collection - call new Mongo.Collection multiple times',
function (test) {
var collectionName = 'multiple_times_1_' + test.id;
new Mongo.Collection(collectionName);
test.throws(
function () {
new Mongo.Collection(collectionName);
},
/There is already a collection named/
);
}
);
Tinytest.add('collection - call new Mongo.Collection multiple times with _suppressSameNameError=true',
function (test) {
var collectionName = 'multiple_times_2_' + test.id;
new Mongo.Collection(collectionName);
try {
new Mongo.Collection(collectionName, {_suppressSameNameError: true});
test.ok();
} catch (error) {
console.log(error);
test.fail('Expected new Mongo.Collection not to throw an error when called twice with the same name');
}
}
);
Tinytest.add('collection - call new Mongo.Collection with defineMutationMethods=false',
function (test) {
var handlerPropName = Meteor.isClient ? '_methodHandlers' : 'method_handlers';
var methodCollectionName = 'hasmethods' + test.id;
var hasmethods = new Mongo.Collection(methodCollectionName);
test.equal(typeof hasmethods._connection[handlerPropName]['/' + methodCollectionName + '/insert'], 'function');
var noMethodCollectionName = 'nomethods' + test.id;
var nomethods = new Mongo.Collection(noMethodCollectionName, {defineMutationMethods: false});
test.equal(nomethods._connection[handlerPropName]['/' + noMethodCollectionName + '/insert'], undefined);
}
);
Tinytest.addAsync('collection - call find with sort function',
async function (test) {
var initialize = async function (collection) {
await collection.insert({a: 2});
await collection.insert({a: 3});
await collection.insert({a: 1});
};
var sorter = function (a, b) {
return a.a - b.a;
};
var getSorted = function (collection) {
return collection.find({}, {sort: sorter}).map(function (doc) { return doc.a; });
};
var collectionName = 'sort' + test.id;
var localCollection = new Mongo.Collection(null);
var namedCollection = new Mongo.Collection(collectionName, {connection: null});
await initialize(localCollection);
test.equal(await getSorted(localCollection), [1, 2, 3]);
await initialize(namedCollection);
test.equal(await getSorted(namedCollection), [1, 2, 3]);
}
);
Tinytest.addAsync('collection - call native find with sort function',
async function (test) {
var collectionName = 'sortNative' + test.id;
var nativeCollection = new Mongo.Collection(collectionName);
if (Meteor.isServer) {
await test.throwsAsync(
function () {
return nativeCollection
.find({}, {
sort: function () {},
})
.map(function (doc) {
return doc.a;
});
},
/Invalid sort format: undefined Sort must be a valid object/
);
}
}
);
Tinytest.addAsync('collection - calling native find with maxTimeMs should timeout',
async function(test) {
var collectionName = 'findOptions1' + test.id;
var collection = new Mongo.Collection(collectionName);
await collection.insert({a: 1});
function doTest() {
return collection.find({$where: "sleep(100) || true"}, {maxTimeMs: 50}).count();
}
if (Meteor.isServer) {
await test.throwsAsync(doTest);
}
}
);
Tinytest.addAsync('collection - calling native find with $reverse hint should reverse on server',
async function(test) {
var collectionName = 'findOptions2' + test.id;
var collection = new Mongo.Collection(collectionName);
await collection.insert({a: 1});
await collection.insert({a: 2});
function m(doc) { return doc.a; }
var fwd = await collection.find({}, {hint: {$natural: 1}}).map(m);
var rev = await collection.find({}, {hint: {$natural: -1}}).map(m);
if (Meteor.isServer) {
test.equal(fwd, rev.reverse());
} else {
// NOTE: should be documented that hints don't work on client
test.equal(fwd, rev);
}
}
);
Tinytest.addAsync('collection - calling native find with good hint and maxTimeMs should succeed',
async function(test, done) {
var collectionName = 'findOptions3' + test.id;
var collection = new Mongo.Collection(collectionName);
await collection.insert({a: 1});
Promise.resolve(
Meteor.isServer &&
collection.rawCollection().createIndex({ a: 1 })
).then(async () => {
test.equal(await collection.find({}, {
hint: {a: 1},
maxTimeMs: 1000
}).count(), 1);
done();
}).catch(error => test.fail(error.message));
}
);
Tinytest.addAsync('collection - calling find with a valid readPreference',
async function(test) {
if (Meteor.isServer) {
const defaultReadPreference = 'primary';
const customReadPreference = 'secondaryPreferred';
const collection = new Mongo.Collection('readPreferenceTest' + test.id);
const defaultCursor = collection.find();
const customCursor = collection.find(
{},
{ readPreference: customReadPreference }
);
// Trigger the creation of _synchronousCursor
await defaultCursor.count();
await customCursor.count();
// defaultCursor._synchronousCursor._dbCursor.operation is not an option anymore
// as the cursor options are now private
// You can check on abstract_cursor.ts the exposed public getters
test.equal(
defaultCursor._synchronousCursor._dbCursor.readPreference
.mode,
defaultReadPreference
);
test.equal(
customCursor._synchronousCursor._dbCursor.readPreference.mode,
customReadPreference
);
}
}
);
Tinytest.addAsync('collection - calling find with an invalid readPreference',
function(test) {
if (Meteor.isServer) {
const invalidReadPreference = 'INVALID';
const collection = new Mongo.Collection('readPreferenceTest2' + test.id);
const cursor = collection.find(
{},
{ readPreference: invalidReadPreference }
);
return test.throwsAsync(function() {
// Trigger the creation of _synchronousCursor
return cursor.count();
}, `Invalid read preference mode "${invalidReadPreference}"`);
}
}
);
Tinytest.addAsync('collection - inserting a document with a binary should return a document with a binary',
async function(test) {
if (Meteor.isServer) {
const collection = new Mongo.Collection('testBinary1');
const _id = Random.id();
await collection.insert({
_id,
binary: new MongoDB.Binary(Buffer.from('hello world'), 6)
});
const doc = await collection.findOne({ _id });
test.ok(
doc.binary instanceof MongoDB.Binary
);
test.equal(
doc.binary.buffer,
Buffer.from('hello world')
);
}
}
);
Tinytest.addAsync('collection - inserting a document with a binary (sub type 0) should return a document with a uint8array',
async function(test) {
if (Meteor.isServer) {
const collection = new Mongo.Collection('testBinary8');
const _id = Random.id();
await collection.insert({
_id,
binary: new MongoDB.Binary(Buffer.from('hello world'), 0)
});
const doc = await collection.findOne({ _id });
test.ok(
doc.binary instanceof Uint8Array
);
test.equal(
doc.binary,
new Uint8Array(Buffer.from('hello world'))
);
}
}
);
Tinytest.addAsync('collection - updating a document with a binary should return a document with a binary',
async function(test) {
if (Meteor.isServer) {
const collection = new Mongo.Collection('testBinary2');
const _id = Random.id();
await collection.insert({
_id
});
await collection.update({ _id }, { $set: { binary: new MongoDB.Binary(Buffer.from('hello world'), 6) } });
const doc = await collection.findOne({ _id });
test.ok(
doc.binary instanceof MongoDB.Binary
);
test.equal(
doc.binary.buffer,
Buffer.from('hello world')
);
}
}
);
Tinytest.addAsync('collection - updating a document with a binary (sub type 0) should return a document with a uint8array',
async function(test) {
if (Meteor.isServer) {
const collection = new Mongo.Collection('testBinary7');
const _id = Random.id();
await collection.insert({
_id
});
await collection.update({ _id }, { $set: { binary: new MongoDB.Binary(Buffer.from('hello world'), 0) } });
const doc = await collection.findOne({ _id });
test.ok(
doc.binary instanceof Uint8Array
);
test.equal(
doc.binary,
new Uint8Array(Buffer.from('hello world'))
);
}
}
);
Tinytest.addAsync('collection - inserting a document with a uint8array should return a document with a uint8array',
async function(test) {
if (Meteor.isServer) {
const collection = new Mongo.Collection('testBinary3');
const _id = Random.id();
await collection.insert({
_id,
binary: new Uint8Array(Buffer.from('hello world'))
});
const doc = await collection.findOne({ _id });
test.ok(
doc.binary instanceof Uint8Array
);
test.equal(
doc.binary,
new Uint8Array(Buffer.from('hello world'))
);
}
}
);
Tinytest.addAsync('collection - updating a document with a uint8array should return a document with a uint8array',
async function(test) {
if (Meteor.isServer) {
const collection = new Mongo.Collection('testBinary4');
const _id = Random.id();
await collection.insert({
_id
});
await collection.update(
{ _id },
{ $set: { binary: new Uint8Array(Buffer.from('hello world')) } }
)
const doc = await collection.findOne({ _id });
test.ok(
doc.binary instanceof Uint8Array
);
test.equal(
doc.binary,
new Uint8Array(Buffer.from('hello world'))
);
}
}
);
Tinytest.addAsync('collection - finding with a query with a uint8array field should return the correct document',
async function(test) {
if (Meteor.isServer) {
const collection = new Mongo.Collection('testBinary5');
const _id = Random.id();
await collection.insert({
_id,
binary: new Uint8Array(Buffer.from('hello world'))
});
const doc = await collection.findOne({ binary: new Uint8Array(Buffer.from('hello world')) });
test.equal(
doc._id,
_id
);
await collection.remove({});
}
}
);
Tinytest.addAsync('collection - finding with a query with a binary field should return the correct document',
async function(test) {
if (Meteor.isServer) {
const collection = new Mongo.Collection('testBinary6');
const _id = Random.id();
await collection.insert({
_id,
binary: new MongoDB.Binary(Buffer.from('hello world'), 6)
});
const doc = await collection.findOne({ binary: new MongoDB.Binary(Buffer.from('hello world'), 6) });
test.equal(
doc._id,
_id
);
await collection.remove({});
}
}
);

View File

@@ -1,10 +0,0 @@
/**
 * @summary Allows for user specified connection options
 * @example http://mongodb.github.io/node-mongodb-native/3.0/reference/connecting/connection-settings/
 * @locus Server
 * @param {Object} options User specified Mongo connection options
 */
Mongo.setConnectionOptions = function setConnectionOptions (options) {
  // Validate eagerly so a bad call fails here rather than later, when the
  // stored options are read while establishing the Mongo connection.
  check(options, Object);
  Mongo._connectionOptions = options;
};

View File

@@ -1,57 +0,0 @@
// Deduplicates concurrent document fetches keyed by an oplog-entry ("op")
// reference: every fetch() call sharing the same `op` object is resolved by
// a single findOne() round-trip.
export class DocFetcher {
  constructor(mongoConnection) {
    // Connection used to run the actual findOne queries.
    this._mongoConnection = mongoConnection;
    // Map from op -> [callback]
    this._callbacksForOp = new Map;
  }
  // Fetches document "id" from collectionName, returning it or null if not
  // found.
  //
  // If you make multiple calls to fetch() with the same op reference,
  // DocFetcher may assume that they all return the same document. (It does
  // not check to see if collectionName/id match.)
  //
  // You may assume that callback is never called synchronously (and in fact
  // OplogObserveDriver does so).
  fetch(collectionName, id, op, callback) {
    const self = this;
    check(collectionName, String);
    check(op, Object);
    // If there's already an in-progress fetch for this cache key, yield until
    // it's done and return whatever it returns.
    if (self._callbacksForOp.has(op)) {
      self._callbacksForOp.get(op).push(callback);
      return;
    }
    // First fetch for this op: register the callback list so later callers
    // can piggyback on it, then run the query asynchronously.
    const callbacks = [callback];
    self._callbacksForOp.set(op, callbacks);
    return Meteor._runAsync(async function () {
      try {
        // Normalize "not found" (undefined) to null.
        var doc = await self._mongoConnection.findOne(
          collectionName, {_id: id}) || null;
        // Return doc to all relevant callbacks. Note that this array can
        // continue to grow during callback execution, so callbacks are
        // drained (LIFO) rather than iterated.
        while (callbacks.length > 0) {
          // Clone the document so that the various calls to fetch don't return
          // objects that are intertwingled with each other. Clone before
          // popping the future, so that if clone throws, the error gets passed
          // to the next callback.
          await callbacks.pop()(null, EJSON.clone(doc));
        }
      } catch (e) {
        // Propagate the query error to every waiting callback.
        while (callbacks.length > 0) {
          await callbacks.pop()(e);
        }
      } finally {
        // XXX consider keeping the doc around for a period of time before
        // removing from the cache
        self._callbacksForOp.delete(op);
      }
    });
  }
}

View File

@@ -1,39 +0,0 @@
import { DocFetcher } from "./doc_fetcher.js";
testAsyncMulti("mongo-livedata - doc fetcher", [
async function (test, expect) {
var self = this;
var collName = "docfetcher-" + Random.id();
var collection = new Mongo.Collection(collName);
var id1 = await collection.insert({x: 1});
var id2 = await collection.insert({y: 2});
var fetcher = new DocFetcher(
MongoInternals.defaultRemoteCollectionDriver().mongo);
// Test basic operation.
const fakeOp1 = {};
const fakeOp2 = {};
fetcher.fetch(collName, id1, fakeOp1, expect(null, {_id: id1, x: 1}));
fetcher.fetch(collName, "nonexistent!", fakeOp2, expect(null, null));
var fetched = false;
var fakeOp3 = {};
var expected = {_id: id2, y: 2};
fetcher.fetch(collName, id2, fakeOp3, expect(function (e, d) {
fetched = true;
test.isFalse(e);
test.equal(d, expected);
}));
// The fetcher yields.
test.isFalse(fetched);
// Now ask for another document with the same op reference. Because a
// fetch for that op is in flight, we will get the other fetch's
// document, not this random document.
fetcher.fetch(collName, Random.id(), fakeOp3, expect(function (e, d) {
test.isFalse(e);
test.equal(d, expected);
}));
}
]);

View File

@@ -1,30 +0,0 @@
// singleton
export const LocalCollectionDriver = new (class LocalCollectionDriver {
  constructor() {
    // Shared registry for named collections that have no connection.
    this.noConnCollections = Object.create(null);
  }

  open(name, conn) {
    // Anonymous collections always get a fresh, unnamed minimongo instance.
    if (!name) {
      return new LocalCollection;
    }

    // No connection: named collections are shared via this driver's own map.
    if (!conn) {
      return ensureCollection(name, this.noConnCollections);
    }

    // Lazily attach a per-connection collection registry.
    // XXX is there a way to keep track of a connection's collections without
    // dangling it off the connection object?
    if (!conn._mongo_livedata_collections) {
      conn._mongo_livedata_collections = Object.create(null);
    }
    return ensureCollection(name, conn._mongo_livedata_collections);
  }
});
function ensureCollection(name, collections) {
  // Lazily create and memoize a LocalCollection per name in the given map.
  if (!(name in collections)) {
    collections[name] = new LocalCollection(name);
  }
  return collections[name];
}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -1,11 +0,0 @@
export const normalizeProjection = options => {
  // Normalize find() options: the legacy `fields` key is renamed to the
  // MongoDB-native `projection` key; all other options pass through.
  // TODO: enable this comment when deprecating the fields option
  // Log.debug(`fields option has been deprecated, please use the new 'projection' instead`)
  const { fields, projection, ...otherOptions } = options || {};
  const resolvedProjection = fields || projection;

  if (!resolvedProjection) {
    return otherOptions;
  }
  return { ...otherOptions, projection: resolvedProjection };
};

View File

@@ -1,394 +0,0 @@
var makeCollection = function () {
  // Server tests use a real, randomly named Mongo collection; client tests
  // use an anonymous local collection.
  return Meteor.isServer
    ? new Mongo.Collection(Random.id())
    : new Mongo.Collection(null);
};
_.each ([{added: 'added', forceOrdered: true},
{added: 'added', forceOrdered: false},
{added: 'addedBefore', forceOrdered: false}], function (options) {
var added = options.added;
var forceOrdered = options.forceOrdered;
Tinytest.addAsync("observeChanges - single id - basics " + added
+ (forceOrdered ? " force ordered" : ""),
async function (test, onComplete) {
var c = makeCollection();
var counter = 0;
var callbacks = [added, "changed", "removed"];
if (forceOrdered)
callbacks.push("movedBefore");
await withCallbackLogger(test,
callbacks,
Meteor.isServer,
async function (logger) {
var barid = await c.insert({thing: "stuff"});
var fooid = await c.insert({noodles: "good", bacon: "bad", apples: "ok"});
var handle = await c.find(fooid).observeChanges(logger);
if (added === 'added') {
logger.expectResult(added, [fooid, {noodles: "good", bacon: "bad", apples: "ok"}]);
} else {
logger.expectResult(added,
[fooid, {noodles: "good", bacon: "bad", apples: "ok"}, null]);
}
await c.update(fooid, {noodles: "alright", potatoes: "tasty", apples: "ok"});
logger.expectResult("changed",
[fooid, {noodles: "alright", potatoes: "tasty", bacon: undefined}]);
await c.remove(fooid);
logger.expectResult("removed", [fooid]);
await logger.expectNoResult(async () => {
await c.remove(barid);
await c.insert({noodles: "good", bacon: "bad", apples: "ok"});
});
await handle.stop();
const badCursor = c.find({}, {fields: {noodles: 1, _id: false}});
await test.throwsAsync(function () {
return badCursor.observeChanges(logger);
});
});
});
});
Tinytest.addAsync("observeChanges - callback isolation", async function (test) {
var c = makeCollection();
await withCallbackLogger(test, ["added", "changed", "removed"], Meteor.isServer, async function (logger) {
var handles = [];
var cursor = c.find();
handles.push(await cursor.observeChanges(logger));
// fields-tampering observer
handles.push(await cursor.observeChanges({
added: function(id, fields) {
fields.apples = 'green';
},
changed: function(id, fields) {
fields.apples = 'green';
},
}));
var fooid = await c.insert({apples: "ok"});
logger.expectResult("added", [fooid, {apples: "ok"}]);
await c.update(fooid, {apples: "not ok"});
logger.expectResult("changed", [fooid, {apples: "not ok"}]);
test.equal((await c.findOne(fooid)).apples, "not ok");
await Promise.all(handles.map(h => h.stop()));
});
});
Tinytest.addAsync("observeChanges - single id - initial adds", async function (test) {
var c = makeCollection();
await withCallbackLogger(test, ["added", "changed", "removed"], Meteor.isServer, async function (logger) {
var fooid = await c.insert({noodles: "good", bacon: "bad", apples: "ok"});
var handle = await c.find(fooid).observeChanges(logger);
logger.expectResult("added", [fooid, {noodles: "good", bacon: "bad", apples: "ok"}]);
await logger.expectNoResult();
await handle.stop();
});
});
Tinytest.addAsync("observeChanges - unordered - initial adds", async function (test) {
var c = makeCollection();
await withCallbackLogger(test, ["added", "changed", "removed"], Meteor.isServer, async function (logger) {
var fooid = await c.insert({noodles: "good", bacon: "bad", apples: "ok"});
var barid = await c.insert({noodles: "good", bacon: "weird", apples: "ok"});
var handle = await c.find().observeChanges(logger);
logger.expectResultUnordered([
{callback: "added",
args: [fooid, {noodles: "good", bacon: "bad", apples: "ok"}]},
{callback: "added",
args: [barid, {noodles: "good", bacon: "weird", apples: "ok"}]}
]);
await logger.expectNoResult();
await handle.stop();
});
});
Tinytest.addAsync("observeChanges - unordered - basics", async function (test) {
var c = makeCollection();
await withCallbackLogger(test, ["added", "changed", "removed"], Meteor.isServer, async function (logger) {
var handle = await c.find().observeChanges(logger);
var barid = await c.insert({thing: "stuff"});
logger.expectResultOnly("added", [barid, {thing: "stuff"}]);
var fooid = await c.insert({noodles: "good", bacon: "bad", apples: "ok"});
logger.expectResultOnly("added", [fooid, {noodles: "good", bacon: "bad", apples: "ok"}]);
await c.update(fooid, {noodles: "alright", potatoes: "tasty", apples: "ok"});
await c.update(fooid, {noodles: "alright", potatoes: "tasty", apples: "ok"});
logger.expectResultOnly("changed",
[fooid, {noodles: "alright", potatoes: "tasty", bacon: undefined}]);
await c.remove(fooid);
logger.expectResultOnly("removed", [fooid]);
await c.remove(barid);
logger.expectResultOnly("removed", [barid]);
fooid = await c.insert({noodles: "good", bacon: "bad", apples: "ok"});
logger.expectResult("added", [fooid, {noodles: "good", bacon: "bad", apples: "ok"}]);
await logger.expectNoResult();
await handle.stop();
});
});
if (Meteor.isServer) {
Tinytest.addAsync("observeChanges - unordered - specific fields", async function (test, onComplete) {
var c = makeCollection();
await withCallbackLogger(test, ["added", "changed", "removed"], Meteor.isServer, async function (logger) {
var handle = await c.find({}, {fields:{noodles: 1, bacon: 1}}).observeChanges(logger);
var barid = await c.insert({thing: "stuff"});
logger.expectResultOnly("added", [barid, {}]);
var fooid = await c.insert({noodles: "good", bacon: "bad", apples: "ok"});
logger.expectResultOnly("added", [fooid, {noodles: "good", bacon: "bad"}]);
await c.update(fooid, {noodles: "alright", potatoes: "tasty", apples: "ok"});
logger.expectResultOnly("changed",
[fooid, {noodles: "alright", bacon: undefined}]);
await c.update(fooid, {noodles: "alright", potatoes: "meh", apples: "ok"});
await c.remove(fooid);
logger.expectResultOnly("removed", [fooid]);
await c.remove(barid);
logger.expectResultOnly("removed", [barid]);
fooid = await c.insert({noodles: "good", bacon: "bad"});
logger.expectResult("added", [fooid, {noodles: "good", bacon: "bad"}]);
await logger.expectNoResult();
await handle.stop();
});
});
Tinytest.addAsync("observeChanges - unordered - specific fields + selector on excluded fields", async function (test) {
var c = makeCollection();
await withCallbackLogger(test, ["added", "changed", "removed"], Meteor.isServer, async function (logger) {
var handle = await c.find({ mac: 1, cheese: 2 },
{fields:{noodles: 1, bacon: 1, eggs: 1}}).observeChanges(logger);
var barid = await c.insert({thing: "stuff", mac: 1, cheese: 2});
logger.expectResultOnly("added", [barid, {}]);
var fooid = await c.insert({noodles: "good", bacon: "bad", apples: "ok", mac: 1, cheese: 2});
logger.expectResultOnly("added", [fooid, {noodles: "good", bacon: "bad"}]);
await c.update(fooid, {noodles: "alright", potatoes: "tasty", apples: "ok", mac: 1, cheese: 2});
logger.expectResultOnly("changed",
[fooid, {noodles: "alright", bacon: undefined}]);
// Doesn't get update event, since modifies only hidden fields
await logger.expectNoResult(() =>
c.update(fooid, {
noodles: "alright",
potatoes: "meh",
apples: "ok",
mac: 1,
cheese: 2
})
);
await c.remove(fooid);
logger.expectResultOnly("removed", [fooid]);
await c.remove(barid);
logger.expectResultOnly("removed", [barid]);
fooid = await c.insert({noodles: "good", bacon: "bad", mac: 1, cheese: 2});
logger.expectResult("added", [fooid, {noodles: "good", bacon: "bad"}]);
await logger.expectNoResult();
handle.stop();
});
});
}
Tinytest.addAsync("observeChanges - unordered - specific fields + modify on excluded fields", async function (test, onComplete) {
var c = makeCollection();
await withCallbackLogger(test, ["added", "changed", "removed"], Meteor.isServer, async function (logger) {
var handle = await c.find({ mac: 1, cheese: 2 },
{fields:{noodles: 1, bacon: 1, eggs: 1}}).observeChanges(logger);
var fooid = await c.insert({noodles: "good", bacon: "bad", apples: "ok", mac: 1, cheese: 2});
logger.expectResultOnly("added", [fooid, {noodles: "good", bacon: "bad"}]);
// Noodles go into shadow, mac appears as eggs
await c.update(fooid, {$rename: { noodles: 'shadow', apples: 'eggs' }});
logger.expectResultOnly("changed",
[fooid, {eggs:"ok", noodles: undefined}]);
await c.remove(fooid);
logger.expectResultOnly("removed", [fooid]);
await logger.expectNoResult();
await handle.stop();
});
});
Tinytest.addAsync(
"observeChanges - unordered - unset parent of observed field",
async function (test) {
var c = makeCollection();
await withCallbackLogger(
test, ['added', 'changed', 'removed'], Meteor.isServer,
async function (logger) {
var handle = await c.find({}, {fields: {'type.name': 1}}).observeChanges(logger);
var id = await c.insert({ type: { name: 'foobar' } });
logger.expectResultOnly('added', [id, { type: { name: 'foobar' } }]);
await c.update(id, { $unset: { type: 1 } });
test.equal(await c.find().fetch(), [{ _id: id }]);
logger.expectResultOnly('changed', [id, { type: undefined }]);
await handle.stop();
}
);
}
);
Tinytest.addAsync("observeChanges - unordered - enters and exits result set through change", async function (test) {
var c = makeCollection();
await withCallbackLogger(test, ["added", "changed", "removed"], Meteor.isServer, async function (logger) {
var handle = await c.find({noodles: "good"}).observeChanges(logger);
var barid = await c.insert({thing: "stuff"});
var fooid = await c.insert({noodles: "good", bacon: "bad", apples: "ok"});
logger.expectResultOnly("added", [fooid, {noodles: "good", bacon: "bad", apples: "ok"}]);
await c.update(fooid, {noodles: "alright", potatoes: "tasty", apples: "ok"});
logger.expectResultOnly("removed",
[fooid]);
await c.remove(fooid);
await c.remove(barid);
fooid = await c.insert({noodles: "ok", bacon: "bad", apples: "ok"});
await c.update(fooid, {noodles: "good", potatoes: "tasty", apples: "ok"});
logger.expectResult("added", [fooid, {noodles: "good", potatoes: "tasty", apples: "ok"}]);
await logger.expectNoResult();
await handle.stop();
});
});
if (Meteor.isServer) {
testAsyncMulti("observeChanges - tailable", [
async function (test, expect) {
var self = this;
var collName = "cap_" + Random.id();
var coll = new Mongo.Collection(collName);
coll._createCappedCollection(1000000);
self.xs = [];
self.expects = [];
self.insert = function (fields) {
coll.insert(_.extend({ts: new MongoInternals.MongoTimestamp(0, 0)},
fields));
};
// Tailable observe shouldn't show things that are in the initial
// contents.
self.insert({x: 1});
// Wait for one added call before going to the next test function.
self.expects.push(expect());
var cursor = coll.find({y: {$ne: 7}}, {tailable: true});
self.handle = await cursor.observeChanges({
added: function (id, fields) {
self.xs.push(fields.x);
test.notEqual(self.expects.length, 0);
self.expects.pop()();
},
changed: function () {
test.fail({unexpected: "changed"});
},
removed: function () {
test.fail({unexpected: "removed"});
}
});
// Nothing happens synchronously.
test.equal(self.xs, []);
},
function (test, expect) {
var self = this;
// The cursors sees the first element.
test.equal(self.xs, [1]);
self.xs = [];
self.insert({x: 2, y: 3});
self.insert({x: 3, y: 7}); // filtered out by the query
self.insert({x: 4});
// Expect two added calls to happen.
self.expects = [expect(), expect()];
},
function (test, expect) {
var self = this;
test.equal(self.xs, [2, 4]);
self.xs = [];
self.handle.stop();
self.insert({x: 5});
// XXX This timeout isn't perfect but it's pretty hard to prove that an
// event WON'T happen without something like a write fence.
Meteor.setTimeout(expect(), 1000);
},
function (test, expect) {
var self = this;
test.equal(self.xs, []);
}
]);
}
testAsyncMulti("observeChanges - bad query", [
async function (test, expect) {
var c = makeCollection();
var observeThrows = function () {
return test.throwsAsync(function () {
return c.find({__id: {$in: null}}).observeChanges({
added: function () {
test.fail("added shouldn't be called");
}
});
}, '$in needs an array');
};
if (Meteor.isClient) {
await observeThrows();
return;
}
// Test that if two copies of the same bad observeChanges run in parallel
// and are de-duped, both observeChanges calls will throw.
await Promise.all(['ob1', 'ob2'].map(() => observeThrows()));
}
]);
if (Meteor.isServer) {
Tinytest.addAsync(
"observeChanges - EnvironmentVariable",
async function (test) {
var c = makeCollection();
var environmentVariable = new Meteor.EnvironmentVariable;
await environmentVariable.withValue(true, async function() {
var handle = await c.find({}, { fields: { 'type.name': 1 }}).observeChanges({
added: function() {
test.isTrue(environmentVariable.get());
handle.stop();
}
});
});
await c.insert({ type: { name: 'foobar' } });
}
);
}

View File

@@ -1,231 +0,0 @@
let nextObserveHandleId = 1;
ObserveMultiplexer = class {
constructor({ ordered, onStop = () => {} } = {}) {
if (ordered === undefined) throw Error("must specify ordered");
Package['facts-base'] && Package['facts-base'].Facts.incrementServerFact(
"mongo-livedata", "observe-multiplexers", 1);
this._ordered = ordered;
this._onStop = onStop;
this._queue = new Meteor._AsynchronousQueue();
this._handles = {};
this._resolver = null;
this._readyPromise = new Promise(r => this._resolver = r).then(() => this._isReady = true);
this._cache = new LocalCollection._CachingChangeObserver({
ordered});
// Number of addHandleAndSendInitialAdds tasks scheduled but not yet
// running. removeHandle uses this to know if it's time to call the onStop
// callback.
this._addHandleTasksScheduledButNotPerformed = 0;
const self = this;
this.callbackNames().forEach(callbackName => {
this[callbackName] = function(/* ... */) {
self._applyCallback(callbackName, _.toArray(arguments));
};
});
}
addHandleAndSendInitialAdds(handle) {
return this._addHandleAndSendInitialAdds(handle);
}
async _addHandleAndSendInitialAdds(handle) {
++this._addHandleTasksScheduledButNotPerformed;
Package['facts-base'] && Package['facts-base'].Facts.incrementServerFact(
"mongo-livedata", "observe-handles", 1);
const self = this;
await this._queue.runTask(function () {
self._handles[handle._id] = handle;
// Send out whatever adds we have so far (whether the
// multiplexer is ready).
self._sendAdds(handle);
--self._addHandleTasksScheduledButNotPerformed;
});
await this._readyPromise;
}
// Remove an observe handle. If it was the last observe handle, call the
// onStop callback; you cannot add any more observe handles after this.
//
// This is not synchronized with polls and handle additions: this means that
// you can safely call it from within an observe callback, but it also means
// that we have to be careful when we iterate over _handles.
async removeHandle(id) {
// This should not be possible: you can only call removeHandle by having
// access to the ObserveHandle, which isn't returned to user code until the
// multiplex is ready.
if (!this._ready())
throw new Error("Can't remove handles until the multiplex is ready");
delete this._handles[id];
Package['facts-base'] && Package['facts-base'].Facts.incrementServerFact(
"mongo-livedata", "observe-handles", -1);
if (_.isEmpty(this._handles) &&
this._addHandleTasksScheduledButNotPerformed === 0) {
await this._stop();
}
}
async _stop(options) {
options = options || {};
// It shouldn't be possible for us to stop when all our handles still
// haven't been returned from observeChanges!
if (! this._ready() && ! options.fromQueryError)
throw Error("surprising _stop: not ready");
// Call stop callback (which kills the underlying process which sends us
// callbacks and removes us from the connection's dictionary).
await this._onStop();
Package['facts-base'] && Package['facts-base'].Facts.incrementServerFact(
"mongo-livedata", "observe-multiplexers", -1);
// Cause future addHandleAndSendInitialAdds calls to throw (but the onStop
// callback should make our connection forget about us).
this._handles = null;
}
// Allows all addHandleAndSendInitialAdds calls to return, once all preceding
// adds have been processed. Does not block.
ready() {
const self = this;
this._queue.queueTask(function () {
if (self._ready())
throw Error("can't make ObserveMultiplex ready twice!");
if (!self._resolver) {
throw new Error("Missing resolver");
}
self._resolver();
self._isReady = true;
});
}
// If trying to execute the query results in an error, call this. This is
// intended for permanent errors, not transient network errors that could be
// fixed. It should only be called before ready(), because if you called ready
// that meant that you managed to run the query once. It will stop this
// ObserveMultiplex and cause addHandleAndSendInitialAdds calls (and thus
// observeChanges calls) to throw the error.
async queryError(err) {
var self = this;
await this._queue.runTask(function () {
if (self._ready())
throw Error("can't claim query has an error after it worked!");
self._stop({fromQueryError: true});
throw err;
});
}
// Calls "cb" once the effects of all "ready", "addHandleAndSendInitialAdds"
// and observe callbacks which came before this call have been propagated to
// all handles. "ready" must have already been called on this multiplexer.
onFlush(cb) {
var self = this;
return this._queue.queueTask(async function () {
if (!self._ready())
throw Error("only call onFlush on a multiplexer that will be ready");
await cb();
});
}
callbackNames() {
if (this._ordered)
return ["addedBefore", "changed", "movedBefore", "removed"];
else
return ["added", "changed", "removed"];
}
  // True once ready() has run (i.e., the initial adds barrier has resolved).
  _ready() {
    return !!this._isReady;
  }
  // Queues a task that applies one observe callback (added/changed/...) to the
  // internal cache and then fans it out to every registered handle.
  // Fire-and-forget: ordering is guaranteed by the task queue, not by any
  // returned value.
  _applyCallback(callbackName, args) {
    const self = this;
    this._queue.queueTask(async function () {
      // If we stopped in the meantime, do nothing.
      if (!self._handles)
        return;
      // First, apply the change to the cache.
      await self._cache.applyChange[callbackName].apply(null, args);
      // If we haven't finished the initial adds, then we should only be getting
      // adds.
      if (!self._ready() &&
          (callbackName !== 'added' && callbackName !== 'addedBefore')) {
        throw new Error("Got " + callbackName + " during initial adds");
      }
      // Now multiplex the callbacks out to all observe handles. It's OK if
      // these calls yield; since we're inside a task, no other use of our queue
      // can continue until these are done. (But we do have to be careful to not
      // use a handle that got removed, because removeHandle does not use the
      // queue; thus, we iterate over an array of keys that we control.)
      const toAwait = Object.keys(self._handles).map(async (handleId) => {
        var handle = self._handles && self._handles[handleId];
        if (!handle)
          return;
        var callback = handle['_' + callbackName];
        // clone arguments so that callbacks can mutate their arguments
        // (skipped when the handle opted into nonMutatingCallbacks)
        callback && await callback.apply(null,
          handle.nonMutatingCallbacks ? args : EJSON.clone(args));
      });
      await Promise.all(toAwait);
    });
  }
  // Sends initial adds to a handle: invokes the handle's added or addedBefore
  // callback for every doc currently in the cache. It should only be called
  // from within a task (the task that is processing the
  // addHandleAndSendInitialAdds call); there's no need to flush the queue
  // afterwards to ensure that the callbacks get out.
  // NOTE(review): this method is async, but its caller does not await the
  // returned promise — confirm whether initial adds may still be in flight
  // when the add-handle task completes.
  async _sendAdds(handle) {
    var add = this._ordered ? handle._addedBefore : handle._added;
    if (!add)
      return;
    // note: docs may be an _IdMap or an OrderedDict
    await this._cache.docs.forEachAsync(async (doc, id) => {
      // removeHandle bypasses the queue, so re-check the handle each step.
      if (!_.has(this._handles, handle._id))
        throw Error("handle got removed before sending initial adds!");
      const { _id, ...fields } = handle.nonMutatingCallbacks ? doc
        : EJSON.clone(doc);
      if (this._ordered)
        await add(id, fields, null); // we're going in order, so add at end
      else
        await add(id, fields);
    });
  }
};
// When the callbacks do not mutate the arguments, we can skip a lot of data clones
ObserveHandle = class {
  // multiplexer: the ObserveMultiplexer distributing callbacks to this handle.
  // callbacks: user-supplied observe callbacks, keyed by callback name.
  // nonMutatingCallbacks: when true, callback arguments are passed un-cloned.
  constructor(multiplexer, callbacks, nonMutatingCallbacks = false) {
    this._multiplexer = multiplexer;
    for (const name of multiplexer.callbackNames()) {
      const callback = callbacks[name];
      if (callback) {
        this['_' + name] = callback;
        continue;
      }
      // Special case: if you specify "added" and "movedBefore", you get an
      // ordered observe where for some reason you don't get ordering data on
      // the adds. I dunno, we wrote tests for it, there must have been a
      // reason.
      if (name === "addedBefore" && callbacks.added) {
        this._addedBefore = (id, fields, before) => {
          callbacks.added(id, fields);
        };
      }
    }
    this._stopped = false;
    this._id = nextObserveHandleId++;
    this.nonMutatingCallbacks = nonMutatingCallbacks;
  }

  // Detaches this handle from its multiplexer; safe to call more than once.
  async stop() {
    if (this._stopped) {
      return;
    }
    this._stopped = true;
    await this._multiplexer.removeHandle(this._id);
  }
};

File diff suppressed because it is too large Load Diff

View File

@@ -1,381 +0,0 @@
import { NpmModuleMongodb } from "meteor/npm-mongo";
const { Long } = NpmModuleMongodb;
// Name of the capped collection backing a replica set's oplog.
OPLOG_COLLECTION = 'oplog.rs';
// If the entry queue grows past this, the worker drops the queue and tells
// observers to re-poll instead of replaying every entry. Tests tweak this via
// _defineTooFarBehind/_resetTooFarBehind.
var TOO_FAR_BEHIND = process.env.METEOR_OPLOG_TOO_FAR_BEHIND || 2000;
// How long (ms) a tail query may run without yielding a doc before we restart
// it (see the workaround note for #8598 in _startTailing).
var TAIL_TIMEOUT = +process.env.METEOR_OPLOG_TAIL_TIMEOUT || 30000;
// Extracts the _id of the document affected by an oplog entry. Throws for
// command entries ('c') and unrecognized ops, which carry no single id.
idForOp = function (op) {
  switch (op.op) {
    case 'd': // delete
    case 'i': // insert
      return op.o._id;
    case 'u': // update: the target id lives in the o2 query document
      return op.o2._id;
    case 'c':
      throw Error("Operator 'c' doesn't supply an object with id: " +
                  EJSON.stringify(op));
    default:
      throw Error("Unknown op: " + EJSON.stringify(op));
  }
};
// Tails a replica set's oplog and multiplexes entries out to interested
// observers via a DDPServer._Crossbar. `oplogUrl` must point at the 'local'
// database of a replica set member (validated in _startTailing).
OplogHandle = function (oplogUrl, dbName) {
  var self = this;
  self._oplogUrl = oplogUrl;
  self._dbName = dbName;
  // Two separate connections: one for the blocking tail cursor and one for
  // short "what's the last entry?" queries; see _startTailing for why.
  self._oplogLastEntryConnection = null;
  self._oplogTailConnection = null;
  self._stopped = false;
  self._tailHandle = null;
  // Resolved by _startTailing once the tail cursor is live; onOplogEntry and
  // waitUntilCaughtUp both block on this.
  self._readyPromiseResolver = null;
  self._readyPromise = new Promise(r => self._readyPromiseResolver = r);
  self._crossbar = new DDPServer._Crossbar({
    factPackage: "mongo-livedata", factName: "oplog-watchers"
  });
  // Entries we care about: ops in our db (plus admin.$cmd, used by
  // transactions): inserts/updates/deletes, drops, dropDatabase, applyOps.
  self._baseOplogSelector = {
    ns: new RegExp("^(?:" + [
      Meteor._escapeRegExp(self._dbName + "."),
      Meteor._escapeRegExp("admin.$cmd"),
    ].join("|") + ")"),
    $or: [
      { op: { $in: ['i', 'u', 'd'] } },
      // drop collection
      { op: 'c', 'o.drop': { $exists: true } },
      { op: 'c', 'o.dropDatabase': 1 },
      { op: 'c', 'o.applyOps': { $exists: true } },
    ]
  };
  // Data structures to support waitUntilCaughtUp(). Each oplog entry has a
  // MongoTimestamp object on it (which is not the same as a Date --- it's a
  // combination of time and an incrementing counter; see
  // http://docs.mongodb.org/manual/reference/bson-types/#timestamps).
  //
  // _catchingUpResolvers is an array of {ts: MongoTimestamp, resolver}
  // objects, sorted by ascending timestamp. _lastProcessedTS is the
  // MongoTimestamp of the last oplog entry we've processed.
  //
  // Each time we call waitUntilCaughtUp, we take a peek at the final oplog
  // entry in the db. If we've already processed it (ie, it is not greater than
  // _lastProcessedTS), waitUntilCaughtUp immediately returns. Otherwise,
  // waitUntilCaughtUp makes a new promise and inserts its resolver along with
  // the final timestamp entry that it read, into _catchingUpResolvers.
  // waitUntilCaughtUp then awaits that promise, which is resolved once
  // _lastProcessedTS is incremented to be past its timestamp by the worker.
  //
  // XXX use a priority queue or something else that's faster than an array
  self._catchingUpResolvers = [];
  self._lastProcessedTS = null;
  self._onSkippedEntriesHook = new Hook({
    debugPrintExceptions: "onSkippedEntries callback"
  });
  self._entryQueue = new Meteor._DoubleEndedQueue();
  self._workerActive = false;
  // NOTE(review): _startTailing is async and its promise is neither awaited
  // nor error-handled here; a failure would surface as an unhandled
  // rejection. Confirm whether callers are expected to await readiness via
  // _readyPromise instead.
  const shouldAwait = self._startTailing();
};
Object.assign(OplogHandle.prototype, {
stop: function () {
var self = this;
if (self._stopped)
return;
self._stopped = true;
if (self._tailHandle)
self._tailHandle.stop();
// XXX should close connections too
},
  // Internal implementation of onOplogEntry. Waits for tailing to be ready,
  // then registers `callback` on the crossbar for `trigger`. The callback is
  // wrapped with bindEnvironment so errors are logged rather than propagated
  // into the crossbar. Returns a {stop} handle.
  _onOplogEntry: async function(trigger, callback) {
    var self = this;
    if (self._stopped)
      throw new Error("Called onOplogEntry on stopped handle!");
    // Calling onOplogEntry requires us to wait for the tailing to be ready.
    await self._readyPromise;
    var originalCallback = callback;
    callback = Meteor.bindEnvironment(function (notification) {
      originalCallback(notification);
    }, function (err) {
      Meteor._debug("Error in oplog callback", err);
    });
    var listenHandle = self._crossbar.listen(trigger, callback);
    return {
      stop: function () {
        listenHandle.stop();
      }
    };
  },
  // Public entry point: resolves to a {stop} handle once tailing is ready and
  // the listener is registered (see _onOplogEntry).
  onOplogEntry: function (trigger, callback) {
    return this._onOplogEntry(trigger, callback);
  },
// Register a callback to be invoked any time we skip oplog entries (eg,
// because we are too far behind).
onSkippedEntries: function (callback) {
var self = this;
if (self._stopped)
throw new Error("Called onSkippedEntries on stopped handle!");
return self._onSkippedEntriesHook.register(callback);
},
  // Internal implementation of waitUntilCaughtUp: peeks at the newest visible
  // oplog entry and blocks until the worker has processed up to (at least)
  // its timestamp.
  async _waitUntilCaughtUp() {
    var self = this;
    if (self._stopped)
      throw new Error("Called waitUntilCaughtUp on stopped handle!");
    // Calling waitUntilCaughtUp requires us to wait for the oplog connection to
    // be ready.
    await self._readyPromise;
    var lastEntry;
    while (!self._stopped) {
      // We need to make the selector at least as restrictive as the actual
      // tailing selector (ie, we need to specify the DB name) or else we might
      // find a TS that won't show up in the actual tail stream.
      try {
        lastEntry = await self._oplogLastEntryConnection.findOne(
          OPLOG_COLLECTION, self._baseOplogSelector,
          {fields: {ts: 1}, sort: {$natural: -1}});
        break;
      } catch (e) {
        // During failover (eg) if we get an exception we should log and retry
        // instead of crashing.
        Meteor._debug("Got exception while reading last entry", e);
        await Meteor._sleepForMs(100);
      }
    }
    if (self._stopped)
      return;
    if (!lastEntry) {
      // Really, nothing in the oplog? Well, we've processed everything.
      return;
    }
    var ts = lastEntry.ts;
    if (!ts)
      throw Error("oplog entry without ts: " + EJSON.stringify(lastEntry));
    if (self._lastProcessedTS && ts.lessThanOrEqual(self._lastProcessedTS)) {
      // We've already caught up to here.
      return;
    }
    // Insert the resolver into our list. Almost always, this will be at the
    // end, but it's conceivable that if we fail over from one primary to
    // another, the oplog entries we see will go backwards.
    // NOTE(review): the guard `insertAfter - 1 > 0` means an entry is never
    // inserted before index 0, even when its ts is smaller than every queued
    // one — confirm this off-by-one is intentional.
    var insertAfter = self._catchingUpResolvers.length;
    while (insertAfter - 1 > 0 && self._catchingUpResolvers[insertAfter - 1].ts.greaterThan(ts)) {
      insertAfter--;
    }
    let promiseResolver = null;
    const promiseToAwait = new Promise(r => promiseResolver = r);
    self._catchingUpResolvers.splice(insertAfter, 0, {ts: ts, resolver: promiseResolver});
    await promiseToAwait;
  },
  // Calls `callback` once the oplog has been processed up to a point that is
  // roughly "now": specifically, once we've processed all ops that are
  // currently visible.
  // XXX become convinced that this is actually safe even if oplogConnection
  // is some kind of pool
  // NOTE(review): despite the comment above, there is no callback parameter —
  // this returns a promise that resolves once we've caught up.
  waitUntilCaughtUp: function () {
    return this._waitUntilCaughtUp();
  },
  // Connects to the oplog, validates that we're pointed at a replica set's
  // 'local' database, and starts the tailable cursor that feeds _entryQueue.
  // Resolves _readyPromise once the tail is live.
  _startTailing: async function () {
    var self = this;
    // First, make sure that we're talking to the local database.
    var mongodbUri = Npm.require('mongodb-uri');
    if (mongodbUri.parse(self._oplogUrl).database !== 'local') {
      throw Error("$MONGO_OPLOG_URL must be set to the 'local' database of " +
                  "a Mongo replica set");
    }
    // We make two separate connections to Mongo. The Node Mongo driver
    // implements a naive round-robin connection pool: each "connection" is a
    // pool of several (5 by default) TCP connections, and each request is
    // rotated through the pools. Tailable cursor queries block on the server
    // until there is some data to return (or until a few seconds have
    // passed). So if the connection pool used for tailing cursors is the same
    // pool used for other queries, the other queries will be delayed by seconds
    // 1/5 of the time.
    //
    // The tail connection will only ever be running a single tail command, so
    // it only needs to make one underlying TCP connection.
    self._oplogTailConnection = new MongoConnection(
      self._oplogUrl, {maxPoolSize: 1});
    // XXX better docs, but: it's to get monotonic results
    // XXX is it safe to say "if there's an in flight query, just use its
    // results"? I don't think so but should consider that
    self._oplogLastEntryConnection = new MongoConnection(
      self._oplogUrl, {maxPoolSize: 1});
    // Confirm we're connected to a replica set member (checked via setName
    // below) before tailing.
    const isMasterDoc = await Meteor.promisify((cb) => {
      self._oplogLastEntryConnection.db.admin().command({ismaster: 1}, cb);
    })();
    if (!(isMasterDoc && isMasterDoc.setName)) {
      throw Error("$MONGO_OPLOG_URL must be set to the 'local' database of " +
                  "a Mongo replica set");
    }
    // Find the last oplog entry.
    var lastOplogEntry = await self._oplogLastEntryConnection.findOne(
      OPLOG_COLLECTION, {}, {sort: {$natural: -1}, fields: {ts: 1}});
    var oplogSelector = Object.assign({}, self._baseOplogSelector);
    if (lastOplogEntry) {
      // Start after the last entry that currently exists.
      oplogSelector.ts = {$gt: lastOplogEntry.ts};
      // If there are any calls to callWhenProcessedLatest before any other
      // oplog entries show up, allow callWhenProcessedLatest to call its
      // callback immediately.
      self._lastProcessedTS = lastOplogEntry.ts;
    }
    var cursorDescription = new CursorDescription(
      OPLOG_COLLECTION, oplogSelector, {tailable: true});
    // Start tailing the oplog.
    //
    // We restart the low-level oplog query every 30 seconds if we didn't get a
    // doc. This is a workaround for #8598: the Node Mongo driver has at least
    // one bug that can lead to query callbacks never getting called (even with
    // an error) when leadership failover occur.
    self._tailHandle = self._oplogTailConnection.tail(
      cursorDescription,
      function (doc) {
        self._entryQueue.push(doc);
        self._maybeStartWorker();
      },
      TAIL_TIMEOUT
    );
    self._readyPromiseResolver();
  },
  // Ensures at most one background worker is draining _entryQueue. The worker
  // fires a crossbar trigger per entry, unrolls applyOps transactions, and —
  // when the queue exceeds TOO_FAR_BEHIND — drops the queue and notifies
  // onSkippedEntries listeners instead of replaying everything.
  _maybeStartWorker: function () {
    var self = this;
    if (self._workerActive) return;
    self._workerActive = true;
    Meteor.defer(function () {
      // May be called recursively in case of transactions.
      function handleDoc(doc) {
        if (doc.ns === "admin.$cmd") {
          if (doc.o.applyOps) {
            // This was a successful transaction, so we need to apply the
            // operations that were involved.
            let nextTimestamp = doc.ts;
            doc.o.applyOps.forEach(op => {
              // Inner ops may lack a ts; synthesize increasing ones.
              // See https://github.com/meteor/meteor/issues/10420.
              if (!op.ts) {
                op.ts = nextTimestamp;
                nextTimestamp = nextTimestamp.add(Long.ONE);
              }
              handleDoc(op);
            });
            return;
          }
          throw new Error("Unknown command " + EJSON.stringify(doc));
        }
        const trigger = {
          dropCollection: false,
          dropDatabase: false,
          op: doc,
        };
        if (typeof doc.ns === "string" &&
            doc.ns.startsWith(self._dbName + ".")) {
          trigger.collection = doc.ns.slice(self._dbName.length + 1);
        }
        // Is it a special command and the collection name is hidden
        // somewhere in operator?
        if (trigger.collection === "$cmd") {
          if (doc.o.dropDatabase) {
            delete trigger.collection;
            trigger.dropDatabase = true;
          } else if (_.has(doc.o, "drop")) {
            trigger.collection = doc.o.drop;
            trigger.dropCollection = true;
            trigger.id = null;
          } else {
            throw Error("Unknown command " + EJSON.stringify(doc));
          }
        } else {
          // All other ops have an id.
          trigger.id = idForOp(doc);
        }
        self._crossbar.fire(trigger);
      }
      try {
        while (! self._stopped &&
               ! self._entryQueue.isEmpty()) {
          // Are we too far behind? Just tell our observers that they need to
          // repoll, and drop our queue.
          if (self._entryQueue.length > TOO_FAR_BEHIND) {
            var lastEntry = self._entryQueue.pop();
            self._entryQueue.clear();
            self._onSkippedEntriesHook.each(function (callback) {
              callback();
              return true;
            });
            // Free any waitUntilCaughtUp() calls that were waiting for us to
            // pass something that we just skipped.
            self._setLastProcessedTS(lastEntry.ts);
            continue;
          }
          const doc = self._entryQueue.shift();
          // Fire trigger(s) for this doc.
          handleDoc(doc);
          // Now that we've processed this operation, process pending
          // sequencers.
          if (doc.ts) {
            self._setLastProcessedTS(doc.ts);
          } else {
            throw Error("oplog entry without ts: " + EJSON.stringify(doc));
          }
        }
      } finally {
        // Always clear the flag so a later push can start a new worker.
        self._workerActive = false;
      }
    });
  },
_setLastProcessedTS: function (ts) {
var self = this;
self._lastProcessedTS = ts;
while (!_.isEmpty(self._catchingUpResolvers) && self._catchingUpResolvers[0].ts.lessThanOrEqual(self._lastProcessedTS)) {
var sequencer = self._catchingUpResolvers.shift();
sequencer.resolver();
}
},
  // Methods used in tests to dynamically change TOO_FAR_BEHIND.
  _defineTooFarBehind: function(value) {
    TOO_FAR_BEHIND = value;
  },
  // Restores TOO_FAR_BEHIND to its environment-derived default.
  _resetTooFarBehind: function() {
    TOO_FAR_BEHIND = process.env.METEOR_OPLOG_TOO_FAR_BEHIND || 2000;
  }
});

View File

@@ -1,193 +0,0 @@
// Probe collection: we never insert into it here, we only build cursors
// against it to see which ones the oplog observe driver claims to support.
var OplogCollection = new Mongo.Collection("oplog-" + Random.id());

// For a matrix of selectors/options, checks whether observeChanges ends up
// backed by the oplog driver (expected=true) or falls back to polling.
Tinytest.addAsync("mongo-livedata - oplog - cursorSupported", async function (test) {
  var oplogEnabled =
    !!MongoInternals.defaultRemoteCollectionDriver().mongo._oplogHandle;
  var supported = async function (expected, selector, options) {
    var cursor = OplogCollection.find(selector, options);
    var handle = await cursor.observeChanges({
      added: function () {
      }
    });
    // If there's no oplog at all, we shouldn't ever use it.
    if (!oplogEnabled)
      expected = false;
    test.equal(!!handle._multiplexer._observeDriver._usesOplog, expected);
    await handle.stop();
  };
  await supported(true, "asdf");
  await supported(true, 1234);
  await supported(true, new Mongo.ObjectID());
  await supported(true, { _id: "asdf" });
  await supported(true, { _id: 1234 });
  await supported(true, { _id: new Mongo.ObjectID() });
  await supported(true, {
    foo: "asdf",
    bar: 1234,
    baz: new Mongo.ObjectID(),
    eeney: true,
    miney: false,
    moe: null
  });
  await supported(true, {});
  await supported(true, { $and: [{ foo: "asdf" }, { bar: "baz" }] });
  await supported(true, { foo: { x: 1 } });
  await supported(true, { foo: { $gt: 1 } });
  await supported(true, { foo: [1, 2, 3] });
  // No $where.
  await supported(false, { $where: "xxx" });
  await supported(false, { $and: [{ foo: "adsf" }, { $where: "xxx" }] });
  // No geoqueries.
  await supported(false, { x: { $near: [1, 1] } });
  // Nothing Minimongo doesn't understand. (Minimongo happens to fail to
  // implement $elemMatch inside $all which MongoDB supports.)
  await supported(false, { x: { $all: [{ $elemMatch: { y: 2 } }] } });
  await supported(true, {}, { sort: { x: 1 } });
  await supported(true, {}, { sort: { x: 1 }, limit: 5 });
  await supported(false, {}, { sort: { $natural: 1 }, limit: 5 });
  await supported(false, {}, { limit: 5 });
  await supported(false, {}, { skip: 2, limit: 5 });
  await supported(false, {}, { skip: 2 });
});
// Regression test for oplog entry skipping: when the entry queue exceeds
// TOO_FAR_BEHIND, the tailer must drop its queue, notify onSkippedEntries,
// and observers must still converge on the correct state.
process.env.MONGO_OPLOG_URL && testAsyncMulti(
  "mongo-livedata - oplog - entry skipping", [
    async function (test, expect) {
      var self = this;
      self.collectionName = Random.id();
      self.collection = new Mongo.Collection(self.collectionName);
      await self.collection.createIndex({ species: 1 });
      // Fill collection with lots of irrelevant objects (red cats) and some
      // relevant ones (blue dogs).
      // After updating to mongo 3.2 with the 2.1.18 driver it was no longer
      // possible to make this test fail with TOO_FAR_BEHIND = 2000.
      // The documents waiting to be processed would hardly go beyond 1000
      // using mongo 3.2 with WiredTiger
      MongoInternals.defaultRemoteCollectionDriver()
        .mongo._oplogHandle._defineTooFarBehind(500);
      self.IRRELEVANT_SIZE = 15000;
      self.RELEVANT_SIZE = 10;
      var docs = [];
      var i;
      for (i = 0; i < self.IRRELEVANT_SIZE; ++i) {
        docs.push({
          name: "cat " + i,
          species: 'cat',
          color: 'red'
        });
      }
      for (i = 0; i < self.RELEVANT_SIZE; ++i) {
        docs.push({
          name: "dog " + i,
          species: 'dog',
          color: 'blue'
        });
      }
      // XXX implement bulk insert #1255
      var rawCollection = self.collection.rawCollection();
      rawCollection.insertMany(docs, Meteor.bindEnvironment(expect(function (err) {
        test.isFalse(err);
      })));
    },
    async function (test, expect) {
      var self = this;
      test.equal((await self.collection.find().count()),
        self.IRRELEVANT_SIZE + self.RELEVANT_SIZE);
      var blueDog5Id = null;
      var gotSpot = false;
      let resolver; const gotSpotPromise = new Promise(resolve => resolver = resolve)
      let resolver2; const gotSpotPromise2 = new Promise(resolve => resolver2 = resolve)
      self.subHandle = await self.collection.find({
        species: 'dog',
        color: 'blue',
      }).observeChanges({
        added(id, fields) {
          if (fields.name === 'dog 5') {
            blueDog5Id = id
            resolver2()
          }
        },
        changed(id, fields) {
          if (EJSON.equals(id, blueDog5Id) &&
            fields.name === 'spot') {
            gotSpot = true;
            resolver();
          }
        },
      });
      test.isTrue(self.subHandle._multiplexer._observeDriver._usesOplog);
      self.skipped = false;
      self.skipHandle = MongoInternals.defaultRemoteCollectionDriver()
        .mongo._oplogHandle.onSkippedEntries(function () {
          self.skipped = true;
        });
      // Dye all the cats blue. This adds lots of oplog entries that look like
      // they might in theory be relevant (since they say "something you didn't
      // know about is now blue", and who knows, maybe it's a dog) which puts
      // the OplogObserveDriver into FETCHING mode, which performs poorly.
      await self.collection.update({ species: 'cat' },
        { $set: { color: 'blue' } },
        { multi: true });
      test.isTrue(blueDog5Id);
      test.isFalse(gotSpot);
      await self.collection.update(blueDog5Id, { $set: { name: 'spot' } });
      // We ought to see the spot change soon!
      return Promise.all([gotSpotPromise, gotSpotPromise2]);
    },
    async function (test, expect) {
      var self = this;
      test.isTrue(self.skipped);
      // This gets the TOO_FAR_BEHIND back to its initial value
      MongoInternals.defaultRemoteCollectionDriver()
        .mongo._oplogHandle._resetTooFarBehind();
      await self.skipHandle.stop();
      await self.subHandle.stop();
      await self.collection.remove({});
    }
  ]
);
// Verifies that _onFailover callbacks fire when the replica set primary is
// forced to step down.
Meteor.isServer && Tinytest.addAsync(
  "mongo-livedata - oplog - _onFailover",
  async function (test) {
    const driver = MongoInternals.defaultRemoteCollectionDriver();
    const failoverPromise = new Promise(resolve => {
      driver.mongo._onFailover(() => {
        resolve(true);
      });
    });
    // Force the current primary to step down, triggering a failover.
    await driver.mongo.db.admin().command({
      replSetStepDown: 1,
      force: true
    });
    try {
      const result = await failoverPromise;
      test.isTrue(result);
    } catch (e) {
      test.fail({ message: "Error waiting on Promise", value: JSON.stringify(e) });
    }
  });

View File

@@ -1,124 +0,0 @@
// Converter of the new MongoDB Oplog format (>=5.0) to the one that Meteor
// handles well, i.e., `$set` and `$unset`. The new format is completely new,
// and looks as follows:
//
// { $v: 2, diff: Diff }
//
// where `Diff` is a recursive structure:
//
// {
// // Nested updates (sometimes also represented with an s-field).
// // Example: `{ $set: { 'foo.bar': 1 } }`.
// i: { <key>: <value>, ... },
//
// // Top-level updates.
// // Example: `{ $set: { foo: { bar: 1 } } }`.
// u: { <key>: <value>, ... },
//
// // Unsets.
// // Example: `{ $unset: { foo: '' } }`.
// d: { <key>: false, ... },
//
// // Array operations.
// // Example: `{ $push: { foo: 'bar' } }`.
// s<key>: { a: true, u<index>: <value>, ... },
// ...
//
// // Nested operations (sometimes also represented in the `i` field).
// // Example: `{ $set: { 'foo.bar': 1 } }`.
// s<key>: Diff,
// ...
// }
//
// (all fields are optional).
// Joins a path segment onto a dotted prefix; an empty prefix yields the bare key.
function join(prefix, key) {
  if (!prefix) {
    return key;
  }
  return `${prefix}.${key}`;
}

// Keys allowed inside an array-operator diff: the literal marker 'a' or a
// positional update key like 'u0', 'u12', ...
const arrayOperatorKeyRegex = /^(a|u\d+)$/;

function isArrayOperatorKey(field) {
  return arrayOperatorKeyRegex.test(field);
}

// An array operator is `{ a: true }` plus only positional u<index> keys.
function isArrayOperator(operator) {
  if (operator.a !== true) {
    return false;
  }
  return Object.keys(operator).every(isArrayOperatorKey);
}

// Recursively flattens `source` into dotted-path assignments on `target`,
// rooted at `prefix`. Arrays, primitives, null, and empty objects are treated
// as leaves and assigned as-is (empty objects keep their identity).
function flattenObjectInto(target, source, prefix) {
  const isLeaf =
    Array.isArray(source) || typeof source !== 'object' || source === null;
  if (isLeaf) {
    target[prefix] = source;
    return;
  }
  const entries = Object.entries(source);
  if (entries.length === 0) {
    target[prefix] = source;
    return;
  }
  for (const [key, value] of entries) {
    flattenObjectInto(target, value, join(prefix, key));
  }
}
// Set OPLOG_CONVERTER_DEBUG to log every conversion step (hoisted to module
// load since this runs on the oplog hot path).
const logDebugMessages = !!process.env.OPLOG_CONVERTER_DEBUG;

// Recursively walks a v2 `diff` and accumulates the equivalent v1
// `$set`/`$unset` modifiers onto `oplogEntry`. `prefix` is the dotted path
// of the field the current (sub-)diff applies to ('' at the top level).
function convertOplogDiff(oplogEntry, diff, prefix) {
  if (logDebugMessages) {
    console.log(`convertOplogDiff(${JSON.stringify(oplogEntry)}, ${JSON.stringify(diff)}, ${JSON.stringify(prefix)})`);
  }
  Object.entries(diff).forEach(([diffKey, value]) => {
    if (diffKey === 'd') {
      // Handle `$unset`s.
      oplogEntry.$unset ??= {};
      Object.keys(value).forEach(key => {
        oplogEntry.$unset[join(prefix, key)] = true;
      });
    } else if (diffKey === 'i') {
      // Handle (potentially) nested `$set`s.
      oplogEntry.$set ??= {};
      flattenObjectInto(oplogEntry.$set, value, prefix);
    } else if (diffKey === 'u') {
      // Handle flat `$set`s.
      oplogEntry.$set ??= {};
      Object.entries(value).forEach(([key, value]) => {
        oplogEntry.$set[join(prefix, key)] = value;
      });
    } else {
      // Handle s-fields.
      const key = diffKey.slice(1);
      if (isArrayOperator(value)) {
        // Array operator: u<index> sets the element; a null value means the
        // element was removed, which maps to $unset of that position.
        Object.entries(value).forEach(([position, value]) => {
          if (position === 'a') {
            return;
          }
          const positionKey = join(join(prefix, key), position.slice(1));
          if (value === null) {
            oplogEntry.$unset ??= {};
            oplogEntry.$unset[positionKey] = true;
          } else {
            oplogEntry.$set ??= {};
            oplogEntry.$set[positionKey] = value;
          }
        });
      } else if (key) {
        // Nested object.
        convertOplogDiff(oplogEntry, value, join(prefix, key));
      }
    }
  });
}
// Converts a MongoDB v2 delta-style oplog update entry ({ $v: 2, diff }) into
// the v1 $set/$unset shape the rest of the oplog driver understands. Entries
// that are not v2 diffs are returned untouched.
export function oplogV2V1Converter(oplogEntry) {
  const isV2Diff = oplogEntry.$v === 2 && !!oplogEntry.diff;
  if (!isV2Diff) {
    return oplogEntry;
  }
  const converted = { $v: 2 };
  convertOplogDiff(converted, oplogEntry.diff, '');
  return converted;
}

View File

@@ -1,86 +0,0 @@
import { oplogV2V1Converter } from './oplog_v2_converter';
// [input, expectedOutput] pairs for oplogV2V1Converter. Inputs are v2 oplog
// entries ({ $v: 2, diff }); outputs are the equivalent v1 $set/$unset form.
// Entries that already carry $set (not v2 diffs) must pass through unchanged.
const cases = [
  [
    { $v: 2, diff: { scustom: { sEJSON$value: { u: { EJSONtail: 'd' } } } } },
    { $v: 2, $set: { 'custom.EJSON$value.EJSONtail': 'd' } },
  ],
  [
    { $v: 2, diff: { u: { d: '2', oi: 'asdas' } } },
    { $v: 2, $set: { d: '2', oi: 'asdas' } },
  ],
  [
    { $v: 2, diff: { sasd: { a: true, u0: 2 } } },
    { $v: 2, $set: { 'asd.0': 2 } },
  ],
  [
    { $v: 2, diff: { sasd: { a: true, u0: null } } },
    { $v: 2, $unset: { 'asd.0': true } },
  ],
  [
    { $v: 2, diff: { i: { a: { b: 2 } } } },
    { $v: 2, $set: { 'a.b': 2 } },
  ],
  [
    { $v: 2, diff: { u: { count: 1 }, i: { nested: { state: {} } } } },
    { $v: 2, $set: { 'nested.state': {}, count: 1 } },
  ],
  [
    { $v: 2, diff: { sa: { i: { b: 3, c: 1 } } } },
    { $v: 2, $set: { 'a.b': 3, 'a.c': 1 } },
  ],
  [
    { $v: 2, diff: { sa: { d: { b: false } } } },
    { $v: 2, $unset: { 'a.b': true } },
  ],
  [
    { $v: 2, diff: { u: { c: 'bar' }, sb: { a: true, u0: 2 } } },
    { $v: 2, $set: { 'b.0': 2, c: 'bar' } },
  ],
  [
    { $v: 2, diff: { sservices: { sresume: { u: { loginTokens: [] } } } } },
    { $v: 2, $set: { 'services.resume.loginTokens': [] } },
  ],
  [
    { $v: 2, diff: { i: { tShirt: { sizes: ['small', 'medium', 'large'] } } } },
    { $v: 2, $set: { 'tShirt.sizes': ['small', 'medium', 'large'] } },
  ],
  [
    { $v: 2, diff: { slist: { a: true, u3: 'i', u4: 'h' } } },
    { $v: 2, $set: { 'list.3': 'i', 'list.4': 'h' } },
  ],
  [
    { $v: 2, $set: { 'services.resume.loginTokens': [ { when: '2022-01-06T23:58:35.704Z', hashedToken: 'RlalW6ZSvPPJLH6sW3B1b+vrUnPy+Ox5oMv3O3S7jwg=' }, { when: '2022-01-06T23:58:35.704Z', hashedToken: 'DWG0Qw/+nZ48wAIhKR2r9H41wLpth9BM+Br6aZsl2bU=' }, ], }, },
    { $v: 2, $set: { 'services.resume.loginTokens': [ { when: '2022-01-06T23:58:35.704Z', hashedToken: 'RlalW6ZSvPPJLH6sW3B1b+vrUnPy+Ox5oMv3O3S7jwg=' }, { when: '2022-01-06T23:58:35.704Z', hashedToken: 'DWG0Qw/+nZ48wAIhKR2r9H41wLpth9BM+Br6aZsl2bU=' }, ], }, },
  ],
  [
    { $v: 2, diff: { sobject: { u: { array: ['2', '2', '4', '3'] } } } },
    { $v: 2, $set: { 'object.array': ['2', '2', '4', '3'] } },
  ],
  [
    { $v: 2, diff: { slayout: { sjourneyStepIds: { sj4aqp3tiK6xCPCYu8: { a: true, u2: 'zTkxivNrKuBi2iJ2m' } } } } },
    { $v: 2, $set: { 'layout.journeyStepIds.j4aqp3tiK6xCPCYu8.2': 'zTkxivNrKuBi2iJ2m' } },
  ],
  [
    { $v: 2, diff: { sarray: { a: true, s2: { u: { a: 'something' } } } } },
    { $v: 2, $set: { 'array.2.a': 'something' } },
  ],
  [
    { $v: 2, diff: { u: { params: { d: 5 } } } },
    { $v: 2, $set: { params: { d: 5 } } },
  ],
  [
    { $v: 2, diff: { u: { params: { a: 5, d: 5 } } } },
    { $v: 2, $set: { params: { a: 5, d: 5 } } },
  ],
  [
    { $v: 2, diff: { u: { params: { e: { _str: '5f953cde8ceca90030bdb86f' } } } } },
    { $v: 2, $set: { params: { e: { _str: '5f953cde8ceca90030bdb86f' } } } },
  ],
];
// Runs the converter over every [input, expectedOutput] fixture pair.
Tinytest.add('oplog - v2/v1 conversion', function (test) {
  for (const [input, output] of cases) {
    test.equal(oplogV2V1Converter(input), output);
  }
});

View File

@@ -1,105 +0,0 @@
// XXX We should revisit how we factor MongoDB support into (1) the
// server-side node.js driver [which you might use independently of
// livedata, after all], (2) minimongo [ditto], and (3) Collection,
// which is the class that glues the two of them to Livedata, but also
// is generally the "public interface for newbies" to Mongo in the
// Meteor universe. We want to allow the components to be used
// independently, but we don't want to overwhelm the user with
// minutiae.
Package.describe({
  summary: "Adaptor for using MongoDB and Minimongo over DDP",
  version: '1.16.0'
});
Npm.depends({
  "mongodb-uri": "0.9.7"
});
// Don't ship the Mongo driver's test suite in the bundle.
Npm.strip({
  mongodb: ["test/"]
});
Package.onUse(function (api) {
  api.use('npm-mongo', 'server');
  api.use('allow-deny');
  api.use([
    'random',
    'ejson',
    'minimongo',
    'ddp',
    'tracker',
    'diff-sequence',
    'mongo-id',
    'check',
    'ecmascript',
    'mongo-dev-server',
    'logging'
  ]);
  // Make weak use of Decimal type on client
  api.use('mongo-decimal', 'client', {weak: true});
  api.use('mongo-decimal', 'server');
  api.use('underscore', 'server');
  // Binary Heap data structure is used to optimize oplog observe driver
  // performance.
  api.use('binary-heap', 'server');
  // Allow us to detect 'insecure'.
  api.use('insecure', {weak: true});
  // Allow us to detect 'autopublish', and publish collections if it's loaded.
  api.use('autopublish', 'server', {weak: true});
  // Allow us to detect 'disable-oplog', which turns off oplog tailing for your
  // app even if it's configured in the environment. (This package will be
  // probably be removed before 1.0.)
  api.use('disable-oplog', 'server', {weak: true});
  // defaultRemoteCollectionDriver gets its deployConfig from something that is
  // (for questionable reasons) initialized by the webapp package.
  api.use('webapp', 'server', {weak: true});
  // If the facts package is loaded, publish some statistics.
  api.use('facts-base', 'server', {weak: true});
  api.use('callback-hook', 'server');
  // Stuff that should be exposed via a real API, but we haven't yet.
  api.export('MongoInternals', 'server');
  api.export("Mongo");
  api.export('ObserveMultiplexer', 'server', {testOnly: true});
  // Server-side driver internals; load order matters for these globals.
  api.addFiles(['mongo_driver.js', 'oplog_tailing.js',
                'observe_multiplex.js', 'doc_fetcher.js',
                'polling_observe_driver.js','oplog_observe_driver.js', 'oplog_v2_converter.js'],
               'server');
  api.addFiles('local_collection_driver.js', ['client', 'server']);
  api.addFiles('remote_collection_driver.js', 'server');
  api.addFiles('collection.js', ['client', 'server']);
  api.addFiles('connection_options.js', 'server');
});
Package.onTest(function (api) {
  api.use('mongo');
  api.use('check');
  api.use('ecmascript');
  api.use('npm-mongo', 'server');
  api.use(['tinytest', 'underscore', 'test-helpers', 'ejson', 'random',
           'ddp', 'base64']);
  // XXX test order dependency: the allow_tests "partial allow" test
  // fails if it is run before mongo_livedata_tests.
  api.addFiles('mongo_livedata_tests.js', ['client', 'server']);
  api.addFiles('upsert_compatibility_test.js', 'server');
  api.addFiles('allow_tests.js', ['client', 'server']);
  api.addFiles('collection_tests.js', ['client', 'server']);
  api.addFiles('collection_async_tests.js', ['client', 'server']);
  api.addFiles('observe_changes_tests.js', ['client', 'server']);
  api.addFiles('oplog_tests.js', 'server');
  api.addFiles('oplog_v2_converter_tests.js', 'server');
  api.addFiles('doc_fetcher_tests.js', 'server');
});

View File

@@ -1,227 +0,0 @@
// Minimum time (ms) between polls triggered by write notifications; override
// with METEOR_POLLING_THROTTLE_MS (see _ensurePollIsScheduled below).
var POLLING_THROTTLE_MS = +process.env.METEOR_POLLING_THROTTLE_MS || 50;
// Default polling interval in ms; override with METEOR_POLLING_INTERVAL_MS.
var POLLING_INTERVAL_MS = +process.env.METEOR_POLLING_INTERVAL_MS || 10 * 1000;
PollingObserveDriver = function (options) {
var self = this;
self._cursorDescription = options.cursorDescription;
self._mongoHandle = options.mongoHandle;
self._ordered = options.ordered;
self._multiplexer = options.multiplexer;
self._stopCallbacks = [];
self._stopped = false;
self._cursor = self._mongoHandle._createSynchronousCursor(
self._cursorDescription);
// previous results snapshot. on each poll cycle, diffs against
// results drives the callbacks.
self._results = null;
// The number of _pollMongo calls that have been added to self._taskQueue but
// have not started running. Used to make sure we never schedule more than one
// _pollMongo (other than possibly the one that is currently running). It's
// also used by _suspendPolling to pretend there's a poll scheduled. Usually,
// it's either 0 (for "no polls scheduled other than maybe one currently
// running") or 1 (for "a poll scheduled that isn't running yet"), but it can
// also be 2 if incremented by _suspendPolling.
self._pollsScheduledButNotStarted = 0;
self._pendingWrites = []; // people to notify when polling completes
// Make sure to create a separately throttled function for each
// PollingObserveDriver object.
self._ensurePollIsScheduled = _.throttle(
self._unthrottledEnsurePollIsScheduled,
self._cursorDescription.options.pollingThrottleMs || POLLING_THROTTLE_MS /* ms */);
// XXX figure out if we still need a queue
self._taskQueue = new Meteor._SynchronousQueue();
var listenersHandle = listenAll(
self._cursorDescription, function (notification) {
// When someone does a transaction that might affect us, schedule a poll
// of the database. If that transaction happens inside of a write fence,
// block the fence until we've polled and notified observers.
var fence = DDPServer._CurrentWriteFence.get();
if (fence)
self._pendingWrites.push(fence.beginWrite());
// Ensure a poll is scheduled... but if we already know that one is,
// don't hit the throttled _ensurePollIsScheduled function (which might
// lead to us calling it unnecessarily in <pollingThrottleMs> ms).
if (self._pollsScheduledButNotStarted === 0)
self._ensurePollIsScheduled();
}
);
self._stopCallbacks.push(function () { listenersHandle.stop(); });
// every once and a while, poll even if we don't think we're dirty, for
// eventual consistency with database writes from outside the Meteor
// universe.
//
// For testing, there's an undocumented callback argument to observeChanges
// which disables time-based polling and gets called at the beginning of each
// poll.
if (options._testOnlyPollCallback) {
self._testOnlyPollCallback = options._testOnlyPollCallback;
} else {
var pollingInterval =
self._cursorDescription.options.pollingIntervalMs ||
self._cursorDescription.options._pollingInterval || // COMPAT with 1.2
POLLING_INTERVAL_MS;
var intervalHandle = Meteor.setInterval(
_.bind(self._ensurePollIsScheduled, self), pollingInterval);
self._stopCallbacks.push(function () {
Meteor.clearInterval(intervalHandle);
});
}
};
_.extend(PollingObserveDriver.prototype, {
  // Kick off the first poll immediately and record this driver in the
  // server facts (if the facts-base package is loaded).
  _init: async function () {
    // Make sure we actually poll soon!
    await this._unthrottledEnsurePollIsScheduled();

    Package['facts-base'] && Package['facts-base'].Facts.incrementServerFact(
      "mongo-livedata", "observe-drivers-polling", 1);
  },

  // Schedule a poll on the task queue unless one is already pending.
  // This is always called through _.throttle (except once at startup).
  _unthrottledEnsurePollIsScheduled: function () {
    var self = this;
    if (self._pollsScheduledButNotStarted > 0)
      return;
    ++self._pollsScheduledButNotStarted;
    // NOTE(review): _pollMongo is async but its promise is not awaited
    // here; presumably the task queue serializes tasks regardless —
    // confirm against Meteor._SynchronousQueue semantics.
    self._taskQueue.queueTask(function () {
      self._pollMongo();
    });
  },

  // test-only interface for controlling polling.
  //
  // _suspendPolling blocks until any currently running and scheduled polls are
  // done, and prevents any further polls from being scheduled. (new
  // ObserveHandles can be added and receive their initial added callbacks,
  // though.)
  //
  // _resumePolling immediately polls, and allows further polls to occur.
  _suspendPolling: function() {
    var self = this;
    // Pretend that there's another poll scheduled (which will prevent
    // _ensurePollIsScheduled from queueing any more polls).
    ++self._pollsScheduledButNotStarted;
    // Now block until all currently running or scheduled polls are done.
    self._taskQueue.runTask(function() {});

    // Confirm that there is only one "poll" (the fake one we're pretending to
    // have) scheduled.
    if (self._pollsScheduledButNotStarted !== 1)
      throw new Error("_pollsScheduledButNotStarted is " +
                      self._pollsScheduledButNotStarted);
  },
  _resumePolling: function() {
    var self = this;
    // We should be in the same state as in the end of _suspendPolling.
    if (self._pollsScheduledButNotStarted !== 1)
      throw new Error("_pollsScheduledButNotStarted is " +
                      self._pollsScheduledButNotStarted);
    // Run a poll synchronously (which will counteract the
    // ++_pollsScheduledButNotStarted from _suspendPolling).
    self._taskQueue.runTask(function () {
      self._pollMongo();
    });
  },

  // Run one poll cycle: fetch the query's current results and diff them
  // against the previous snapshot, pushing changes into the multiplexer.
  async _pollMongo() {
    var self = this;
    --self._pollsScheduledButNotStarted;

    if (self._stopped)
      return;

    var first = false;
    var newResults;
    var oldResults = self._results;
    if (!oldResults) {
      first = true;
      // XXX maybe use OrderedDict instead?
      oldResults = self._ordered ? [] : new LocalCollection._IdMap;
    }

    self._testOnlyPollCallback && self._testOnlyPollCallback();

    // Save the list of pending writes which this round will commit.
    var writesForCycle = self._pendingWrites;
    self._pendingWrites = [];

    // Get the new query results. (This yields.)
    try {
      newResults = await self._cursor.getRawObjects(self._ordered);
    } catch (e) {
      if (first && typeof(e.code) === 'number') {
        // This is an error document sent to us by mongod, not a connection
        // error generated by the client. And we've never seen this query work
        // successfully. Probably it's a bad selector or something, so we should
        // NOT retry. Instead, we should halt the observe (which ends up calling
        // `stop` on us).
        self._multiplexer.queryError(
          new Error(
            "Exception while polling query " +
              JSON.stringify(self._cursorDescription) + ": " + e.message));
        return;
      }

      // getRawObjects can throw if we're having trouble talking to the
      // database. That's fine --- we will repoll later anyway. But we should
      // make sure not to lose track of this cycle's writes.
      // (It also can throw if there's just something invalid about this query;
      // unfortunately the ObserveDriver API doesn't provide a good way to
      // "cancel" the observe from the inside in this case.
      Array.prototype.push.apply(self._pendingWrites, writesForCycle);
      Meteor._debug("Exception while polling query " +
                    JSON.stringify(self._cursorDescription), e);
      return;
    }

    // Run diffs.
    if (!self._stopped) {
      LocalCollection._diffQueryChanges(
        self._ordered, oldResults, newResults, self._multiplexer);
    }

    // Signals the multiplexer to allow all observeChanges calls that share this
    // multiplexer to return. (This happens asynchronously, via the
    // multiplexer's queue.)
    if (first)
      self._multiplexer.ready();

    // Replace self._results atomically. (This assignment is what makes `first`
    // stay through on the next cycle, so we've waited until after we've
    // committed to ready-ing the multiplexer.)
    self._results = newResults;

    // Once the ObserveMultiplexer has processed everything we've done in this
    // round, mark all the writes which existed before this call as
    // committed. (If new writes have shown up in the meantime, there'll
    // already be another _pollMongo task scheduled.)
    self._multiplexer.onFlush(function () {
      _.each(writesForCycle, function (w) {
        w.committed();
      });
    });
  },

  // Stop polling: run the registered stop callbacks, release pending
  // write fences, and decrement the server fact counter.
  stop: function () {
    var self = this;
    self._stopped = true;
    const stopCallbacksCaller = async function(c) {
      await c();
    };
    // NOTE(review): these async callbacks are fired without awaiting their
    // promises, so stop() returns before they all settle — confirm callers
    // do not depend on their completion.
    _.each(self._stopCallbacks, stopCallbacksCaller);

    // Release any write fences that are waiting on us.
    _.each(self._pendingWrites, function (w) {
      w.committed();
    });
    Package['facts-base'] && Package['facts-base'].Facts.incrementServerFact(
      "mongo-livedata", "observe-drivers-polling", -1);
  }
});

View File

@@ -1,48 +0,0 @@
// Collection driver backed by a real MongoDB server: holds the single
// MongoConnection that every collection opened through it shares.
MongoInternals.RemoteCollectionDriver = function (mongo_url, options) {
  this.mongo = new MongoConnection(mongo_url, options);
};
Object.assign(MongoInternals.RemoteCollectionDriver.prototype, {
  // Expose the collection-level operations of the underlying
  // MongoConnection, each pre-bound to the given collection name so the
  // caller never has to repeat it.
  open: function (name) {
    var self = this;
    var methods = ['find', 'findOne', 'insert', 'update', 'upsert',
      'remove', '_ensureIndex', 'createIndex', '_dropIndex',
      '_createCappedCollection', 'dropCollection', 'rawCollection'];
    var ret = {};
    methods.forEach(function (m) {
      ret[m] = _.bind(self.mongo[m], self.mongo, name);
    });
    return ret;
  }
});
// Create the singleton RemoteCollectionDriver only on demand, so we
// only require Mongo configuration if it's actually used (eg, not if
// you're only trying to receive data from a remote DDP server.)
// Create the singleton RemoteCollectionDriver only on demand, so we
// only require Mongo configuration if it's actually used (eg, not if
// you're only trying to receive data from a remote DDP server.)
MongoInternals.defaultRemoteCollectionDriver = _.once(function () {
  var mongoUrl = process.env.MONGO_URL;
  if (! mongoUrl)
    throw new Error("MONGO_URL must be set in environment");

  var connectionOptions = {};
  if (process.env.MONGO_OPLOG_URL) {
    connectionOptions.oplogUrl = process.env.MONGO_OPLOG_URL;
  }

  const driver = new MongoInternals.RemoteCollectionDriver(
    mongoUrl, connectionOptions);

  // As many deployment tools, including Meteor Up, send requests to the app in
  // order to confirm that the deployment finished successfully, it's required
  // to know about a database connection problem before the app starts. Doing
  // so in a `Meteor.startup` is fine, as the `WebApp` handles requests only
  // after all are finished.
  Meteor.startup(async () => {
    await driver.mongo.client.connect();
  });

  return driver;
});

View File

@@ -1,151 +0,0 @@
Tinytest.addAsync('mongo livedata - native upsert - id type MONGO with MODIFIERS update', async function (test) {
  var collName = Random.id();
  var coll = new Mongo.Collection('native_upsert_'+collName, {idGeneration: 'MONGO'});

  // The insert must be awaited (it returns a Promise); otherwise the
  // upsert can race it, miss the document, and insert instead of update.
  await coll.insert({foo: 1});
  var result = await coll.upsert({foo: 1}, {$set: {foo:2}});
  var updated = await coll.findOne({foo: 2});

  // An upsert that matched should report only the affected count.
  test.equal(result.insertedId, undefined);
  test.equal(result.numberAffected, 1);
  test.isTrue(updated._id instanceof Mongo.ObjectID);
  delete updated['_id'];
  test.equal(EJSON.equals(updated, {foo: 2}), true);
});
Tinytest.addAsync('mongo livedata - native upsert - id type MONGO with MODIFIERS insert', async function (test) {
  // Upserting into an empty collection with a $set modifier should insert
  // a document built from the selector plus the modifier, with an ObjectID.
  const coll = new Mongo.Collection('native_upsert_' + Random.id(), {idGeneration: 'MONGO'});

  const result = await coll.upsert({foo: 1}, {$set: {bar: 2}});
  const doc = await coll.findOne({foo: 1});

  test.isTrue(result.insertedId !== undefined);
  test.equal(result.numberAffected, 1);
  test.isTrue(doc._id instanceof Mongo.ObjectID);
  test.equal(doc._id, result.insertedId);
  delete doc._id;
  test.equal(EJSON.equals(doc, {foo: 1, bar: 2}), true);
});
Tinytest.addAsync('mongo livedata - native upsert - id type MONGO PLAIN OBJECT update', async function (test) {
  var collName = Random.id();
  var coll = new Mongo.Collection('native_upsert_'+collName, {idGeneration: 'MONGO'});

  // The insert must be awaited (it returns a Promise); otherwise the
  // upsert can race it and insert a fresh document instead of replacing
  // the existing one.
  await coll.insert({foo: 1, baz: 42});
  var result = await coll.upsert({foo: 1}, {bar:2});
  var updated = await coll.findOne({bar: 2});

  // A plain-object upsert that matched replaces the document wholesale.
  test.isTrue(result.insertedId === undefined);
  test.equal(result.numberAffected, 1);
  test.isTrue(updated._id instanceof Mongo.ObjectID);
  delete updated['_id'];
  test.equal(EJSON.equals(updated, {bar: 2}), true);
});
Tinytest.addAsync('mongo livedata - native upsert - id type MONGO PLAIN OBJECT insert', async function (test) {
  // Upserting a plain (non-modifier) object into an empty collection
  // should insert exactly that object with a generated ObjectID.
  const coll = new Mongo.Collection('native_upsert_' + Random.id(), {idGeneration: 'MONGO'});

  const result = await coll.upsert({foo: 1}, {bar: 2});
  const doc = await coll.findOne({bar: 2});

  test.isTrue(result.insertedId !== undefined);
  test.equal(result.numberAffected, 1);
  test.isTrue(doc._id instanceof Mongo.ObjectID);
  test.isTrue(result.insertedId instanceof Mongo.ObjectID);
  test.equal(doc._id, result.insertedId);
  delete doc._id;
  test.equal(EJSON.equals(doc, {bar: 2}), true);
});
Tinytest.addAsync('mongo livedata - native upsert - id type STRING with MODIFIERS update', async function (test) {
  // With STRING id generation, a matching $set upsert updates in place
  // and reports no insertedId.
  const coll = new Mongo.Collection('native_upsert_' + Random.id(), {idGeneration: 'STRING'});

  await coll.insert({foo: 1});
  const result = await coll.upsert({foo: 1}, {$set: {foo: 2}});
  const doc = await coll.findOne({foo: 2});

  test.equal(result.insertedId, undefined);
  test.equal(result.numberAffected, 1);
  test.isTrue(typeof doc._id === 'string');
  delete doc._id;
  test.equal(EJSON.equals(doc, {foo: 2}), true);
});
Tinytest.addAsync('mongo livedata - native upsert - id type STRING with MODIFIERS insert', async function (test) {
  // Upsert-inserting with STRING id generation should yield a string _id
  // equal to the reported insertedId.
  const coll = new Mongo.Collection('native_upsert_' + Random.id(), {idGeneration: 'STRING'});

  const result = await coll.upsert({foo: 1}, {$set: {bar: 2}});
  const doc = await coll.findOne({foo: 1});

  test.isTrue(result.insertedId !== undefined);
  test.equal(result.numberAffected, 1);
  test.isTrue(typeof doc._id === 'string');
  test.equal(doc._id, result.insertedId);
  delete doc._id;
  test.equal(EJSON.equals(doc, {foo: 1, bar: 2}), true);
});
Tinytest.addAsync('mongo livedata - native upsert - id type STRING PLAIN OBJECT update', async function (test) {
  // A plain-object upsert that matches should replace the document
  // (dropping fields not present in the replacement) and keep its string _id.
  const coll = new Mongo.Collection('native_upsert_' + Random.id(), {idGeneration: 'STRING'});

  await coll.insert({foo: 1, baz: 42});
  const result = await coll.upsert({foo: 1}, {bar: 2});
  const doc = await coll.findOne({bar: 2});

  test.isTrue(result.insertedId === undefined);
  test.equal(result.numberAffected, 1);
  test.isTrue(typeof doc._id === 'string');
  delete doc._id;
  test.equal(EJSON.equals(doc, {bar: 2}), true);
});
Tinytest.addAsync('mongo livedata - native upsert - id type STRING PLAIN OBJECT insert', async function (test) {
  // Upsert-inserting a plain object with STRING id generation should
  // produce a string _id matching the reported insertedId.
  const coll = new Mongo.Collection('native_upsert_' + Random.id(), {idGeneration: 'STRING'});

  const result = await coll.upsert({foo: 1}, {bar: 2});
  const doc = await coll.findOne({bar: 2});

  test.isTrue(result.insertedId !== undefined);
  test.equal(result.numberAffected, 1);
  test.isTrue(typeof doc._id === 'string');
  test.equal(doc._id, result.insertedId);
  delete doc._id;
  test.equal(EJSON.equals(doc, {bar: 2}), true);
});
Tinytest.addAsync('mongo livedata - native upsert - MONGO passing id insert', async function (test) {
  // Even with MONGO id generation, an explicit string _id supplied in the
  // replacement document must be used verbatim for the inserted document.
  const coll = new Mongo.Collection('native_upsert_' + Random.id(), {idGeneration: 'MONGO'});

  const result = await coll.upsert({foo: 1}, {_id: 'meu id'});
  const doc = await coll.findOne({_id: 'meu id'});

  test.equal(result.insertedId, 'meu id');
  test.equal(result.numberAffected, 1);
  test.isTrue(typeof doc._id === 'string');
  test.equal(EJSON.equals(doc, {_id: 'meu id'}), true);
});

View File

@@ -13,7 +13,6 @@ import { normalizeProjection } from "./mongo_utils";
*/
Mongo = {};
console.log('Using package: mongo');
/**
* @summary Constructor for a Collection
* @locus Anywhere
@@ -320,33 +319,6 @@ Object.assign(Mongo.Collection.prototype, {
///
/// Main collection API
///
/**
* @summary Gets the number of documents matching the filter. For a fast count of the total documents in a collection see `estimatedDocumentCount`.
* @locus Anywhere
* @method countDocuments
* @memberof Mongo.Collection
* @instance
* @param {MongoSelector} [selector] A query describing the documents to count
* @param {Object} [options] All options are listed in [MongoDB documentation](https://mongodb.github.io/node-mongodb-native/4.11/interfaces/CountDocumentsOptions.html). Please note that not all of them are available on the client.
* @returns {Promise<number>}
*/
countDocuments(...args) {
return this._collection.countDocuments(...args);
},
/**
* @summary Gets an estimate of the count of documents in a collection using collection metadata. For an exact count of the documents in a collection see `countDocuments`.
* @locus Anywhere
* @method estimatedDocumentCount
* @memberof Mongo.Collection
* @instance
* @param {MongoSelector} [selector] A query describing the documents to count
* @param {Object} [options] All options are listed in [MongoDB documentation](https://mongodb.github.io/node-mongodb-native/4.11/interfaces/EstimatedDocumentCountOptions.html). Please note that not all of them are available on the client.
* @returns {Promise<number>}
*/
estimatedDocumentCount(...args) {
return this._collection.estimatedDocumentCount(...args);
},
_getFindSelector(args) {
if (args.length == 0) return {};
@@ -440,22 +412,22 @@ Object.assign(Mongo.Collection.prototype, {
});
Object.assign(Mongo.Collection, {
_publishCursor(cursor, sub, collection) {
var observeHandle = cursor.observeChanges(
{
added: function(id, fields) {
sub.added(collection, id, fields);
async _publishCursor(cursor, sub, collection) {
var observeHandle = await cursor.observeChanges(
{
added: function(id, fields) {
sub.added(collection, id, fields);
},
changed: function(id, fields) {
sub.changed(collection, id, fields);
},
removed: function(id) {
sub.removed(collection, id);
},
},
changed: function(id, fields) {
sub.changed(collection, id, fields);
},
removed: function(id) {
sub.removed(collection, id);
},
},
// Publications don't mutate the documents
// This is tested by the `livedata - publish callbacks clone` test
{ nonMutatingCallbacks: true }
// Publications don't mutate the documents
// This is tested by the `livedata - publish callbacks clone` test
{ nonMutatingCallbacks: true }
);
// We don't call sub.ready() here: it gets called in livedata_server, after
@@ -463,7 +435,7 @@ Object.assign(Mongo.Collection, {
// register stop callback (expects lambda w/ no args).
sub.onStop(function() {
observeHandle.stop();
return observeHandle.stop();
});
// return the observeHandle in case it needs to be stopped early
@@ -524,17 +496,7 @@ Object.assign(Mongo.Collection.prototype, {
// generating their result until the database has acknowledged
// them. In the future maybe we should provide a flag to turn this
// off.
/**
* @summary Insert a document in the collection. Returns its unique _id.
* @locus Anywhere
* @method insert
* @memberof Mongo.Collection
* @instance
* @param {Object} doc The document to insert. May not yet have an _id attribute, in which case Meteor will generate one for you.
* @param {Function} [callback] Optional. If present, called with an error object as the first argument and, if no error, the _id as the second.
*/
insert(doc, callback) {
_insert(doc, callback) {
// Make sure we were passed a document to insert
if (!doc) {
throw new Error('insert requires an argument');
@@ -542,17 +504,17 @@ Object.assign(Mongo.Collection.prototype, {
// Make a shallow clone of the document, preserving its prototype.
doc = Object.create(
Object.getPrototypeOf(doc),
Object.getOwnPropertyDescriptors(doc)
Object.getPrototypeOf(doc),
Object.getOwnPropertyDescriptors(doc)
);
if ('_id' in doc) {
if (
!doc._id ||
!(typeof doc._id === 'string' || doc._id instanceof Mongo.ObjectID)
!doc._id ||
!(typeof doc._id === 'string' || doc._id instanceof Mongo.ObjectID)
) {
throw new Error(
'Meteor requires document _id fields to be non-empty strings or ObjectIDs'
'Meteor requires document _id fields to be non-empty strings or ObjectIDs'
);
}
} else {
@@ -576,6 +538,8 @@ Object.assign(Mongo.Collection.prototype, {
// On inserts, always return the id that we generated; on all other
// operations, just return the result from the collection.
var chooseReturnValueFromCollectionResult = function(result) {
if (Meteor._isPromise(result)) return result;
if (doc._id) {
return doc._id;
}
@@ -589,8 +553,8 @@ Object.assign(Mongo.Collection.prototype, {
};
const wrappedCallback = wrapCallback(
callback,
chooseReturnValueFromCollectionResult
callback,
chooseReturnValueFromCollectionResult
);
if (this._isRemoteCollection()) {
@@ -604,7 +568,15 @@ Object.assign(Mongo.Collection.prototype, {
// If the user provided a callback and the collection implements this
// operation asynchronously, then queryRet will be undefined, and the
// result will be returned through the callback instead.
const result = this._collection.insert(doc, wrappedCallback);
let result;
if (!!wrappedCallback) {
this._collection.insert(doc, wrappedCallback);
} else {
// If we don't have the callback, we assume the user is using the promise.
// We can't just pass this._collection.insert to the promisify because it would lose the context.
result = Meteor.promisify((cb) => this._collection.insert(doc, cb))();
}
return chooseReturnValueFromCollectionResult(result);
} catch (e) {
if (callback) {
@@ -615,6 +587,19 @@ Object.assign(Mongo.Collection.prototype, {
}
},
/**
* @summary Insert a document in the collection. Returns its unique _id.
* @locus Anywhere
* @method insert
* @memberof Mongo.Collection
* @instance
* @param {Object} doc The document to insert. May not yet have an _id attribute, in which case Meteor will generate one for you.
* @param {Function} [callback] Optional. If present, called with an error object as the first argument and, if no error, the _id as the second.
*/
insert(doc, callback) {
return this._insert(doc, callback);
},
/**
* @summary Modify one or more documents in the collection. Returns the number of matched documents.
* @locus Anywhere
@@ -705,7 +690,7 @@ Object.assign(Mongo.Collection.prototype, {
return this._callMutatorMethod('remove', [selector], wrappedCallback);
}
// it's my collection. descend into the collection object
// it's my collection. descend into the collection1 object
// and propagate any exception.
try {
// If the user provided a callback and the collection implements this
@@ -760,16 +745,29 @@ Object.assign(Mongo.Collection.prototype, {
// We'll actually design an index API later. For now, we just pass through to
// Mongo's, but make it synchronous.
_ensureIndex(index, options) {
/**
* @summary Creates the specified index on the collection.
* @locus server
* @method _ensureIndex
* @deprecated in 3.0
* @memberof Mongo.Collection
* @instance
* @param {Object} index A document that contains the field and value pairs where the field is the index key and the value describes the type of index for that field. For an ascending index on a field, specify a value of `1`; for descending index, specify a value of `-1`. Use `text` for text indexes.
* @param {Object} [options] All options are listed in [MongoDB documentation](https://docs.mongodb.com/manual/reference/method/db.collection.createIndex/#options)
* @param {String} options.name Name of the index
* @param {Boolean} options.unique Define that the index values must be unique, more at [MongoDB documentation](https://docs.mongodb.com/manual/core/index-unique/)
* @param {Boolean} options.sparse Define that the index is sparse, more at [MongoDB documentation](https://docs.mongodb.com/manual/core/index-sparse/)
*/
async _ensureIndex(index, options) {
var self = this;
if (!self._collection._ensureIndex || !self._collection.createIndex)
throw new Error('Can only call createIndex on server collections');
if (self._collection.createIndex) {
self._collection.createIndex(index, options);
await self._collection.createIndex(index, options);
} else {
import { Log } from 'meteor/logging';
Log.debug(`_ensureIndex has been deprecated, please use the new 'createIndex' instead${options?.name ? `, index name: ${options.name}` : `, index: ${JSON.stringify(index)}`}`)
self._collection._ensureIndex(index, options);
Log.debug(`_ensureIndex has been deprecated, please use the new 'createIndex' instead${ options?.name ? `, index name: ${ options.name }` : `, index: ${ JSON.stringify(index) }` }`)
await self._collection._ensureIndex(index, options);
}
},
@@ -785,37 +783,37 @@ Object.assign(Mongo.Collection.prototype, {
* @param {Boolean} options.unique Define that the index values must be unique, more at [MongoDB documentation](https://docs.mongodb.com/manual/core/index-unique/)
* @param {Boolean} options.sparse Define that the index is sparse, more at [MongoDB documentation](https://docs.mongodb.com/manual/core/index-sparse/)
*/
createIndex(index, options) {
async createIndex(index, options) {
var self = this;
if (!self._collection.createIndex)
throw new Error('Can only call createIndex on server collections');
try {
self._collection.createIndex(index, options);
await self._collection.createIndex(index, options);
} catch (e) {
if (e.message.includes('An equivalent index already exists with the same name but different options.') && Meteor.settings?.packages?.mongo?.reCreateIndexOnOptionMismatch) {
import { Log } from 'meteor/logging';
Log.info(`Re-creating index ${index} for ${self._name} due to options mismatch.`);
self._collection._dropIndex(index);
self._collection.createIndex(index, options);
Log.info(`Re-creating index ${ index } for ${ self._name } due to options mismatch.`);
await self._collection._dropIndex(index);
await self._collection.createIndex(index, options);
} else {
throw new Meteor.Error(`An error occurred when creating an index for collection "${self._name}: ${e.message}`);
console.error(e);
throw new Meteor.Error(`An error occurred when creating an index for collection "${ self._name }: ${ e.message }`);
}
}
},
_dropIndex(index) {
async _dropIndex(index) {
var self = this;
if (!self._collection._dropIndex)
throw new Error('Can only call _dropIndex on server collections');
self._collection._dropIndex(index);
},
_dropCollection() {
async _dropCollection() {
var self = this;
if (!self._collection.dropCollection)
throw new Error('Can only call _dropCollection on server collections');
self._collection.dropCollection();
await self._collection.dropCollection();
},
_createCappedCollection(byteSize, maxDocuments) {

View File

@@ -19,14 +19,3 @@ Tinytest.add('async collection - check for methods presence', function (test) {
isFunction(cursor.mapAsync);
isFunction(cursor[Symbol.asyncIterator]);
});
['countDocuments', 'estimatedDocumentCount'].forEach(method => {
  // Verify both counting methods return Promises and track the number of
  // documents as the collection grows one document per iteration.
  Tinytest.addAsync(`async collection - ${method}`, async test => {
    const collection = new Mongo.Collection(method + test.id);
    for (let index = 0; index < 10; ++index) {
      test.instanceOf(collection[method](), Promise);
      test.equal(await collection[method](), index);
      // Await the insert (it returns a Promise) so the next iteration's
      // count reliably reflects the new document.
      await collection.insert({});
    }
  });
});

View File

@@ -53,12 +53,12 @@ Tinytest.add('collection - call new Mongo.Collection with defineMutationMethods=
}
);
Tinytest.add('collection - call find with sort function',
function (test) {
var initialize = function (collection) {
collection.insert({a: 2});
collection.insert({a: 3});
collection.insert({a: 1});
Tinytest.addAsync('collection - call find with sort function',
async function (test) {
var initialize = async function (collection) {
await collection.insert({a: 2});
await collection.insert({a: 3});
await collection.insert({a: 1});
};
var sorter = function (a, b) {
@@ -73,23 +73,23 @@ Tinytest.add('collection - call find with sort function',
var localCollection = new Mongo.Collection(null);
var namedCollection = new Mongo.Collection(collectionName, {connection: null});
initialize(localCollection);
test.equal(getSorted(localCollection), [1, 2, 3]);
await initialize(localCollection);
test.equal(await getSorted(localCollection), [1, 2, 3]);
initialize(namedCollection);
test.equal(getSorted(namedCollection), [1, 2, 3]);
await initialize(namedCollection);
test.equal(await getSorted(namedCollection), [1, 2, 3]);
}
);
Tinytest.add('collection - call native find with sort function',
function (test) {
Tinytest.addAsync('collection - call native find with sort function',
async function (test) {
var collectionName = 'sortNative' + test.id;
var nativeCollection = new Mongo.Collection(collectionName);
if (Meteor.isServer) {
test.throws(
await test.throwsAsync(
function () {
nativeCollection
return nativeCollection
.find({}, {
sort: function () {},
})
@@ -103,32 +103,32 @@ Tinytest.add('collection - call native find with sort function',
}
);
Tinytest.add('collection - calling native find with maxTimeMs should timeout',
function(test) {
Tinytest.addAsync('collection - calling native find with maxTimeMs should timeout',
async function(test) {
var collectionName = 'findOptions1' + test.id;
var collection = new Mongo.Collection(collectionName);
collection.insert({a: 1});
await collection.insert({a: 1});
function doTest() {
return collection.find({$where: "sleep(100) || true"}, {maxTimeMs: 50}).count();
}
if (Meteor.isServer) {
test.throws(doTest);
await test.throwsAsync(doTest);
}
}
);
Tinytest.add('collection - calling native find with $reverse hint should reverse on server',
function(test) {
Tinytest.addAsync('collection - calling native find with $reverse hint should reverse on server',
async function(test) {
var collectionName = 'findOptions2' + test.id;
var collection = new Mongo.Collection(collectionName);
collection.insert({a: 1});
collection.insert({a: 2});
await collection.insert({a: 1});
await collection.insert({a: 2});
function m(doc) { return doc.a; }
var fwd = collection.find({}, {hint: {$natural: 1}}).map(m);
var rev = collection.find({}, {hint: {$natural: -1}}).map(m);
var fwd = await collection.find({}, {hint: {$natural: 1}}).map(m);
var rev = await collection.find({}, {hint: {$natural: -1}}).map(m);
if (Meteor.isServer) {
test.equal(fwd, rev.reverse());
} else {
@@ -139,16 +139,16 @@ Tinytest.add('collection - calling native find with $reverse hint should reverse
);
Tinytest.addAsync('collection - calling native find with good hint and maxTimeMs should succeed',
function(test, done) {
async function(test, done) {
var collectionName = 'findOptions3' + test.id;
var collection = new Mongo.Collection(collectionName);
collection.insert({a: 1});
await collection.insert({a: 1});
Promise.resolve(
Meteor.isServer &&
collection.rawCollection().createIndex({ a: 1 })
).then(() => {
test.equal(collection.find({}, {
).then(async () => {
test.equal(await collection.find({}, {
hint: {a: 1},
maxTimeMs: 1000
}).count(), 1);
@@ -157,8 +157,8 @@ Tinytest.addAsync('collection - calling native find with good hint and maxTimeMs
}
);
Tinytest.add('collection - calling find with a valid readPreference',
function(test) {
Tinytest.addAsync('collection - calling find with a valid readPreference',
async function(test) {
if (Meteor.isServer) {
const defaultReadPreference = 'primary';
const customReadPreference = 'secondaryPreferred';
@@ -170,8 +170,8 @@ Tinytest.add('collection - calling find with a valid readPreference',
);
// Trigger the creation of _synchronousCursor
defaultCursor.fetch();
customCursor.fetch();
await defaultCursor.count();
await customCursor.count();
// defaultCursor._synchronousCursor._dbCursor.operation is not an option anymore
// as the cursor options are now private
@@ -189,7 +189,7 @@ Tinytest.add('collection - calling find with a valid readPreference',
}
);
Tinytest.add('collection - calling find with an invalid readPreference',
Tinytest.addAsync('collection - calling find with an invalid readPreference',
function(test) {
if (Meteor.isServer) {
const invalidReadPreference = 'INVALID';
@@ -199,25 +199,25 @@ Tinytest.add('collection - calling find with an invalid readPreference',
{ readPreference: invalidReadPreference }
);
test.throws(function() {
return test.throwsAsync(function() {
// Trigger the creation of _synchronousCursor
cursor.count();
return cursor.count();
}, `Invalid read preference mode "${invalidReadPreference}"`);
}
}
);
Tinytest.add('collection - inserting a document with a binary should return a document with a binary',
function(test) {
Tinytest.addAsync('collection - inserting a document with a binary should return a document with a binary',
async function(test) {
if (Meteor.isServer) {
const collection = new Mongo.Collection('testBinary1');
const _id = Random.id();
collection.insert({
await collection.insert({
_id,
binary: new MongoDB.Binary(Buffer.from('hello world'), 6)
});
const doc = collection.findOne({ _id });
const doc = await collection.findOne({ _id });
test.ok(
doc.binary instanceof MongoDB.Binary
);
@@ -229,17 +229,17 @@ Tinytest.add('collection - inserting a document with a binary should return a do
}
);
Tinytest.add('collection - inserting a document with a binary (sub type 0) should return a document with a uint8array',
function(test) {
Tinytest.addAsync('collection - inserting a document with a binary (sub type 0) should return a document with a uint8array',
async function(test) {
if (Meteor.isServer) {
const collection = new Mongo.Collection('testBinary8');
const _id = Random.id();
collection.insert({
await collection.insert({
_id,
binary: new MongoDB.Binary(Buffer.from('hello world'), 0)
});
const doc = collection.findOne({ _id });
const doc = await collection.findOne({ _id });
test.ok(
doc.binary instanceof Uint8Array
);
@@ -251,18 +251,18 @@ Tinytest.add('collection - inserting a document with a binary (sub type 0) shoul
}
);
Tinytest.add('collection - updating a document with a binary should return a document with a binary',
function(test) {
Tinytest.addAsync('collection - updating a document with a binary should return a document with a binary',
async function(test) {
if (Meteor.isServer) {
const collection = new Mongo.Collection('testBinary2');
const _id = Random.id();
collection.insert({
await collection.insert({
_id
});
collection.update({ _id }, { $set: { binary: new MongoDB.Binary(Buffer.from('hello world'), 6) } });
await collection.update({ _id }, { $set: { binary: new MongoDB.Binary(Buffer.from('hello world'), 6) } });
const doc = collection.findOne({ _id });
const doc = await collection.findOne({ _id });
test.ok(
doc.binary instanceof MongoDB.Binary
);
@@ -274,18 +274,18 @@ Tinytest.add('collection - updating a document with a binary should return a doc
}
);
Tinytest.add('collection - updating a document with a binary (sub type 0) should return a document with a uint8array',
function(test) {
Tinytest.addAsync('collection - updating a document with a binary (sub type 0) should return a document with a uint8array',
async function(test) {
if (Meteor.isServer) {
const collection = new Mongo.Collection('testBinary7');
const _id = Random.id();
collection.insert({
await collection.insert({
_id
});
collection.update({ _id }, { $set: { binary: new MongoDB.Binary(Buffer.from('hello world'), 0) } });
await collection.update({ _id }, { $set: { binary: new MongoDB.Binary(Buffer.from('hello world'), 0) } });
const doc = collection.findOne({ _id });
const doc = await collection.findOne({ _id });
test.ok(
doc.binary instanceof Uint8Array
);
@@ -297,17 +297,17 @@ Tinytest.add('collection - updating a document with a binary (sub type 0) should
}
);
Tinytest.add('collection - inserting a document with a uint8array should return a document with a uint8array',
function(test) {
Tinytest.addAsync('collection - inserting a document with a uint8array should return a document with a uint8array',
async function(test) {
if (Meteor.isServer) {
const collection = new Mongo.Collection('testBinary3');
const _id = Random.id();
collection.insert({
await collection.insert({
_id,
binary: new Uint8Array(Buffer.from('hello world'))
});
const doc = collection.findOne({ _id });
const doc = await collection.findOne({ _id });
test.ok(
doc.binary instanceof Uint8Array
);
@@ -319,21 +319,21 @@ Tinytest.add('collection - inserting a document with a uint8array should return
}
);
Tinytest.add('collection - updating a document with a uint8array should return a document with a uint8array',
function(test) {
Tinytest.addAsync('collection - updating a document with a uint8array should return a document with a uint8array',
async function(test) {
if (Meteor.isServer) {
const collection = new Mongo.Collection('testBinary4');
const _id = Random.id();
collection.insert({
await collection.insert({
_id
});
collection.update(
await collection.update(
{ _id },
{ $set: { binary: new Uint8Array(Buffer.from('hello world')) } }
)
const doc = collection.findOne({ _id });
const doc = await collection.findOne({ _id });
test.ok(
doc.binary instanceof Uint8Array
);
@@ -345,72 +345,42 @@ Tinytest.add('collection - updating a document with a uint8array should return a
}
);
Tinytest.add('collection - finding with a query with a uint8array field should return the correct document',
function(test) {
Tinytest.addAsync('collection - finding with a query with a uint8array field should return the correct document',
async function(test) {
if (Meteor.isServer) {
const collection = new Mongo.Collection('testBinary5');
const _id = Random.id();
collection.insert({
await collection.insert({
_id,
binary: new Uint8Array(Buffer.from('hello world'))
});
const doc = collection.findOne({ binary: new Uint8Array(Buffer.from('hello world')) });
const doc = await collection.findOne({ binary: new Uint8Array(Buffer.from('hello world')) });
test.equal(
doc._id,
_id
);
collection.remove({});
await collection.remove({});
}
}
);
Tinytest.add('collection - finding with a query with a binary field should return the correct document',
function(test) {
Tinytest.addAsync('collection - finding with a query with a binary field should return the correct document',
async function(test) {
if (Meteor.isServer) {
const collection = new Mongo.Collection('testBinary6');
const _id = Random.id();
collection.insert({
await collection.insert({
_id,
binary: new MongoDB.Binary(Buffer.from('hello world'), 6)
});
const doc = collection.findOne({ binary: new MongoDB.Binary(Buffer.from('hello world'), 6) });
const doc = await collection.findOne({ binary: new MongoDB.Binary(Buffer.from('hello world'), 6) });
test.equal(
doc._id,
_id
);
collection.remove({});
await collection.remove({});
}
}
);
Tinytest.add('collection - count should release the session',
function(test) {
const client = MongoInternals.defaultRemoteCollectionDriver().mongo.client;
var collectionName = 'count' + test.id;
var collection = new Mongo.Collection(collectionName);
collection.insert({ _id: '1' });
collection.insert({ _id: '2' });
collection.insert({ _id: '3' });
const preCount = client.s.activeSessions.size;
test.equal(collection.find().count(), 3);
// options and selector still work
test.equal(collection.find({ _id: { $ne: '1' } }, { skip: 1 }).count(), 1);
// cursor reuse
const cursor1 = collection.find({ _id: { $ne: '1' } }, { skip: 1 });
test.equal(cursor1.count(), 1);
test.equal(cursor1.fetch().length, 1);
const cursor2 = collection.find({ _id: { $ne: '1' } }, { skip: 1 });
test.equal(cursor2.fetch().length, 1);
test.equal(cursor2.count(), 1);
const postCount = client.s.activeSessions.size;
test.equal(preCount, postCount);
}
);

View File

@@ -1,5 +1,3 @@
var Fiber = Npm.require('fibers');
export class DocFetcher {
constructor(mongoConnection) {
this._mongoConnection = mongoConnection;
@@ -32,9 +30,9 @@ export class DocFetcher {
const callbacks = [callback];
self._callbacksForOp.set(op, callbacks);
Fiber(function () {
return Meteor._runAsync(async function () {
try {
var doc = self._mongoConnection.findOne(
var doc = await self._mongoConnection.findOne(
collectionName, {_id: id}) || null;
// Return doc to all relevant callbacks. Note that this array can
// continue to grow during callback excecution.
@@ -43,17 +41,17 @@ export class DocFetcher {
// objects that are intertwingled with each other. Clone before
// popping the future, so that if clone throws, the error gets passed
// to the next callback.
callbacks.pop()(null, EJSON.clone(doc));
await callbacks.pop()(null, EJSON.clone(doc));
}
} catch (e) {
while (callbacks.length > 0) {
callbacks.pop()(e);
await callbacks.pop()(e);
}
} finally {
// XXX consider keeping the doc around for a period of time before
// removing from the cache
self._callbacksForOp.delete(op);
}
}).run();
});
}
}

View File

@@ -1,14 +1,12 @@
var Fiber = Npm.require('fibers');
var Future = Npm.require('fibers/future');
import { DocFetcher } from "./doc_fetcher.js";
testAsyncMulti("mongo-livedata - doc fetcher", [
function (test, expect) {
async function (test, expect) {
var self = this;
var collName = "docfetcher-" + Random.id();
var collection = new Mongo.Collection(collName);
var id1 = collection.insert({x: 1});
var id2 = collection.insert({y: 2});
var id1 = await collection.insert({x: 1});
var id2 = await collection.insert({y: 2});
var fetcher = new DocFetcher(
MongoInternals.defaultRemoteCollectionDriver().mongo);

View File

@@ -14,7 +14,6 @@ const util = require("util");
/** @type {import('mongodb')} */
var MongoDB = NpmModuleMongodb;
var Future = Npm.require('fibers/future');
import { DocFetcher } from "./doc_fetcher.js";
import {
ASYNC_CURSOR_METHODS,
@@ -23,8 +22,7 @@ import {
MongoInternals = {};
// TODO remove after test
MongoInternals.__packageName = 'mongo'
MongoInternals.__packageName = 'mongo';
MongoInternals.NpmModules = {
mongodb: {
@@ -215,7 +213,7 @@ MongoConnection = function (url, options) {
}
};
MongoConnection.prototype.close = function() {
MongoConnection.prototype._close = async function() {
var self = this;
if (! self.db)
@@ -225,12 +223,16 @@ MongoConnection.prototype.close = function() {
var oplogHandle = self._oplogHandle;
self._oplogHandle = null;
if (oplogHandle)
oplogHandle.stop();
await oplogHandle.stop();
// Use Future.wrap so that errors get thrown. This happens to
// work even outside a fiber since the 'close' method is not
// actually asynchronous.
Future.wrap(_.bind(self.client.close, self.client))(true).wait();
await self.client.close();
};
MongoConnection.prototype.close = function () {
return this._close();
};
// Returns the Mongo Collection object; may yield.
@@ -243,19 +245,15 @@ MongoConnection.prototype.rawCollection = function (collectionName) {
return self.db.collection(collectionName);
};
MongoConnection.prototype._createCappedCollection = function (
MongoConnection.prototype._createCappedCollection = async function (
collectionName, byteSize, maxDocuments) {
var self = this;
if (! self.db)
throw Error("_createCappedCollection called before Connection created?");
var future = new Future();
self.db.createCollection(
collectionName,
{ capped: true, size: byteSize, max: maxDocuments },
future.resolver());
future.wait();
await self.db.createCollection(collectionName,
{ capped: true, size: byteSize, max: maxDocuments });
};
// This should be called synchronously with a write, to create a
@@ -364,7 +362,7 @@ MongoConnection.prototype._insert = function (collection_name, document,
).then(({insertedId}) => {
callback(null, insertedId);
}).catch((e) => {
callback(e, null)
callback(e, null);
});
} catch (err) {
write.committed();
@@ -427,19 +425,25 @@ MongoConnection.prototype._remove = function (collection_name, selector,
}
};
MongoConnection.prototype._dropCollection = function (collectionName, cb) {
MongoConnection.prototype._dropCollection = async function (collectionName, cb) {
var self = this;
var write = self._maybeBeginWrite();
var refresh = function () {
Meteor.refresh({collection: collectionName, id: null,
dropCollection: true});
return Meteor.refresh({
collection: collectionName,
id: null,
dropCollection: true
});
};
cb = bindEnvironmentForWrite(writeCallback(write, refresh, cb));
// TODO[FIBERS]: Check if this is correct after the DDP changes.
const fn = bindEnvironmentForWrite(
writeCallback(write, refresh, cb)
);
try {
var collection = self.rawCollection(collectionName);
collection.drop(cb);
await Meteor.promisify(collection.drop)(fn);
} catch (e) {
write.committed();
throw e;
@@ -448,17 +452,17 @@ MongoConnection.prototype._dropCollection = function (collectionName, cb) {
// For testing only. Slightly better than `c.rawDatabase().dropDatabase()`
// because it lets the test's fence wait for it to be complete.
MongoConnection.prototype._dropDatabase = function (cb) {
MongoConnection.prototype._dropDatabase = async function (cb) {
var self = this;
var write = self._maybeBeginWrite();
var refresh = function () {
Meteor.refresh({ dropDatabase: true });
};
cb = bindEnvironmentForWrite(writeCallback(write, refresh, cb));
const fn = Meteor.bindEnvironment(writeCallback(write, refresh, cb))
try {
self.db.dropDatabase(cb);
await Meteor.promisify(self.db.dropDatabase)(fn);
} catch (e) {
write.committed();
throw e;
@@ -489,14 +493,27 @@ MongoConnection.prototype._update = function (collection_name, selector, mod,
// non-object modifier in that they don't crash, they are not
// meaningful operations and do not do anything. Defensively throw an
// error here.
if (!mod || typeof mod !== 'object')
throw new Error("Invalid modifier. Modifier must be an object.");
if (!mod || typeof mod !== 'object') {
const error = new Error("Invalid modifier. Modifier must be an object.");
if (callback) {
return callback(error);
} else {
throw error;
}
}
if (!(LocalCollection._isPlainObject(mod) &&
!EJSON._isCustomType(mod))) {
throw new Error(
"Only plain objects may be used as replacement" +
const error = new Error(
"Only plain objects may be used as replacement" +
" documents in MongoDB");
if (callback) {
return callback(error);
} else {
throw error;
}
}
if (!options) options = {};
@@ -772,7 +789,7 @@ var simulateUpsertWithInsertedId = function (collection, selector, mod,
_.each(["insert", "update", "remove", "dropCollection", "dropDatabase"], function (method) {
MongoConnection.prototype[method] = function (/* arguments */) {
var self = this;
return Meteor.wrapAsync(self["_" + method]).apply(self, arguments);
return Meteor.promisify(self[`_${method}`]).apply(self, arguments);
};
});
@@ -804,54 +821,41 @@ MongoConnection.prototype.find = function (collectionName, selector, options) {
self, new CursorDescription(collectionName, selector, options));
};
MongoConnection.prototype.findOne = function (collection_name, selector,
options) {
MongoConnection.prototype.findOne = async function (collection_name, selector, options) {
var self = this;
if (arguments.length === 1)
if (arguments.length === 1) {
selector = {};
}
options = options || {};
options.limit = 1;
return self.find(collection_name, selector, options).fetch()[0];
const results = await self.find(collection_name, selector, options).fetch();
return results[0];
};
// We'll actually design an index API later. For now, we just pass through to
// Mongo's, but make it synchronous.
MongoConnection.prototype.createIndex = function (collectionName, index,
MongoConnection.prototype.createIndex = async function (collectionName, index,
options) {
var self = this;
// We expect this function to be called at startup, not from within a method,
// so we don't interact with the write fence.
var collection = self.rawCollection(collectionName);
var future = new Future;
var indexName = collection.createIndex(index, options, future.resolver());
future.wait();
};
MongoConnection.prototype.countDocuments = function (collectionName, ...args) {
args = args.map(arg => replaceTypes(arg, replaceMeteorAtomWithMongo));
const collection = this.rawCollection(collectionName);
return collection.countDocuments(...args);
};
MongoConnection.prototype.estimatedDocumentCount = function (collectionName, ...args) {
args = args.map(arg => replaceTypes(arg, replaceMeteorAtomWithMongo));
const collection = this.rawCollection(collectionName);
return collection.estimatedDocumentCount(...args);
var collection = self.rawCollection(collectionName)
var indexName = await collection.createIndex(index, options)
};
MongoConnection.prototype._ensureIndex = MongoConnection.prototype.createIndex;
MongoConnection.prototype._dropIndex = function (collectionName, index) {
MongoConnection.prototype._dropIndex = async function (collectionName, index) {
var self = this;
// This function is only used by test code, not within a method, so we don't
// interact with the write fence.
var collection = self.rawCollection(collectionName);
var future = new Future;
var indexName = collection.dropIndex(index, future.resolver());
future.wait();
var indexName = await collection.dropIndex(index)
};
// CURSORS
@@ -922,24 +926,11 @@ function setupSynchronousCursor(cursor, method) {
return cursor._synchronousCursor;
}
Cursor.prototype.count = function () {
const collection = this._mongo.rawCollection(this._cursorDescription.collectionName);
return Promise.await(collection.countDocuments(
replaceTypes(this._cursorDescription.selector, replaceMeteorAtomWithMongo),
replaceTypes(this._cursorDescription.options, replaceMeteorAtomWithMongo),
));
};
[...ASYNC_CURSOR_METHODS, Symbol.iterator, Symbol.asyncIterator].forEach(methodName => {
// count is handled specially since we don't want to create a cursor.
// it is still included in ASYNC_CURSOR_METHODS because we still want an async version of it to exist.
if (methodName !== 'count') {
Cursor.prototype[methodName] = function (...args) {
const cursor = setupSynchronousCursor(this, methodName);
return cursor[methodName](...args);
};
}
Cursor.prototype[methodName] = function (...args) {
const cursor = setupSynchronousCursor(this, methodName);
return cursor[methodName](...args);
};
// These methods are handled separately.
if (methodName === Symbol.iterator || methodName === Symbol.asyncIterator) {
@@ -1054,9 +1045,156 @@ MongoConnection.prototype._createSynchronousCursor = function(
dbCursor = dbCursor.hint(cursorOptions.hint);
}
return new SynchronousCursor(dbCursor, cursorDescription, options, collection);
return new AsynchronousCursor(dbCursor, cursorDescription, options, collection);
};
/**
* This is just a light wrapper for the cursor. The goal here is to ensure compatibility even if
* there are breaking changes on the MongoDB driver.
*
* @constructor
*/
class AsynchronousCursor {
constructor(dbCursor, cursorDescription, options) {
this._dbCursor = dbCursor;
this._cursorDescription = cursorDescription;
this._selfForIteration = options.selfForIteration || this;
if (options.useTransform && cursorDescription.options.transform) {
this._transform = LocalCollection.wrapTransform(
cursorDescription.options.transform);
} else {
this._transform = null;
}
this._visitedIds = new LocalCollection._IdMap;
}
[Symbol.iterator]() {
return this._cursor[Symbol.iterator]();
}
// Returns a Promise for the next object from the underlying cursor (before
// the Mongo->Meteor type replacement).
async _rawNextObjectPromise() {
try {
return this._dbCursor.next();
} catch (e) {
console.error(e);
}
}
// Returns a Promise for the next object from the cursor, skipping those whose
// IDs we've already seen and replacing Mongo atoms with Meteor atoms.
async _nextObjectPromise () {
while (true) {
var doc = await this._rawNextObjectPromise();
if (!doc) return null;
doc = replaceTypes(doc, replaceMongoAtomWithMeteor);
if (!this._cursorDescription.options.tailable && _.has(doc, '_id')) {
// Did Mongo give us duplicate documents in the same cursor? If so,
// ignore this one. (Do this before the transform, since transform might
// return some unrelated value.) We don't do this for tailable cursors,
// because we want to maintain O(1) memory usage. And if there isn't _id
// for some reason (maybe it's the oplog), then we don't do this either.
// (Be careful to do this for falsey but existing _id, though.)
if (this._visitedIds.has(doc._id)) continue;
this._visitedIds.set(doc._id, true);
}
if (this._transform)
doc = this._transform(doc);
return doc;
}
}
// Returns a promise which is resolved with the next object (like with
// _nextObjectPromise) or rejected if the cursor doesn't return within
// timeoutMS ms.
_nextObjectPromiseWithTimeout(timeoutMS) {
if (!timeoutMS) {
return this._nextObjectPromise();
}
const nextObjectPromise = this._nextObjectPromise();
const timeoutErr = new Error('Client-side timeout waiting for next object');
const timeoutPromise = new Promise((resolve, reject) => {
setTimeout(() => {
reject(timeoutErr);
}, timeoutMS);
});
return Promise.race([nextObjectPromise, timeoutPromise])
.catch((err) => {
if (err === timeoutErr) {
this.close();
}
throw err;
});
}
async forEach(callback, thisArg) {
// Get back to the beginning.
this._rewind();
let idx = 0;
while (true) {
const doc = await this._nextObjectPromise();
if (!doc) return;
await callback.call(thisArg, doc, idx++, this._selfForIteration);
}
}
async map(callback, thisArg) {
const results = [];
await this.forEach(async (doc, index) => {
results.push(await callback.call(thisArg, doc, index, this._selfForIteration));
});
return results;
}
_rewind() {
// known to be synchronous
this._dbCursor.rewind();
this._visitedIds = new LocalCollection._IdMap;
}
// Mostly usable for tailable cursors.
close() {
this._dbCursor.close();
}
fetch() {
return this.map(_.identity);
}
/**
* FIXME: (node:34680) [MONGODB DRIVER] Warning: cursor.count is deprecated and will be
* removed in the next major version, please use `collection.estimatedDocumentCount` or
* `collection.countDocuments` instead.
*/
count() {
return this._dbCursor.count();
}
// This method is NOT wrapped in Cursor.
async getRawObjects(ordered) {
var self = this;
if (ordered) {
return self.fetch();
} else {
var results = new LocalCollection._IdMap;
await self.forEach(function (doc) {
results.set(doc._id, doc);
});
return results;
}
}
}
var SynchronousCursor = function (dbCursor, cursorDescription, options, collection) {
var self = this;
options = _.pick(options || {}, 'selfForIteration', 'useTransform');
@@ -1267,13 +1405,14 @@ MongoConnection.prototype.tail = function (cursorDescription, docCallback, timeo
var stopped = false;
var lastTS;
var loop = function () {
Meteor.defer(async function loop() {
var doc = null;
while (true) {
if (stopped)
return;
try {
doc = cursor._nextObjectPromiseWithTimeout(timeoutMS).await();
doc = await cursor._nextObjectPromiseWithTimeout(timeoutMS);
} catch (err) {
// There's no good way to figure out if this was actually an error from
// Mongo, or just client-side (including our own timeout error). Ah
@@ -1304,13 +1443,11 @@ MongoConnection.prototype.tail = function (cursorDescription, docCallback, timeo
// Mongo failover takes many seconds. Retry in a bit. (Without this
// setTimeout, we peg the CPU at 100% and never notice the actual
// failover.
Meteor.setTimeout(loop, 100);
setTimeout(loop, 100);
break;
}
}
};
Meteor.defer(loop);
});
return {
stop: function () {
@@ -1320,33 +1457,33 @@ MongoConnection.prototype.tail = function (cursorDescription, docCallback, timeo
};
};
MongoConnection.prototype._observeChanges = function (
cursorDescription, ordered, callbacks, nonMutatingCallbacks) {
var self = this;
Object.assign(MongoConnection.prototype, {
_observeChanges: async function (
cursorDescription, ordered, callbacks, nonMutatingCallbacks) {
var self = this;
if (cursorDescription.options.tailable) {
return self._observeChangesTailable(cursorDescription, ordered, callbacks);
}
if (cursorDescription.options.tailable) {
return self._observeChangesTailable(cursorDescription, ordered, callbacks);
}
// You may not filter out _id when observing changes, because the id is a core
// part of the observeChanges API.
const fieldsOptions = cursorDescription.options.projection || cursorDescription.options.fields;
if (fieldsOptions &&
(fieldsOptions._id === 0 ||
fieldsOptions._id === false)) {
throw Error("You may not observe a cursor with {fields: {_id: 0}}");
}
// You may not filter out _id when observing changes, because the id is a core
// part of the observeChanges API.
const fieldsOptions = cursorDescription.options.projection || cursorDescription.options.fields;
if (fieldsOptions &&
(fieldsOptions._id === 0 ||
fieldsOptions._id === false)) {
throw Error("You may not observe a cursor with {fields: {_id: 0}}");
}
var observeKey = EJSON.stringify(
_.extend({ordered: ordered}, cursorDescription));
var observeKey = EJSON.stringify(
_.extend({ordered: ordered}, cursorDescription));
var multiplexer, observeDriver;
var firstHandle = false;
var multiplexer, observeDriver;
var firstHandle = false;
// Find a matching ObserveMultiplexer, or create a new one. This next block is
// guaranteed to not yield (and it doesn't call anything that can observe a
// new query), so no other calls to this function can interleave with it.
Meteor._noYieldsAllowed(function () {
// Find a matching ObserveMultiplexer, or create a new one. This next block is
// guaranteed to not yield (and it doesn't call anything that can observe a
// new query), so no other calls to this function can interleave with it.
if (_.has(self._observeMultiplexers, observeKey)) {
multiplexer = self._observeMultiplexers[observeKey];
} else {
@@ -1356,76 +1493,82 @@ MongoConnection.prototype._observeChanges = function (
ordered: ordered,
onStop: function () {
delete self._observeMultiplexers[observeKey];
observeDriver.stop();
return observeDriver.stop();
}
});
self._observeMultiplexers[observeKey] = multiplexer;
}
});
var observeHandle = new ObserveHandle(multiplexer,
callbacks,
nonMutatingCallbacks,
);
var observeHandle = new ObserveHandle(multiplexer,
callbacks,
nonMutatingCallbacks,
);
if (firstHandle) {
var matcher, sorter;
var canUseOplog = _.all([
function () {
// At a bare minimum, using the oplog requires us to have an oplog, to
// want unordered callbacks, and to not want a callback on the polls
// that won't happen.
return self._oplogHandle && !ordered &&
!callbacks._testOnlyPollCallback;
}, function () {
// We need to be able to compile the selector. Fall back to polling for
// some newfangled $selector that minimongo doesn't support yet.
try {
matcher = new Minimongo.Matcher(cursorDescription.selector);
return true;
} catch (e) {
// XXX make all compilation errors MinimongoError or something
// so that this doesn't ignore unrelated exceptions
return false;
}
}, function () {
// ... and the selector itself needs to support oplog.
return OplogObserveDriver.cursorSupported(cursorDescription, matcher);
}, function () {
// And we need to be able to compile the sort, if any. eg, can't be
// {$natural: 1}.
if (!cursorDescription.options.sort)
return true;
try {
sorter = new Minimongo.Sorter(cursorDescription.options.sort);
return true;
} catch (e) {
// XXX make all compilation errors MinimongoError or something
// so that this doesn't ignore unrelated exceptions
return false;
}
}], function (f) { return f(); }); // invoke each function
if (firstHandle) {
var matcher, sorter;
var canUseOplog = _.all([
function () {
// At a bare minimum, using the oplog requires us to have an oplog, to
// want unordered callbacks, and to not want a callback on the polls
// that won't happen.
return self._oplogHandle && !ordered &&
!callbacks._testOnlyPollCallback;
}, function () {
// We need to be able to compile the selector. Fall back to polling for
// some newfangled $selector that minimongo doesn't support yet.
try {
matcher = new Minimongo.Matcher(cursorDescription.selector);
return true;
} catch (e) {
// XXX make all compilation errors MinimongoError or something
// so that this doesn't ignore unrelated exceptions
return false;
}
}, function () {
// ... and the selector itself needs to support oplog.
return OplogObserveDriver.cursorSupported(cursorDescription, matcher);
}, function () {
// And we need to be able to compile the sort, if any. eg, can't be
// {$natural: 1}.
if (!cursorDescription.options.sort)
return true;
try {
sorter = new Minimongo.Sorter(cursorDescription.options.sort);
return true;
} catch (e) {
// XXX make all compilation errors MinimongoError or something
// so that this doesn't ignore unrelated exceptions
return false;
}
}], function (f) { return f(); }); // invoke each function
var driverClass = canUseOplog ? OplogObserveDriver : PollingObserveDriver;
observeDriver = new driverClass({
cursorDescription: cursorDescription,
mongoHandle: self,
multiplexer: multiplexer,
ordered: ordered,
matcher: matcher, // ignored by polling
sorter: sorter, // ignored by polling
_testOnlyPollCallback: callbacks._testOnlyPollCallback
});
var driverClass = canUseOplog ? OplogObserveDriver : PollingObserveDriver;
observeDriver = new driverClass({
cursorDescription: cursorDescription,
mongoHandle: self,
multiplexer: multiplexer,
ordered: ordered,
matcher: matcher, // ignored by polling
sorter: sorter, // ignored by polling
_testOnlyPollCallback: callbacks._testOnlyPollCallback
});
// This field is only set for use in tests.
multiplexer._observeDriver = observeDriver;
}
if (observeDriver._init) {
await observeDriver._init();
}
// Blocks until the initial adds have been sent.
multiplexer.addHandleAndSendInitialAdds(observeHandle);
// This field is only set for use in tests.
multiplexer._observeDriver = observeDriver;
}
// Blocks until the initial adds have been sent.
await multiplexer.addHandleAndSendInitialAdds(observeHandle);
return observeHandle;
},
});
return observeHandle;
};
// Listen for the invalidation messages that will trigger us to poll the
// database for changes. If this selector specifies specific IDs, specify them

File diff suppressed because it is too large Load Diff

View File

@@ -14,58 +14,56 @@ _.each ([{added: 'added', forceOrdered: true},
Tinytest.addAsync("observeChanges - single id - basics " + added
+ (forceOrdered ? " force ordered" : ""),
function (test, onComplete) {
async function (test, onComplete) {
var c = makeCollection();
var counter = 0;
var callbacks = [added, "changed", "removed"];
if (forceOrdered)
callbacks.push("movedBefore");
withCallbackLogger(test,
await withCallbackLogger(test,
callbacks,
Meteor.isServer,
function (logger) {
var barid = c.insert({thing: "stuff"});
var fooid = c.insert({noodles: "good", bacon: "bad", apples: "ok"});
async function (logger) {
var barid = await c.insert({thing: "stuff"});
var fooid = await c.insert({noodles: "good", bacon: "bad", apples: "ok"});
var handle = c.find(fooid).observeChanges(logger);
var handle = await c.find(fooid).observeChanges(logger);
if (added === 'added') {
logger.expectResult(added, [fooid, {noodles: "good", bacon: "bad", apples: "ok"}]);
} else {
logger.expectResult(added,
[fooid, {noodles: "good", bacon: "bad", apples: "ok"}, null]);
}
c.update(fooid, {noodles: "alright", potatoes: "tasty", apples: "ok"});
await c.update(fooid, {noodles: "alright", potatoes: "tasty", apples: "ok"});
logger.expectResult("changed",
[fooid, {noodles: "alright", potatoes: "tasty", bacon: undefined}]);
c.remove(fooid);
await c.remove(fooid);
logger.expectResult("removed", [fooid]);
logger.expectNoResult(() => {
c.remove(barid);
c.insert({noodles: "good", bacon: "bad", apples: "ok"});
await logger.expectNoResult(async () => {
await c.remove(barid);
await c.insert({noodles: "good", bacon: "bad", apples: "ok"});
});
handle.stop();
await handle.stop();
const badCursor = c.find({}, {fields: {noodles: 1, _id: false}});
test.throws(function () {
badCursor.observeChanges(logger);
await test.throwsAsync(function () {
return badCursor.observeChanges(logger);
});
onComplete();
});
});
});
});
Tinytest.addAsync("observeChanges - callback isolation", function (test, onComplete) {
Tinytest.addAsync("observeChanges - callback isolation", async function (test) {
var c = makeCollection();
withCallbackLogger(test, ["added", "changed", "removed"], Meteor.isServer, function (logger) {
await withCallbackLogger(test, ["added", "changed", "removed"], Meteor.isServer, async function (logger) {
var handles = [];
var cursor = c.find();
handles.push(cursor.observeChanges(logger));
handles.push(await cursor.observeChanges(logger));
// fields-tampering observer
handles.push(cursor.observeChanges({
handles.push(await cursor.observeChanges({
added: function(id, fields) {
fields.apples = 'green';
},
@@ -74,193 +72,184 @@ Tinytest.addAsync("observeChanges - callback isolation", function (test, onCompl
},
}));
var fooid = c.insert({apples: "ok"});
var fooid = await c.insert({apples: "ok"});
logger.expectResult("added", [fooid, {apples: "ok"}]);
c.update(fooid, {apples: "not ok"});
await c.update(fooid, {apples: "not ok"});
logger.expectResult("changed", [fooid, {apples: "not ok"}]);
test.equal(c.findOne(fooid).apples, "not ok");
test.equal((await c.findOne(fooid)).apples, "not ok");
_.each(handles, function(handle) { handle.stop(); });
onComplete();
await Promise.all(handles.map(h => h.stop()));
});
});
Tinytest.addAsync("observeChanges - single id - initial adds", function (test, onComplete) {
Tinytest.addAsync("observeChanges - single id - initial adds", async function (test) {
var c = makeCollection();
withCallbackLogger(test, ["added", "changed", "removed"], Meteor.isServer, function (logger) {
var fooid = c.insert({noodles: "good", bacon: "bad", apples: "ok"});
var handle = c.find(fooid).observeChanges(logger);
await withCallbackLogger(test, ["added", "changed", "removed"], Meteor.isServer, async function (logger) {
var fooid = await c.insert({noodles: "good", bacon: "bad", apples: "ok"});
var handle = await c.find(fooid).observeChanges(logger);
logger.expectResult("added", [fooid, {noodles: "good", bacon: "bad", apples: "ok"}]);
logger.expectNoResult();
handle.stop();
onComplete();
await logger.expectNoResult();
await handle.stop();
});
});
Tinytest.addAsync("observeChanges - unordered - initial adds", function (test, onComplete) {
Tinytest.addAsync("observeChanges - unordered - initial adds", async function (test) {
var c = makeCollection();
withCallbackLogger(test, ["added", "changed", "removed"], Meteor.isServer, function (logger) {
var fooid = c.insert({noodles: "good", bacon: "bad", apples: "ok"});
var barid = c.insert({noodles: "good", bacon: "weird", apples: "ok"});
var handle = c.find().observeChanges(logger);
await withCallbackLogger(test, ["added", "changed", "removed"], Meteor.isServer, async function (logger) {
var fooid = await c.insert({noodles: "good", bacon: "bad", apples: "ok"});
var barid = await c.insert({noodles: "good", bacon: "weird", apples: "ok"});
var handle = await c.find().observeChanges(logger);
logger.expectResultUnordered([
{callback: "added",
args: [fooid, {noodles: "good", bacon: "bad", apples: "ok"}]},
{callback: "added",
args: [barid, {noodles: "good", bacon: "weird", apples: "ok"}]}
]);
logger.expectNoResult();
handle.stop();
onComplete();
await logger.expectNoResult();
await handle.stop();
});
});
Tinytest.addAsync("observeChanges - unordered - basics", function (test, onComplete) {
Tinytest.addAsync("observeChanges - unordered - basics", async function (test) {
var c = makeCollection();
withCallbackLogger(test, ["added", "changed", "removed"], Meteor.isServer, function (logger) {
var handle = c.find().observeChanges(logger);
var barid = c.insert({thing: "stuff"});
await withCallbackLogger(test, ["added", "changed", "removed"], Meteor.isServer, async function (logger) {
var handle = await c.find().observeChanges(logger);
var barid = await c.insert({thing: "stuff"});
logger.expectResultOnly("added", [barid, {thing: "stuff"}]);
var fooid = c.insert({noodles: "good", bacon: "bad", apples: "ok"});
var fooid = await c.insert({noodles: "good", bacon: "bad", apples: "ok"});
logger.expectResultOnly("added", [fooid, {noodles: "good", bacon: "bad", apples: "ok"}]);
c.update(fooid, {noodles: "alright", potatoes: "tasty", apples: "ok"});
c.update(fooid, {noodles: "alright", potatoes: "tasty", apples: "ok"});
await c.update(fooid, {noodles: "alright", potatoes: "tasty", apples: "ok"});
await c.update(fooid, {noodles: "alright", potatoes: "tasty", apples: "ok"});
logger.expectResultOnly("changed",
[fooid, {noodles: "alright", potatoes: "tasty", bacon: undefined}]);
c.remove(fooid);
await c.remove(fooid);
logger.expectResultOnly("removed", [fooid]);
c.remove(barid);
await c.remove(barid);
logger.expectResultOnly("removed", [barid]);
fooid = c.insert({noodles: "good", bacon: "bad", apples: "ok"});
fooid = await c.insert({noodles: "good", bacon: "bad", apples: "ok"});
logger.expectResult("added", [fooid, {noodles: "good", bacon: "bad", apples: "ok"}]);
logger.expectNoResult();
handle.stop();
onComplete();
await logger.expectNoResult();
await handle.stop();
});
});
if (Meteor.isServer) {
Tinytest.addAsync("observeChanges - unordered - specific fields", function (test, onComplete) {
Tinytest.addAsync("observeChanges - unordered - specific fields", async function (test, onComplete) {
var c = makeCollection();
withCallbackLogger(test, ["added", "changed", "removed"], Meteor.isServer, function (logger) {
var handle = c.find({}, {fields:{noodles: 1, bacon: 1}}).observeChanges(logger);
var barid = c.insert({thing: "stuff"});
await withCallbackLogger(test, ["added", "changed", "removed"], Meteor.isServer, async function (logger) {
var handle = await c.find({}, {fields:{noodles: 1, bacon: 1}}).observeChanges(logger);
var barid = await c.insert({thing: "stuff"});
logger.expectResultOnly("added", [barid, {}]);
var fooid = c.insert({noodles: "good", bacon: "bad", apples: "ok"});
var fooid = await c.insert({noodles: "good", bacon: "bad", apples: "ok"});
logger.expectResultOnly("added", [fooid, {noodles: "good", bacon: "bad"}]);
c.update(fooid, {noodles: "alright", potatoes: "tasty", apples: "ok"});
await c.update(fooid, {noodles: "alright", potatoes: "tasty", apples: "ok"});
logger.expectResultOnly("changed",
[fooid, {noodles: "alright", bacon: undefined}]);
c.update(fooid, {noodles: "alright", potatoes: "meh", apples: "ok"});
c.remove(fooid);
await c.update(fooid, {noodles: "alright", potatoes: "meh", apples: "ok"});
await c.remove(fooid);
logger.expectResultOnly("removed", [fooid]);
c.remove(barid);
await c.remove(barid);
logger.expectResultOnly("removed", [barid]);
fooid = c.insert({noodles: "good", bacon: "bad"});
fooid = await c.insert({noodles: "good", bacon: "bad"});
logger.expectResult("added", [fooid, {noodles: "good", bacon: "bad"}]);
logger.expectNoResult();
handle.stop();
onComplete();
await logger.expectNoResult();
await handle.stop();
});
});
Tinytest.addAsync("observeChanges - unordered - specific fields + selector on excluded fields", function (test, onComplete) {
Tinytest.addAsync("observeChanges - unordered - specific fields + selector on excluded fields", async function (test) {
var c = makeCollection();
withCallbackLogger(test, ["added", "changed", "removed"], Meteor.isServer, function (logger) {
var handle = c.find({ mac: 1, cheese: 2 },
await withCallbackLogger(test, ["added", "changed", "removed"], Meteor.isServer, async function (logger) {
var handle = await c.find({ mac: 1, cheese: 2 },
{fields:{noodles: 1, bacon: 1, eggs: 1}}).observeChanges(logger);
var barid = c.insert({thing: "stuff", mac: 1, cheese: 2});
var barid = await c.insert({thing: "stuff", mac: 1, cheese: 2});
logger.expectResultOnly("added", [barid, {}]);
var fooid = c.insert({noodles: "good", bacon: "bad", apples: "ok", mac: 1, cheese: 2});
var fooid = await c.insert({noodles: "good", bacon: "bad", apples: "ok", mac: 1, cheese: 2});
logger.expectResultOnly("added", [fooid, {noodles: "good", bacon: "bad"}]);
c.update(fooid, {noodles: "alright", potatoes: "tasty", apples: "ok", mac: 1, cheese: 2});
await c.update(fooid, {noodles: "alright", potatoes: "tasty", apples: "ok", mac: 1, cheese: 2});
logger.expectResultOnly("changed",
[fooid, {noodles: "alright", bacon: undefined}]);
// Doesn't get update event, since modifies only hidden fields
logger.expectNoResult(() => {
await logger.expectNoResult(() =>
c.update(fooid, {
noodles: "alright",
potatoes: "meh",
apples: "ok",
mac: 1,
cheese: 2
});
});
})
);
c.remove(fooid);
await c.remove(fooid);
logger.expectResultOnly("removed", [fooid]);
c.remove(barid);
await c.remove(barid);
logger.expectResultOnly("removed", [barid]);
fooid = c.insert({noodles: "good", bacon: "bad", mac: 1, cheese: 2});
fooid = await c.insert({noodles: "good", bacon: "bad", mac: 1, cheese: 2});
logger.expectResult("added", [fooid, {noodles: "good", bacon: "bad"}]);
logger.expectNoResult();
await logger.expectNoResult();
handle.stop();
onComplete();
});
});
}
Tinytest.addAsync("observeChanges - unordered - specific fields + modify on excluded fields", function (test, onComplete) {
Tinytest.addAsync("observeChanges - unordered - specific fields + modify on excluded fields", async function (test, onComplete) {
var c = makeCollection();
withCallbackLogger(test, ["added", "changed", "removed"], Meteor.isServer, function (logger) {
var handle = c.find({ mac: 1, cheese: 2 },
await withCallbackLogger(test, ["added", "changed", "removed"], Meteor.isServer, async function (logger) {
var handle = await c.find({ mac: 1, cheese: 2 },
{fields:{noodles: 1, bacon: 1, eggs: 1}}).observeChanges(logger);
var fooid = c.insert({noodles: "good", bacon: "bad", apples: "ok", mac: 1, cheese: 2});
var fooid = await c.insert({noodles: "good", bacon: "bad", apples: "ok", mac: 1, cheese: 2});
logger.expectResultOnly("added", [fooid, {noodles: "good", bacon: "bad"}]);
// Noodles go into shadow, mac appears as eggs
c.update(fooid, {$rename: { noodles: 'shadow', apples: 'eggs' }});
await c.update(fooid, {$rename: { noodles: 'shadow', apples: 'eggs' }});
logger.expectResultOnly("changed",
[fooid, {eggs:"ok", noodles: undefined}]);
c.remove(fooid);
await c.remove(fooid);
logger.expectResultOnly("removed", [fooid]);
logger.expectNoResult();
handle.stop();
onComplete();
await logger.expectNoResult();
await handle.stop();
});
});
Tinytest.addAsync(
"observeChanges - unordered - unset parent of observed field",
function (test, onComplete) {
async function (test) {
var c = makeCollection();
withCallbackLogger(
await withCallbackLogger(
test, ['added', 'changed', 'removed'], Meteor.isServer,
function (logger) {
var handle = c.find({}, {fields: {'type.name': 1}}).observeChanges(logger);
var id = c.insert({ type: { name: 'foobar' } });
async function (logger) {
var handle = await c.find({}, {fields: {'type.name': 1}}).observeChanges(logger);
var id = await c.insert({ type: { name: 'foobar' } });
logger.expectResultOnly('added', [id, { type: { name: 'foobar' } }]);
c.update(id, { $unset: { type: 1 } });
test.equal(c.find().fetch(), [{ _id: id }]);
await c.update(id, { $unset: { type: 1 } });
test.equal(await c.find().fetch(), [{ _id: id }]);
logger.expectResultOnly('changed', [id, { type: undefined }]);
handle.stop();
onComplete();
await handle.stop();
}
);
}
@@ -268,34 +257,33 @@ Tinytest.addAsync(
Tinytest.addAsync("observeChanges - unordered - enters and exits result set through change", function (test, onComplete) {
Tinytest.addAsync("observeChanges - unordered - enters and exits result set through change", async function (test) {
var c = makeCollection();
withCallbackLogger(test, ["added", "changed", "removed"], Meteor.isServer, function (logger) {
var handle = c.find({noodles: "good"}).observeChanges(logger);
var barid = c.insert({thing: "stuff"});
await withCallbackLogger(test, ["added", "changed", "removed"], Meteor.isServer, async function (logger) {
var handle = await c.find({noodles: "good"}).observeChanges(logger);
var barid = await c.insert({thing: "stuff"});
var fooid = c.insert({noodles: "good", bacon: "bad", apples: "ok"});
var fooid = await c.insert({noodles: "good", bacon: "bad", apples: "ok"});
logger.expectResultOnly("added", [fooid, {noodles: "good", bacon: "bad", apples: "ok"}]);
c.update(fooid, {noodles: "alright", potatoes: "tasty", apples: "ok"});
await c.update(fooid, {noodles: "alright", potatoes: "tasty", apples: "ok"});
logger.expectResultOnly("removed",
[fooid]);
c.remove(fooid);
c.remove(barid);
await c.remove(fooid);
await c.remove(barid);
fooid = c.insert({noodles: "ok", bacon: "bad", apples: "ok"});
c.update(fooid, {noodles: "good", potatoes: "tasty", apples: "ok"});
fooid = await c.insert({noodles: "ok", bacon: "bad", apples: "ok"});
await c.update(fooid, {noodles: "good", potatoes: "tasty", apples: "ok"});
logger.expectResult("added", [fooid, {noodles: "good", potatoes: "tasty", apples: "ok"}]);
logger.expectNoResult();
handle.stop();
onComplete();
await logger.expectNoResult();
await handle.stop();
});
});
if (Meteor.isServer) {
testAsyncMulti("observeChanges - tailable", [
function (test, expect) {
async function (test, expect) {
var self = this;
var collName = "cap_" + Random.id();
var coll = new Mongo.Collection(collName);
@@ -314,7 +302,7 @@ if (Meteor.isServer) {
self.expects.push(expect());
var cursor = coll.find({y: {$ne: 7}}, {tailable: true});
self.handle = cursor.observeChanges({
self.handle = await cursor.observeChanges({
added: function (id, fields) {
self.xs.push(fields.x);
test.notEqual(self.expects.length, 0);
@@ -363,11 +351,11 @@ if (Meteor.isServer) {
testAsyncMulti("observeChanges - bad query", [
function (test, expect) {
async function (test, expect) {
var c = makeCollection();
var observeThrows = function () {
test.throws(function () {
c.find({__id: {$in: null}}).observeChanges({
return test.throwsAsync(function () {
return c.find({__id: {$in: null}}).observeChanges({
added: function () {
test.fail("added shouldn't be called");
}
@@ -376,49 +364,31 @@ testAsyncMulti("observeChanges - bad query", [
};
if (Meteor.isClient) {
observeThrows();
await observeThrows();
return;
}
// Test that if two copies of the same bad observeChanges run in parallel
// and are de-duped, both observeChanges calls will throw.
var Fiber = Npm.require('fibers');
var Future = Npm.require('fibers/future');
var f1 = new Future;
var f2 = new Future;
Fiber(function () {
// The observeChanges call in here will yield when we talk to mongod,
// which will allow the second Fiber to start and observe a duplicate
// query.
observeThrows();
f1['return']();
}).run();
Fiber(function () {
test.isFalse(f1.isResolved()); // first observe hasn't thrown yet
observeThrows();
f2['return']();
}).run();
f1.wait();
f2.wait();
await Promise.all(['ob1', 'ob2'].map(() => observeThrows()));
}
]);
if (Meteor.isServer) {
Tinytest.addAsync(
"observeChanges - EnvironmentVariable",
function (test, onComplete) {
async function (test) {
var c = makeCollection();
var environmentVariable = new Meteor.EnvironmentVariable;
environmentVariable.withValue(true, function() {
var handle = c.find({}, { fields: { 'type.name': 1 }}).observeChanges({
await environmentVariable.withValue(true, async function() {
var handle = await c.find({}, { fields: { 'type.name': 1 }}).observeChanges({
added: function() {
test.isTrue(environmentVariable.get());
handle.stop();
onComplete();
}
});
});
c.insert({ type: { name: 'foobar' } });
await c.insert({ type: { name: 'foobar' } });
}
);
}

View File

@@ -1,58 +1,53 @@
var Future = Npm.require('fibers/future');
let nextObserveHandleId = 1;
ObserveMultiplexer = function (options) {
var self = this;
if (!options || !_.has(options, 'ordered'))
throw Error("must specified ordered");
Package['facts-base'] && Package['facts-base'].Facts.incrementServerFact(
"mongo-livedata", "observe-multiplexers", 1);
self._ordered = options.ordered;
self._onStop = options.onStop || function () {};
self._queue = new Meteor._SynchronousQueue();
self._handles = {};
self._readyFuture = new Future;
self._cache = new LocalCollection._CachingChangeObserver({
ordered: options.ordered});
// Number of addHandleAndSendInitialAdds tasks scheduled but not yet
// running. removeHandle uses this to know if it's time to call the onStop
// callback.
self._addHandleTasksScheduledButNotPerformed = 0;
_.each(self.callbackNames(), function (callbackName) {
self[callbackName] = function (/* ... */) {
self._applyCallback(callbackName, _.toArray(arguments));
};
});
};
_.extend(ObserveMultiplexer.prototype, {
addHandleAndSendInitialAdds: function (handle) {
var self = this;
// Check this before calling runTask (even though runTask does the same
// check) so that we don't leak an ObserveMultiplexer on error by
// incrementing _addHandleTasksScheduledButNotPerformed and never
// decrementing it.
if (!self._queue.safeToRunTask())
throw new Error("Can't call observeChanges from an observe callback on the same query");
++self._addHandleTasksScheduledButNotPerformed;
ObserveMultiplexer = class {
constructor({ ordered, onStop = () => {} } = {}) {
if (ordered === undefined) throw Error("must specify ordered");
Package['facts-base'] && Package['facts-base'].Facts.incrementServerFact(
"mongo-livedata", "observe-handles", 1);
"mongo-livedata", "observe-multiplexers", 1);
self._queue.runTask(function () {
this._ordered = ordered;
this._onStop = onStop;
this._queue = new Meteor._AsynchronousQueue();
this._handles = {};
this._resolver = null;
this._readyPromise = new Promise(r => this._resolver = r).then(() => this._isReady = true);
this._cache = new LocalCollection._CachingChangeObserver({
ordered});
// Number of addHandleAndSendInitialAdds tasks scheduled but not yet
// running. removeHandle uses this to know if it's time to call the onStop
// callback.
this._addHandleTasksScheduledButNotPerformed = 0;
const self = this;
this.callbackNames().forEach(callbackName => {
this[callbackName] = function(/* ... */) {
self._applyCallback(callbackName, _.toArray(arguments));
};
});
}
addHandleAndSendInitialAdds(handle) {
return this._addHandleAndSendInitialAdds(handle);
}
async _addHandleAndSendInitialAdds(handle) {
++this._addHandleTasksScheduledButNotPerformed;
Package['facts-base'] && Package['facts-base'].Facts.incrementServerFact(
"mongo-livedata", "observe-handles", 1);
const self = this;
await this._queue.runTask(function () {
self._handles[handle._id] = handle;
// Send out whatever adds we have so far (whether or not we the
// Send out whatever adds we have so far (whether the
// multiplexer is ready).
self._sendAdds(handle);
--self._addHandleTasksScheduledButNotPerformed;
});
// *outside* the task, since otherwise we'd deadlock
self._readyFuture.wait();
},
await this._readyPromise;
}
// Remove an observe handle. If it was the last observe handle, call the
// onStop callback; you cannot add any more observe handles after this.
@@ -60,55 +55,58 @@ _.extend(ObserveMultiplexer.prototype, {
// This is not synchronized with polls and handle additions: this means that
// you can safely call it from within an observe callback, but it also means
// that we have to be careful when we iterate over _handles.
removeHandle: function (id) {
var self = this;
async removeHandle(id) {
// This should not be possible: you can only call removeHandle by having
// access to the ObserveHandle, which isn't returned to user code until the
// multiplex is ready.
if (!self._ready())
if (!this._ready())
throw new Error("Can't remove handles until the multiplex is ready");
delete self._handles[id];
delete this._handles[id];
Package['facts-base'] && Package['facts-base'].Facts.incrementServerFact(
"mongo-livedata", "observe-handles", -1);
"mongo-livedata", "observe-handles", -1);
if (_.isEmpty(self._handles) &&
self._addHandleTasksScheduledButNotPerformed === 0) {
self._stop();
if (_.isEmpty(this._handles) &&
this._addHandleTasksScheduledButNotPerformed === 0) {
await this._stop();
}
},
_stop: function (options) {
var self = this;
}
async _stop(options) {
options = options || {};
// It shouldn't be possible for us to stop when all our handles still
// haven't been returned from observeChanges!
if (! self._ready() && ! options.fromQueryError)
if (! this._ready() && ! options.fromQueryError)
throw Error("surprising _stop: not ready");
// Call stop callback (which kills the underlying process which sends us
// callbacks and removes us from the connection's dictionary).
self._onStop();
await this._onStop();
Package['facts-base'] && Package['facts-base'].Facts.incrementServerFact(
"mongo-livedata", "observe-multiplexers", -1);
"mongo-livedata", "observe-multiplexers", -1);
// Cause future addHandleAndSendInitialAdds calls to throw (but the onStop
// callback should make our connection forget about us).
self._handles = null;
},
this._handles = null;
}
// Allows all addHandleAndSendInitialAdds calls to return, once all preceding
// adds have been processed. Does not block.
ready: function () {
var self = this;
self._queue.queueTask(function () {
ready() {
const self = this;
this._queue.queueTask(function () {
if (self._ready())
throw Error("can't make ObserveMultiplex ready twice!");
self._readyFuture.return();
if (!self._resolver) {
throw new Error("Missing resolver");
}
self._resolver();
self._isReady = true;
});
},
}
// If trying to execute the query results in an error, call this. This is
// intended for permanent errors, not transient network errors that could be
@@ -116,47 +114,45 @@ _.extend(ObserveMultiplexer.prototype, {
// that meant that you managed to run the query once. It will stop this
// ObserveMultiplex and cause addHandleAndSendInitialAdds calls (and thus
// observeChanges calls) to throw the error.
queryError: function (err) {
async queryError(err) {
var self = this;
self._queue.runTask(function () {
await this._queue.runTask(function () {
if (self._ready())
throw Error("can't claim query has an error after it worked!");
self._stop({fromQueryError: true});
self._readyFuture.throw(err);
throw err;
});
},
}
// Calls "cb" once the effects of all "ready", "addHandleAndSendInitialAdds"
// and observe callbacks which came before this call have been propagated to
// all handles. "ready" must have already been called on this multiplexer.
onFlush: function (cb) {
onFlush(cb) {
var self = this;
self._queue.queueTask(function () {
return this._queue.queueTask(async function () {
if (!self._ready())
throw Error("only call onFlush on a multiplexer that will be ready");
cb();
await cb();
});
},
callbackNames: function () {
var self = this;
if (self._ordered)
}
callbackNames() {
if (this._ordered)
return ["addedBefore", "changed", "movedBefore", "removed"];
else
return ["added", "changed", "removed"];
},
_ready: function () {
return this._readyFuture.isResolved();
},
_applyCallback: function (callbackName, args) {
var self = this;
self._queue.queueTask(function () {
}
_ready() {
return !!this._isReady;
}
_applyCallback(callbackName, args) {
const self = this;
this._queue.queueTask(async function () {
// If we stopped in the meantime, do nothing.
if (!self._handles)
return;
// First, apply the change to the cache.
self._cache.applyChange[callbackName].apply(null, args);
await self._cache.applyChange[callbackName].apply(null, args);
// If we haven't finished the initial adds, then we should only be getting
// adds.
if (!self._ready() &&
@@ -169,73 +165,67 @@ _.extend(ObserveMultiplexer.prototype, {
// can continue until these are done. (But we do have to be careful to not
// use a handle that got removed, because removeHandle does not use the
// queue; thus, we iterate over an array of keys that we control.)
_.each(_.keys(self._handles), function (handleId) {
const toAwait = Object.keys(self._handles).map(async (handleId) => {
var handle = self._handles && self._handles[handleId];
if (!handle)
return;
var callback = handle['_' + callbackName];
// clone arguments so that callbacks can mutate their arguments
callback && callback.apply(null,
handle.nonMutatingCallbacks ? args : EJSON.clone(args));
callback && await callback.apply(null,
handle.nonMutatingCallbacks ? args : EJSON.clone(args));
});
await Promise.all(toAwait);
});
},
}
// Sends initial adds to a handle. It should only be called from within a task
// (the task that is processing the addHandleAndSendInitialAdds call). It
// synchronously invokes the handle's added or addedBefore; there's no need to
// flush the queue afterwards to ensure that the callbacks get out.
_sendAdds: function (handle) {
var self = this;
if (self._queue.safeToRunTask())
throw Error("_sendAdds may only be called from within a task!");
var add = self._ordered ? handle._addedBefore : handle._added;
async _sendAdds(handle) {
var add = this._ordered ? handle._addedBefore : handle._added;
if (!add)
return;
// note: docs may be an _IdMap or an OrderedDict
self._cache.docs.forEach(function (doc, id) {
if (!_.has(self._handles, handle._id))
await this._cache.docs.forEachAsync(async (doc, id) => {
if (!_.has(this._handles, handle._id))
throw Error("handle got removed before sending initial adds!");
const { _id, ...fields } = handle.nonMutatingCallbacks ? doc
: EJSON.clone(doc);
if (self._ordered)
add(id, fields, null); // we're going in order, so add at end
: EJSON.clone(doc);
if (this._ordered)
await add(id, fields, null); // we're going in order, so add at end
else
add(id, fields);
await add(id, fields);
});
}
});
var nextObserveHandleId = 1;
};
// When the callbacks do not mutate the arguments, we can skip a lot of data clones
ObserveHandle = function (multiplexer, callbacks, nonMutatingCallbacks = false) {
var self = this;
// The end user is only supposed to call stop(). The other fields are
// accessible to the multiplexer, though.
self._multiplexer = multiplexer;
_.each(multiplexer.callbackNames(), function (name) {
if (callbacks[name]) {
self['_' + name] = callbacks[name];
} else if (name === "addedBefore" && callbacks.added) {
// Special case: if you specify "added" and "movedBefore", you get an
// ordered observe where for some reason you don't get ordering data on
// the adds. I dunno, we wrote tests for it, there must have been a
// reason.
self._addedBefore = function (id, fields, before) {
callbacks.added(id, fields);
};
}
});
self._stopped = false;
self._id = nextObserveHandleId++;
self.nonMutatingCallbacks = nonMutatingCallbacks;
};
ObserveHandle.prototype.stop = function () {
var self = this;
if (self._stopped)
return;
self._stopped = true;
self._multiplexer.removeHandle(self._id);
ObserveHandle = class {
constructor(multiplexer, callbacks, nonMutatingCallbacks = false) {
this._multiplexer = multiplexer;
multiplexer.callbackNames().forEach((name) => {
if (callbacks[name]) {
this['_' + name] = callbacks[name];
} else if (name === "addedBefore" && callbacks.added) {
// Special case: if you specify "added" and "movedBefore", you get an
// ordered observe where for some reason you don't get ordering data on
// the adds. I dunno, we wrote tests for it, there must have been a
// reason.
this._addedBefore = function (id, fields, before) {
callbacks.added(id, fields);
};
}
});
this._stopped = false;
this._id = nextObserveHandleId++;
this.nonMutatingCallbacks = nonMutatingCallbacks;
}
async stop() {
if (this._stopped) return;
this._stopped = true;
await this._multiplexer.removeHandle(this._id);
}
};

View File

@@ -1,7 +1,5 @@
import { oplogV2V1Converter } from "./oplog_v2_converter";
var Future = Npm.require('fibers/future');
var PHASE = {
QUERYING: "QUERYING",
FETCHING: "FETCHING",
@@ -12,9 +10,9 @@ var PHASE = {
// enclosing call to finishIfNeedToPollQuery.
var SwitchedToQuery = function () {};
var finishIfNeedToPollQuery = function (f) {
return function () {
return async function () {
try {
f.apply(this, arguments);
await f.apply(this, arguments);
} catch (e) {
if (!(e instanceof SwitchedToQuery))
throw e;
@@ -111,7 +109,7 @@ OplogObserveDriver = function (options) {
// behind, say), re-poll.
self._stopHandles.push(self._mongoHandle._oplogHandle.onSkippedEntries(
finishIfNeedToPollQuery(function () {
self._needToPollQuery();
return self._needToPollQuery();
})
));
@@ -124,13 +122,13 @@ OplogObserveDriver = function (options) {
// Note: this call is not allowed to block on anything (especially
// on waiting for oplog entries to catch up) because that will block
// onOplogEntry!
self._needToPollQuery();
return self._needToPollQuery();
} else {
// All other operators should be handled depending on phase
if (self._phase === PHASE.QUERYING) {
self._handleOplogEntryQuerying(op);
return self._handleOplogEntryQuerying(op);
} else {
self._handleOplogEntrySteadyOrFetching(op);
return self._handleOplogEntrySteadyOrFetching(op);
}
}
}));
@@ -140,7 +138,7 @@ OplogObserveDriver = function (options) {
// XXX ordering w.r.t. everything else?
self._stopHandles.push(listenAll(
self._cursorDescription, function (notification) {
self._cursorDescription, function () {
// If we're not in a pre-fire write fence, we don't have to do anything.
var fence = DDPServer._CurrentWriteFence.get();
if (!fence || fence.fired)
@@ -154,15 +152,15 @@ OplogObserveDriver = function (options) {
fence._oplogObserveDrivers = {};
fence._oplogObserveDrivers[self._id] = self;
fence.onBeforeFire(function () {
fence.onBeforeFire(async function () {
var drivers = fence._oplogObserveDrivers;
delete fence._oplogObserveDrivers;
// This fence cannot fire until we've caught up to "this point" in the
// oplog, and all observers made it back to the steady state.
self._mongoHandle._oplogHandle.waitUntilCaughtUp();
await self._mongoHandle._oplogHandle.waitUntilCaughtUp();
_.each(drivers, function (driver) {
for (const driver of Object.values(drivers)) {
if (driver._stopped)
return;
@@ -171,13 +169,11 @@ OplogObserveDriver = function (options) {
// Make sure that all of the callbacks have made it through the
// multiplexer and been delivered to ObserveHandles before committing
// writes.
driver._multiplexer.onFlush(function () {
write.committed();
});
await driver._multiplexer.onFlush(write.committed);
} else {
driver._writesToCommitWhenWeReachSteady.push(write);
}
});
}
});
}
));
@@ -186,17 +182,17 @@ OplogObserveDriver = function (options) {
// oplog entry that got rolled back.
self._stopHandles.push(self._mongoHandle._onFailover(finishIfNeedToPollQuery(
function () {
self._needToPollQuery();
return self._needToPollQuery();
})));
// Give _observeChanges a chance to add the new ObserveHandle to our
// multiplexer, so that the added calls get streamed.
Meteor.defer(finishIfNeedToPollQuery(function () {
self._runInitialQuery();
}));
};
_.extend(OplogObserveDriver.prototype, {
_init: function() {
const self = this;
// Give _observeChanges a chance to add the new ObserveHandle to our
// multiplexer, so that the added calls get streamed.
return self._runInitialQuery();
},
_addPublished: function (id, doc) {
var self = this;
Meteor._noYieldsAllowed(function () {
@@ -488,7 +484,7 @@ _.extend(OplogObserveDriver.prototype, {
self._registerPhaseChange(PHASE.FETCHING);
// Defer, because nothing called from the oplog entry handler may yield,
// but fetch() yields.
Meteor.defer(finishIfNeedToPollQuery(function () {
Meteor.defer(finishIfNeedToPollQuery(async function () {
while (!self._stopped && !self._needToFetch.empty()) {
if (self._phase === PHASE.QUERYING) {
// While fetching, we decided to go into QUERYING mode, and then we
@@ -505,7 +501,9 @@ _.extend(OplogObserveDriver.prototype, {
var thisGeneration = ++self._fetchGeneration;
self._needToFetch = new LocalCollection._IdMap;
var waiting = 0;
var fut = new Future;
let promiseResolver = null;
const awaitablePromise = new Promise(r => promiseResolver = r);
// This loop is safe, because _currentlyFetching will not be updated
// during this loop (in fact, it is never mutated).
self._currentlyFetching.forEach(function (op, id) {
@@ -538,11 +536,11 @@ _.extend(OplogObserveDriver.prototype, {
// this is safe (ie, we won't call fut.return() before the
// forEach is done).
if (waiting === 0)
fut.return();
promiseResolver();
}
}));
});
fut.wait();
await awaitablePromise;
// Exit now if we've had a _pollQuery call (here or in another fiber).
if (self._phase === PHASE.QUERYING)
return;
@@ -551,20 +549,20 @@ _.extend(OplogObserveDriver.prototype, {
// We're done fetching, so we can be steady, unless we've had a
// _pollQuery call (here or in another fiber).
if (self._phase !== PHASE.QUERYING)
self._beSteady();
await self._beSteady();
}));
});
},
_beSteady: function () {
_beSteady: async function () {
var self = this;
Meteor._noYieldsAllowed(function () {
await Meteor._noYieldsAllowed(async function () {
self._registerPhaseChange(PHASE.STEADY);
var writes = self._writesToCommitWhenWeReachSteady;
self._writesToCommitWhenWeReachSteady = [];
self._multiplexer.onFlush(function () {
_.each(writes, function (w) {
w.committed();
});
await self._multiplexer.onFlush(async function () {
for (const w of writes) {
await w.committed();
}
});
});
},
@@ -658,22 +656,27 @@ _.extend(OplogObserveDriver.prototype, {
}
});
},
// Yields!
_runInitialQuery: function () {
async _runInitialQueryAsync() {
var self = this;
if (self._stopped)
throw new Error("oplog stopped surprisingly early");
self._runQuery({initial: true}); // yields
await self._runQuery({initial: true}); // yields
if (self._stopped)
return; // can happen on queryError
// Allow observeChanges calls to return. (After this, it's possible for
// stop() to be called.)
self._multiplexer.ready();
await self._multiplexer.ready();
self._doneQuerying(); // yields
await self._doneQuerying(); // yields
},
// Yields!
_runInitialQuery: function () {
return this._runInitialQueryAsync();
},
// In various circumstances, we may just want to stop processing the oplog and
@@ -704,15 +707,15 @@ _.extend(OplogObserveDriver.prototype, {
// Defer so that we don't yield. We don't need finishIfNeedToPollQuery
// here because SwitchedToQuery is not thrown in QUERYING mode.
Meteor.defer(function () {
self._runQuery();
self._doneQuerying();
Meteor.defer(async function () {
await self._runQuery();
await self._doneQuerying();
});
});
},
// Yields!
_runQuery: function (options) {
async _runQueryAsync(options) {
var self = this;
options = options || {};
var newResults, newBuffer;
@@ -735,7 +738,7 @@ _.extend(OplogObserveDriver.prototype, {
// buffer if such is needed.
var cursor = self._cursorForQuery({ limit: self._limit * 2 });
try {
cursor.forEach(function (doc, i) { // yields
await cursor.forEach(function (doc, i) { // yields
if (!self._limit || i < self._limit) {
newResults.set(doc._id, doc);
} else {
@@ -750,14 +753,14 @@ _.extend(OplogObserveDriver.prototype, {
// successfully. Probably it's a bad selector or something, so we
// should NOT retry. Instead, we should halt the observe (which ends
// up calling `stop` on us).
self._multiplexer.queryError(e);
await self._multiplexer.queryError(e);
return;
}
// During failover (eg) if we get an exception we should log and retry
// instead of crashing.
Meteor._debug("Got exception while polling query", e);
Meteor._sleepForMs(100);
await Meteor._sleepForMs(100);
}
}
@@ -767,6 +770,11 @@ _.extend(OplogObserveDriver.prototype, {
self._publishNewResults(newResults, newBuffer);
},
// Yields!
_runQuery: function (options) {
return this._runQueryAsync(options);
},
// Transitions to QUERYING and runs another query, or (if already in QUERYING)
// ensures that we will query again later.
//
@@ -799,23 +807,25 @@ _.extend(OplogObserveDriver.prototype, {
},
// Yields!
_doneQuerying: function () {
_doneQuerying: async function () {
var self = this;
if (self._stopped)
return;
self._mongoHandle._oplogHandle.waitUntilCaughtUp(); // yields
await self._mongoHandle._oplogHandle.waitUntilCaughtUp();
if (self._stopped)
return;
if (self._phase !== PHASE.QUERYING)
throw Error("Phase unexpectedly " + self._phase);
Meteor._noYieldsAllowed(function () {
await Meteor._noYieldsAllowed(async function () {
if (self._requeryWhenDoneThisQuery) {
self._requeryWhenDoneThisQuery = false;
self._pollQuery();
} else if (self._needToFetch.empty()) {
self._beSteady();
await self._beSteady();
} else {
self._fetchModifiedDocuments();
}
@@ -916,23 +926,20 @@ _.extend(OplogObserveDriver.prototype, {
//
// It's important to check self._stopped after every call in this file that
// can yield!
stop: function () {
_stop: async function() {
var self = this;
if (self._stopped)
return;
self._stopped = true;
_.each(self._stopHandles, function (handle) {
handle.stop();
});
// Note: we *don't* use multiplexer.onFlush here because this stop
// callback is actually invoked by the multiplexer itself when it has
// determined that there are no handles left. So nothing is actually going
// to get flushed (and it's probably not valid to call methods on the
// dying multiplexer).
_.each(self._writesToCommitWhenWeReachSteady, function (w) {
w.committed(); // maybe yields?
});
for (const w of self._writesToCommitWhenWeReachSteady) {
await w.committed();
}
self._writesToCommitWhenWeReachSteady = null;
// Proactively drop references to potentially big things.
@@ -944,7 +951,15 @@ _.extend(OplogObserveDriver.prototype, {
self._listenersHandle = null;
Package['facts-base'] && Package['facts-base'].Facts.incrementServerFact(
"mongo-livedata", "observe-drivers-oplog", -1);
"mongo-livedata", "observe-drivers-oplog", -1);
for await (const handle of self._stopHandles) {
await handle.stop();
}
},
stop: function() {
const self = this;
return self._stop();
},
_registerPhaseChange: function (phase) {

View File

@@ -1,5 +1,3 @@
var Future = Npm.require('fibers/future');
import { NpmModuleMongodb } from "meteor/npm-mongo";
const { Long } = NpmModuleMongodb;
@@ -8,10 +6,6 @@ OPLOG_COLLECTION = 'oplog.rs';
var TOO_FAR_BEHIND = process.env.METEOR_OPLOG_TOO_FAR_BEHIND || 2000;
var TAIL_TIMEOUT = +process.env.METEOR_OPLOG_TAIL_TIMEOUT || 30000;
var showTS = function (ts) {
return "Timestamp(" + ts.getHighBits() + ", " + ts.getLowBits() + ")";
};
idForOp = function (op) {
if (op.op === 'd')
return op.o._id;
@@ -35,7 +29,8 @@ OplogHandle = function (oplogUrl, dbName) {
self._oplogTailConnection = null;
self._stopped = false;
self._tailHandle = null;
self._readyFuture = new Future();
self._readyPromiseResolver = null;
self._readyPromise = new Promise(r => self._readyPromiseResolver = r);
self._crossbar = new DDPServer._Crossbar({
factPackage: "mongo-livedata", factName: "oplog-watchers"
});
@@ -72,7 +67,7 @@ OplogHandle = function (oplogUrl, dbName) {
// incremented to be past its timestamp by the worker fiber.
//
// XXX use a priority queue or something else that's faster than an array
self._catchingUpFutures = [];
self._catchingUpResolvers = [];
self._lastProcessedTS = null;
self._onSkippedEntriesHook = new Hook({
@@ -82,7 +77,8 @@ OplogHandle = function (oplogUrl, dbName) {
self._entryQueue = new Meteor._DoubleEndedQueue();
self._workerActive = false;
self._startTailing();
const shouldAwait = self._startTailing();
//TODO Why wait?
};
Object.assign(OplogHandle.prototype, {
@@ -95,13 +91,13 @@ Object.assign(OplogHandle.prototype, {
self._tailHandle.stop();
// XXX should close connections too
},
onOplogEntry: function (trigger, callback) {
_onOplogEntry: async function(trigger, callback) {
var self = this;
if (self._stopped)
throw new Error("Called onOplogEntry on stopped handle!");
// Calling onOplogEntry requires us to wait for the tailing to be ready.
self._readyFuture.wait();
await self._readyPromise;
var originalCallback = callback;
callback = Meteor.bindEnvironment(function (notification) {
@@ -116,6 +112,9 @@ Object.assign(OplogHandle.prototype, {
}
};
},
onOplogEntry: function (trigger, callback) {
return this._onOplogEntry(trigger, callback);
},
// Register a callback to be invoked any time we skip oplog entries (eg,
// because we are too far behind).
onSkippedEntries: function (callback) {
@@ -124,19 +123,15 @@ Object.assign(OplogHandle.prototype, {
throw new Error("Called onSkippedEntries on stopped handle!");
return self._onSkippedEntriesHook.register(callback);
},
// Calls `callback` once the oplog has been processed up to a point that is
// roughly "now": specifically, once we've processed all ops that are
// currently visible.
// XXX become convinced that this is actually safe even if oplogConnection
// is some kind of pool
waitUntilCaughtUp: function () {
async _waitUntilCaughtUp() {
var self = this;
if (self._stopped)
throw new Error("Called waitUntilCaughtUp on stopped handle!");
// Calling waitUntilCaughtUp requries us to wait for the oplog connection to
// be ready.
self._readyFuture.wait();
await self._readyPromise;
var lastEntry;
while (!self._stopped) {
@@ -144,15 +139,15 @@ Object.assign(OplogHandle.prototype, {
// tailing selector (ie, we need to specify the DB name) or else we might
// find a TS that won't show up in the actual tail stream.
try {
lastEntry = self._oplogLastEntryConnection.findOne(
OPLOG_COLLECTION, self._baseOplogSelector,
{fields: {ts: 1}, sort: {$natural: -1}});
lastEntry = await self._oplogLastEntryConnection.findOne(
OPLOG_COLLECTION, self._baseOplogSelector,
{fields: {ts: 1}, sort: {$natural: -1}});
break;
} catch (e) {
// During failover (eg) if we get an exception we should log and retry
// instead of crashing.
Meteor._debug("Got exception while reading last entry", e);
Meteor._sleepForMs(100);
await Meteor._sleepForMs(100);
}
}
@@ -177,21 +172,32 @@ Object.assign(OplogHandle.prototype, {
// Insert the future into our list. Almost always, this will be at the end,
// but it's conceivable that if we fail over from one primary to another,
// the oplog entries we see will go backwards.
var insertAfter = self._catchingUpFutures.length;
while (insertAfter - 1 > 0 && self._catchingUpFutures[insertAfter - 1].ts.greaterThan(ts)) {
var insertAfter = self._catchingUpResolvers.length;
while (insertAfter - 1 > 0 && self._catchingUpResolvers[insertAfter - 1].ts.greaterThan(ts)) {
insertAfter--;
}
var f = new Future;
self._catchingUpFutures.splice(insertAfter, 0, {ts: ts, future: f});
f.wait();
let promiseResolver = null;
const promiseToAwait = new Promise(r => promiseResolver = r);
self._catchingUpResolvers.splice(insertAfter, 0, {ts: ts, resolver: promiseResolver});
await promiseToAwait;
},
_startTailing: function () {
// Calls `callback` once the oplog has been processed up to a point that is
// roughly "now": specifically, once we've processed all ops that are
// currently visible.
// XXX become convinced that this is actually safe even if oplogConnection
// is some kind of pool
waitUntilCaughtUp: function () {
return this._waitUntilCaughtUp();
},
_startTailing: async function () {
var self = this;
// First, make sure that we're talking to the local database.
var mongodbUri = Npm.require('mongodb-uri');
if (mongodbUri.parse(self._oplogUrl).database !== 'local') {
throw Error("$MONGO_OPLOG_URL must be set to the 'local' database of " +
"a Mongo replica set");
"a Mongo replica set");
}
// We make two separate connections to Mongo. The Node Mongo driver
@@ -206,32 +212,28 @@ Object.assign(OplogHandle.prototype, {
// The tail connection will only ever be running a single tail command, so
// it only needs to make one underlying TCP connection.
self._oplogTailConnection = new MongoConnection(
self._oplogUrl, {maxPoolSize: 1});
self._oplogUrl, {maxPoolSize: 1});
// XXX better docs, but: it's to get monotonic results
// XXX is it safe to say "if there's an in flight query, just use its
// results"? I don't think so but should consider that
self._oplogLastEntryConnection = new MongoConnection(
self._oplogUrl, {maxPoolSize: 1});
self._oplogUrl, {maxPoolSize: 1});
// Now, make sure that there actually is a repl set here. If not, oplog
// tailing won't ever find anything!
// More on the isMasterDoc
// https://docs.mongodb.com/manual/reference/command/isMaster/
var f = new Future;
self._oplogLastEntryConnection.db.admin().command(
{ ismaster: 1 }, f.resolver());
var isMasterDoc = f.wait();
const isMasterDoc = await Meteor.promisify((cb) => {
self._oplogLastEntryConnection.db.admin().command({ismaster: 1}, cb);
})();
if (!(isMasterDoc && isMasterDoc.setName)) {
throw Error("$MONGO_OPLOG_URL must be set to the 'local' database of " +
"a Mongo replica set");
"a Mongo replica set");
}
// Find the last oplog entry.
var lastOplogEntry = self._oplogLastEntryConnection.findOne(
OPLOG_COLLECTION, {}, {sort: {$natural: -1}, fields: {ts: 1}});
var lastOplogEntry = await self._oplogLastEntryConnection.findOne(
OPLOG_COLLECTION, {}, {sort: {$natural: -1}, fields: {ts: 1}});
var oplogSelector = _.clone(self._baseOplogSelector);
var oplogSelector = Object.assign({}, self._baseOplogSelector);
if (lastOplogEntry) {
// Start after the last entry that currently exists.
oplogSelector.ts = {$gt: lastOplogEntry.ts};
@@ -242,7 +244,7 @@ Object.assign(OplogHandle.prototype, {
}
var cursorDescription = new CursorDescription(
OPLOG_COLLECTION, oplogSelector, {tailable: true});
OPLOG_COLLECTION, oplogSelector, {tailable: true});
// Start tailing the oplog.
//
@@ -251,14 +253,15 @@ Object.assign(OplogHandle.prototype, {
// one bug that can lead to query callbacks never getting called (even with
// an error) when leadership failover occur.
self._tailHandle = self._oplogTailConnection.tail(
cursorDescription,
function (doc) {
self._entryQueue.push(doc);
self._maybeStartWorker();
},
TAIL_TIMEOUT
cursorDescription,
function (doc) {
self._entryQueue.push(doc);
self._maybeStartWorker();
},
TAIL_TIMEOUT
);
self._readyFuture.return();
self._readyPromiseResolver();
},
_maybeStartWorker: function () {
@@ -362,9 +365,9 @@ Object.assign(OplogHandle.prototype, {
_setLastProcessedTS: function (ts) {
var self = this;
self._lastProcessedTS = ts;
while (!_.isEmpty(self._catchingUpFutures) && self._catchingUpFutures[0].ts.lessThanOrEqual(self._lastProcessedTS)) {
var sequencer = self._catchingUpFutures.shift();
sequencer.future.return();
while (!_.isEmpty(self._catchingUpResolvers) && self._catchingUpResolvers[0].ts.lessThanOrEqual(self._lastProcessedTS)) {
var sequencer = self._catchingUpResolvers.shift();
sequencer.resolver();
}
},

View File

@@ -1,65 +1,70 @@
var OplogCollection = new Mongo.Collection("oplog-" + Random.id());
Tinytest.add("mongo-livedata - oplog - cursorSupported", function (test) {
Tinytest.addAsync("mongo-livedata - oplog - cursorSupported", async function (test) {
var oplogEnabled =
!!MongoInternals.defaultRemoteCollectionDriver().mongo._oplogHandle;
!!MongoInternals.defaultRemoteCollectionDriver().mongo._oplogHandle;
var supported = function (expected, selector, options) {
var supported = async function (expected, selector, options) {
var cursor = OplogCollection.find(selector, options);
var handle = cursor.observeChanges({added: function () {}});
var handle = await cursor.observeChanges({
added: function () {
}
});
// If there's no oplog at all, we shouldn't ever use it.
if (!oplogEnabled)
expected = false;
test.equal(!!handle._multiplexer._observeDriver._usesOplog, expected);
handle.stop();
await handle.stop();
};
supported(true, "asdf");
supported(true, 1234);
supported(true, new Mongo.ObjectID());
await supported(true, "asdf");
await supported(true, 1234);
await supported(true, new Mongo.ObjectID());
supported(true, {_id: "asdf"});
supported(true, {_id: 1234});
supported(true, {_id: new Mongo.ObjectID()});
await supported(true, { _id: "asdf" });
await supported(true, { _id: 1234 });
await supported(true, { _id: new Mongo.ObjectID() });
supported(true, {foo: "asdf",
bar: 1234,
baz: new Mongo.ObjectID(),
eeney: true,
miney: false,
moe: null});
await supported(true, {
foo: "asdf",
bar: 1234,
baz: new Mongo.ObjectID(),
eeney: true,
miney: false,
moe: null
});
supported(true, {});
await supported(true, {});
supported(true, {$and: [{foo: "asdf"}, {bar: "baz"}]});
supported(true, {foo: {x: 1}});
supported(true, {foo: {$gt: 1}});
supported(true, {foo: [1, 2, 3]});
await supported(true, { $and: [{ foo: "asdf" }, { bar: "baz" }] });
await supported(true, { foo: { x: 1 } });
await supported(true, { foo: { $gt: 1 } });
await supported(true, { foo: [1, 2, 3] });
// No $where.
supported(false, {$where: "xxx"});
supported(false, {$and: [{foo: "adsf"}, {$where: "xxx"}]});
await supported(false, { $where: "xxx" });
await supported(false, { $and: [{ foo: "adsf" }, { $where: "xxx" }] });
// No geoqueries.
supported(false, {x: {$near: [1,1]}});
await supported(false, { x: { $near: [1, 1] } });
// Nothing Minimongo doesn't understand. (Minimongo happens to fail to
// implement $elemMatch inside $all which MongoDB supports.)
supported(false, {x: {$all: [{$elemMatch: {y: 2}}]}});
await supported(false, { x: { $all: [{ $elemMatch: { y: 2 } }] } });
supported(true, {}, { sort: {x:1} });
supported(true, {}, { sort: {x:1}, limit: 5 });
supported(false, {}, { sort: {$natural:1}, limit: 5 });
supported(false, {}, { limit: 5 });
supported(false, {}, { skip: 2, limit: 5 });
supported(false, {}, { skip: 2 });
await supported(true, {}, { sort: { x: 1 } });
await supported(true, {}, { sort: { x: 1 }, limit: 5 });
await supported(false, {}, { sort: { $natural: 1 }, limit: 5 });
await supported(false, {}, { limit: 5 });
await supported(false, {}, { skip: 2, limit: 5 });
await supported(false, {}, { skip: 2 });
});
process.env.MONGO_OPLOG_URL && testAsyncMulti(
"mongo-livedata - oplog - entry skipping", [
function (test, expect) {
async function (test, expect) {
var self = this;
self.collectionName = Random.id();
self.collection = new Mongo.Collection(self.collectionName);
self.collection.createIndex({species: 1});
await self.collection.createIndex({ species: 1 });
// Fill collection with lots of irrelevant objects (red cats) and some
// relevant ones (blue dogs).
@@ -96,40 +101,35 @@ process.env.MONGO_OPLOG_URL && testAsyncMulti(
})));
},
function (test, expect) {
async function (test, expect) {
var self = this;
test.equal(self.collection.find().count(),
self.IRRELEVANT_SIZE + self.RELEVANT_SIZE);
test.equal((await self.collection.find().count()),
self.IRRELEVANT_SIZE + self.RELEVANT_SIZE);
var blueDog5Id = null;
var gotSpot = false;
// Watch for blue dogs.
const gotSpotPromise = new Promise(resolve => {
self.subHandle = self.collection.find({
species: 'dog',
color: 'blue',
}).observeChanges({
added(id, fields) {
if (fields.name === 'dog 5') {
blueDog5Id = id;
}
},
changed(id, fields) {
if (EJSON.equals(id, blueDog5Id) &&
fields.name === 'spot') {
gotSpot = true;
resolve();
}
},
});
let resolver; const gotSpotPromise = new Promise(resolve => resolver = resolve)
let resolver2; const gotSpotPromise2 = new Promise(resolve => resolver2 = resolve)
self.subHandle = await self.collection.find({
species: 'dog',
color: 'blue',
}).observeChanges({
added(id, fields) {
if (fields.name === 'dog 5') {
blueDog5Id = id
resolver2()
}
},
changed(id, fields) {
if (EJSON.equals(id, blueDog5Id) &&
fields.name === 'spot') {
gotSpot = true;
resolver();
}
},
});
test.isTrue(self.subHandle._multiplexer._observeDriver._usesOplog);
test.isTrue(blueDog5Id);
test.isFalse(gotSpot);
self.skipped = false;
self.skipHandle = MongoInternals.defaultRemoteCollectionDriver()
.mongo._oplogHandle.onSkippedEntries(function () {
@@ -140,16 +140,19 @@ process.env.MONGO_OPLOG_URL && testAsyncMulti(
// they might in theory be relevant (since they say "something you didn't
// know about is now blue", and who knows, maybe it's a dog) which puts
// the OplogObserveDriver into FETCHING mode, which performs poorly.
self.collection.update({species: 'cat'},
{$set: {color: 'blue'}},
{multi: true});
self.collection.update(blueDog5Id, {$set: {name: 'spot'}});
await self.collection.update({ species: 'cat' },
{ $set: { color: 'blue' } },
{ multi: true });
test.isTrue(blueDog5Id);
test.isFalse(gotSpot);
await self.collection.update(blueDog5Id, { $set: { name: 'spot' } });
// We ought to see the spot change soon!
return gotSpotPromise;
return Promise.all([gotSpotPromise, gotSpotPromise2]);
},
function (test, expect) {
async function (test, expect) {
var self = this;
test.isTrue(self.skipped);
@@ -157,34 +160,34 @@ process.env.MONGO_OPLOG_URL && testAsyncMulti(
MongoInternals.defaultRemoteCollectionDriver()
.mongo._oplogHandle._resetTooFarBehind();
self.skipHandle.stop();
self.subHandle.stop();
self.collection.remove({});
await self.skipHandle.stop();
await self.subHandle.stop();
await self.collection.remove({});
}
]
);
// Meteor.isServer && Tinytest.addAsync(
// "mongo-livedata - oplog - _onFailover",
// async function (test) {
// const driver = MongoInternals.defaultRemoteCollectionDriver();
// const failoverPromise = new Promise(resolve => {
// driver.mongo._onFailover(() => {
// resolve(true);
// });
// });
//
//
// await driver.mongo.db.admin().command({
// replSetStepDown: 1,
// force: true
// });
//
// try {
// const result = await failoverPromise;
// test.isTrue(result);
// } catch (e) {
// test.fail({ message: "Error waiting on Promise", value: JSON.stringify(e) });
// }
// });
Meteor.isServer && Tinytest.addAsync(
"mongo-livedata - oplog - _onFailover",
async function (test) {
const driver = MongoInternals.defaultRemoteCollectionDriver();
const failoverPromise = new Promise(resolve => {
driver.mongo._onFailover(() => {
resolve(true);
});
});
await driver.mongo.db.admin().command({
replSetStepDown: 1,
force: true
});
try {
const result = await failoverPromise;
test.isTrue(result);
} catch (e) {
test.fail({ message: "Error waiting on Promise", value: JSON.stringify(e) });
}
});

View File

@@ -36,7 +36,7 @@ function join(prefix, key) {
return prefix ? `${prefix}.${key}` : key;
}
const arrayOperatorKeyRegex = /^(a|[su]\d+)$/;
const arrayOperatorKeyRegex = /^(a|u\d+)$/;
function isArrayOperatorKey(field) {
return arrayOperatorKeyRegex.test(field);
@@ -96,9 +96,7 @@ function convertOplogDiff(oplogEntry, diff, prefix) {
}
const positionKey = join(join(prefix, key), position.slice(1));
if (position[0] === 's') {
convertOplogDiff(oplogEntry, value, positionKey);
} else if (value === null) {
if (value === null) {
oplogEntry.$unset ??= {};
oplogEntry.$unset[positionKey] = true;
} else {

View File

@@ -77,71 +77,6 @@ const cases = [
{ $v: 2, diff: { u: { params: { e: { _str: '5f953cde8ceca90030bdb86f' } } } } },
{ $v: 2, $set: { params: { e: { _str: '5f953cde8ceca90030bdb86f' } } } },
],
[
{
$v: 2,
diff: {
sitems: {
a: true,
s0: {
u: { id: 'm57DsX8g8L66bM5JX', name: 'Alice' },
sbio: { u: { en: 'Just Alice' } },
slanguages: {
a: true,
s0: {
u: { englishName: 'English', key: 'en', localName: 'English' },
},
},
},
u1: {
id: 'FJwSQHqwpenCN6RQH',
name: 'Bob',
title: { en: 'Fictional character', sv: '' },
bio: { en: 'Just Bob', sv: '' },
avatar: null,
languages: [
{ key: 'sv', englishName: 'Swedish', localName: 'Sverige' },
],
},
u2: null
},
},
},
{
$v: 2,
$set: {
'items.0.id': 'm57DsX8g8L66bM5JX',
'items.0.name': 'Alice',
'items.0.bio.en': 'Just Alice',
'items.0.languages.0.englishName': 'English',
'items.0.languages.0.key': 'en',
'items.0.languages.0.localName': 'English',
'items.1': {
id: 'FJwSQHqwpenCN6RQH',
name: 'Bob',
title: {
en: 'Fictional character',
sv: '',
},
bio: {
en: 'Just Bob',
sv: '',
},
avatar: null,
languages: [
{
key: 'sv',
englishName: 'Swedish',
localName: 'Sverige',
},
],
},
},
$unset: {
'items.2': true
}
},
]
];
Tinytest.add('oplog - v2/v1 conversion', function (test) {

View File

@@ -21,13 +21,6 @@ Npm.strip({
});
Package.onUse(function (api) {
if (process.env.DISABLE_FIBERS) {
api.use('mongo-async', ['server', 'client']);
api.export("Mongo");
api.export('MongoInternals', 'server');
api.export('ObserveMultiplexer', 'server', {testOnly: true});
return;
}
api.use('npm-mongo', 'server');
api.use('allow-deny');

View File

@@ -11,7 +11,7 @@ PollingObserveDriver = function (options) {
self._stopCallbacks = [];
self._stopped = false;
self._synchronousCursor = self._mongoHandle._createSynchronousCursor(
self._cursor = self._mongoHandle._createSynchronousCursor(
self._cursorDescription);
// previous results snapshot. on each poll cycle, diffs against
@@ -74,15 +74,16 @@ PollingObserveDriver = function (options) {
Meteor.clearInterval(intervalHandle);
});
}
// Make sure we actually poll soon!
self._unthrottledEnsurePollIsScheduled();
Package['facts-base'] && Package['facts-base'].Facts.incrementServerFact(
"mongo-livedata", "observe-drivers-polling", 1);
};
_.extend(PollingObserveDriver.prototype, {
_init: async function () {
// Make sure we actually poll soon!
await this._unthrottledEnsurePollIsScheduled();
Package['facts-base'] && Package['facts-base'].Facts.incrementServerFact(
"mongo-livedata", "observe-drivers-polling", 1);
},
// This is always called through _.throttle (except once at startup).
_unthrottledEnsurePollIsScheduled: function () {
var self = this;
@@ -129,7 +130,7 @@ _.extend(PollingObserveDriver.prototype, {
});
},
_pollMongo: function () {
async _pollMongo() {
var self = this;
--self._pollsScheduledButNotStarted;
@@ -153,7 +154,7 @@ _.extend(PollingObserveDriver.prototype, {
// Get the new query results. (This yields.)
try {
newResults = self._synchronousCursor.getRawObjects(self._ordered);
newResults = await self._cursor.getRawObjects(self._ordered);
} catch (e) {
if (first && typeof(e.code) === 'number') {
// This is an error document sent to us by mongod, not a connection
@@ -162,9 +163,9 @@ _.extend(PollingObserveDriver.prototype, {
// NOT retry. Instead, we should halt the observe (which ends up calling
// `stop` on us).
self._multiplexer.queryError(
new Error(
"Exception while polling query " +
JSON.stringify(self._cursorDescription) + ": " + e.message));
new Error(
"Exception while polling query " +
JSON.stringify(self._cursorDescription) + ": " + e.message));
return;
}
@@ -176,14 +177,14 @@ _.extend(PollingObserveDriver.prototype, {
// "cancel" the observe from the inside in this case.
Array.prototype.push.apply(self._pendingWrites, writesForCycle);
Meteor._debug("Exception while polling query " +
JSON.stringify(self._cursorDescription), e);
JSON.stringify(self._cursorDescription), e);
return;
}
// Run diffs.
if (!self._stopped) {
LocalCollection._diffQueryChanges(
self._ordered, oldResults, newResults, self._multiplexer);
self._ordered, oldResults, newResults, self._multiplexer);
}
// Signals the multiplexer to allow all observeChanges calls that share this
@@ -211,7 +212,11 @@ _.extend(PollingObserveDriver.prototype, {
stop: function () {
var self = this;
self._stopped = true;
_.each(self._stopCallbacks, function (c) { c(); });
const stopCallbacksCaller = async function(c) {
await c();
};
_.each(self._stopCallbacks, stopCallbacksCaller);
// Release any write fences that are waiting on us.
_.each(self._pendingWrites, function (w) {
w.committed();

View File

@@ -4,28 +4,13 @@ MongoInternals.RemoteCollectionDriver = function (
self.mongo = new MongoConnection(mongo_url, options);
};
const REMOTE_COLLECTION_METHODS = [
'_createCappedCollection',
'_dropIndex',
'_ensureIndex',
'createIndex',
'countDocuments',
'dropCollection',
'estimatedDocumentCount',
'find',
'findOne',
'insert',
'rawCollection',
'remove',
'update',
'upsert',
];
Object.assign(MongoInternals.RemoteCollectionDriver.prototype, {
open: function (name) {
var self = this;
var ret = {};
REMOTE_COLLECTION_METHODS.forEach(
['find', 'findOne', 'insert', 'update', 'upsert',
'remove', '_ensureIndex', 'createIndex', '_dropIndex', '_createCappedCollection',
'dropCollection', 'rawCollection'].forEach(
function (m) {
ret[m] = _.bind(self.mongo[m], self.mongo, name);
});
@@ -55,8 +40,8 @@ MongoInternals.defaultRemoteCollectionDriver = _.once(function () {
// to know about a database connection problem before the app starts. Doing so
// in a `Meteor.startup` is fine, as the `WebApp` handles requests only after
// all are finished.
Meteor.startup(() => {
Promise.await(driver.mongo.client.connect());
Meteor.startup(async () => {
await driver.mongo.client.connect();
});
return driver;

View File

@@ -1,10 +1,10 @@
Tinytest.add('mongo livedata - native upsert - id type MONGO with MODIFIERS update', function (test) {
Tinytest.addAsync('mongo livedata - native upsert - id type MONGO with MODIFIERS update', async function (test) {
var collName = Random.id();
var coll = new Mongo.Collection('native_upsert_'+collName, {idGeneration: 'MONGO'});
coll.insert({foo: 1});
var result = coll.upsert({foo: 1}, {$set: {foo:2}});
var updated = coll.findOne({foo: 2});
var result = await coll.upsert({foo: 1}, {$set: {foo:2}});
var updated = await coll.findOne({foo: 2});
test.equal(result.insertedId, undefined);
test.equal(result.numberAffected, 1);
@@ -15,12 +15,12 @@ Tinytest.add('mongo livedata - native upsert - id type MONGO with MODIFIERS upda
test.equal(EJSON.equals(updated, {foo: 2}), true);
});
Tinytest.add('mongo livedata - native upsert - id type MONGO with MODIFIERS insert', function (test) {
Tinytest.addAsync('mongo livedata - native upsert - id type MONGO with MODIFIERS insert', async function (test) {
var collName = Random.id();
var coll = new Mongo.Collection('native_upsert_'+collName, {idGeneration: 'MONGO'});
var result = coll.upsert({foo: 1}, {$set: {bar:2}});
var inserted = coll.findOne({foo: 1});
var result = await coll.upsert({foo: 1}, {$set: {bar:2}});
var inserted = await coll.findOne({foo: 1});
test.isTrue(result.insertedId !== undefined);
test.equal(result.numberAffected, 1);
@@ -32,13 +32,13 @@ Tinytest.add('mongo livedata - native upsert - id type MONGO with MODIFIERS inse
test.equal(EJSON.equals(inserted, {foo: 1, bar: 2}), true);
});
Tinytest.add('mongo livedata - native upsert - id type MONGO PLAIN OBJECT update', function (test) {
Tinytest.addAsync('mongo livedata - native upsert - id type MONGO PLAIN OBJECT update', async function (test) {
var collName = Random.id();
var coll = new Mongo.Collection('native_upsert_'+collName, {idGeneration: 'MONGO'});
coll.insert({foo: 1, baz: 42});
var result = coll.upsert({foo: 1}, {bar:2});
var updated = coll.findOne({bar: 2});
var result = await coll.upsert({foo: 1}, {bar:2});
var updated = await coll.findOne({bar: 2});
test.isTrue(result.insertedId === undefined);
test.equal(result.numberAffected, 1);
@@ -49,12 +49,12 @@ Tinytest.add('mongo livedata - native upsert - id type MONGO PLAIN OBJECT update
test.equal(EJSON.equals(updated, {bar: 2}), true);
});
Tinytest.add('mongo livedata - native upsert - id type MONGO PLAIN OBJECT insert', function (test) {
Tinytest.addAsync('mongo livedata - native upsert - id type MONGO PLAIN OBJECT insert', async function (test) {
var collName = Random.id();
var coll = new Mongo.Collection('native_upsert_'+collName, {idGeneration: 'MONGO'});
var result = coll.upsert({foo: 1}, {bar:2});
var inserted = coll.findOne({bar: 2});
var result = await coll.upsert({foo: 1}, {bar:2});
var inserted = await coll.findOne({bar: 2});
test.isTrue(result.insertedId !== undefined);
test.equal(result.numberAffected, 1);
@@ -67,13 +67,13 @@ Tinytest.add('mongo livedata - native upsert - id type MONGO PLAIN OBJECT insert
test.equal(EJSON.equals(inserted, {bar: 2}), true);
});
Tinytest.add('mongo livedata - native upsert - id type STRING with MODIFIERS update', function (test) {
Tinytest.addAsync('mongo livedata - native upsert - id type STRING with MODIFIERS update', async function (test) {
var collName = Random.id();
var coll = new Mongo.Collection('native_upsert_'+collName, {idGeneration: 'STRING'});
coll.insert({foo: 1});
var result = coll.upsert({foo: 1}, {$set: {foo:2}});
var updated = coll.findOne({foo: 2});
await coll.insert({foo: 1});
var result = await coll.upsert({foo: 1}, {$set: {foo:2}});
var updated = await coll.findOne({foo: 2});
test.equal(result.insertedId, undefined);
test.equal(result.numberAffected, 1);
@@ -84,12 +84,12 @@ Tinytest.add('mongo livedata - native upsert - id type STRING with MODIFIERS upd
test.equal(EJSON.equals(updated, {foo: 2}), true);
});
Tinytest.add('mongo livedata - native upsert - id type STRING with MODIFIERS insert', function (test) {
Tinytest.addAsync('mongo livedata - native upsert - id type STRING with MODIFIERS insert', async function (test) {
var collName = Random.id();
var coll = new Mongo.Collection('native_upsert_'+collName, {idGeneration: 'STRING'});
var result = coll.upsert({foo: 1}, {$set: {bar:2}});
var inserted = coll.findOne({foo: 1});
var result = await coll.upsert({foo: 1}, {$set: {bar:2}});
var inserted = await coll.findOne({foo: 1});
test.isTrue(result.insertedId !== undefined);
test.equal(result.numberAffected, 1);
@@ -101,13 +101,13 @@ Tinytest.add('mongo livedata - native upsert - id type STRING with MODIFIERS ins
test.equal(EJSON.equals(inserted, {foo: 1, bar: 2}), true);
});
Tinytest.add('mongo livedata - native upsert - id type STRING PLAIN OBJECT update', function (test) {
Tinytest.addAsync('mongo livedata - native upsert - id type STRING PLAIN OBJECT update', async function (test) {
var collName = Random.id();
var coll = new Mongo.Collection('native_upsert_'+collName, {idGeneration: 'STRING'});
coll.insert({foo: 1, baz: 42});
var result = coll.upsert({foo: 1}, {bar:2});
var updated = coll.findOne({bar: 2});
await coll.insert({foo: 1, baz: 42});
var result = await coll.upsert({foo: 1}, {bar:2});
var updated = await coll.findOne({bar: 2});
test.isTrue(result.insertedId === undefined);
test.equal(result.numberAffected, 1);
@@ -118,12 +118,12 @@ Tinytest.add('mongo livedata - native upsert - id type STRING PLAIN OBJECT updat
test.equal(EJSON.equals(updated, {bar: 2}), true);
});
Tinytest.add('mongo livedata - native upsert - id type STRING PLAIN OBJECT insert', function (test) {
Tinytest.addAsync('mongo livedata - native upsert - id type STRING PLAIN OBJECT insert', async function (test) {
var collName = Random.id();
var coll = new Mongo.Collection('native_upsert_'+collName, {idGeneration: 'STRING'});
var result = coll.upsert({foo: 1}, {bar:2});
var inserted = coll.findOne({bar: 2});
var result = await coll.upsert({foo: 1}, {bar:2});
var inserted = await coll.findOne({bar: 2});
test.isTrue(result.insertedId !== undefined);
test.equal(result.numberAffected, 1);
@@ -135,12 +135,12 @@ Tinytest.add('mongo livedata - native upsert - id type STRING PLAIN OBJECT inser
test.equal(EJSON.equals(inserted, {bar: 2}), true);
});
Tinytest.add('mongo livedata - native upsert - MONGO passing id insert', function (test) {
Tinytest.addAsync('mongo livedata - native upsert - MONGO passing id insert', async function (test) {
var collName = Random.id();
var coll = new Mongo.Collection('native_upsert_'+collName, {idGeneration: 'MONGO'});
var result = coll.upsert({foo: 1}, {_id: 'meu id'});
var inserted = coll.findOne({_id: 'meu id'});
var result = await coll.upsert({foo: 1}, {_id: 'meu id'});
var inserted = await coll.findOne({_id: 'meu id'});
test.equal(result.insertedId, 'meu id');
test.equal(result.numberAffected, 1);

View File

@@ -9,7 +9,7 @@ Plugin.registerMinifier({
class MeteorMinifier {
processFilesForBundle (files, options) {
async processFilesForBundle (files, options) {
const mode = options.minifyMode;
// don't minify anything for development
@@ -63,7 +63,7 @@ class MeteorMinifier {
stats: Object.create(null)
};
files.forEach(file => {
for await (file of files) {
// Don't reminify *.min.js.
if (/\.min\.js$/.test(file.getPathInBundle())) {
toBeAdded.data += file.getContentsAsString();
@@ -71,7 +71,7 @@ class MeteorMinifier {
else {
let minified;
try {
minified = meteorJsMinify(file.getContentsAsString());
minified = await meteorJsMinify(file.getContentsAsString());
}
catch (err) {
maybeThrowMinifyErrorBySourceFile(err, file);
@@ -94,7 +94,7 @@ class MeteorMinifier {
toBeAdded.data += '\n\n';
Plugin.nudge();
});
}
// this is where the minified code gets added to one
// JS file that is delivered to the client