From 859ff555fe440b2e3dec914e9ac2f99f57322168 Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Wed, 5 Feb 2014 14:33:11 -0800 Subject: [PATCH 01/93] Limits in oplog code - distinguish queries with limits and sorts - separate the concept of matching set from published set - keep a separate buffer for N documents outside of limit to reduce mongo lookups --- packages/minimongo/id_map.js | 2 +- packages/mongo-livedata/dummy-structure.js | 44 ++++ .../mongo-livedata/oplog_observe_driver.js | 231 +++++++++++++++--- packages/mongo-livedata/package.js | 2 + 4 files changed, 239 insertions(+), 40 deletions(-) create mode 100644 packages/mongo-livedata/dummy-structure.js diff --git a/packages/minimongo/id_map.js b/packages/minimongo/id_map.js index 5c2b628155..4e94273fac 100644 --- a/packages/minimongo/id_map.js +++ b/packages/minimongo/id_map.js @@ -68,7 +68,7 @@ _.extend(LocalCollection._IdMap.prototype, { var clone = new LocalCollection._IdMap; self.forEach(function (value, id) { clone.set(id, EJSON.clone(value)); - }); + }); return clone; } }); diff --git a/packages/mongo-livedata/dummy-structure.js b/packages/mongo-livedata/dummy-structure.js new file mode 100644 index 0000000000..ce008e56bd --- /dev/null +++ b/packages/mongo-livedata/dummy-structure.js @@ -0,0 +1,44 @@ +// Implements the interface of IdMap and knows how to find Min or Max element +DummyStructure = function (comparator) { + var self = this; + self.comparator = comparator; + self.idMap = new LocalCollection.IdMap; +}; + +_.each(['get', 'set', 'remove', 'has', 'empty', 'clear', 'forEach', 'size', 'setDefault'], function (method) { + DummyStructure.prototype[method] = function (/* arguments */) { + var self = this; + return self.idMap[method].apply(self, arguments); + }; +}); + +DummyStructure.prototype.clone = function () { + var self = this; + var clone = new DummyStructure; + clone.comparator = self.comparator; + clone.idMap = self.idMap.clone(); + return clone; +}; + 
+DummyStructure.prototype.minElementId = function () { + var self = this; + var minElementId = null; + self.idMap.forEach(function (value, key) { + if (minElement === null) + minElementId = key; + else if (self.comparator(value, self.idMap.get(minElementId)) < 0) + minElementId = key; + }); + + return minElementId; +}; + +DummyStructure.prototype.maxElementId = function () { + var self = this; + var comparator = self.comparator; + self.comparator = function (a, b) { return -comparator(a, b); }; + var maxElementId = self.minElementId(); + self.comparator = comparator; + return maxElementId; +}; + diff --git a/packages/mongo-livedata/oplog_observe_driver.js b/packages/mongo-livedata/oplog_observe_driver.js index 2cfa2a7775..cc79cf9e24 100644 --- a/packages/mongo-livedata/oplog_observe_driver.js +++ b/packages/mongo-livedata/oplog_observe_driver.js @@ -19,8 +19,21 @@ OplogObserveDriver = function (options) { self._cursorDescription = options.cursorDescription; self._mongoHandle = options.mongoHandle; self._multiplexer = options.multiplexer; - if (options.ordered) - throw Error("OplogObserveDriver only supports unordered observeChanges"); + + if (options.ordered) { + // XXX replace with doubly-heaps and shit once we get these working + var comparator = self._cursorDescription.sorter.getComparator(); + self._limit = self._cursorDescription.limit; + self._comparator = comparator; + self._unpublishedBuffer = new DummyStructure(comparator); + // We need something that can find Max value in addition to IdMap interface + self._published = new DummyStructure(comparator); + } else { + self._limit = 0; + self._comparator = null; + self._unpublishedBuffer = null; + self._published = new LocalCollection._IdMap; + } self._stopped = false; self._stopHandles = []; @@ -30,7 +43,6 @@ OplogObserveDriver = function (options) { self._registerPhaseChange(PHASE.QUERYING); - self._published = new LocalCollection._IdMap; var selector = self._cursorDescription.selector; self._matcher = 
options.matcher; var projection = self._cursorDescription.options.fields || {}; @@ -107,22 +119,111 @@ OplogObserveDriver = function (options) { }; _.extend(OplogObserveDriver.prototype, { - _add: function (doc) { + _addPublished: function (id, doc) { + var self = this; + self._published.set(id, self._sharedProjectionFn(doc)); + self._multiplexer.added(id, self._projectionFn(doc)); + + // After adding this document, the published set might be overflowed + // (exceeding capacity specified by limit). If so, push the maximum element + // to the buffer, we might want to save it in memory to reduce the amount of + // Mongo lookups in the future. + if (self._limit && self._published.size() > self._limit) { + // XXX in theory the size of published is no more than limit+1 + if (self._published.size() !== self._limit + 1) { + // xcxc better error message + throw new Error("After adding to published, " + + (self._limit - self._published.size()) + + " documents are overflowing the set"); + } + + var overflowingDocId = self._published.getMaximumId(); + var overflowingDoc = self._published.get(overflowingDocId); + self._published.remove(overflowingDocId); + self._multiplexer.removed(overflowingDocId); + self._addBuffered(overflowingDocId, overflowingDoc); + } + }, + _removePublished: function (id) { + var self = this; + self._published.remove(id); + self._multiplexer.removed(id); + if (!self._unpublishedBuffer) + return; + // xcxc size on heaps should be cached to O(1) + if (self._published.size() < self._cursorDescription.limit) { + // The unpublished buffer is empty iff published contains the whole + // matching set, i.e. number of matching documents is less or equal to the + // queries limit. 
+ if (!self._unpublishedBuffer.size()) + return; + + var newDocId = self._unpublishedBuffer.minElementId(); + var newDoc = self._unpublishedBuffer.get(newDocId); + self._removeBuffered(newDocId); + self._addPublished(newDocId, newDoc); + } + }, + _changePublished: function (id, oldDoc, newDoc) { + var self = this; + self._published.set(id, self._sharedProjectionFn(newDoc)); + var changed = LocalCollection._makeChangedFields(_.clone(newDoc), oldDoc); + changed = self._projectionFn(changed); + if (!_.isEmpty(changed)) + self._multiplexer.changed(id, changed); + }, + _addBuffered: function (id, doc) { + var self = this; + self._unpublishedBuffer.add(id, self._sharedProjectionFn(fields)); + if (self._unpublishedBuffer.size() > self._limit) + self._unpublishedBuffer.remove(self._unpublishedBuffer.getMaximumId()); + }, + _removeBuffered: function (id) { + var self = this; + self._unpublishedBuffer.remove(id); + if (! self._unpublishedBuffer.size()) + self._needToPollQuery(); + }, + // Called when a document has joined the "Matching" results set. + // Takes responsibility of keeping _unpublishedBuffer in sync with _published + // and the effect of limit enforced. + _addMatching: function (doc) { var self = this; var id = doc._id; var fields = _.clone(doc); delete fields._id; if (self._published.has(id)) throw Error("tried to add something already published " + id); - self._published.set(id, self._sharedProjectionFn(fields)); - self._multiplexer.added(id, self._projectionFn(fields)); + if (self._unpublishedBuffer && self._unpublishedBuffer.has(id)) + throw Error("tried to add something already existed in buffer " + id); // xcxc error msg + + var limit = self._limit; + var comparator = self._comparator; + var maxPublished = (limit && self._published.size() > 0) ? self._published.get(self._published.getMaximumId()) : null; + var maxBuffered = (limit && self._unpublishedBuffer.size() > 0) ? 
self._unpublishedBuffer.get(self._unpublishedBuffer.getMaximumId()) : null; + // The query is unlimited or didn't publish enough documents yet or the new + // document would fit into published set pushing the maximum element out, + // then we need to publish the doc. + // Otherwise we might need to buffer it (only in case of limited query). + if (!limit || self._published.size() < limit || comparator(maxPublished, fields) === 1) { + self._addPublished(id, fields); + } else if (self._unpublishedBuffer.size() < limit || comparator(maxBuffered, fields) === 1) { + self._addBuffered(id, fields); + } }, - _remove: function (id) { + // Called when a document leaves the "Matching" results set. + // Takes responsibility of keeping _unpublishedBuffer in sync with _published + // and the effect of limit enforced. + _removeMatching: function (id) { var self = this; - if (!self._published.has(id)) - throw Error("tried to remove something unpublished " + id); - self._published.remove(id); - self._multiplexer.removed(id); + if (!self._published.has(id) && !self._unpublishedBuffer) + throw Error("tried to remove something unpublished " + id); // xcxc fix this error msg + + if (self._published.has(id)) { + self._removePublished(id); + } else if (self._unpublishedBuffer.has(id)) { + self._removeBuffered(id); + } }, _handleDoc: function (id, newDoc, mustMatchNow) { var self = this; @@ -134,22 +235,40 @@ _.extend(OplogObserveDriver.prototype, { + EJSON.stringify(self._cursorDescription)); } - var matchedBefore = self._published.has(id); + var publishedBefore = self._published.has(id); + var bufferedBefore = self._unpublishedBuffer && self._unpublishedBuffer.has(id); + var cachedBefore = publishedBefore || bufferedBefore; - if (matchesNow && !matchedBefore) { - self._add(newDoc); - } else if (matchedBefore && !matchesNow) { - self._remove(id); + if (matchesNow && !cachedBefore) { + self._addMatching(newDoc); + } else if (cachedBefore && !matchesNow) { + self._removeMatching(id); } else 
if (matchesNow) { - var oldDoc = self._published.get(id); - if (!oldDoc) - throw Error("thought that " + id + " was there!"); delete newDoc._id; - self._published.set(id, self._sharedProjectionFn(newDoc)); - var changed = LocalCollection._makeChangedFields(_.clone(newDoc), oldDoc); - changed = self._projectionFn(changed); - if (!_.isEmpty(changed)) - self._multiplexer.changed(id, changed); + var oldDoc = self._published.get(id); + var comparator = self._comparator; + var minBuffered = self._unpublishedBuffer && self._unpublishedBuffer.size() && + self._unpublishedBuffer.get(self._unpublishedBuffer.minElementId()); + + if (publishedBefore) { + // Unordered case where the document stays in published once it matches + // or the case when we don't have enough matching docs to publish or the + // changed but matching doc will stay in published anyways. + if (!self._limit || self._unpublishedBuffer.size() === 0 || comparator(newDoc, minBuffered) < 1) { + self._changePublished(id, oldDoc, newDoc); + } else { + // after the change doc doesn't stay in the published, remove it + self._removePublished(id); + } + } else if (bufferedBefore) { + // after the change we can't know if doc is still in the buffer limit + // w/o querying mongo, so just remove it from buffer + self._removeBuffered(id); + // but it can move into published now, check it + var maxPublished = self._published.get(self._published.getMaximumId()); + if (comparator(newDoc, maxPublished) === -1) + self._addPublished(id, newDoc); + } } }, _fetchModifiedDocuments: function () { @@ -233,16 +352,17 @@ _.extend(OplogObserveDriver.prototype, { } if (op.op === 'd') { - if (self._published.has(id)) - self._remove(id); + if (self._published.has(id) || (self._limit && self._unpublishedBuffer.has(id))) + self._removeMatching(id); } else if (op.op === 'i') { + // xcxc what if buffer has it? if (self._published.has(id)) throw new Error("insert found for already-existing ID"); // XXX what if selector yields? 
for now it can't but later it could have // $where if (self._matcher.documentMatches(op.o).result) - self._add(op.o); + self._addMatching(op.o); } else if (op.op === 'u') { // Is this a modifier ($set/$unset, which may require us to poll the // database to figure out if the whole document matches the selector) or a @@ -258,10 +378,13 @@ _.extend(OplogObserveDriver.prototype, { if (isReplace) { self._handleDoc(id, _.extend({_id: id}, op.o)); - } else if (self._published.has(id) && canDirectlyModifyDoc) { + } else if ((self._published.has(id) || self._unpublishedBuffer.has(id)) && canDirectlyModifyDoc) { // Oh great, we actually know what the document is, so we can apply // this directly. - var newDoc = EJSON.clone(self._published.get(id)); + if (self._published.has(id)) + var newDoc = EJSON.clone(self._published.get(id)); + else + var newDoc = EJSON.clone(self._unpublishedBuffer.get(id)); newDoc._id = id; LocalCollection._modify(newDoc, op.o); self._handleDoc(id, self._sharedProjectionFn(newDoc)); @@ -280,9 +403,16 @@ _.extend(OplogObserveDriver.prototype, { if (self._stopped) throw new Error("oplog stopped surprisingly early"); - var initialCursor = self._cursorForQuery(); + // Query 2x documents as the half excluded from the original query will go + // into unpublished buffer to reduce additional Mongo lookups in cases when + // documents are removed from the published set and need a replacement. + // XXX needs more thought on non-zero skip + // XXX "2" here is a "magic number" + var initialCursor = self._cursorForQuery({ limit: self._limit * 2 }); initialCursor.forEach(function (initialDoc) { - self._add(initialDoc); + // self._addMatching knows how to correctly separate documents into the + // published set and the buffer. 
+ self._addMatching(initialDoc); }); if (self._stopped) throw new Error("oplog stopped quite early"); @@ -324,12 +454,18 @@ _.extend(OplogObserveDriver.prototype, { // subtle note: _published does not contain _id fields, but newResults // does var newResults = new LocalCollection._IdMap; - var cursor = self._cursorForQuery(); - cursor.forEach(function (doc) { - newResults.set(doc._id, doc); + var newBuffer = new LocalCollection._IdMap; + // XXX 2 is a "magic number" meaning there is an extra chunk of docs for + // buffer if such is needed. + var cursor = self._cursorForQuery({ limit: self._limit * 2 }); + cursor.forEach(function (doc, i) { + if (!self._limit || i < self._limit) + newResults.set(doc._id, doc); + else + newBuffer.set(doc._id, doc); }); - self._publishNewResults(newResults); + self._publishNewResults(newResults, newBuffer); self._doneQuerying(); }); @@ -379,7 +515,7 @@ _.extend(OplogObserveDriver.prototype, { } }, - _cursorForQuery: function () { + _cursorForQuery: function (optionsOverwrite) { var self = this; // The query we run is almost the same as the cursor we are observing, with @@ -388,6 +524,11 @@ _.extend(OplogObserveDriver.prototype, { // "shared" projection). And we don't want to apply any transform in the // cursor, because observeChanges shouldn't use the transform. var options = _.clone(self._cursorDescription.options); + + // Allow the caller to modify the options. Useful to specify different skip + // and limit values. + _.extend(options, optionsOverwrite); + options.fields = self._sharedProjection; delete options.transform; // We are NOT deep cloning fields or selector here, which should be OK. @@ -401,13 +542,19 @@ _.extend(OplogObserveDriver.prototype, { // Replace self._published with newResults (both are IdMaps), invoking observe // callbacks on the multiplexer. + // Replace self._unpublishedBuffer with newBuffer. // // XXX This is very similar to LocalCollection._diffQueryUnorderedChanges. 
We // should really: (a) Unify IdMap and OrderedDict into Unordered/OrderedDict (b) // Rewrite diff.js to use these classes instead of arrays and objects. - _publishNewResults: function (newResults) { + _publishNewResults: function (newResults, newBuffer) { var self = this; + // If there is a buffer, shut down so it doesn't stay in a way + if (self._unpublishedBuffer) { + self._unpublishedBuffer.clear(); + } + // First remove anything that's gone. Be careful not to modify // self._published while iterating over it. var idsToRemove = []; @@ -416,7 +563,7 @@ _.extend(OplogObserveDriver.prototype, { idsToRemove.push(id); }); _.each(idsToRemove, function (id) { - self._remove(id); + self._removePublished(id); }); // Now do adds and changes. @@ -425,6 +572,11 @@ _.extend(OplogObserveDriver.prototype, { // selector. self._handleDoc(id, doc, true); }); + + // Finally, replace the buffer + newBuffer.forEach(function (doc, id) { + self._addBuffered(id, doc); + }); }, // This stop function is invoked from the onStop of the ObserveMultiplexer, so @@ -451,6 +603,7 @@ _.extend(OplogObserveDriver.prototype, { // Proactively drop references to potentially big things. self._published = null; + self._unpublishedBuffer = null; self._needToFetch = null; self._currentlyFetching = null; self._oplogEntryHandle = null; @@ -489,7 +642,7 @@ OplogObserveDriver.cursorSupported = function (cursorDescription, matcher) { // This option (which are mostly used for sorted cursors) require us to figure // out where a given document fits in an order to know if it's included or // not, and we don't track that information when doing oplog tailing. - if (options.limit || options.skip) return false; + if (options.limit && (options.skip || !options.sorter)) return false; // If a fields projection option is given check if it is supported by // minimongo (some operators are not supported). 
diff --git a/packages/mongo-livedata/package.js b/packages/mongo-livedata/package.js index f0b1f9e3d2..898258486a 100644 --- a/packages/mongo-livedata/package.js +++ b/packages/mongo-livedata/package.js @@ -47,6 +47,8 @@ Package.on_use(function (api) { // For tests only. api.export('MongoTest', 'server', {testOnly: true}); + // xcxc temporary + api.add_files('dummy-structure.js', 'server'); api.add_files(['mongo_driver.js', 'oplog_tailing.js', 'observe_multiplex.js', 'doc_fetcher.js', 'polling_observe_driver.js','oplog_observe_driver.js'], From ad0c88f07183eaef7a1b3d5ada6e1fffbd6edc38 Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Tue, 11 Feb 2014 16:45:47 -0800 Subject: [PATCH 02/93] Always use limit to separate ordered code vs unordered --- .../mongo-livedata/oplog_observe_driver.js | 23 ++++++++++++------- 1 file changed, 15 insertions(+), 8 deletions(-) diff --git a/packages/mongo-livedata/oplog_observe_driver.js b/packages/mongo-livedata/oplog_observe_driver.js index cc79cf9e24..6b684222d9 100644 --- a/packages/mongo-livedata/oplog_observe_driver.js +++ b/packages/mongo-livedata/oplog_observe_driver.js @@ -21,6 +21,12 @@ OplogObserveDriver = function (options) { self._multiplexer = options.multiplexer; if (options.ordered) { + // There are several properties ordered driver implements: + // - _limit is a positive number + // - _comparator is a function-comparator by which the query is ordered + // - _unpublishedBuffer is non-null collection + // - _published implements maxElementId method in addition to IdMap methods + // XXX replace with doubly-heaps and shit once we get these working var comparator = self._cursorDescription.sorter.getComparator(); self._limit = self._cursorDescription.limit; @@ -148,14 +154,14 @@ _.extend(OplogObserveDriver.prototype, { var self = this; self._published.remove(id); self._multiplexer.removed(id); - if (!self._unpublishedBuffer) + if (! 
self._limit) return; // xcxc size on heaps should be cached to O(1) if (self._published.size() < self._cursorDescription.limit) { // The unpublished buffer is empty iff published contains the whole // matching set, i.e. number of matching documents is less or equal to the // queries limit. - if (!self._unpublishedBuffer.size()) + if (! self._unpublishedBuffer.size()) return; var newDocId = self._unpublishedBuffer.minElementId(); @@ -194,7 +200,7 @@ _.extend(OplogObserveDriver.prototype, { delete fields._id; if (self._published.has(id)) throw Error("tried to add something already published " + id); - if (self._unpublishedBuffer && self._unpublishedBuffer.has(id)) + if (self._limit && self._unpublishedBuffer.has(id)) throw Error("tried to add something already existed in buffer " + id); // xcxc error msg var limit = self._limit; @@ -216,7 +222,7 @@ _.extend(OplogObserveDriver.prototype, { // and the effect of limit enforced. _removeMatching: function (id) { var self = this; - if (!self._published.has(id) && !self._unpublishedBuffer) + if (!self._published.has(id) && !self._limit) throw Error("tried to remove something unpublished " + id); // xcxc fix this error msg if (self._published.has(id)) { @@ -236,7 +242,7 @@ _.extend(OplogObserveDriver.prototype, { } var publishedBefore = self._published.has(id); - var bufferedBefore = self._unpublishedBuffer && self._unpublishedBuffer.has(id); + var bufferedBefore = self._limit && self._unpublishedBuffer.has(id); var cachedBefore = publishedBefore || bufferedBefore; if (matchesNow && !cachedBefore) { @@ -247,7 +253,7 @@ _.extend(OplogObserveDriver.prototype, { delete newDoc._id; var oldDoc = self._published.get(id); var comparator = self._comparator; - var minBuffered = self._unpublishedBuffer && self._unpublishedBuffer.size() && + var minBuffered = self._limit && self._unpublishedBuffer.size() && self._unpublishedBuffer.get(self._unpublishedBuffer.minElementId()); if (publishedBefore) { @@ -550,8 +556,9 @@ 
_.extend(OplogObserveDriver.prototype, { _publishNewResults: function (newResults, newBuffer) { var self = this; - // If there is a buffer, shut down so it doesn't stay in a way - if (self._unpublishedBuffer) { + // If the query is ordered and there is a buffer, shut down so it doesn't + // stay in a way. + if (self._limit) { self._unpublishedBuffer.clear(); } From b8429fa0b18cf68573441d97c64a7fbb8cd1b139 Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Tue, 11 Feb 2014 21:57:49 -0800 Subject: [PATCH 03/93] Export sorter in Minimongo namespace --- packages/minimongo/sort.js | 1 + 1 file changed, 1 insertion(+) diff --git a/packages/minimongo/sort.js b/packages/minimongo/sort.js index 718ec1537e..60c3dd60b2 100644 --- a/packages/minimongo/sort.js +++ b/packages/minimongo/sort.js @@ -110,6 +110,7 @@ Sorter.prototype.getComparator = function (options) { }]); }; +Minimongo.Sorter = Sorter; MinimongoTest.Sorter = Sorter; // Given an array of comparators From 818a770ed93d02c8a930b8fd4aaf6cb93df20888 Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Tue, 11 Feb 2014 21:58:09 -0800 Subject: [PATCH 04/93] Fix Dummy Structure actually Dummy-structure was supposed to be so dummy, it would be trivially easy to write a bug-free 20-line code for it. But I failed. To dummy structure... 
--- packages/mongo-livedata/dummy-structure.js | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/mongo-livedata/dummy-structure.js b/packages/mongo-livedata/dummy-structure.js index ce008e56bd..e0622a6d80 100644 --- a/packages/mongo-livedata/dummy-structure.js +++ b/packages/mongo-livedata/dummy-structure.js @@ -2,13 +2,13 @@ DummyStructure = function (comparator) { var self = this; self.comparator = comparator; - self.idMap = new LocalCollection.IdMap; + self.idMap = new LocalCollection._IdMap; }; _.each(['get', 'set', 'remove', 'has', 'empty', 'clear', 'forEach', 'size', 'setDefault'], function (method) { DummyStructure.prototype[method] = function (/* arguments */) { var self = this; - return self.idMap[method].apply(self, arguments); + return self.idMap[method].apply(self.idMap, arguments); }; }); From c389dadc460728c5618fea75b2bca1415f2ae472 Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Tue, 11 Feb 2014 21:59:13 -0800 Subject: [PATCH 05/93] Correctly distinguish queries with limits and sorts --- packages/mongo-livedata/oplog_observe_driver.js | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/packages/mongo-livedata/oplog_observe_driver.js b/packages/mongo-livedata/oplog_observe_driver.js index 6b684222d9..d0679fff58 100644 --- a/packages/mongo-livedata/oplog_observe_driver.js +++ b/packages/mongo-livedata/oplog_observe_driver.js @@ -20,7 +20,7 @@ OplogObserveDriver = function (options) { self._mongoHandle = options.mongoHandle; self._multiplexer = options.multiplexer; - if (options.ordered) { + if (options.cursorDescription.options.limit) { // There are several properties ordered driver implements: // - _limit is a positive number // - _comparator is a function-comparator by which the query is ordered @@ -28,7 +28,8 @@ OplogObserveDriver = function (options) { // - _published implements maxElementId method in addition to IdMap methods // XXX replace with doubly-heaps and shit once we get these working - var 
comparator = self._cursorDescription.sorter.getComparator(); + var sorter = new Minimongo.Sorter(options.cursorDescription.options.sort); + var comparator = sorter.getComparator(); self._limit = self._cursorDescription.limit; self._comparator = comparator; self._unpublishedBuffer = new DummyStructure(comparator); @@ -649,7 +650,7 @@ OplogObserveDriver.cursorSupported = function (cursorDescription, matcher) { // This option (which are mostly used for sorted cursors) require us to figure // out where a given document fits in an order to know if it's included or // not, and we don't track that information when doing oplog tailing. - if (options.limit && (options.skip || !options.sorter)) return false; + if (options.limit && (options.skip || !options.sort)) return false; // If a fields projection option is given check if it is supported by // minimongo (some operators are not supported). From 7493890bb2855affbd2d7e71e4dd6a490380035e Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Tue, 11 Feb 2014 23:27:07 -0800 Subject: [PATCH 06/93] Fix typos --- packages/mongo-livedata/dummy-structure.js | 2 +- packages/mongo-livedata/oplog_observe_driver.js | 14 +++++++------- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/packages/mongo-livedata/dummy-structure.js b/packages/mongo-livedata/dummy-structure.js index e0622a6d80..0b0708c210 100644 --- a/packages/mongo-livedata/dummy-structure.js +++ b/packages/mongo-livedata/dummy-structure.js @@ -24,7 +24,7 @@ DummyStructure.prototype.minElementId = function () { var self = this; var minElementId = null; self.idMap.forEach(function (value, key) { - if (minElement === null) + if (minElementId === null) minElementId = key; else if (self.comparator(value, self.idMap.get(minElementId)) < 0) minElementId = key; diff --git a/packages/mongo-livedata/oplog_observe_driver.js b/packages/mongo-livedata/oplog_observe_driver.js index d0679fff58..7a1db5ae59 100644 --- a/packages/mongo-livedata/oplog_observe_driver.js +++ 
b/packages/mongo-livedata/oplog_observe_driver.js @@ -30,7 +30,7 @@ OplogObserveDriver = function (options) { // XXX replace with doubly-heaps and shit once we get these working var sorter = new Minimongo.Sorter(options.cursorDescription.options.sort); var comparator = sorter.getComparator(); - self._limit = self._cursorDescription.limit; + self._limit = self._cursorDescription.options.limit; self._comparator = comparator; self._unpublishedBuffer = new DummyStructure(comparator); // We need something that can find Max value in addition to IdMap interface @@ -144,7 +144,7 @@ _.extend(OplogObserveDriver.prototype, { " documents are overflowing the set"); } - var overflowingDocId = self._published.getMaximumId(); + var overflowingDocId = self._published.maxElementId(); var overflowingDoc = self._published.get(overflowingDocId); self._published.remove(overflowingDocId); self._multiplexer.removed(overflowingDocId); @@ -181,9 +181,9 @@ _.extend(OplogObserveDriver.prototype, { }, _addBuffered: function (id, doc) { var self = this; - self._unpublishedBuffer.add(id, self._sharedProjectionFn(fields)); + self._unpublishedBuffer.set(id, self._sharedProjectionFn(doc)); if (self._unpublishedBuffer.size() > self._limit) - self._unpublishedBuffer.remove(self._unpublishedBuffer.getMaximumId()); + self._unpublishedBuffer.remove(self._unpublishedBuffer.maxElementId()); }, _removeBuffered: function (id) { var self = this; @@ -206,8 +206,8 @@ _.extend(OplogObserveDriver.prototype, { var limit = self._limit; var comparator = self._comparator; - var maxPublished = (limit && self._published.size() > 0) ? self._published.get(self._published.getMaximumId()) : null; - var maxBuffered = (limit && self._unpublishedBuffer.size() > 0) ? self._unpublishedBuffer.get(self._unpublishedBuffer.getMaximumId()) : null; + var maxPublished = (limit && self._published.size() > 0) ? 
self._published.get(self._published.maxElementId()) : null; + var maxBuffered = (limit && self._unpublishedBuffer.size() > 0) ? self._unpublishedBuffer.get(self._unpublishedBuffer.maxElementId()) : null; // The query is unlimited or didn't publish enough documents yet or the new // document would fit into published set pushing the maximum element out, // then we need to publish the doc. @@ -272,7 +272,7 @@ _.extend(OplogObserveDriver.prototype, { // w/o querying mongo, so just remove it from buffer self._removeBuffered(id); // but it can move into published now, check it - var maxPublished = self._published.get(self._published.getMaximumId()); + var maxPublished = self._published.get(self._published.maxElementId()); if (comparator(newDoc, maxPublished) === -1) self._addPublished(id, newDoc); } From 4e095325cd38c074efc5a1ca1fb6d4e5720bdc70 Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Wed, 12 Feb 2014 00:13:20 -0800 Subject: [PATCH 07/93] Typo + always use inequalities with C-style comparators --- packages/mongo-livedata/oplog_observe_driver.js | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/mongo-livedata/oplog_observe_driver.js b/packages/mongo-livedata/oplog_observe_driver.js index 7a1db5ae59..2ee1ea5f1b 100644 --- a/packages/mongo-livedata/oplog_observe_driver.js +++ b/packages/mongo-livedata/oplog_observe_driver.js @@ -158,7 +158,7 @@ _.extend(OplogObserveDriver.prototype, { if (! self._limit) return; // xcxc size on heaps should be cached to O(1) - if (self._published.size() < self._cursorDescription.limit) { + if (self._published.size() < self._limit) { // The unpublished buffer is empty iff published contains the whole // matching set, i.e. number of matching documents is less or equal to the // queries limit. @@ -212,9 +212,9 @@ _.extend(OplogObserveDriver.prototype, { // document would fit into published set pushing the maximum element out, // then we need to publish the doc. 
// Otherwise we might need to buffer it (only in case of limited query). - if (!limit || self._published.size() < limit || comparator(maxPublished, fields) === 1) { + if (!limit || self._published.size() < limit || comparator(maxPublished, fields) > 0) { self._addPublished(id, fields); - } else if (self._unpublishedBuffer.size() < limit || comparator(maxBuffered, fields) === 1) { + } else if (self._unpublishedBuffer.size() < limit || comparator(maxBuffered, fields) > 0) { self._addBuffered(id, fields); } }, @@ -273,7 +273,7 @@ _.extend(OplogObserveDriver.prototype, { self._removeBuffered(id); // but it can move into published now, check it var maxPublished = self._published.get(self._published.maxElementId()); - if (comparator(newDoc, maxPublished) === -1) + if (comparator(newDoc, maxPublished) < 0) self._addPublished(id, newDoc); } } From 690f7c485bce0e1686e3fb5bff71c0caa0d24be1 Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Wed, 12 Feb 2014 00:14:16 -0800 Subject: [PATCH 08/93] A simple test for limits and it does pass! 
--- .../mongo-livedata/mongo_livedata_tests.js | 77 +++++++++++++++++++ 1 file changed, 77 insertions(+) diff --git a/packages/mongo-livedata/mongo_livedata_tests.js b/packages/mongo-livedata/mongo_livedata_tests.js index 032e0dbaa7..c6100a41d5 100644 --- a/packages/mongo-livedata/mongo_livedata_tests.js +++ b/packages/mongo-livedata/mongo_livedata_tests.js @@ -745,6 +745,83 @@ if (Meteor.isServer) { }); x++; }); + + Tinytest.addAsync("mongo-livedata - observe sorted, limited " + idGeneration, function (test, onComplete) { + var run = test.runId(); + var coll = new Meteor.Collection("observeLimit-"+run, collectionOptions); + + var observer = function () { + var output = []; + var callbacks = { + changed: function (newDoc) { + output.push({changed: newDoc._id}); + }, + added: function (newDoc) { + output.push({added: newDoc._id}); + }, + removed: function (oldDoc) { + output.push({removed: oldDoc._id}); + } + }; + var handle = coll.find({foo: 22}, + {sort: {bar: 1}, limit: 3}).observe(callbacks); + + return {output: output, handle: handle}; + }; + + var ins = function (doc) { + var id; + runInFence(function () { + id = coll.insert(doc); }); + return id; + }; + + var rem = function (sel) { runInFence(function () { coll.remove(sel); }); }; + + // Insert a doc and start observing. + var docId1 = ins({foo: 22, bar: 5}); + var o = observer(); + // Initial add. + test.length(o.output, 1); + test.equal(o.output.shift(), {added: docId1}); + + // Insert another doc (blocking until observes have fired). + var docId2 = ins({foo: 22, bar: 6}); + // Observed add. 
+ test.length(o.output, 1); + test.equal(o.output.shift(), {added: docId2}); + + var docId3 = ins({ foo: 22, bar: 3 }); + test.length(o.output, 1); + test.equal(o.output.shift(), {added: docId3}); + + // Add a non-matching document + ins({ foo: 13 }); + // It shouldn't be added + test.length(o.output, 0); + + // Add something that matches but is too big to fit in + var docId4 = ins({ foo: 22, bar: 7 }); + // It shouldn't be added + test.length(o.output, 0); + + // Let's add something small enough to fit in + var docId5 = ins({ foo: 22, bar: -1 }); + // We should get an added and a removed event + test.length(o.output, 2); + test.equal(o.output.shift(), {added: docId5}); + // doc 2 was removed from the published set as it is too big to be in + test.equal(o.output.shift(), {removed: docId2}); + + // Now remove something and that doc 2 should be right back + rem(docId5); + test.length(o.output, 2); + test.equal(o.output.shift(), {removed: docId5}); + test.equal(o.output.shift(), {added: docId2}); + + o.handle.stop(); + onComplete(); + }); } From cfadc51b91c9682d904a363b56dc314dc5282a4e Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Wed, 12 Feb 2014 01:44:51 -0800 Subject: [PATCH 09/93] More tests on updates.
Clarifying comment --- .../mongo-livedata/mongo_livedata_tests.js | 55 +++++++++++++++++-- .../mongo-livedata/oplog_observe_driver.js | 4 ++ 2 files changed, 55 insertions(+), 4 deletions(-) diff --git a/packages/mongo-livedata/mongo_livedata_tests.js b/packages/mongo-livedata/mongo_livedata_tests.js index c6100a41d5..12618963fa 100644 --- a/packages/mongo-livedata/mongo_livedata_tests.js +++ b/packages/mongo-livedata/mongo_livedata_tests.js @@ -770,13 +770,21 @@ if (Meteor.isServer) { }; var ins = function (doc) { - var id; - runInFence(function () { - id = coll.insert(doc); }); + var id; runInFence(function () { id = coll.insert(doc); }); return id; }; - var rem = function (sel) { runInFence(function () { coll.remove(sel); }); }; + var upd = function (sel, mod, opt) { + runInFence(function () { + coll.update(sel, mod, opt); + }); + }; + // compares arrays a and b w/o looking at order + var setsEqual = function (a, b) { + a = _.map(a, EJSON.stringify); + b = _.map(b, EJSON.stringify); + return _.isEmpty(_.difference(a, b)); + }; // Insert a doc and start observing. var docId1 = ins({foo: 22, bar: 5}); @@ -819,6 +827,45 @@ if (Meteor.isServer) { test.equal(o.output.shift(), {removed: docId5}); test.equal(o.output.shift(), {added: docId2}); + // Current state is [3 5 6] 7] + // Add some negative numbers overflowing the buffer. + // New documents will take the published place, [3 5 6] will take the buffer + // and 7 will be outside of the buffer in MongoDB. + var docId6 = ins({ foo: 22, bar: -1 }); + var docId7 = ins({ foo: 22, bar: -2 }); + var docId8 = ins({ foo: 22, bar: -3 }); + test.length(o.output, 6); + var expected = [{added: docId6}, {removed: docId2}, + {added: docId7}, {removed: docId1}, + {added: docId8}, {removed: docId3}]; + + test.equal(o.output, expected); + o.output.splice(0, 6); + + // Now the state is [-3 -2 -1] 3 5 6] 7 + // If we update first 3 docs (increment them by 20), it would be + // interesting. 
+ upd({ bar: { $lt: 0 }}, { $inc: { bar: 20 } }, { multi: true }); + + // The updated documents can't find their place in published and they can't + // be buffered as we are not aware of the situation outside of the buffer. + // But since our buffer becomes empty, it will be refilled partially with + // updated documents. + test.length(o.output, 6); + expected = [{removed: docId6}, {added: docId4}, + {removed: docId7}, {added: docId1}, + {removed: docId8}, {added: docId2}]; + + // Note: since we are updating multiple things, the order of updates may + // differ from launch to launch. That's why we compare even positions + // (removes) w/o looking at ordering. + test.isTrue(setsEqual([o.output[0], o.output[2], o.output[4]], + [expected[0], expected[2], expected[4]])); + test.equal([o.output[1], o.output[3], o.output[5]], + [expected[1], expected[3], expected[5]]); + // The new arrangment is [3 5 6] 7 17 18] 19 + + o.handle.stop(); onComplete(); }); diff --git a/packages/mongo-livedata/oplog_observe_driver.js b/packages/mongo-livedata/oplog_observe_driver.js index 2ee1ea5f1b..0febd194e3 100644 --- a/packages/mongo-livedata/oplog_observe_driver.js +++ b/packages/mongo-livedata/oplog_observe_driver.js @@ -261,6 +261,10 @@ _.extend(OplogObserveDriver.prototype, { // Unordered case where the document stays in published once it matches // or the case when we don't have enough matching docs to publish or the // changed but matching doc will stay in published anyways. + // XXX: We rely on the emptiness of buffer. Be sure to maintain the fact + // that buffer can't be empty if there are matching documents not + // published. Notably, we don't want to schedule repoll and continue + // relying on this property. 
if (!self._limit || self._unpublishedBuffer.size() === 0 || comparator(newDoc, minBuffered) < 1) { self._changePublished(id, oldDoc, newDoc); } else { From 8be6928b4c74ea29986caa7762157769ebb9b2fb Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Wed, 12 Feb 2014 17:34:44 -0800 Subject: [PATCH 10/93] wip test --- packages/mongo-livedata/mongo_livedata_tests.js | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/packages/mongo-livedata/mongo_livedata_tests.js b/packages/mongo-livedata/mongo_livedata_tests.js index 12618963fa..14ba50208c 100644 --- a/packages/mongo-livedata/mongo_livedata_tests.js +++ b/packages/mongo-livedata/mongo_livedata_tests.js @@ -852,7 +852,7 @@ if (Meteor.isServer) { // But since our buffer becomes empty, it will be refilled partially with // updated documents. test.length(o.output, 6); - expected = [{removed: docId6}, {added: docId4}, + expected = [{removed: docId6}, {added: docId3}, {removed: docId7}, {added: docId1}, {removed: docId8}, {added: docId2}]; @@ -863,8 +863,21 @@ if (Meteor.isServer) { [expected[0], expected[2], expected[4]])); test.equal([o.output[1], o.output[3], o.output[5]], [expected[1], expected[3], expected[5]]); - // The new arrangment is [3 5 6] 7 17 18] 19 + o.output.splice(0, 6); + // The new arrangment is [3 5 6] 7 17 18] 19 + // By ids: [docId3, docId1, docId2] docId4] docId6 docId7 docId8 + // Remove first 4 docs (3, 1, 2, 4) forcing buffer to become empty and + // schedule a repoll. 
+ debugger + rem({ bar: { $lt: 10 } }); + var expectedRemoves = [{removed: docId3}, {removed: docId1}, + {removed: docId2}, {removed: docId4}]; + var expectedAdds = [{added: docId6}, {added: docId7}, {added: docId8}]; + + test.length(o.output, 7); + test.isTrue(setsEqual([o.output[0], o.output[2], o.output[4]])); + test.equal(o.output, expectedRemoves); o.handle.stop(); onComplete(); From 3ac9cf88b74842d965a41932d1108b2f06d3eaa7 Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Wed, 12 Feb 2014 21:12:33 -0800 Subject: [PATCH 11/93] Fix the test on limit-requery ; keep docs in _published and _unpublishedBuffer consistently w/o _id --- packages/mongo-livedata/mongo_livedata_tests.js | 10 +++++----- packages/mongo-livedata/oplog_observe_driver.js | 2 ++ 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/packages/mongo-livedata/mongo_livedata_tests.js b/packages/mongo-livedata/mongo_livedata_tests.js index 14ba50208c..4c55df86c1 100644 --- a/packages/mongo-livedata/mongo_livedata_tests.js +++ b/packages/mongo-livedata/mongo_livedata_tests.js @@ -869,15 +869,15 @@ if (Meteor.isServer) { // By ids: [docId3, docId1, docId2] docId4] docId6 docId7 docId8 // Remove first 4 docs (3, 1, 2, 4) forcing buffer to become empty and // schedule a repoll. 
- debugger rem({ bar: { $lt: 10 } }); var expectedRemoves = [{removed: docId3}, {removed: docId1}, {removed: docId2}, {removed: docId4}]; - var expectedAdds = [{added: docId6}, {added: docId7}, {added: docId8}]; + var expectedAdds = [{added: docId4}, {added: docId8}, + {added: docId7}, {added: docId6}]; - test.length(o.output, 7); - test.isTrue(setsEqual([o.output[0], o.output[2], o.output[4]])); - test.equal(o.output, expectedRemoves); + test.length(o.output, 8); + test.isTrue(setsEqual([o.output[0], o.output[2], o.output[4]], expectedRemoves)); + test.equal([o.output[1], o.output[3], o.output[5], o.output[7]], expectedAdds); o.handle.stop(); onComplete(); diff --git a/packages/mongo-livedata/oplog_observe_driver.js b/packages/mongo-livedata/oplog_observe_driver.js index 0febd194e3..3407ea702a 100644 --- a/packages/mongo-livedata/oplog_observe_driver.js +++ b/packages/mongo-livedata/oplog_observe_driver.js @@ -578,6 +578,7 @@ _.extend(OplogObserveDriver.prototype, { self._removePublished(id); }); + // xcxc this should be sorted? // Now do adds and changes. 
newResults.forEach(function (doc, id) { // "true" here means to throw if we think this doc doesn't match the @@ -587,6 +588,7 @@ _.extend(OplogObserveDriver.prototype, { // Finally, replace the buffer newBuffer.forEach(function (doc, id) { + delete doc._id; self._addBuffered(id, doc); }); }, From 10d97c70c50e3aa7e73115556fa43cd57eeade42 Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Fri, 14 Feb 2014 18:58:19 -0800 Subject: [PATCH 12/93] wip fixing the appends to buffer --- .../mongo-livedata/mongo_livedata_tests.js | 37 +++++++++++++++++-- .../mongo-livedata/oplog_observe_driver.js | 27 ++++++++++---- 2 files changed, 53 insertions(+), 11 deletions(-) diff --git a/packages/mongo-livedata/mongo_livedata_tests.js b/packages/mongo-livedata/mongo_livedata_tests.js index 4c55df86c1..143516182d 100644 --- a/packages/mongo-livedata/mongo_livedata_tests.js +++ b/packages/mongo-livedata/mongo_livedata_tests.js @@ -746,6 +746,8 @@ if (Meteor.isServer) { x++; }); + // This test mainly checks the correctness of oplog code dealing with limited + // queries. Compitablity with poll-diff is added as well. Tinytest.addAsync("mongo-livedata - observe sorted, limited " + idGeneration, function (test, onComplete) { var run = test.runId(); var coll = new Meteor.Collection("observeLimit-"+run, collectionOptions); @@ -770,6 +772,7 @@ if (Meteor.isServer) { }; var ins = function (doc) { + if (doc.bar) doc._id = doc.bar.toString(); var id; runInFence(function () { id = coll.insert(doc); }); return id; }; @@ -824,8 +827,8 @@ if (Meteor.isServer) { // Now remove something and that doc 2 should be right back rem(docId5); test.length(o.output, 2); - test.equal(o.output.shift(), {removed: docId5}); - test.equal(o.output.shift(), {added: docId2}); + test.isTrue(setsEqual(o.output, [{removed: docId5}, {added: docId2}])); + o.output.slice(0, 2); // Current state is [3 5 6] 7] // Add some negative numbers overflowing the buffer. 
@@ -865,7 +868,7 @@ if (Meteor.isServer) { [expected[1], expected[3], expected[5]]); o.output.splice(0, 6); - // The new arrangment is [3 5 6] 7 17 18] 19 + // The new arrangement is [3 5 6] 7 17 18] 19 // By ids: [docId3, docId1, docId2] docId4] docId6 docId7 docId8 // Remove first 4 docs (3, 1, 2, 4) forcing buffer to become empty and // schedule a repoll. @@ -878,6 +881,34 @@ if (Meteor.isServer) { test.length(o.output, 8); test.isTrue(setsEqual([o.output[0], o.output[2], o.output[4]], expectedRemoves)); test.equal([o.output[1], o.output[3], o.output[5], o.output[7]], expectedAdds); + o.output.splice(0, 8); + + // The new arrangement is [17 18 19] or [docId6 docId7 docId8] + var docId9 = ins({ foo: 22, bar: 21 }); + var docId10 = ins({ foo: 22, bar: 31 }); + var docId11 = ins({ foo: 22, bar: 41 }); + var docId12 = ins({ foo: 22, bar: 51 }); + + console.log("----------------------d"); + console.log(o.handle._multiplexer._observeDriver._published.idMap._map); + console.log(o.handle._multiplexer._observeDriver._unpublishedBuffer.idMap._map); + debugger; + upd({ bar: { $lt: 20 } }, { $inc: { bar: 5 } }, { multi: true }); + // Becomes [21 22 23] 24 31 41] 51 + test.length(o.output, 4); + test.equal(o.output.shift(), { removed: docId6 }); + test.equal(o.output.shift(), { added: docId9 }); + test.equal(o.output.shift(), { changed: docId7 }); + test.equal(o.output.shift(), { changed: docId8 }); + console.log(o.handle._multiplexer._observeDriver._published.idMap._map); + console.log(o.handle._multiplexer._observeDriver._unpublishedBuffer.idMap._map); + console.log("----------------------p"); + + rem(docId9); + // Becomes [22 23 24] 31 41] 51 + test.length(o.output, 2); + test.equal(o.output.shift(), { removed: docId9 }); + test.equal(o.output.shift(), { added: docId6 }); o.handle.stop(); onComplete(); diff --git a/packages/mongo-livedata/oplog_observe_driver.js b/packages/mongo-livedata/oplog_observe_driver.js index 3407ea702a..3b9a438b9c 100644 --- 
a/packages/mongo-livedata/oplog_observe_driver.js +++ b/packages/mongo-livedata/oplog_observe_driver.js @@ -41,6 +41,7 @@ OplogObserveDriver = function (options) { self._unpublishedBuffer = null; self._published = new LocalCollection._IdMap; } + self._justUpdatedBuffer = false; self._stopped = false; self._stopHandles = []; @@ -182,8 +183,10 @@ _.extend(OplogObserveDriver.prototype, { _addBuffered: function (id, doc) { var self = this; self._unpublishedBuffer.set(id, self._sharedProjectionFn(doc)); - if (self._unpublishedBuffer.size() > self._limit) + if (self._unpublishedBuffer.size() > self._limit) { self._unpublishedBuffer.remove(self._unpublishedBuffer.maxElementId()); + self._justUpdatedBuffer = false; + } }, _removeBuffered: function (id) { var self = this; @@ -212,9 +215,12 @@ _.extend(OplogObserveDriver.prototype, { // document would fit into published set pushing the maximum element out, // then we need to publish the doc. // Otherwise we might need to buffer it (only in case of limited query). + // Buffering a new document is allowed only if it is inserted in the middle + // or the beginning of it as we cannot determine if there are documents + // outside of the buffer easily. if (!limit || self._published.size() < limit || comparator(maxPublished, fields) > 0) { self._addPublished(id, fields); - } else if (self._unpublishedBuffer.size() < limit || comparator(maxBuffered, fields) > 0) { + } else if (self._justUpdatedBuffer || (maxBuffered && comparator(maxBuffered, fields) > 0)) { self._addBuffered(id, fields); } }, @@ -420,11 +426,15 @@ _.extend(OplogObserveDriver.prototype, { // XXX needs more thought on non-zero skip // XXX "2" here is a "magic number" var initialCursor = self._cursorForQuery({ limit: self._limit * 2 }); - initialCursor.forEach(function (initialDoc) { - // self._addMatching knows how to correctly separate documents into the - // published set and the buffer. 
- self._addMatching(initialDoc); + initialCursor.forEach(function (initialDoc, i) { + var id = initialDoc._id; + delete initialDoc._id; + if (!self._limit || i < self._limit) + self._addPublished(id, initialDoc); + else + self._addBuffered(id, initialDoc); }); + self._justUpdatedBuffer = true; if (self._stopped) throw new Error("oplog stopped quite early"); // Allow observeChanges calls to return. (After this, it's possible for @@ -477,7 +487,6 @@ _.extend(OplogObserveDriver.prototype, { }); self._publishNewResults(newResults, newBuffer); - self._doneQuerying(); }); }, @@ -578,8 +587,9 @@ _.extend(OplogObserveDriver.prototype, { self._removePublished(id); }); - // xcxc this should be sorted? // Now do adds and changes. + // If self has a buffer and limit, the new fetched result will be + // ordered correctly as the query has sort specifier. newResults.forEach(function (doc, id) { // "true" here means to throw if we think this doc doesn't match the // selector. @@ -591,6 +601,7 @@ _.extend(OplogObserveDriver.prototype, { delete doc._id; self._addBuffered(id, doc); }); + self._justUpdatedBuffer = true; }, // This stop function is invoked from the onStop of the ObserveMultiplexer, so From 92d38af16dc4d199f95f8e9201be32eeb60b38ee Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Sat, 15 Feb 2014 01:32:05 -0800 Subject: [PATCH 13/93] wip changed published document can go into buffer --- packages/mongo-livedata/oplog_observe_driver.js | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/packages/mongo-livedata/oplog_observe_driver.js b/packages/mongo-livedata/oplog_observe_driver.js index 3b9a438b9c..02f0909165 100644 --- a/packages/mongo-livedata/oplog_observe_driver.js +++ b/packages/mongo-livedata/oplog_observe_driver.js @@ -276,6 +276,12 @@ _.extend(OplogObserveDriver.prototype, { } else { // after the change doc doesn't stay in the published, remove it self._removePublished(id); + // but it can move into buffered now, check it + var maxBuffered = 
self._unpublishedBuffer.get(self._unpublishedBuffer.maxElementId()); + if (self._justUpdatedBuffer || (maxBuffered && comparator(newDoc, maxBuffered) < 0)) + self._addPublished(id, newDoc); + else + self._justUpdatedBuffer = false; } } else if (bufferedBefore) { // after the change we can't know if doc is still in the buffer limit From 1071b37ed445225c19f9ff96dac7f95e8632fbd5 Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Mon, 17 Feb 2014 13:47:26 -0800 Subject: [PATCH 14/93] Fix tests for oplog --- .../mongo-livedata/mongo_livedata_tests.js | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/packages/mongo-livedata/mongo_livedata_tests.js b/packages/mongo-livedata/mongo_livedata_tests.js index 143516182d..c0c1fe6dc6 100644 --- a/packages/mongo-livedata/mongo_livedata_tests.js +++ b/packages/mongo-livedata/mongo_livedata_tests.js @@ -770,6 +770,7 @@ if (Meteor.isServer) { return {output: output, handle: handle}; }; + var clearOutput = function (o) { o.output.splice(0, o.output.length); }; var ins = function (doc) { if (doc.bar) doc._id = doc.bar.toString(); @@ -828,7 +829,7 @@ if (Meteor.isServer) { rem(docId5); test.length(o.output, 2); test.isTrue(setsEqual(o.output, [{removed: docId5}, {added: docId2}])); - o.output.slice(0, 2); + clearOutput(o); // Current state is [3 5 6] 7] // Add some negative numbers overflowing the buffer. 
@@ -843,7 +844,7 @@ if (Meteor.isServer) { {added: docId8}, {removed: docId3}]; test.equal(o.output, expected); - o.output.splice(0, 6); + clearOutput(o); // Now the state is [-3 -2 -1] 3 5 6] 7 // If we update first 3 docs (increment them by 20), it would be @@ -866,7 +867,7 @@ if (Meteor.isServer) { [expected[0], expected[2], expected[4]])); test.equal([o.output[1], o.output[3], o.output[5]], [expected[1], expected[3], expected[5]]); - o.output.splice(0, 6); + clearOutput(o); // The new arrangement is [3 5 6] 7 17 18] 19 // By ids: [docId3, docId1, docId2] docId4] docId6 docId7 docId8 @@ -881,7 +882,7 @@ if (Meteor.isServer) { test.length(o.output, 8); test.isTrue(setsEqual([o.output[0], o.output[2], o.output[4]], expectedRemoves)); test.equal([o.output[1], o.output[3], o.output[5], o.output[7]], expectedAdds); - o.output.splice(0, 8); + clearOutput(o); // The new arrangement is [17 18 19] or [docId6 docId7 docId8] var docId9 = ins({ foo: 22, bar: 21 }); @@ -889,10 +890,7 @@ if (Meteor.isServer) { var docId11 = ins({ foo: 22, bar: 41 }); var docId12 = ins({ foo: 22, bar: 51 }); - console.log("----------------------d"); - console.log(o.handle._multiplexer._observeDriver._published.idMap._map); - console.log(o.handle._multiplexer._observeDriver._unpublishedBuffer.idMap._map); - debugger; + test.length(o.output, 0); upd({ bar: { $lt: 20 } }, { $inc: { bar: 5 } }, { multi: true }); // Becomes [21 22 23] 24 31 41] 51 test.length(o.output, 4); @@ -900,15 +898,14 @@ if (Meteor.isServer) { test.equal(o.output.shift(), { added: docId9 }); test.equal(o.output.shift(), { changed: docId7 }); test.equal(o.output.shift(), { changed: docId8 }); - console.log(o.handle._multiplexer._observeDriver._published.idMap._map); - console.log(o.handle._multiplexer._observeDriver._unpublishedBuffer.idMap._map); - console.log("----------------------p"); + clearOutput(o); rem(docId9); // Becomes [22 23 24] 31 41] 51 test.length(o.output, 2); test.equal(o.output.shift(), { removed: docId9 }); 
test.equal(o.output.shift(), { added: docId6 }); + clearOutput(o); o.handle.stop(); onComplete(); From 5b0663993abf2eac076ba162f3db75c0721f4bf1 Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Mon, 17 Feb 2014 13:47:38 -0800 Subject: [PATCH 15/93] Fix typo: addPublished -> addBuffered; Revert the population of initialQuerying back to _addMatching everything. --- packages/mongo-livedata/oplog_observe_driver.js | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/packages/mongo-livedata/oplog_observe_driver.js b/packages/mongo-livedata/oplog_observe_driver.js index 02f0909165..ac33779eae 100644 --- a/packages/mongo-livedata/oplog_observe_driver.js +++ b/packages/mongo-livedata/oplog_observe_driver.js @@ -279,7 +279,7 @@ _.extend(OplogObserveDriver.prototype, { // but it can move into buffered now, check it var maxBuffered = self._unpublishedBuffer.get(self._unpublishedBuffer.maxElementId()); if (self._justUpdatedBuffer || (maxBuffered && comparator(newDoc, maxBuffered) < 0)) - self._addPublished(id, newDoc); + self._addBuffered(id, newDoc); else self._justUpdatedBuffer = false; } @@ -432,13 +432,8 @@ _.extend(OplogObserveDriver.prototype, { // XXX needs more thought on non-zero skip // XXX "2" here is a "magic number" var initialCursor = self._cursorForQuery({ limit: self._limit * 2 }); - initialCursor.forEach(function (initialDoc, i) { - var id = initialDoc._id; - delete initialDoc._id; - if (!self._limit || i < self._limit) - self._addPublished(id, initialDoc); - else - self._addBuffered(id, initialDoc); + initialCursor.forEach(function (initialDoc) { + self._addMatching(initialDoc); }); self._justUpdatedBuffer = true; if (self._stopped) From 8b931e64d6dbe811e4cc48472625b9551e91d216 Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Mon, 17 Feb 2014 15:00:51 -0800 Subject: [PATCH 16/93] Tests behave correctly in both oplog and non-oplog mode --- .../mongo-livedata/mongo_livedata_tests.js | 55 ++++++++++++------- 1 file changed, 35 insertions(+), 
20 deletions(-) diff --git a/packages/mongo-livedata/mongo_livedata_tests.js b/packages/mongo-livedata/mongo_livedata_tests.js index c0c1fe6dc6..46abc8e78b 100644 --- a/packages/mongo-livedata/mongo_livedata_tests.js +++ b/packages/mongo-livedata/mongo_livedata_tests.js @@ -856,17 +856,19 @@ if (Meteor.isServer) { // But since our buffer becomes empty, it will be refilled partially with // updated documents. test.length(o.output, 6); - expected = [{removed: docId6}, {added: docId3}, - {removed: docId7}, {added: docId1}, - {removed: docId8}, {added: docId2}]; + var expectedRemoves = [{removed: docId6}, + {removed: docId7}, + {removed: docId8}]; + var expectedAdds = [{added: docId3}, + {added: docId1}, + {added: docId2}]; // Note: since we are updating multiple things, the order of updates may // differ from launch to launch. That's why we compare even positions // (removes) w/o looking at ordering. - test.isTrue(setsEqual([o.output[0], o.output[2], o.output[4]], - [expected[0], expected[2], expected[4]])); - test.equal([o.output[1], o.output[3], o.output[5]], - [expected[1], expected[3], expected[5]]); + test.isTrue(setsEqual(_.filter(o.output, function (e) {return e.removed;}), + expectedRemoves)); + test.equal(_.filter(o.output, function (e){return e.added;}), expectedAdds); clearOutput(o); // The new arrangement is [3 5 6] 7 17 18] 19 @@ -874,14 +876,28 @@ if (Meteor.isServer) { // Remove first 4 docs (3, 1, 2, 4) forcing buffer to become empty and // schedule a repoll. 
rem({ bar: { $lt: 10 } }); - var expectedRemoves = [{removed: docId3}, {removed: docId1}, - {removed: docId2}, {removed: docId4}]; - var expectedAdds = [{added: docId4}, {added: docId8}, - {added: docId7}, {added: docId6}]; - test.length(o.output, 8); - test.isTrue(setsEqual([o.output[0], o.output[2], o.output[4]], expectedRemoves)); - test.equal([o.output[1], o.output[3], o.output[5], o.output[7]], expectedAdds); + // XXX the oplog code analyzes the events one by one: one remove after + // another. Poll-n-diff code, on the other side, analyzes the batch action + // of multiple remove. Because of that difference, expected outputs differ. + if (o.handle._multiplexer._observeDriver._usesOplog) { + var expectedRemoves = [{removed: docId3}, {removed: docId1}, + {removed: docId2}, {removed: docId4}]; + var expectedAdds = [{added: docId4}, {added: docId8}, + {added: docId7}, {added: docId6}]; + + test.length(o.output, 8); + } else { + var expectedRemoves = [{removed: docId3}, {removed: docId1}, + {removed: docId2}]; + var expectedAdds = [{added: docId8}, {added: docId7}, {added: docId6}]; + + test.length(o.output, 6); + } + + test.isTrue(setsEqual(_.filter(o.output, function (e) {return e.removed;}), + expectedRemoves)); + test.equal(_.filter(o.output, function (e) {return e.added;}), expectedAdds); clearOutput(o); // The new arrangement is [17 18 19] or [docId6 docId7 docId8] @@ -894,17 +910,16 @@ if (Meteor.isServer) { upd({ bar: { $lt: 20 } }, { $inc: { bar: 5 } }, { multi: true }); // Becomes [21 22 23] 24 31 41] 51 test.length(o.output, 4); - test.equal(o.output.shift(), { removed: docId6 }); - test.equal(o.output.shift(), { added: docId9 }); - test.equal(o.output.shift(), { changed: docId7 }); - test.equal(o.output.shift(), { changed: docId8 }); + test.isTrue(setsEqual(o.output, [{removed: docId6}, + {added: docId9}, + {changed: docId7}, + {changed: docId8}])); clearOutput(o); rem(docId9); // Becomes [22 23 24] 31 41] 51 test.length(o.output, 2); - 
test.equal(o.output.shift(), { removed: docId9 }); - test.equal(o.output.shift(), { added: docId6 }); + test.isTrue(setsEqual(o.output, [{removed: docId9}, {added: docId6}])); clearOutput(o); o.handle.stop(); From a5ddd71b1d14c70123875a7e6cb78ab1b07c6cc9 Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Mon, 17 Feb 2014 15:05:18 -0800 Subject: [PATCH 17/93] Tests on newly supported cursor types --- packages/mongo-livedata/oplog_tests.js | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/packages/mongo-livedata/oplog_tests.js b/packages/mongo-livedata/oplog_tests.js index a94527f322..5d28c01a44 100644 --- a/packages/mongo-livedata/oplog_tests.js +++ b/packages/mongo-livedata/oplog_tests.js @@ -1,8 +1,8 @@ var OplogCollection = new Meteor.Collection("oplog-" + Random.id()); Tinytest.add("mongo-livedata - oplog - cursorSupported", function (test) { - var supported = function (expected, selector) { - var cursor = OplogCollection.find(selector); + var supported = function (expected, selector, options) { + var cursor = OplogCollection.find(selector, options); var handle = cursor.observeChanges({added: function () {}}); test.equal(!!handle._multiplexer._observeDriver._usesOplog, expected); handle.stop(); @@ -38,4 +38,9 @@ Tinytest.add("mongo-livedata - oplog - cursorSupported", function (test) { // Nothing Minimongo doesn't understand. (Minimongo happens to fail to // implement $elemMatch inside $all which MongoDB supports.) 
supported(false, {x: {$all: [{$elemMatch: {y: 2}}]}}); + + supported(true, {}, { sort: {x:1} }); + supported(true, {}, { sort: {x:1}, limit: 5 }); + supported(false, {}, { limit: 5 }); + supported(false, {}, { skip: 2, limit: 5 }); }); From c03701ef1c98838a52d6030ea7cf334612dc32dc Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Mon, 17 Feb 2014 17:01:10 -0800 Subject: [PATCH 18/93] Remove Sorter class from MinimongoTest as it is already exported in Minimongo symbol --- packages/minimongo/minimongo_tests.js | 8 ++++---- packages/minimongo/sort.js | 1 - 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/packages/minimongo/minimongo_tests.js b/packages/minimongo/minimongo_tests.js index 4bb58d2b94..8560139017 100644 --- a/packages/minimongo/minimongo_tests.js +++ b/packages/minimongo/minimongo_tests.js @@ -1549,7 +1549,7 @@ Tinytest.add("minimongo - ordering", function (test) { // document ordering under a sort specification var verify = function (sorts, docs) { _.each(_.isArray(sorts) ? sorts : [sorts], function (sort) { - var sorter = new MinimongoTest.Sorter(sort); + var sorter = new Minimongo.Sorter(sort); assert_ordering(test, sorter.getComparator(), docs); }); }; @@ -1577,15 +1577,15 @@ Tinytest.add("minimongo - ordering", function (test) { [{c: 1}, {a: 1, b: 2}, {a: 1, b: 3}, {a: 2, b: 0}]); test.throws(function () { - new MinimongoTest.Sorter("a"); + new Minimongo.Sorter("a"); }); test.throws(function () { - new MinimongoTest.Sorter(123); + new Minimongo.Sorter(123); }); // No sort spec implies everything equal. - test.equal(new MinimongoTest.Sorter({}).getComparator()({a:1}, {a:2}), 0); + test.equal(new Minimongo.Sorter({}).getComparator()({a:1}, {a:2}), 0); // All sorts of array edge cases! // Increasing sort sorts by the smallest element it finds; 1 < 2. 
diff --git a/packages/minimongo/sort.js b/packages/minimongo/sort.js index 60c3dd60b2..5a1e57a3db 100644 --- a/packages/minimongo/sort.js +++ b/packages/minimongo/sort.js @@ -111,7 +111,6 @@ Sorter.prototype.getComparator = function (options) { }; Minimongo.Sorter = Sorter; -MinimongoTest.Sorter = Sorter; // Given an array of comparators // (functions (a,b)->(negative or positive or zero)), returns a single From 07a18984b15a581e07ae4efcba01ee8b6dba2a38 Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Mon, 17 Feb 2014 17:25:01 -0800 Subject: [PATCH 19/93] More comments, explicity, fixed ambiguities --- .../mongo-livedata/oplog_observe_driver.js | 35 +++++++++++++------ 1 file changed, 25 insertions(+), 10 deletions(-) diff --git a/packages/mongo-livedata/oplog_observe_driver.js b/packages/mongo-livedata/oplog_observe_driver.js index ac33779eae..dbfbcbf930 100644 --- a/packages/mongo-livedata/oplog_observe_driver.js +++ b/packages/mongo-livedata/oplog_observe_driver.js @@ -20,14 +20,22 @@ OplogObserveDriver = function (options) { self._mongoHandle = options.mongoHandle; self._multiplexer = options.multiplexer; + if (options.ordered) { + throw Error("OplogObserveDriver only supports unordered observeChanges"); + } + if (options.cursorDescription.options.limit) { // There are several properties ordered driver implements: // - _limit is a positive number // - _comparator is a function-comparator by which the query is ordered - // - _unpublishedBuffer is non-null collection + // - _unpublishedBuffer is non-null collection, + // the empty buffer in STEADY phase implies that the + // everything that matches the queries selector fits + // into published set. // - _published implements maxElementId method in addition to IdMap methods - // XXX replace with doubly-heaps and shit once we get these working + // We don't support $near and other geo-queries so it's OK to initialize the + // comparator only once in the constructor. 
var sorter = new Minimongo.Sorter(options.cursorDescription.options.sort); var comparator = sorter.getComparator(); self._limit = self._cursorDescription.options.limit; @@ -191,6 +199,9 @@ _.extend(OplogObserveDriver.prototype, { _removeBuffered: function (id) { var self = this; self._unpublishedBuffer.remove(id); + // To keep the contract "buffer is never empty in STEADY phase unless the + // everything matching fits into published" true, we poll everything as soon + // as we see the buffer becoming empty. if (! self._unpublishedBuffer.size()) self._needToPollQuery(); }, @@ -256,7 +267,7 @@ _.extend(OplogObserveDriver.prototype, { self._addMatching(newDoc); } else if (cachedBefore && !matchesNow) { self._removeMatching(id); - } else if (matchesNow) { + } else if (cachedBefore && matchesNow) { delete newDoc._id; var oldDoc = self._published.get(id); var comparator = self._comparator; @@ -264,14 +275,14 @@ _.extend(OplogObserveDriver.prototype, { self._unpublishedBuffer.get(self._unpublishedBuffer.minElementId()); if (publishedBefore) { - // Unordered case where the document stays in published once it matches + // Unlimited case where the document stays in published once it matches // or the case when we don't have enough matching docs to publish or the // changed but matching doc will stay in published anyways. // XXX: We rely on the emptiness of buffer. Be sure to maintain the fact // that buffer can't be empty if there are matching documents not // published. Notably, we don't want to schedule repoll and continue // relying on this property. 
- if (!self._limit || self._unpublishedBuffer.size() === 0 || comparator(newDoc, minBuffered) < 1) { + if (!self._limit || self._unpublishedBuffer.size() === 0 || comparator(newDoc, minBuffered) <= 0) { self._changePublished(id, oldDoc, newDoc); } else { // after the change doc doesn't stay in the published, remove it @@ -291,6 +302,8 @@ _.extend(OplogObserveDriver.prototype, { var maxPublished = self._published.get(self._published.maxElementId()); if (comparator(newDoc, maxPublished) < 0) self._addPublished(id, newDoc); + } else { + throw new Error("cachedBefore implies either of publishedBefore or bufferedBefore is true."); } } }, @@ -571,7 +584,7 @@ _.extend(OplogObserveDriver.prototype, { _publishNewResults: function (newResults, newBuffer) { var self = this; - // If the query is ordered and there is a buffer, shut down so it doesn't + // If the query is limited and there is a buffer, shut down so it doesn't // stay in a way. if (self._limit) { self._unpublishedBuffer.clear(); @@ -590,7 +603,7 @@ _.extend(OplogObserveDriver.prototype, { // Now do adds and changes. // If self has a buffer and limit, the new fetched result will be - // ordered correctly as the query has sort specifier. + // limited correctly as the query has sort specifier. newResults.forEach(function (doc, id) { // "true" here means to throw if we think this doc doesn't match the // selector. @@ -667,8 +680,8 @@ OplogObserveDriver.cursorSupported = function (cursorDescription, matcher) { // This option (which are mostly used for sorted cursors) require us to figure // out where a given document fits in an order to know if it's included or - // not, and we don't track that information when doing oplog tailing. - if (options.limit && (options.skip || !options.sort)) return false; + // not. We do it only if skip is not defined or 0. 
+ if (options.skip || (options.limit && !options.sort)) return false; // If a fields projection option is given check if it is supported by // minimongo (some operators are not supported). @@ -688,7 +701,9 @@ OplogObserveDriver.cursorSupported = function (cursorDescription, matcher) { // as Mongo, and can yield!) // - $near (has "interesting" properties in MongoDB, like the possibility // of returning an ID multiple times, though even polling maybe - // have a bug there + // have a bug there) + // XXX: once we support it, we would need to think more on how we + // initialize the comparators when we create the driver. return !matcher.hasWhere() && !matcher.hasGeoQuery(); }; From f336c61eda75174e75879af2132c62071b64d8aa Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Mon, 17 Feb 2014 17:30:46 -0800 Subject: [PATCH 20/93] More self-checks with throws (asserts) --- packages/mongo-livedata/oplog_observe_driver.js | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/packages/mongo-livedata/oplog_observe_driver.js b/packages/mongo-livedata/oplog_observe_driver.js index dbfbcbf930..8edffca8e5 100644 --- a/packages/mongo-livedata/oplog_observe_driver.js +++ b/packages/mongo-livedata/oplog_observe_driver.js @@ -147,14 +147,18 @@ _.extend(OplogObserveDriver.prototype, { if (self._limit && self._published.size() > self._limit) { // XXX in theory the size of published is no more than limit+1 if (self._published.size() !== self._limit + 1) { - // xcxc better error message throw new Error("After adding to published, " + - (self._limit - self._published.size()) + + (self._published.size() - self._limit) + " documents are overflowing the set"); } var overflowingDocId = self._published.maxElementId(); var overflowingDoc = self._published.get(overflowingDocId); + + if (_.isEqual(overflowingDocId, id)) { + throw new Error("The document just added is overflowing the published set"); + } + self._published.remove(overflowingDocId); 
self._multiplexer.removed(overflowingDocId); self._addBuffered(overflowingDocId, overflowingDoc); @@ -192,7 +196,13 @@ _.extend(OplogObserveDriver.prototype, { var self = this; self._unpublishedBuffer.set(id, self._sharedProjectionFn(doc)); if (self._unpublishedBuffer.size() > self._limit) { - self._unpublishedBuffer.remove(self._unpublishedBuffer.maxElementId()); + var maxBufferedId = self._unpublishedBuffer.maxElementId(); + + if (_.isEqual(maxBufferedId, id)) { + throw new Error("The document just added to buffer is overflowing the buffer"); + } + + self._unpublishedBuffer.remove(maxBufferedId); self._justUpdatedBuffer = false; } }, From e4eb3e3c75d525d8e2d878f97b1b5c87f303dc28 Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Mon, 17 Feb 2014 17:49:39 -0800 Subject: [PATCH 21/93] Never put something into buffer if it will drop out in a moment after --- packages/mongo-livedata/oplog_observe_driver.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/mongo-livedata/oplog_observe_driver.js b/packages/mongo-livedata/oplog_observe_driver.js index 8edffca8e5..ea0fa7fd5a 100644 --- a/packages/mongo-livedata/oplog_observe_driver.js +++ b/packages/mongo-livedata/oplog_observe_driver.js @@ -241,7 +241,7 @@ _.extend(OplogObserveDriver.prototype, { // outside of the buffer easily. 
if (!limit || self._published.size() < limit || comparator(maxPublished, fields) > 0) { self._addPublished(id, fields); - } else if (self._justUpdatedBuffer || (maxBuffered && comparator(maxBuffered, fields) > 0)) { + } else if ((self._justUpdatedBuffer && self._unpublishedBuffer.size() < limit) || (maxBuffered && comparator(maxBuffered, fields) > 0)) { self._addBuffered(id, fields); } }, From 8cad412ea199242943b943d0fc9dd72d1b9e778d Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Mon, 17 Feb 2014 15:50:50 -0800 Subject: [PATCH 22/93] init commit for BinaryHeap package Nothing here yet --- packages/binary-heap/binary-heap-tests.js | 0 packages/binary-heap/binary-heap.js | 40 +++++++++++++++++++++++ packages/binary-heap/package.js | 17 ++++++++++ 3 files changed, 57 insertions(+) create mode 100644 packages/binary-heap/binary-heap-tests.js create mode 100644 packages/binary-heap/binary-heap.js create mode 100644 packages/binary-heap/package.js diff --git a/packages/binary-heap/binary-heap-tests.js b/packages/binary-heap/binary-heap-tests.js new file mode 100644 index 0000000000..e69de29bb2 diff --git a/packages/binary-heap/binary-heap.js b/packages/binary-heap/binary-heap.js new file mode 100644 index 0000000000..0d112965b5 --- /dev/null +++ b/packages/binary-heap/binary-heap.js @@ -0,0 +1,40 @@ +BinaryHeap = function (comparator, initData) { + if (! _.isFunction(comparator)) + throw new Error('Passed comparator is invalid, should be a comparison function'); + var self = this; + self._comparator = comparator; + self._map = {}; + self._heap = []; + + if (_.isArray(initData)) + self._initFromData(initData); +}; + +var idStringify, idParse; +if (Package.minimongo) { + idStringify = Package.minimongo.LocalCollection._idStringify; + idParse = Package.minimongo.LocalCollection._idParse; +} else { + // XXX: These can't deal with special strings like '__proto__' + // XXX: or '{ looksLike: "object" }' or numbers. 
+ idStringify = function (id) { return JSON.stringify(id); }; + idParse = function (id) { return JSON.parse(id); } +} + +_.extend(BinaryHeap.prototype, { + _initFromData: function (data) {}, + + get: function (id) {}, + set: function (id, value) {}, + remove: function (id) {}, + has: function (id) {}, + empty: function (id) {}, + clear: function () {}, + forEach: function (iterator) {}, + size: function () {}, + setDefault: function () {}, + clone: function () {}, + + maxElementId: function () {} +}); + diff --git a/packages/binary-heap/package.js b/packages/binary-heap/package.js new file mode 100644 index 0000000000..fab25f5e1e --- /dev/null +++ b/packages/binary-heap/package.js @@ -0,0 +1,17 @@ +Package.describe({ + summary: "Binary Heap datastructure implementation", + internal: true +}); + +Package.on_use(function (api) { + api.export('BinaryHeap'); + api.use(['underscore']); + api.use(['minimongo'], { weak: true }); + api.add_files('binary-heap.js'); +}); + +Package.on_test(function (api) { + api.use('tinytest'); + api.add_files('binary-heap-tests.js'); +}); + From 7f2c6bb09b75e6e943fc5bfabd523766627529c2 Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Mon, 17 Feb 2014 20:47:41 -0800 Subject: [PATCH 23/93] The implementation of Heap --- packages/binary-heap/binary-heap.js | 172 +++++++++++++++++++++++++--- 1 file changed, 156 insertions(+), 16 deletions(-) diff --git a/packages/binary-heap/binary-heap.js b/packages/binary-heap/binary-heap.js index 0d112965b5..9ad1731c2d 100644 --- a/packages/binary-heap/binary-heap.js +++ b/packages/binary-heap/binary-heap.js @@ -1,40 +1,180 @@ +// Constructor of Heap +// - comparator - Function - given two items returns a number +// - initData - Array - the initial data in a format: +// Object: +// - id - String - unique id of the item +// - value - Any - the data value +// the contents of initData is retained BinaryHeap = function (comparator, initData) { if (! 
_.isFunction(comparator)) throw new Error('Passed comparator is invalid, should be a comparison function'); var self = this; self._comparator = comparator; - self._map = {}; + self._heapIdx = {}; self._heap = []; if (_.isArray(initData)) self._initFromData(initData); }; -var idStringify, idParse; +var idStringify; if (Package.minimongo) { idStringify = Package.minimongo.LocalCollection._idStringify; - idParse = Package.minimongo.LocalCollection._idParse; } else { // XXX: These can't deal with special strings like '__proto__' // XXX: or '{ looksLike: "object" }' or numbers. idStringify = function (id) { return JSON.stringify(id); }; - idParse = function (id) { return JSON.parse(id); } } _.extend(BinaryHeap.prototype, { - _initFromData: function (data) {}, + _initFromData: function (data) { + var self = this; - get: function (id) {}, - set: function (id, value) {}, - remove: function (id) {}, - has: function (id) {}, - empty: function (id) {}, - clear: function () {}, - forEach: function (iterator) {}, - size: function () {}, - setDefault: function () {}, - clone: function () {}, + self._heap = _.clone(data); - maxElementId: function () {} + _.each(data, function (o, i) { + self._heapIdx[idStringify(o.id)] = i; + }); + + for (var i = parentIdx(data.length); i >= 0; i--) + self._heapify(i); + }, + + _heapify: function (idx) { + var self = this; + + while (leftChildIdx(idx) < self.size()) { + var left = leftChildIdx(idx); + var right = rightChildIdx(idx); + var largest = idx; + + if (left < self.size() && + self._comparator(self._get(left), self._get(largest)) > 0) { + largest = left; + } + if (right < self.size() && + self._comparator(self._get(right), self._get(largest)) > 0) { + largest = right; + } + + if (largest === idx) + break; + + self._swap(largest, idx); + idx = largest; + } + }, + + _upHeap: function (idx) { + var self = this; + var value = self._get(idx); + var ancestor = idx; + + while (ancestor > 0) { + var parent = parentIdx(ancestor); + if 
(self._comparator(self._get(parent), value) < 0) + ancestor = parent; + else + break; + } + + if (ancestor !== idx) + self._swap(ancestor, idx); + }, + + // Internal: gets raw data object placed on idxth place in heap + _get: function (idx) { + var self = this; + return self._heap[idx].value; + }, + + _swap: function (idxA, idxB) { + var self = this; + var A = self._heap[idxA]; + var B = self._heap[idxB]; + + self._heapIdx[idStringify(A.id)] = idxB; + self._heapIdx[idStringify(B.id)] = idxA; + + self._heap[idxA] = B; + self._heap[idxB] = A; + }, + + get: function (id) { + var self = this; + return self._get(self._heapIdx[idStringify(id)]); + }, + set: function (id, value) { + var self = this; + + if (self.has(id)) { + if (self.get(id) === value) + return; + else + self.remove(id); + } + + self._heapIdx[idStringify(id)] = self._heap.length; + self._heap.push({ id: id, value: value }); + self._upHeap(self._heap.length - 1); + }, + remove: function (id) { + var self = this; + var strId = idStringify(id); + + if (! 
self.has(id)) { + var last = self._heap.length - 1; + var idx = self._heapIdx[strId]; + self._swap(idx, last); + self._heap.pop(); + self._heapify(idx); + delete self._heapIdx[strId]; + } + }, + has: function (id) { + var self = this; + return self._heapIdx[idStringify(id)] !== undefined; + }, + empty: function (id) { + var self = this; + return !self.size(); + }, + clear: function () { + var self = this; + self._heap = []; + self._heapIdx = {}; + }, + forEach: function (iterator) { + var self = this; + _.each(self._heap, function (obj) { + return iterator(obj.value, obj.id); + }); + }, + size: function () { + var self = this; + return self._heap.length; + }, + setDefault: function (id, def) { + var self = this; + if (self.has(id)) + return self.get(id); + self.set(id, def); + return def; + }, + clone: function () { + var self = this; + var clone = new BinaryHeap(self._comparator); + clone._heap = EJSON.clone(self._heap); + clone._heapIdx = EJSON.clone(self._heapIdx); + }, + + maxElementId: function () { + var self = this; + return self.size() ? self._heap[0].id : null; + } }); +function leftChildIdx (i) { return i * 2 + 1; } +function rightChildIdx (i) { return i * i + 2; } +function parentIdx (i) { return (i - 1) >> 1; } + From 678d607332622ca28fd0449e5e320b3cdd974fb1 Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Mon, 17 Feb 2014 21:18:03 -0800 Subject: [PATCH 24/93] Fix remove and get of non-existent id --- packages/binary-heap/binary-heap.js | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/packages/binary-heap/binary-heap.js b/packages/binary-heap/binary-heap.js index 9ad1731c2d..1792cce554 100644 --- a/packages/binary-heap/binary-heap.js +++ b/packages/binary-heap/binary-heap.js @@ -102,6 +102,8 @@ _.extend(BinaryHeap.prototype, { get: function (id) { var self = this; + if (! 
self.has(id)) + return null; return self._get(self._heapIdx[idStringify(id)]); }, set: function (id, value) { @@ -122,12 +124,18 @@ _.extend(BinaryHeap.prototype, { var self = this; var strId = idStringify(id); - if (! self.has(id)) { + if (self.has(id)) { var last = self._heap.length - 1; var idx = self._heapIdx[strId]; - self._swap(idx, last); - self._heap.pop(); - self._heapify(idx); + + if (idx !== last) { + self._swap(idx, last); + self._heap.pop(); + self._heapify(idx); + } else { + self._heap.pop(); + } + delete self._heapIdx[strId]; } }, From 70a4d6cba1a256b773cb788e9f9c537b13ff3a9f Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Mon, 17 Feb 2014 21:18:29 -0800 Subject: [PATCH 25/93] Tests for BinaryHeap --- packages/binary-heap/binary-heap-tests.js | 30 +++++++++++++++++++++++ packages/binary-heap/package.js | 1 + 2 files changed, 31 insertions(+) diff --git a/packages/binary-heap/binary-heap-tests.js b/packages/binary-heap/binary-heap-tests.js index e69de29bb2..16e4232156 100644 --- a/packages/binary-heap/binary-heap-tests.js +++ b/packages/binary-heap/binary-heap-tests.js @@ -0,0 +1,30 @@ +Tinytest.add("binary-heap - simple heap tests", function (test) { + var h = new BinaryHeap(function (a, b) { return a-b; }); + h.set("a", 1); + h.set("b", 233); + h.set("c", -122); + h.set("d", 0); + h.set("e", 0); + + test.equal(h.size(), 5); + test.equal(h.maxElementId(), "b"); + test.equal(h.get("b"), 233); + + h.remove("b"); + test.equal(h.size(), 4); + test.equal(h.maxElementId(), "a"); + h.set("e", 44); + test.equal(h.maxElementId(), "e"); + test.equal(h.get("b"), null); + test.isTrue(h.has("a")); + test.isFalse(h.has("dd")); + + h.clear(); + test.isFalse(h.has("a")); + test.equal(h.size(), 0); + test.equal(h.setDefault("a", 12345), 12345); + test.equal(h.setDefault("a", 55555), 12345); + test.equal(h.size(), 1); + test.equal(h.maxElementId(), "a"); +}); + diff --git a/packages/binary-heap/package.js b/packages/binary-heap/package.js index fab25f5e1e..b832d4e524 
100644 --- a/packages/binary-heap/package.js +++ b/packages/binary-heap/package.js @@ -12,6 +12,7 @@ Package.on_use(function (api) { Package.on_test(function (api) { api.use('tinytest'); + api.use('binary-heap'); api.add_files('binary-heap-tests.js'); }); From 3c4af7cd8f792b1ef8702c1d18f2cc62236f14b8 Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Mon, 17 Feb 2014 21:19:13 -0800 Subject: [PATCH 26/93] Rename heapify to downHeap to keep it consistent with upHeap Most text-books use "heapify" but in one text-book I remember the use of "downHeap", so I didn't make it up. --- packages/binary-heap/binary-heap.js | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/packages/binary-heap/binary-heap.js b/packages/binary-heap/binary-heap.js index 1792cce554..993d598186 100644 --- a/packages/binary-heap/binary-heap.js +++ b/packages/binary-heap/binary-heap.js @@ -37,10 +37,10 @@ _.extend(BinaryHeap.prototype, { }); for (var i = parentIdx(data.length); i >= 0; i--) - self._heapify(i); + self._downHeap(i); }, - _heapify: function (idx) { + _downHeap: function (idx) { var self = this; while (leftChildIdx(idx) < self.size()) { @@ -131,7 +131,7 @@ _.extend(BinaryHeap.prototype, { if (idx !== last) { self._swap(idx, last); self._heap.pop(); - self._heapify(idx); + self._downHeap(idx); } else { self._heap.pop(); } From 4dfb2fee4dfbfa1c43bebd5c01a60b579827ead1 Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Tue, 18 Feb 2014 00:32:23 -0800 Subject: [PATCH 27/93] A step to MaxMinHeap --- packages/binary-heap/.gitignore | 1 + packages/binary-heap/binary-heap-tests.js | 32 ++++++++++++++++ packages/binary-heap/binary-heap.js | 1 + packages/binary-heap/max-min-heap.js | 46 +++++++++++++++++++++++ packages/binary-heap/package.js | 3 +- 5 files changed, 82 insertions(+), 1 deletion(-) create mode 100644 packages/binary-heap/.gitignore create mode 100644 packages/binary-heap/max-min-heap.js diff --git a/packages/binary-heap/.gitignore b/packages/binary-heap/.gitignore new 
file mode 100644 index 0000000000..677a6fc263 --- /dev/null +++ b/packages/binary-heap/.gitignore @@ -0,0 +1 @@ +.build* diff --git a/packages/binary-heap/binary-heap-tests.js b/packages/binary-heap/binary-heap-tests.js index 16e4232156..aa026e8148 100644 --- a/packages/binary-heap/binary-heap-tests.js +++ b/packages/binary-heap/binary-heap-tests.js @@ -28,3 +28,35 @@ Tinytest.add("binary-heap - simple heap tests", function (test) { test.equal(h.maxElementId(), "a"); }); +Tinytest.add("binary-heap - max-min heap tests", function (test) { + var h = new MaxMinHeap(function (a, b) { return a-b; }); + h.set("a", 1); + h.set("b", 233); + h.set("c", -122); + h.set("d", 0); + h.set("e", 0); + + test.equal(h.size(), 5); + test.equal(h.maxElementId(), "b"); + test.equal(h.get("b"), 233); + test.equal(h.minElementId(), "c"); + + h.remove("b"); + test.equal(h.size(), 4); + test.equal(h.minElementId(), "c"); + h.set("e", -123); + test.equal(h.minElementId(), "e"); + test.equal(h.get("b"), null); + test.isTrue(h.has("a")); + test.isFalse(h.has("dd")); + + h.clear(); + test.isFalse(h.has("a")); + test.equal(h.size(), 0); + test.equal(h.setDefault("a", 12345), 12345); + test.equal(h.setDefault("a", 55555), 12345); + test.equal(h.size(), 1); + test.equal(h.maxElementId(), "a"); + test.equal(h.minElementId(), "a"); +}); + diff --git a/packages/binary-heap/binary-heap.js b/packages/binary-heap/binary-heap.js index 993d598186..8158ad419d 100644 --- a/packages/binary-heap/binary-heap.js +++ b/packages/binary-heap/binary-heap.js @@ -174,6 +174,7 @@ _.extend(BinaryHeap.prototype, { var clone = new BinaryHeap(self._comparator); clone._heap = EJSON.clone(self._heap); clone._heapIdx = EJSON.clone(self._heapIdx); + return clone; }, maxElementId: function () { diff --git a/packages/binary-heap/max-min-heap.js b/packages/binary-heap/max-min-heap.js new file mode 100644 index 0000000000..1b8fe7dc83 --- /dev/null +++ b/packages/binary-heap/max-min-heap.js @@ -0,0 +1,46 @@ +MaxMinHeap = function 
(comparator, initialData) { + var self = this; + + self.prototype.constructor.call(self, comparator, initialData); + self._minHeap = new BinaryHeap(function (a, b) { + return -comparator(a, b); + }, initialData); +}; + +MaxMinHeap.prototype = Object.create(BinaryHeap); + +_.extend(MaxMinHeap.prototype, { + set: function (id, value) { + var self = this; + self._minHeap.set(id, value); + BinaryHeap.prototype.set.apply(self, arguments); + }, + remove: function (id) { + var self = this; + self._minHeap.remove(id); + BinaryHeap.prototype.remove.apply(self, arguments); + }, + clear: function () { + var self = this; + self._minHeap.clear(id); + BinaryHeap.prototype.clear.apply(self, arguments); + }, + setDefault: function (id, def) { + var self = this; + self._minHeap.setDefault(id, def); + return BinaryHeap.prototype.setDefault.apply(self, arguments); + }, + clone: function () { + var self = this; + var clone = new MaxMinHeap(self._comparator); + clone._heap = EJSON.clone(self._heap); + clone._heapIdx = EJSON.clone(self._heapIdx); + clone._minHeap = self._minHeap.clone(); + return clone; + }, + minElementId: function () { + var self = this; + return self._minHeap.maxElementId(); + } +}); + diff --git a/packages/binary-heap/package.js b/packages/binary-heap/package.js index b832d4e524..9d1504eae3 100644 --- a/packages/binary-heap/package.js +++ b/packages/binary-heap/package.js @@ -5,9 +5,10 @@ Package.describe({ Package.on_use(function (api) { api.export('BinaryHeap'); + api.export('MaxMinHeap'); api.use(['underscore']); api.use(['minimongo'], { weak: true }); - api.add_files('binary-heap.js'); + api.add_files(['binary-heap.js', 'max-min-heap.js']); }); Package.on_test(function (api) { From d59200f9517b4f20a86cb612c1bf4210c5fd56f6 Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Tue, 18 Feb 2014 14:39:58 -0800 Subject: [PATCH 28/93] Do OOP right --- packages/binary-heap/max-min-heap.js | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git 
a/packages/binary-heap/max-min-heap.js b/packages/binary-heap/max-min-heap.js index 1b8fe7dc83..4221d2f9df 100644 --- a/packages/binary-heap/max-min-heap.js +++ b/packages/binary-heap/max-min-heap.js @@ -1,13 +1,13 @@ MaxMinHeap = function (comparator, initialData) { var self = this; - self.prototype.constructor.call(self, comparator, initialData); + BinaryHeap.call(self, comparator, initialData); self._minHeap = new BinaryHeap(function (a, b) { return -comparator(a, b); }, initialData); }; -MaxMinHeap.prototype = Object.create(BinaryHeap); +MaxMinHeap.prototype = Object.create(BinaryHeap.prototype); _.extend(MaxMinHeap.prototype, { set: function (id, value) { From 4631f2437760af5265a092da5eebd871d74da871 Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Tue, 18 Feb 2014 15:26:44 -0800 Subject: [PATCH 29/93] Fix _upHeap and fix oop once again --- packages/binary-heap/binary-heap.js | 16 +++++++--------- packages/binary-heap/max-min-heap.js | 10 +++++----- 2 files changed, 12 insertions(+), 14 deletions(-) diff --git a/packages/binary-heap/binary-heap.js b/packages/binary-heap/binary-heap.js index 8158ad419d..cd35e3f1df 100644 --- a/packages/binary-heap/binary-heap.js +++ b/packages/binary-heap/binary-heap.js @@ -68,18 +68,16 @@ _.extend(BinaryHeap.prototype, { _upHeap: function (idx) { var self = this; var value = self._get(idx); - var ancestor = idx; - while (ancestor > 0) { - var parent = parentIdx(ancestor); - if (self._comparator(self._get(parent), value) < 0) - ancestor = parent; - else + while (idx > 0) { + var parent = parentIdx(idx); + if (self._comparator(self._get(parent), value) < 0) { + self._swap(parent, idx) + idx = parent; + } else { break; + } } - - if (ancestor !== idx) - self._swap(ancestor, idx); }, // Internal: gets raw data object placed on idxth place in heap diff --git a/packages/binary-heap/max-min-heap.js b/packages/binary-heap/max-min-heap.js index 4221d2f9df..4d7af28a03 100644 --- a/packages/binary-heap/max-min-heap.js +++ 
b/packages/binary-heap/max-min-heap.js @@ -12,23 +12,23 @@ MaxMinHeap.prototype = Object.create(BinaryHeap.prototype); _.extend(MaxMinHeap.prototype, { set: function (id, value) { var self = this; - self._minHeap.set(id, value); BinaryHeap.prototype.set.apply(self, arguments); + self._minHeap.set(id, value); }, remove: function (id) { var self = this; - self._minHeap.remove(id); BinaryHeap.prototype.remove.apply(self, arguments); + self._minHeap.remove(id); }, clear: function () { var self = this; - self._minHeap.clear(id); BinaryHeap.prototype.clear.apply(self, arguments); + self._minHeap.clear(); }, setDefault: function (id, def) { var self = this; - self._minHeap.setDefault(id, def); - return BinaryHeap.prototype.setDefault.apply(self, arguments); + BinaryHeap.prototype.setDefault.apply(self, arguments); + return self._minHeap.setDefault(id, def); }, clone: function () { var self = this; From 3871c92181f549eeb93df36b613c3f331e309f78 Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Tue, 18 Feb 2014 16:37:31 -0800 Subject: [PATCH 30/93] Don't throw an updated-buffered doc right away. It can go to published or back to buffer. 
--- .../mongo-livedata/oplog_observe_driver.js | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/packages/mongo-livedata/oplog_observe_driver.js b/packages/mongo-livedata/oplog_observe_driver.js index ea0fa7fd5a..50fcd10564 100644 --- a/packages/mongo-livedata/oplog_observe_driver.js +++ b/packages/mongo-livedata/oplog_observe_driver.js @@ -305,13 +305,22 @@ _.extend(OplogObserveDriver.prototype, { self._justUpdatedBuffer = false; } } else if (bufferedBefore) { - // after the change we can't know if doc is still in the buffer limit - // w/o querying mongo, so just remove it from buffer - self._removeBuffered(id); - // but it can move into published now, check it + oldDoc = self._unpublishedBuffer.get(id); + // remove the old version manually so we don't trigger the querying + // immediately + self._unpublishedBuffer.remove(id); + var maxPublished = self._published.get(self._published.maxElementId()); - if (comparator(newDoc, maxPublished) < 0) + var maxBuffered = self._unpublishedBuffer.size() && self._unpublishedBuffer.get(self._unpublishedBuffer.maxElementId()); + // the buffered doc was updated, it could move to published + if (comparator(newDoc, maxPublished) < 0) { self._addPublished(id, newDoc); + } else if (self._justUpdatedBuffer || (maxBuffered && comparator(newDoc, maxBuffered) < 0)) { + // stays in buffer + self._unpublishedBuffer.set(id, newDoc); + } else { + self._justUpdatedBuffer = false; + } } else { throw new Error("cachedBefore implies either of publishedBefore or bufferedBefore is true."); } From 6144213649c91bf176f7cfd23ca4344a7d9b5276 Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Tue, 18 Feb 2014 16:38:03 -0800 Subject: [PATCH 31/93] Tests for updated-buffered which stays in buffer or comes to published --- .../mongo-livedata/mongo_livedata_tests.js | 26 ++++++++++++++++++- 1 file changed, 25 insertions(+), 1 deletion(-) diff --git a/packages/mongo-livedata/mongo_livedata_tests.js 
b/packages/mongo-livedata/mongo_livedata_tests.js index 46abc8e78b..ed318b4a27 100644 --- a/packages/mongo-livedata/mongo_livedata_tests.js +++ b/packages/mongo-livedata/mongo_livedata_tests.js @@ -752,17 +752,21 @@ if (Meteor.isServer) { var run = test.runId(); var coll = new Meteor.Collection("observeLimit-"+run, collectionOptions); + var currentState = {}; var observer = function () { var output = []; var callbacks = { changed: function (newDoc) { output.push({changed: newDoc._id}); + currentState[newDoc._id] = newDoc; }, added: function (newDoc) { output.push({added: newDoc._id}); + currentState[newDoc._id] = newDoc; }, removed: function (oldDoc) { output.push({removed: oldDoc._id}); + delete currentState[oldDoc._id]; } }; var handle = coll.find({foo: 22}, @@ -773,7 +777,6 @@ if (Meteor.isServer) { var clearOutput = function (o) { o.output.splice(0, o.output.length); }; var ins = function (doc) { - if (doc.bar) doc._id = doc.bar.toString(); var id; runInFence(function () { id = coll.insert(doc); }); return id; }; @@ -922,6 +925,27 @@ if (Meteor.isServer) { test.isTrue(setsEqual(o.output, [{removed: docId9}, {added: docId6}])); clearOutput(o); + upd({ bar: { $gt: 25 } }, { $inc: { bar: -7.5 } }, { multi: true }); + // Becomes [22 23 23.5] 24 33.5] 43.5 + test.length(o.output, 2); + test.isTrue(setsEqual(o.output, [{removed: docId6}, {added: docId10}])); + clearOutput(o); + + // Force buffer objects to be moved into published set so we can check them + rem(docId7); + rem(docId8); + rem(docId10); + // Becomes [24 33.5 43.5] + test.length(o.output, 6); + test.isTrue(setsEqual(o.output, [{removed: docId7}, {removed: docId8}, + {removed: docId10}, {added: docId6}, + {added: docId11}, {added: docId12}])); + + test.equal(currentState[docId6], { _id: docId6, foo: 22, bar: 24 }); + test.equal(currentState[docId11], { _id: docId11, foo: 22, bar: 33.5 }); + test.equal(currentState[docId12], { _id: docId12, foo: 22, bar: 43.5 }); + clearOutput(o); + o.handle.stop(); 
onComplete(); }); From 23ec5007e98638877d6ed44a3f9f5a368aa2f349 Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Wed, 19 Feb 2014 12:44:19 -0800 Subject: [PATCH 32/93] _justUpdatedBuffer => _safeAppendToBuffer --- .../mongo-livedata/oplog_observe_driver.js | 25 ++++++++++++------- 1 file changed, 16 insertions(+), 9 deletions(-) diff --git a/packages/mongo-livedata/oplog_observe_driver.js b/packages/mongo-livedata/oplog_observe_driver.js index 50fcd10564..098cf10eed 100644 --- a/packages/mongo-livedata/oplog_observe_driver.js +++ b/packages/mongo-livedata/oplog_observe_driver.js @@ -49,7 +49,11 @@ OplogObserveDriver = function (options) { self._unpublishedBuffer = null; self._published = new LocalCollection._IdMap; } - self._justUpdatedBuffer = false; + + // Indicates if it is safe to insert a new document at the end of the buffer + // for this query. i.e. it is known that there are no documents matching the + // selector those are not in published or buffer. + self._safeAppendToBuffer = false; self._stopped = false; self._stopHandles = []; @@ -203,7 +207,7 @@ _.extend(OplogObserveDriver.prototype, { } self._unpublishedBuffer.remove(maxBufferedId); - self._justUpdatedBuffer = false; + self._safeAppendToBuffer = false; } }, _removeBuffered: function (id) { @@ -241,7 +245,7 @@ _.extend(OplogObserveDriver.prototype, { // outside of the buffer easily. 
if (!limit || self._published.size() < limit || comparator(maxPublished, fields) > 0) { self._addPublished(id, fields); - } else if ((self._justUpdatedBuffer && self._unpublishedBuffer.size() < limit) || (maxBuffered && comparator(maxBuffered, fields) > 0)) { + } else if ((self._safeAppendToBuffer && self._unpublishedBuffer.size() < limit) || (maxBuffered && comparator(maxBuffered, fields) > 0)) { self._addBuffered(id, fields); } }, @@ -299,10 +303,10 @@ _.extend(OplogObserveDriver.prototype, { self._removePublished(id); // but it can move into buffered now, check it var maxBuffered = self._unpublishedBuffer.get(self._unpublishedBuffer.maxElementId()); - if (self._justUpdatedBuffer || (maxBuffered && comparator(newDoc, maxBuffered) < 0)) + if (self._safeAppendToBuffer || (maxBuffered && comparator(newDoc, maxBuffered) < 0)) self._addBuffered(id, newDoc); else - self._justUpdatedBuffer = false; + self._safeAppendToBuffer = false; } } else if (bufferedBefore) { oldDoc = self._unpublishedBuffer.get(id); @@ -315,11 +319,11 @@ _.extend(OplogObserveDriver.prototype, { // the buffered doc was updated, it could move to published if (comparator(newDoc, maxPublished) < 0) { self._addPublished(id, newDoc); - } else if (self._justUpdatedBuffer || (maxBuffered && comparator(newDoc, maxBuffered) < 0)) { + } else if (self._safeAppendToBuffer || (maxBuffered && comparator(newDoc, maxBuffered) < 0)) { // stays in buffer self._unpublishedBuffer.set(id, newDoc); } else { - self._justUpdatedBuffer = false; + self._safeAppendToBuffer = false; } } else { throw new Error("cachedBefore implies either of publishedBefore or bufferedBefore is true."); @@ -467,7 +471,9 @@ _.extend(OplogObserveDriver.prototype, { initialCursor.forEach(function (initialDoc) { self._addMatching(initialDoc); }); - self._justUpdatedBuffer = true; + + self._safeAppendToBuffer = initialCursor.count() < self._limit * 2; + if (self._stopped) throw new Error("oplog stopped quite early"); // Allow observeChanges calls 
to return. (After this, it's possible for @@ -634,7 +640,8 @@ _.extend(OplogObserveDriver.prototype, { delete doc._id; self._addBuffered(id, doc); }); - self._justUpdatedBuffer = true; + + self._safeAppendToBuffer = newBuffer.size() < self._limit; }, // This stop function is invoked from the onStop of the ObserveMultiplexer, so From b76fd3c3af3eaad18ad2007971852a40aa915f1c Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Thu, 20 Feb 2014 13:46:39 -0800 Subject: [PATCH 33/93] Rename BinaryHeap to MaxHeap and MaxMinHeap to MinMaxHeap --- packages/binary-heap/binary-heap-tests.js | 8 ++++---- .../{binary-heap.js => max-heap.js} | 6 +++--- .../{max-min-heap.js => min-max-heap.js} | 20 +++++++++---------- packages/binary-heap/package.js | 6 +++--- 4 files changed, 20 insertions(+), 20 deletions(-) rename packages/binary-heap/{binary-heap.js => max-heap.js} (97%) rename packages/binary-heap/{max-min-heap.js => min-max-heap.js} (58%) diff --git a/packages/binary-heap/binary-heap-tests.js b/packages/binary-heap/binary-heap-tests.js index aa026e8148..86aabea08f 100644 --- a/packages/binary-heap/binary-heap-tests.js +++ b/packages/binary-heap/binary-heap-tests.js @@ -1,5 +1,5 @@ -Tinytest.add("binary-heap - simple heap tests", function (test) { - var h = new BinaryHeap(function (a, b) { return a-b; }); +Tinytest.add("binary-heap - simple max-heap tests", function (test) { + var h = new MaxHeap(function (a, b) { return a-b; }); h.set("a", 1); h.set("b", 233); h.set("c", -122); @@ -28,8 +28,8 @@ Tinytest.add("binary-heap - simple heap tests", function (test) { test.equal(h.maxElementId(), "a"); }); -Tinytest.add("binary-heap - max-min heap tests", function (test) { - var h = new MaxMinHeap(function (a, b) { return a-b; }); +Tinytest.add("binary-heap - min-max heap tests", function (test) { + var h = new MinMaxHeap(function (a, b) { return a-b; }); h.set("a", 1); h.set("b", 233); h.set("c", -122); diff --git a/packages/binary-heap/binary-heap.js b/packages/binary-heap/max-heap.js 
similarity index 97% rename from packages/binary-heap/binary-heap.js rename to packages/binary-heap/max-heap.js index cd35e3f1df..c2904b70c9 100644 --- a/packages/binary-heap/binary-heap.js +++ b/packages/binary-heap/max-heap.js @@ -5,7 +5,7 @@ // - id - String - unique id of the item // - value - Any - the data value // the contents of initData is retained -BinaryHeap = function (comparator, initData) { +MaxHeap = function (comparator, initData) { if (! _.isFunction(comparator)) throw new Error('Passed comparator is invalid, should be a comparison function'); var self = this; @@ -26,7 +26,7 @@ if (Package.minimongo) { idStringify = function (id) { return JSON.stringify(id); }; } -_.extend(BinaryHeap.prototype, { +_.extend(MaxHeap.prototype, { _initFromData: function (data) { var self = this; @@ -169,7 +169,7 @@ _.extend(BinaryHeap.prototype, { }, clone: function () { var self = this; - var clone = new BinaryHeap(self._comparator); + var clone = new MaxHeap(self._comparator); clone._heap = EJSON.clone(self._heap); clone._heapIdx = EJSON.clone(self._heapIdx); return clone; diff --git a/packages/binary-heap/max-min-heap.js b/packages/binary-heap/min-max-heap.js similarity index 58% rename from packages/binary-heap/max-min-heap.js rename to packages/binary-heap/min-max-heap.js index 4d7af28a03..9503162261 100644 --- a/packages/binary-heap/max-min-heap.js +++ b/packages/binary-heap/min-max-heap.js @@ -1,38 +1,38 @@ -MaxMinHeap = function (comparator, initialData) { +MinMaxHeap = function (comparator, initialData) { var self = this; - BinaryHeap.call(self, comparator, initialData); - self._minHeap = new BinaryHeap(function (a, b) { + MaxHeap.call(self, comparator, initialData); + self._minHeap = new MaxHeap(function (a, b) { return -comparator(a, b); }, initialData); }; -MaxMinHeap.prototype = Object.create(BinaryHeap.prototype); +MinMaxHeap.prototype = Object.create(MaxHeap.prototype); -_.extend(MaxMinHeap.prototype, { +_.extend(MinMaxHeap.prototype, { set: function 
(id, value) { var self = this; - BinaryHeap.prototype.set.apply(self, arguments); + MaxHeap.prototype.set.apply(self, arguments); self._minHeap.set(id, value); }, remove: function (id) { var self = this; - BinaryHeap.prototype.remove.apply(self, arguments); + MaxHeap.prototype.remove.apply(self, arguments); self._minHeap.remove(id); }, clear: function () { var self = this; - BinaryHeap.prototype.clear.apply(self, arguments); + MaxHeap.prototype.clear.apply(self, arguments); self._minHeap.clear(); }, setDefault: function (id, def) { var self = this; - BinaryHeap.prototype.setDefault.apply(self, arguments); + MaxHeap.prototype.setDefault.apply(self, arguments); return self._minHeap.setDefault(id, def); }, clone: function () { var self = this; - var clone = new MaxMinHeap(self._comparator); + var clone = new MinMaxHeap(self._comparator); clone._heap = EJSON.clone(self._heap); clone._heapIdx = EJSON.clone(self._heapIdx); clone._minHeap = self._minHeap.clone(); diff --git a/packages/binary-heap/package.js b/packages/binary-heap/package.js index 9d1504eae3..a340d7c8c5 100644 --- a/packages/binary-heap/package.js +++ b/packages/binary-heap/package.js @@ -4,11 +4,11 @@ Package.describe({ }); Package.on_use(function (api) { - api.export('BinaryHeap'); - api.export('MaxMinHeap'); + api.export('MaxHeap'); + api.export('MinMaxHeap'); api.use(['underscore']); api.use(['minimongo'], { weak: true }); - api.add_files(['binary-heap.js', 'max-min-heap.js']); + api.add_files(['max-heap.js', 'min-max-heap.js']); }); Package.on_test(function (api) { From b4e94c3b2a5095fbee9f1e22a5b8dc56ad33d780 Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Thu, 20 Feb 2014 14:04:27 -0800 Subject: [PATCH 34/93] A randomized test for max-heap --- packages/binary-heap/binary-heap-tests.js | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/packages/binary-heap/binary-heap-tests.js b/packages/binary-heap/binary-heap-tests.js index 86aabea08f..9a709b6fe4 100644 --- 
a/packages/binary-heap/binary-heap-tests.js +++ b/packages/binary-heap/binary-heap-tests.js @@ -28,6 +28,27 @@ Tinytest.add("binary-heap - simple max-heap tests", function (test) { test.equal(h.maxElementId(), "a"); }); +Tinytest.add("binary-heap - big test for max-heap", function (test) { + var positiveNumbers = _.shuffle(_.range(1, 41)); + var negativeNumbers = _.shuffle(_.range(-1, -41, -1)); + var allNumbers = negativeNumbers.concat(positiveNumbers); + + var heap = new MaxHeap(function (a, b) { return a-b; }); + var output = []; + + _.each(allNumbers, function (n) { heap.set(n, n); }); + + _.times(positiveNumbers.length + negativeNumbers.length, function () { + var maxId = heap.maxElementId(); + output.push(heap.get(maxId)); + heap.remove(maxId); + }); + + allNumbers.sort(function (a, b) { return b-a; }); + + test.equal(output, allNumbers); +}); + Tinytest.add("binary-heap - min-max heap tests", function (test) { var h = new MinMaxHeap(function (a, b) { return a-b; }); h.set("a", 1); From 3fa5fd9342f3e5b81c270ed10a7bfc0d854ce6d5 Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Thu, 20 Feb 2014 14:05:12 -0800 Subject: [PATCH 35/93] binary-heap: Fix rightChildIdx() --- packages/binary-heap/max-heap.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/binary-heap/max-heap.js b/packages/binary-heap/max-heap.js index c2904b70c9..cd614ebe2c 100644 --- a/packages/binary-heap/max-heap.js +++ b/packages/binary-heap/max-heap.js @@ -182,6 +182,6 @@ _.extend(MaxHeap.prototype, { }); function leftChildIdx (i) { return i * 2 + 1; } -function rightChildIdx (i) { return i * i + 2; } +function rightChildIdx (i) { return i * 2 + 2; } function parentIdx (i) { return (i - 1) >> 1; } From 8c079efcdac835aeda67175a22c9c82dc46195dc Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Thu, 20 Feb 2014 16:00:08 -0800 Subject: [PATCH 36/93] Extracted IdMap to a separate package --- packages/id-map/.gitignore | 1 + packages/id-map/id-map.js | 77 
+++++++++++++++++++++++++++++++++++ packages/id-map/package.js | 11 +++++ packages/minimongo/id_map.js | 71 +------------------------------- packages/minimongo/package.js | 2 +- 5 files changed, 92 insertions(+), 70 deletions(-) create mode 100644 packages/id-map/.gitignore create mode 100644 packages/id-map/id-map.js create mode 100644 packages/id-map/package.js diff --git a/packages/id-map/.gitignore b/packages/id-map/.gitignore new file mode 100644 index 0000000000..677a6fc263 --- /dev/null +++ b/packages/id-map/.gitignore @@ -0,0 +1 @@ +.build* diff --git a/packages/id-map/id-map.js b/packages/id-map/id-map.js new file mode 100644 index 0000000000..888fdee63d --- /dev/null +++ b/packages/id-map/id-map.js @@ -0,0 +1,77 @@ +IdMap = function (idStringify, idParse) { + var self = this; + self._map = {}; + self._idStringify = idStringify || JSON.stringify; + self._idParse = idParse || JSON.parse; +}; + +// Some of these methods are designed to match methods on OrderedDict, since +// (eg) ObserveMultiplex and _CachingChangeObserver use them interchangeably. +// (Conceivably, this should be replaced with "UnorderedDict" with a specific +// set of methods that overlap between the two.) + +_.extend(IdMap.prototype, { + get: function (id) { + var self = this; + var key = self._idStringify(id); + return self._map[key]; + }, + set: function (id, value) { + var self = this; + var key = self._idStringify(id); + self._map[key] = value; + }, + remove: function (id) { + var self = this; + var key = self._idStringify(id); + delete self._map[key]; + }, + has: function (id) { + var self = this; + var key = self._idStringify(id); + return _.has(self._map, key); + }, + empty: function () { + var self = this; + return _.isEmpty(self._map); + }, + clear: function () { + var self = this; + self._map = {}; + }, + // Iterates over the items in the map. Return `false` to break the loop. 
+ forEach: function (iterator) { + var self = this; + // don't use _.each, because we can't break out of it. + var keys = _.keys(self._map); + for (var i = 0; i < keys.length; i++) { + var breakIfFalse = iterator.call(null, self._map[keys[i]], + self._idParse(keys[i])); + if (breakIfFalse === false) + return; + } + }, + size: function () { + var self = this; + return _.size(self._map); + }, + setDefault: function (id, def) { + var self = this; + var key = self._idStringify(id); + if (_.has(self._map, key)) + return self._map[key]; + self._map[key] = def; + return def; + }, + // Assumes that values are EJSON-cloneable, and that we don't need to clone + // IDs (ie, that nobody is going to mutate an ObjectId). + clone: function () { + var self = this; + var clone = new IdMap(self._idStringify, self._idParse); + self.forEach(function (value, id) { + clone.set(id, EJSON.clone(value)); + }); + return clone; + } +}); + diff --git a/packages/id-map/package.js b/packages/id-map/package.js new file mode 100644 index 0000000000..d3ef1db346 --- /dev/null +++ b/packages/id-map/package.js @@ -0,0 +1,11 @@ +Package.describe({ + summary: "Dictionary data structure: a wrapper for a raw object", + internal: true +}); + +Package.on_use(function (api) { + api.export('IdMap'); + api.use(['underscore', 'json', 'ejson']); + api.add_files([ 'id-map.js' ]); +}); + diff --git a/packages/minimongo/id_map.js b/packages/minimongo/id_map.js index 4e94273fac..7fc10d8add 100644 --- a/packages/minimongo/id_map.js +++ b/packages/minimongo/id_map.js @@ -1,74 +1,7 @@ LocalCollection._IdMap = function () { var self = this; - self._map = {}; + IdMap.call(self, LocalCollection._idStringify, LocalCollection._idParse); }; -// Some of these methods are designed to match methods on OrderedDict, since -// (eg) ObserveMultiplex and _CachingChangeObserver use them interchangeably. -// (Conceivably, this should be replaced with "UnorderedDict" with a specific -// set of methods that overlap between the two.) 
+LocalCollection._IdMap.prototype = Object.create(IdMap.prototype); -_.extend(LocalCollection._IdMap.prototype, { - get: function (id) { - var self = this; - var key = LocalCollection._idStringify(id); - return self._map[key]; - }, - set: function (id, value) { - var self = this; - var key = LocalCollection._idStringify(id); - self._map[key] = value; - }, - remove: function (id) { - var self = this; - var key = LocalCollection._idStringify(id); - delete self._map[key]; - }, - has: function (id) { - var self = this; - var key = LocalCollection._idStringify(id); - return _.has(self._map, key); - }, - empty: function () { - var self = this; - return _.isEmpty(self._map); - }, - clear: function () { - var self = this; - self._map = {}; - }, - // Iterates over the items in the map. Return `false` to break the loop. - forEach: function (iterator) { - var self = this; - // don't use _.each, because we can't break out of it. - var keys = _.keys(self._map); - for (var i = 0; i < keys.length; i++) { - var breakIfFalse = iterator.call(null, self._map[keys[i]], - LocalCollection._idParse(keys[i])); - if (breakIfFalse === false) - return; - } - }, - size: function () { - var self = this; - return _.size(self._map); - }, - setDefault: function (id, def) { - var self = this; - var key = LocalCollection._idStringify(id); - if (_.has(self._map, key)) - return self._map[key]; - self._map[key] = def; - return def; - }, - // Assumes that values are EJSON-cloneable, and that we don't need to clone - // IDs (ie, that nobody is going to mutate an ObjectId). 
- clone: function () { - var self = this; - var clone = new LocalCollection._IdMap; - self.forEach(function (value, id) { - clone.set(id, EJSON.clone(value)); - }); - return clone; - } -}); diff --git a/packages/minimongo/package.js b/packages/minimongo/package.js index 8791cbfe77..58783070fb 100644 --- a/packages/minimongo/package.js +++ b/packages/minimongo/package.js @@ -7,7 +7,7 @@ Package.on_use(function (api) { api.export('LocalCollection'); api.export('Minimongo'); api.export('MinimongoTest', { testOnly: true }); - api.use(['underscore', 'json', 'ejson', 'ordered-dict', 'deps', + api.use(['underscore', 'json', 'ejson', 'id-map', 'ordered-dict', 'deps', 'random', 'ordered-dict']); // This package is used for geo-location queries such as $near api.use('geojson-utils'); From 939d8d7e7f8f10bfbaba3f4c5758aba526ab82dd Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Thu, 20 Feb 2014 16:29:01 -0800 Subject: [PATCH 37/93] es3 friendly inheritance --- packages/binary-heap/min-max-heap.js | 4 +++- packages/minimongo/id_map.js | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/packages/binary-heap/min-max-heap.js b/packages/binary-heap/min-max-heap.js index 9503162261..332dc33f07 100644 --- a/packages/binary-heap/min-max-heap.js +++ b/packages/binary-heap/min-max-heap.js @@ -7,7 +7,9 @@ MinMaxHeap = function (comparator, initialData) { }, initialData); }; -MinMaxHeap.prototype = Object.create(MaxHeap.prototype); +var F = function () {}; +F.prototype = MaxHeap.prototype; +MinMaxHeap.prototype = new F; _.extend(MinMaxHeap.prototype, { set: function (id, value) { diff --git a/packages/minimongo/id_map.js b/packages/minimongo/id_map.js index 7fc10d8add..cf098dbee1 100644 --- a/packages/minimongo/id_map.js +++ b/packages/minimongo/id_map.js @@ -3,5 +3,5 @@ LocalCollection._IdMap = function () { IdMap.call(self, LocalCollection._idStringify, LocalCollection._idParse); }; -LocalCollection._IdMap.prototype = Object.create(IdMap.prototype); 
+LocalCollection._IdMap.prototype = IdMap.prototype; From cd918bbf17f34e29c8044d9ada4708a64def419c Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Thu, 20 Feb 2014 17:13:52 -0800 Subject: [PATCH 38/93] binary-heap uses id-map --- packages/binary-heap/max-heap.js | 37 ++++++++++++++------------------ packages/binary-heap/package.js | 2 +- 2 files changed, 17 insertions(+), 22 deletions(-) diff --git a/packages/binary-heap/max-heap.js b/packages/binary-heap/max-heap.js index cd614ebe2c..5eb05747ab 100644 --- a/packages/binary-heap/max-heap.js +++ b/packages/binary-heap/max-heap.js @@ -10,22 +10,18 @@ MaxHeap = function (comparator, initData) { throw new Error('Passed comparator is invalid, should be a comparison function'); var self = this; self._comparator = comparator; - self._heapIdx = {}; + + if (Package && Package.minimongo) + self._heapIdx = new Package.minimongo.LocalCollection._IdMap; + else + self._heapIdx = new IdMap; + self._heap = []; if (_.isArray(initData)) self._initFromData(initData); }; -var idStringify; -if (Package.minimongo) { - idStringify = Package.minimongo.LocalCollection._idStringify; -} else { - // XXX: These can't deal with special strings like '__proto__' - // XXX: or '{ looksLike: "object" }' or numbers. - idStringify = function (id) { return JSON.stringify(id); }; -} - _.extend(MaxHeap.prototype, { _initFromData: function (data) { var self = this; @@ -33,7 +29,7 @@ _.extend(MaxHeap.prototype, { self._heap = _.clone(data); _.each(data, function (o, i) { - self._heapIdx[idStringify(o.id)] = i; + self._heapIdx.set(o.id, i); }); for (var i = parentIdx(data.length); i >= 0; i--) @@ -91,8 +87,8 @@ _.extend(MaxHeap.prototype, { var A = self._heap[idxA]; var B = self._heap[idxB]; - self._heapIdx[idStringify(A.id)] = idxB; - self._heapIdx[idStringify(B.id)] = idxA; + self._heapIdx.set(A.id, idxB); + self._heapIdx.set(B.id, idxA); self._heap[idxA] = B; self._heap[idxB] = A; @@ -102,7 +98,7 @@ _.extend(MaxHeap.prototype, { var self = this; if (! 
self.has(id)) return null; - return self._get(self._heapIdx[idStringify(id)]); + return self._get(self._heapIdx.get(id)); }, set: function (id, value) { var self = this; @@ -114,17 +110,16 @@ _.extend(MaxHeap.prototype, { self.remove(id); } - self._heapIdx[idStringify(id)] = self._heap.length; + self._heapIdx.set(id, self._heap.length); self._heap.push({ id: id, value: value }); self._upHeap(self._heap.length - 1); }, remove: function (id) { var self = this; - var strId = idStringify(id); if (self.has(id)) { var last = self._heap.length - 1; - var idx = self._heapIdx[strId]; + var idx = self._heapIdx.get(id); if (idx !== last) { self._swap(idx, last); @@ -134,12 +129,12 @@ _.extend(MaxHeap.prototype, { self._heap.pop(); } - delete self._heapIdx[strId]; + self._heapIdx.remove(id); } }, has: function (id) { var self = this; - return self._heapIdx[idStringify(id)] !== undefined; + return self._heapIdx.has(id); }, empty: function (id) { var self = this; @@ -148,7 +143,7 @@ _.extend(MaxHeap.prototype, { clear: function () { var self = this; self._heap = []; - self._heapIdx = {}; + self._heapIdx.clear(); }, forEach: function (iterator) { var self = this; @@ -171,7 +166,7 @@ _.extend(MaxHeap.prototype, { var self = this; var clone = new MaxHeap(self._comparator); clone._heap = EJSON.clone(self._heap); - clone._heapIdx = EJSON.clone(self._heapIdx); + clone._heapIdx = self._heapIdx.clone(); return clone; }, diff --git a/packages/binary-heap/package.js b/packages/binary-heap/package.js index a340d7c8c5..5788fc1973 100644 --- a/packages/binary-heap/package.js +++ b/packages/binary-heap/package.js @@ -6,7 +6,7 @@ Package.describe({ Package.on_use(function (api) { api.export('MaxHeap'); api.export('MinMaxHeap'); - api.use(['underscore']); + api.use(['underscore', 'id-map']); api.use(['minimongo'], { weak: true }); api.add_files(['max-heap.js', 'min-max-heap.js']); }); From 006e307f1542b97c254fac7e0b4e8e7fa410f453 Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Thu, 20 Feb 2014 
21:38:09 -0800 Subject: [PATCH 39/93] More comments on Heap implementation, linear initialization doesn't retain anything but the references to the values. --- packages/binary-heap/max-heap.js | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/packages/binary-heap/max-heap.js b/packages/binary-heap/max-heap.js index 5eb05747ab..6327e58e3d 100644 --- a/packages/binary-heap/max-heap.js +++ b/packages/binary-heap/max-heap.js @@ -9,24 +9,40 @@ MaxHeap = function (comparator, initData) { if (! _.isFunction(comparator)) throw new Error('Passed comparator is invalid, should be a comparison function'); var self = this; + + // a C-style comparator that is given two values and returns a number, + // negative if the first value is less than the second, positive if the first + // value is greater than the second and zero if they are equal. self._comparator = comparator; + // _heapIdx maps an id to an index in the Heap array the corresponding value + // is located on. if (Package && Package.minimongo) self._heapIdx = new Package.minimongo.LocalCollection._IdMap; else self._heapIdx = new IdMap; + // The Heap data-structure implemented as a 0-based contiguous array where + // every item on index idx is a node in a complete binary tree. Every node can + // have leaves on indexes idx*2+1 and idx*2+2, except for the leaves. Every + // node has a parent on index (idx-1)/2; self._heap = []; + // If the initial array is passed, we can build the heap in linear time + // complexity (O(N)) compared to linearithmic time complexity (O(nlogn)) if + // we push elements one by one. 
if (_.isArray(initData)) self._initFromData(initData); }; _.extend(MaxHeap.prototype, { + // Builds a new heap in-place in linear time based on passed data _initFromData: function (data) { var self = this; - self._heap = _.clone(data); + self._heap = _.map(data, function (o) { + return { id: EJSON.clone(o.id), value: o.value }; + }); _.each(data, function (o, i) { self._heapIdx.set(o.id, i); From 19eeff650a29369fed01e8f036fb2d67467ff56a Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Thu, 20 Feb 2014 21:45:28 -0800 Subject: [PATCH 40/93] Implement heap clone with passing an array of the original's data --- packages/binary-heap/max-heap.js | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/packages/binary-heap/max-heap.js b/packages/binary-heap/max-heap.js index 6327e58e3d..37fc90c941 100644 --- a/packages/binary-heap/max-heap.js +++ b/packages/binary-heap/max-heap.js @@ -180,9 +180,7 @@ _.extend(MaxHeap.prototype, { }, clone: function () { var self = this; - var clone = new MaxHeap(self._comparator); - clone._heap = EJSON.clone(self._heap); - clone._heapIdx = self._heapIdx.clone(); + var clone = new MaxHeap(self._comparator, self._heap); return clone; }, From d87b18a56e23cf1d9ac8fbdbdeb78db5dbe1d8bb Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Thu, 20 Feb 2014 22:38:56 -0800 Subject: [PATCH 41/93] Fix building heap from array --- packages/binary-heap/max-heap.js | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/packages/binary-heap/max-heap.js b/packages/binary-heap/max-heap.js index 37fc90c941..349dde8030 100644 --- a/packages/binary-heap/max-heap.js +++ b/packages/binary-heap/max-heap.js @@ -48,7 +48,8 @@ _.extend(MaxHeap.prototype, { self._heapIdx.set(o.id, i); }); - for (var i = parentIdx(data.length); i >= 0; i--) + // start from the first non-leaf - the parent of the last leaf + for (var i = parentIdx(data.length - 1); i >= 0; i--) self._downHeap(i); }, From 7bad26e79613c5d912ed086d32e30bb53f3cfc59 Mon Sep 17 00:00:00 2001 
From: Slava Kim Date: Fri, 21 Feb 2014 14:25:06 -0800 Subject: [PATCH 42/93] Make min-max-heap's clone work in the same way it works for max-heap --- packages/binary-heap/min-max-heap.js | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/packages/binary-heap/min-max-heap.js b/packages/binary-heap/min-max-heap.js index 332dc33f07..957d6014f4 100644 --- a/packages/binary-heap/min-max-heap.js +++ b/packages/binary-heap/min-max-heap.js @@ -34,10 +34,7 @@ _.extend(MinMaxHeap.prototype, { }, clone: function () { var self = this; - var clone = new MinMaxHeap(self._comparator); - clone._heap = EJSON.clone(self._heap); - clone._heapIdx = EJSON.clone(self._heapIdx); - clone._minHeap = self._minHeap.clone(); + var clone = new MinMaxHeap(self._comparator, self._heap); return clone; }, minElementId: function () { From 503c5f263f07d4fa8598234bba6022ff4565ef37 Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Fri, 21 Feb 2014 14:27:20 -0800 Subject: [PATCH 43/93] Extra safety belt for binary-heap constructor --- packages/binary-heap/max-heap.js | 3 +++ 1 file changed, 3 insertions(+) diff --git a/packages/binary-heap/max-heap.js b/packages/binary-heap/max-heap.js index 349dde8030..ce74539d1a 100644 --- a/packages/binary-heap/max-heap.js +++ b/packages/binary-heap/max-heap.js @@ -48,6 +48,9 @@ _.extend(MaxHeap.prototype, { self._heapIdx.set(o.id, i); }); + if (! 
data.length) + return; + // start from the first non-leaf - the parent of the last leaf for (var i = parentIdx(data.length - 1); i >= 0; i--) self._downHeap(i); From e853ced77c5f42eeba06149b3525ddc0a54a346d Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Fri, 21 Feb 2014 14:35:01 -0800 Subject: [PATCH 44/93] Binary-heap: refactor out the _maxIndex method --- packages/binary-heap/max-heap.js | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/packages/binary-heap/max-heap.js b/packages/binary-heap/max-heap.js index ce74539d1a..e8a2b0445a 100644 --- a/packages/binary-heap/max-heap.js +++ b/packages/binary-heap/max-heap.js @@ -64,12 +64,10 @@ _.extend(MaxHeap.prototype, { var right = rightChildIdx(idx); var largest = idx; - if (left < self.size() && - self._comparator(self._get(left), self._get(largest)) > 0) { + if (left < self.size() && self._maxIndex(largest, left) === left) { largest = left; } - if (right < self.size() && - self._comparator(self._get(right), self._get(largest)) > 0) { + if (right < self.size() && self._maxIndex(largest, right) === right) { largest = right; } @@ -83,11 +81,10 @@ _.extend(MaxHeap.prototype, { _upHeap: function (idx) { var self = this; - var value = self._get(idx); while (idx > 0) { var parent = parentIdx(idx); - if (self._comparator(self._get(parent), value) < 0) { + if (self._maxIndex(parent, idx) === idx) { self._swap(parent, idx) idx = parent; } else { @@ -96,6 +93,13 @@ _.extend(MaxHeap.prototype, { } }, + _maxIndex: function (idxA, idxB) { + var self = this; + var valueA = self._get(idxA); + var valueB = self._get(idxB); + return self._comparator(valueA, valueB) > 0 ? 
idxA : idxB; + }, + // Internal: gets raw data object placed on idxth place in heap _get: function (idx) { var self = this; From 74553efa304f32bed43f8b5cbbee0116640e2c36 Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Fri, 21 Feb 2014 14:35:55 -0800 Subject: [PATCH 45/93] Binary-heap: avoid capitalized names for non-classes --- packages/binary-heap/max-heap.js | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/packages/binary-heap/max-heap.js b/packages/binary-heap/max-heap.js index e8a2b0445a..891e9b3771 100644 --- a/packages/binary-heap/max-heap.js +++ b/packages/binary-heap/max-heap.js @@ -108,14 +108,14 @@ _.extend(MaxHeap.prototype, { _swap: function (idxA, idxB) { var self = this; - var A = self._heap[idxA]; - var B = self._heap[idxB]; + var recA = self._heap[idxA]; + var recB = self._heap[idxB]; - self._heapIdx.set(A.id, idxB); - self._heapIdx.set(B.id, idxA); + self._heapIdx.set(recA.id, idxB); + self._heapIdx.set(recB.id, idxA); - self._heap[idxA] = B; - self._heap[idxB] = A; + self._heap[idxA] = recB; + self._heap[idxB] = recA; }, get: function (id) { From b01ed021eaee519c035b707cfdfb77501e32a7bb Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Fri, 21 Feb 2014 17:48:22 -0800 Subject: [PATCH 46/93] New giant randomized test with self-checks --- packages/binary-heap/binary-heap-tests.js | 34 +++++++++++++++++++++++ packages/binary-heap/max-heap.js | 9 ++++++ 2 files changed, 43 insertions(+) diff --git a/packages/binary-heap/binary-heap-tests.js b/packages/binary-heap/binary-heap-tests.js index 9a709b6fe4..496af9c0c3 100644 --- a/packages/binary-heap/binary-heap-tests.js +++ b/packages/binary-heap/binary-heap-tests.js @@ -81,3 +81,37 @@ Tinytest.add("binary-heap - min-max heap tests", function (test) { test.equal(h.minElementId(), "a"); }); +Tinytest.add("binary-heap - big test for min-max-heap", function (test) { + var N = 500; + var positiveNumbers = _.shuffle(_.range(1, N + 1)); + var negativeNumbers = _.shuffle(_.range(-1, -N - 
1, -1)); + var allNumbers = positiveNumbers.concat(negativeNumbers); + + var heap = new MinMaxHeap(function (a, b) { return a-b; }); + var output = []; + + _.each(allNumbers, function (n) { + heap.set(n, n); + heap._selfCheck(); + heap._minHeap._selfCheck(); + }); + + allNumbers = _.shuffle(allNumbers); + _.each(allNumbers, function (n) { + heap.set(-n, n); + heap._selfCheck(); + heap._minHeap._selfCheck(); + }); + + _.times(positiveNumbers.length + negativeNumbers.length, function () { + var minId = heap.minElementId(); + output.push(heap.get(minId)); + heap.remove(minId); + heap._selfCheck(); heap._minHeap._selfCheck(); + }); + + allNumbers.sort(function (a, b) { return a-b; }); + + test.equal(output, allNumbers); +}); + diff --git a/packages/binary-heap/max-heap.js b/packages/binary-heap/max-heap.js index 891e9b3771..2d70e3509d 100644 --- a/packages/binary-heap/max-heap.js +++ b/packages/binary-heap/max-heap.js @@ -195,6 +195,15 @@ _.extend(MaxHeap.prototype, { maxElementId: function () { var self = this; return self.size() ? self._heap[0].id : null; + }, + + _selfCheck: function () { + var self = this; + for (var i = 1; i < self._heap.length; i++) + if (self._maxIndex(parentIdx(i), i) !== parentIdx(i)) + throw new Error("An item with id " + self._heap[i].id + + " has a parent younger than him: " + + self._heap[parentIdx(i)].id); } }); From 133428e6e513ff358b41a2a8e32e1aed8448c087 Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Fri, 21 Feb 2014 17:48:37 -0800 Subject: [PATCH 47/93] Fix the removal and the priority of _maxIndex. Removing an arbitrary element you might need to upHeap the position of removed element instead of downHeap. It is not the case with the maximum, because root can't be upHeaped. 
--- packages/binary-heap/max-heap.js | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/packages/binary-heap/max-heap.js b/packages/binary-heap/max-heap.js index 2d70e3509d..7215e92d5f 100644 --- a/packages/binary-heap/max-heap.js +++ b/packages/binary-heap/max-heap.js @@ -97,7 +97,7 @@ _.extend(MaxHeap.prototype, { var self = this; var valueA = self._get(idxA); var valueB = self._get(idxB); - return self._comparator(valueA, valueB) > 0 ? idxA : idxB; + return self._comparator(valueA, valueB) >= 0 ? idxA : idxB; }, // Internal: gets raw data object placed on idxth place in heap @@ -130,8 +130,9 @@ _.extend(MaxHeap.prototype, { if (self.has(id)) { if (self.get(id) === value) return; - else + else { self.remove(id); + } } self._heapIdx.set(id, self._heap.length); @@ -149,6 +150,7 @@ _.extend(MaxHeap.prototype, { self._swap(idx, last); self._heap.pop(); self._downHeap(idx); + self._upHeap(idx); } else { self._heap.pop(); } From 1dc172696f10fb99f51f292b0da6ca645a7e4aed Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Fri, 21 Feb 2014 18:12:35 -0800 Subject: [PATCH 48/93] minor stylish tweak --- packages/binary-heap/max-heap.js | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/binary-heap/max-heap.js b/packages/binary-heap/max-heap.js index 7215e92d5f..137d7271b4 100644 --- a/packages/binary-heap/max-heap.js +++ b/packages/binary-heap/max-heap.js @@ -149,13 +149,13 @@ _.extend(MaxHeap.prototype, { if (idx !== last) { self._swap(idx, last); self._heap.pop(); + self._heapIdx.remove(id); self._downHeap(idx); self._upHeap(idx); } else { self._heap.pop(); + self._heapIdx.remove(id); } - - self._heapIdx.remove(id); } }, has: function (id) { From f38294b17301be7b7c3f0da0498cf6ea62f107dd Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Fri, 21 Feb 2014 18:19:13 -0800 Subject: [PATCH 49/93] Binary-Heap: update in-place --- packages/binary-heap/max-heap.js | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff 
--git a/packages/binary-heap/max-heap.js b/packages/binary-heap/max-heap.js index 137d7271b4..ccc4ff3f36 100644 --- a/packages/binary-heap/max-heap.js +++ b/packages/binary-heap/max-heap.js @@ -130,14 +130,16 @@ _.extend(MaxHeap.prototype, { if (self.has(id)) { if (self.get(id) === value) return; - else { - self.remove(id); - } - } - self._heapIdx.set(id, self._heap.length); - self._heap.push({ id: id, value: value }); - self._upHeap(self._heap.length - 1); + var idx = self._heapIdx.get(id); + self._heap[idx].value = value; + self._upHeap(idx); + self._downHeap(idx); + } else { + self._heapIdx.set(id, self._heap.length); + self._heap.push({ id: id, value: value }); + self._upHeap(self._heap.length - 1); + } }, remove: function (id) { var self = this; From a6410c4af750c1027756c1822c515de0bdccf8e7 Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Fri, 21 Feb 2014 18:23:58 -0800 Subject: [PATCH 50/93] A comment on Min-Max-Heap implementation --- packages/binary-heap/min-max-heap.js | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/packages/binary-heap/min-max-heap.js b/packages/binary-heap/min-max-heap.js index 957d6014f4..b491c61bb1 100644 --- a/packages/binary-heap/min-max-heap.js +++ b/packages/binary-heap/min-max-heap.js @@ -1,3 +1,15 @@ +// This implementation of Min/Max-Heap is just a subclass of Max-Heap +// with a Min-Heap as an encapsulated property. +// +// Most of the operations are just proxy methods to call the same method on both +// heaps. +// +// This implementation takes 2*N memory but is fairly simple to write and +// understand. 
And the constant factor of a simple Heap is usually smaller +// compared to other two-way priority queues like Min/Max Heaps +// (http://www.cs.otago.ac.nz/staffpriv/mike/Papers/MinMaxHeaps/MinMaxHeaps.pdf) +// and Interval Heaps +// (http://www.cise.ufl.edu/~sahni/dsaac/enrich/c13/double.htm) MinMaxHeap = function (comparator, initialData) { var self = this; From 9ccdc0b80c6ddc6526f7ce3bad09c5c67dccd6dc Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Fri, 21 Feb 2014 19:47:23 -0800 Subject: [PATCH 51/93] Don't repoll if everything fits into buffer; Use EJSON.equals Better comments --- .../mongo-livedata/oplog_observe_driver.js | 21 +++++++++++-------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/packages/mongo-livedata/oplog_observe_driver.js b/packages/mongo-livedata/oplog_observe_driver.js index 098cf10eed..50f33aa679 100644 --- a/packages/mongo-livedata/oplog_observe_driver.js +++ b/packages/mongo-livedata/oplog_observe_driver.js @@ -28,11 +28,11 @@ OplogObserveDriver = function (options) { // There are several properties ordered driver implements: // - _limit is a positive number // - _comparator is a function-comparator by which the query is ordered - // - _unpublishedBuffer is non-null collection, + // - _unpublishedBuffer is non-null Min/Max Heap, // the empty buffer in STEADY phase implies that the // everything that matches the queries selector fits // into published set. - // - _published implements maxElementId method in addition to IdMap methods + // - _published - Min Heap (also implements IdMap methods) // We don't support $near and other geo-queries so it's OK to initialize the // comparator only once in the constructor. 
@@ -159,7 +159,7 @@ _.extend(OplogObserveDriver.prototype, { var overflowingDocId = self._published.maxElementId(); var overflowingDoc = self._published.get(overflowingDocId); - if (_.isEqual(overflowingDocId, id)) { + if (EJSON.equals(overflowingDocId, id)) { throw new Error("The document just added is overflowing the published set"); } @@ -202,7 +202,7 @@ _.extend(OplogObserveDriver.prototype, { if (self._unpublishedBuffer.size() > self._limit) { var maxBufferedId = self._unpublishedBuffer.maxElementId(); - if (_.isEqual(maxBufferedId, id)) { + if (EJSON.equals(maxBufferedId, id)) { throw new Error("The document just added to buffer is overflowing the buffer"); } @@ -216,7 +216,7 @@ _.extend(OplogObserveDriver.prototype, { // To keep the contract "buffer is never empty in STEADY phase unless the // everything matching fits into published" true, we poll everything as soon // as we see the buffer becoming empty. - if (! self._unpublishedBuffer.size()) + if (! self._unpublishedBuffer.size() && ! self._safeAppendToBuffer) self._needToPollQuery(); }, // Called when a document has joined the "Matching" results set. @@ -468,11 +468,13 @@ _.extend(OplogObserveDriver.prototype, { // XXX needs more thought on non-zero skip // XXX "2" here is a "magic number" var initialCursor = self._cursorForQuery({ limit: self._limit * 2 }); + var fetchedDocsCount = 0; initialCursor.forEach(function (initialDoc) { self._addMatching(initialDoc); + fetchedDocsCount++; }); - self._safeAppendToBuffer = initialCursor.count() < self._limit * 2; + self._safeAppendToBuffer = fetchedDocsCount < self._limit * 2; if (self._stopped) throw new Error("oplog stopped quite early"); @@ -704,9 +706,10 @@ OplogObserveDriver.cursorSupported = function (cursorDescription, matcher) { if (options._disableOplog) return false; - // This option (which are mostly used for sorted cursors) require us to figure - // out where a given document fits in an order to know if it's included or - // not. 
We do it only if skip is not defined or 0. + // skip is not supported: to support it we would need to keep track of all + // "skipped" documents or at least their ids. + // limit w/o a sort specifier is not supported: current implementation needs a + // deterministic way to order documents. if (options.skip || (options.limit && !options.sort)) return false; // If a fields projection option is given check if it is supported by From 622d61f0a7b977b6a6b09d6be6e07f4213d4ad66 Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Fri, 21 Feb 2014 20:03:22 -0800 Subject: [PATCH 52/93] fix tests' setsEqual --- .../mongo-livedata/mongo_livedata_tests.js | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/packages/mongo-livedata/mongo_livedata_tests.js b/packages/mongo-livedata/mongo_livedata_tests.js index ed318b4a27..9f0665c11e 100644 --- a/packages/mongo-livedata/mongo_livedata_tests.js +++ b/packages/mongo-livedata/mongo_livedata_tests.js @@ -752,27 +752,27 @@ if (Meteor.isServer) { var run = test.runId(); var coll = new Meteor.Collection("observeLimit-"+run, collectionOptions); - var currentState = {}; var observer = function () { + var state = {}; var output = []; var callbacks = { changed: function (newDoc) { output.push({changed: newDoc._id}); - currentState[newDoc._id] = newDoc; + state[newDoc._id] = newDoc; }, added: function (newDoc) { output.push({added: newDoc._id}); - currentState[newDoc._id] = newDoc; + state[newDoc._id] = newDoc; }, removed: function (oldDoc) { output.push({removed: oldDoc._id}); - delete currentState[oldDoc._id]; + delete state[oldDoc._id]; } }; var handle = coll.find({foo: 22}, {sort: {bar: 1}, limit: 3}).observe(callbacks); - return {output: output, handle: handle}; + return {output: output, handle: handle, state: state}; }; var clearOutput = function (o) { o.output.splice(0, o.output.length); }; @@ -790,7 +790,7 @@ if (Meteor.isServer) { var setsEqual = function (a, b) { a = _.map(a, EJSON.stringify); b = _.map(b, 
EJSON.stringify); - return _.isEmpty(_.difference(a, b)); + return _.isEmpty(_.difference(a, b)) && _.isEmpty(_.difference(b, a)); }; // Insert a doc and start observing. @@ -941,9 +941,9 @@ if (Meteor.isServer) { {removed: docId10}, {added: docId6}, {added: docId11}, {added: docId12}])); - test.equal(currentState[docId6], { _id: docId6, foo: 22, bar: 24 }); - test.equal(currentState[docId11], { _id: docId11, foo: 22, bar: 33.5 }); - test.equal(currentState[docId12], { _id: docId12, foo: 22, bar: 43.5 }); + test.equal(o.state[docId6], { _id: docId6, foo: 22, bar: 24 }); + test.equal(o.state[docId11], { _id: docId11, foo: 22, bar: 33.5 }); + test.equal(o.state[docId12], { _id: docId12, foo: 22, bar: 43.5 }); clearOutput(o); o.handle.stop(); From 9d4783e4fc6bb9f4dc5f60d9ecd4b2a31a62155d Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Sun, 23 Feb 2014 17:51:51 -0800 Subject: [PATCH 53/93] Additional assert for _safeAppendToBuffer --- packages/mongo-livedata/oplog_observe_driver.js | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/packages/mongo-livedata/oplog_observe_driver.js b/packages/mongo-livedata/oplog_observe_driver.js index 50f33aa679..65e26bf1c6 100644 --- a/packages/mongo-livedata/oplog_observe_driver.js +++ b/packages/mongo-livedata/oplog_observe_driver.js @@ -174,13 +174,16 @@ _.extend(OplogObserveDriver.prototype, { self._multiplexer.removed(id); if (! self._limit) return; - // xcxc size on heaps should be cached to O(1) if (self._published.size() < self._limit) { // The unpublished buffer is empty iff published contains the whole // matching set, i.e. number of matching documents is less or equal to the // queries limit. - if (! self._unpublishedBuffer.size()) + if (! self._unpublishedBuffer.size()) { + // Assertion of the statement above + if (! 
self._safeAppendToBuffer && self._phase !== PHASE.QUERYING) + throw new Error("At this phase, buffer can be empty only if published contains the whole matching set"); return; + } var newDocId = self._unpublishedBuffer.minElementId(); var newDoc = self._unpublishedBuffer.get(newDocId); From f0d13f6678ee1fff0f25a26d6489756efd458822 Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Sun, 23 Feb 2014 17:52:17 -0800 Subject: [PATCH 54/93] Restructure and update comments for _addMatching-limits behavior The logic remains the same. Also remove `fields` - nothing uses the term `fields` except for the fields projection. --- .../mongo-livedata/oplog_observe_driver.js | 29 +++++++++++++------ 1 file changed, 20 insertions(+), 9 deletions(-) diff --git a/packages/mongo-livedata/oplog_observe_driver.js b/packages/mongo-livedata/oplog_observe_driver.js index 65e26bf1c6..c9d6ce6ed2 100644 --- a/packages/mongo-livedata/oplog_observe_driver.js +++ b/packages/mongo-livedata/oplog_observe_driver.js @@ -228,8 +228,8 @@ _.extend(OplogObserveDriver.prototype, { _addMatching: function (doc) { var self = this; var id = doc._id; - var fields = _.clone(doc); - delete fields._id; + var doc = _.clone(doc); + delete doc._id; if (self._published.has(id)) throw Error("tried to add something already published " + id); if (self._limit && self._unpublishedBuffer.has(id)) @@ -242,14 +242,25 @@ _.extend(OplogObserveDriver.prototype, { // The query is unlimited or didn't publish enough documents yet or the new // document would fit into published set pushing the maximum element out, // then we need to publish the doc. + var toPublish = ! limit || self._published.size() < limit || + comparator(maxPublished, doc) > 0; + // Otherwise we might need to buffer it (only in case of limited query). - // Buffering a new document is allowed only if it is inserted in the middle - // or the beginning of it as we cannot determine if there are documents - // outside of the buffer easily. 
- if (!limit || self._published.size() < limit || comparator(maxPublished, fields) > 0) { - self._addPublished(id, fields); - } else if ((self._safeAppendToBuffer && self._unpublishedBuffer.size() < limit) || (maxBuffered && comparator(maxBuffered, fields) > 0)) { - self._addBuffered(id, fields); + // Buffering is allowed if the buffer is not filled up yet and all matching + // docs are either in the published set or in the buffer. + var canAppendToBuffer = self._safeAppendToBuffer && + self._unpublishedBuffer.size() < limit; + + // Or if it is small enough to be safely inserted to the middle or the + // beginning of the buffer. + var canInsertIntoBuffer = maxBuffered && comparator(maxBuffered, doc) > 0; + + var toBuffer = canAppendToBuffer || canInsertIntoBuffer; + + if (toPublish) { + self._addPublished(id, doc); + } else if (toBuffer) { + self._addBuffered(id, doc); } }, // Called when a document leaves the "Matching" results set. From 90ba50acb74470d76b58b7aa5bd8531e0343bfd1 Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Sun, 23 Feb 2014 18:19:57 -0800 Subject: [PATCH 55/93] Restructure and comment on _handleDoc logic for limits Logic remains the same. 
--- .../mongo-livedata/oplog_observe_driver.js | 38 ++++++++++++++----- 1 file changed, 29 insertions(+), 9 deletions(-) diff --git a/packages/mongo-livedata/oplog_observe_driver.js b/packages/mongo-livedata/oplog_observe_driver.js index c9d6ce6ed2..9dfca00e83 100644 --- a/packages/mongo-livedata/oplog_observe_driver.js +++ b/packages/mongo-livedata/oplog_observe_driver.js @@ -233,12 +233,14 @@ _.extend(OplogObserveDriver.prototype, { if (self._published.has(id)) throw Error("tried to add something already published " + id); if (self._limit && self._unpublishedBuffer.has(id)) - throw Error("tried to add something already existed in buffer " + id); // xcxc error msg + throw Error("tried to add something already existed in buffer " + id); var limit = self._limit; var comparator = self._comparator; - var maxPublished = (limit && self._published.size() > 0) ? self._published.get(self._published.maxElementId()) : null; - var maxBuffered = (limit && self._unpublishedBuffer.size() > 0) ? self._unpublishedBuffer.get(self._unpublishedBuffer.maxElementId()) : null; + var maxPublished = (limit && self._published.size() > 0) ? + self._published.get(self._published.maxElementId()) : null; + var maxBuffered = (limit && self._unpublishedBuffer.size() > 0) ? + self._unpublishedBuffer.get(self._unpublishedBuffer.maxElementId()) : null; // The query is unlimited or didn't publish enough documents yet or the new // document would fit into published set pushing the maximum element out, // then we need to publish the doc. @@ -310,17 +312,27 @@ _.extend(OplogObserveDriver.prototype, { // that buffer can't be empty if there are matching documents not // published. Notably, we don't want to schedule repoll and continue // relying on this property. - if (!self._limit || self._unpublishedBuffer.size() === 0 || comparator(newDoc, minBuffered) <= 0) { + var staysInPublished = ! 
self._limit || + self._unpublishedBuffer.size() === 0 || + comparator(newDoc, minBuffered) <= 0; + + if (staysInPublished) { self._changePublished(id, oldDoc, newDoc); } else { // after the change doc doesn't stay in the published, remove it self._removePublished(id); // but it can move into buffered now, check it var maxBuffered = self._unpublishedBuffer.get(self._unpublishedBuffer.maxElementId()); - if (self._safeAppendToBuffer || (maxBuffered && comparator(newDoc, maxBuffered) < 0)) + + var toBuffer = self._safeAppendToBuffer || + (maxBuffered && comparator(newDoc, maxBuffered) < 0); + + if (toBuffer) { self._addBuffered(id, newDoc); - else + } else { + // Throw away from both published set and buffer self._safeAppendToBuffer = false; + } } } else if (bufferedBefore) { oldDoc = self._unpublishedBuffer.get(id); @@ -330,13 +342,21 @@ _.extend(OplogObserveDriver.prototype, { var maxPublished = self._published.get(self._published.maxElementId()); var maxBuffered = self._unpublishedBuffer.size() && self._unpublishedBuffer.get(self._unpublishedBuffer.maxElementId()); + // the buffered doc was updated, it could move to published - if (comparator(newDoc, maxPublished) < 0) { + var toPublish = comparator(newDoc, maxPublished) < 0; + + // or stays in buffer even after the change + var staysInBuffer = self._safeAppendToBuffer || + (maxBuffered && comparator(newDoc, maxBuffered) < 0); + + if (toPublish) { self._addPublished(id, newDoc); - } else if (self._safeAppendToBuffer || (maxBuffered && comparator(newDoc, maxBuffered) < 0)) { - // stays in buffer + } else if (staysInBuffer) { + // stays in buffer but changes self._unpublishedBuffer.set(id, newDoc); } else { + // Throw away from both published set and buffer self._safeAppendToBuffer = false; } } else { From 7c202f6004bc697261cf58d126eb12009f62f4f5 Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Sun, 23 Feb 2014 20:03:38 -0800 Subject: [PATCH 56/93] Better assertions in the oplog code --- 
packages/mongo-livedata/oplog_observe_driver.js | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/packages/mongo-livedata/oplog_observe_driver.js b/packages/mongo-livedata/oplog_observe_driver.js index 9dfca00e83..86f35b2d6b 100644 --- a/packages/mongo-livedata/oplog_observe_driver.js +++ b/packages/mongo-livedata/oplog_observe_driver.js @@ -270,8 +270,8 @@ _.extend(OplogObserveDriver.prototype, { // and the effect of limit enforced. _removeMatching: function (id) { var self = this; - if (!self._published.has(id) && !self._limit) - throw Error("tried to remove something unpublished " + id); // xcxc fix this error msg + if (! self._published.has(id) && ! self._limit) + throw Error("tried to remove something matching but not cached " + id); if (self._published.has(id)) { self._removePublished(id); @@ -448,9 +448,10 @@ _.extend(OplogObserveDriver.prototype, { if (self._published.has(id) || (self._limit && self._unpublishedBuffer.has(id))) self._removeMatching(id); } else if (op.op === 'i') { - // xcxc what if buffer has it? if (self._published.has(id)) - throw new Error("insert found for already-existing ID"); + throw new Error("insert found for already-existing ID in published"); + if (self._unpublishedBuffer && self._unpublishedBuffer.has(id)) + throw new Error("insert found for already-existing ID in buffer"); // XXX what if selector yields? 
for now it can't but later it could have // $where From 90a0c2d5c2a3f9df98122bc90525f722c6043fd8 Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Mon, 24 Feb 2014 00:25:38 -0800 Subject: [PATCH 57/93] Add missing `_safeAppendToBuffer = false` and add clarity with comments --- .../mongo-livedata/oplog_observe_driver.js | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git a/packages/mongo-livedata/oplog_observe_driver.js b/packages/mongo-livedata/oplog_observe_driver.js index 86f35b2d6b..16d5b7f04b 100644 --- a/packages/mongo-livedata/oplog_observe_driver.js +++ b/packages/mongo-livedata/oplog_observe_driver.js @@ -202,6 +202,8 @@ _.extend(OplogObserveDriver.prototype, { _addBuffered: function (id, doc) { var self = this; self._unpublishedBuffer.set(id, self._sharedProjectionFn(doc)); + + // If something is overflowing the buffer, we just remove it from cache if (self._unpublishedBuffer.size() > self._limit) { var maxBufferedId = self._unpublishedBuffer.maxElementId(); @@ -210,9 +212,14 @@ _.extend(OplogObserveDriver.prototype, { } self._unpublishedBuffer.remove(maxBufferedId); + + // Since something matching is removed from cache (both published set and + // buffer), set flag to false self._safeAppendToBuffer = false; } }, + // Is called either to remove the doc completely from matching set or to move + // it to the published set later. _removeBuffered: function (id) { var self = this; self._unpublishedBuffer.remove(id); @@ -263,6 +270,9 @@ _.extend(OplogObserveDriver.prototype, { self._addPublished(id, doc); } else if (toBuffer) { self._addBuffered(id, doc); + } else { + // dropping it and not saving to the cache + self._safeAppendToBuffer = false; } }, // Called when a document leaves the "Matching" results set. 
@@ -475,10 +485,11 @@ _.extend(OplogObserveDriver.prototype, { } else if ((self._published.has(id) || self._unpublishedBuffer.has(id)) && canDirectlyModifyDoc) { // Oh great, we actually know what the document is, so we can apply // this directly. - if (self._published.has(id)) - var newDoc = EJSON.clone(self._published.get(id)); - else - var newDoc = EJSON.clone(self._unpublishedBuffer.get(id)); + var newDoc = self._published.has(id) ? + self._published.get(id) : + self._unpublishedBuffer.get(id); + newDoc = EJSON.clone(newDoc); + newDoc._id = id; LocalCollection._modify(newDoc, op.o); self._handleDoc(id, self._sharedProjectionFn(newDoc)); From 036dc4885edcf429490609c036034d6ffdc08ac7 Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Mon, 24 Feb 2014 00:26:11 -0800 Subject: [PATCH 58/93] Tiny changes to tests formatting --- .../mongo-livedata/mongo_livedata_tests.js | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/packages/mongo-livedata/mongo_livedata_tests.js b/packages/mongo-livedata/mongo_livedata_tests.js index 9f0665c11e..832eb35416 100644 --- a/packages/mongo-livedata/mongo_livedata_tests.js +++ b/packages/mongo-livedata/mongo_livedata_tests.js @@ -793,9 +793,20 @@ if (Meteor.isServer) { return _.isEmpty(_.difference(a, b)) && _.isEmpty(_.difference(b, a)); }; + // tests '_id' subfields for all documents in oplog buffer + var testOplogBufferIds = function (ids) { + var bufferIds = []; + o.handle._multiplexer._observeDriver._unpublishedBuffer.forEach(function (x, id) { + bufferIds.push(id); + }); + + test.isTrue(setsEqual(ids, bufferIds)); + }; + // Insert a doc and start observing. var docId1 = ins({foo: 22, bar: 5}); var o = observer(); + var usesOplog = o.handle._multiplexer._observeDriver._usesOplog; // Initial add. 
test.length(o.output, 1); test.equal(o.output.shift(), {added: docId1}); @@ -833,8 +844,9 @@ if (Meteor.isServer) { test.length(o.output, 2); test.isTrue(setsEqual(o.output, [{removed: docId5}, {added: docId2}])); clearOutput(o); + usesOplog && testOplogBufferIds([docId4]); - // Current state is [3 5 6] 7] + // Current state is [3 5 6 | 7] // Add some negative numbers overflowing the buffer. // New documents will take the published place, [3 5 6] will take the buffer // and 7 will be outside of the buffer in MongoDB. @@ -849,7 +861,7 @@ if (Meteor.isServer) { test.equal(o.output, expected); clearOutput(o); - // Now the state is [-3 -2 -1] 3 5 6] 7 + // Now the state is [-3 -2 -1 | 3 5 6] 7 // If we update first 3 docs (increment them by 20), it would be // interesting. upd({ bar: { $lt: 0 }}, { $inc: { bar: 20 } }, { multi: true }); @@ -883,7 +895,7 @@ if (Meteor.isServer) { // XXX the oplog code analyzes the events one by one: one remove after // another. Poll-n-diff code, on the other side, analyzes the batch action // of multiple remove. Because of that difference, expected outputs differ. 
- if (o.handle._multiplexer._observeDriver._usesOplog) { + if (usesOplog) { var expectedRemoves = [{removed: docId3}, {removed: docId1}, {removed: docId2}, {removed: docId4}]; var expectedAdds = [{added: docId4}, {added: docId8}, From 97f7ccfa63b5418495b88155efd20be3ff77095e Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Mon, 24 Feb 2014 20:22:24 -0800 Subject: [PATCH 59/93] Glasser's comments --- packages/binary-heap/max-heap.js | 27 +++++++++++++++++---------- 1 file changed, 17 insertions(+), 10 deletions(-) diff --git a/packages/binary-heap/max-heap.js b/packages/binary-heap/max-heap.js index ccc4ff3f36..ddc01733fd 100644 --- a/packages/binary-heap/max-heap.js +++ b/packages/binary-heap/max-heap.js @@ -4,7 +4,7 @@ // Object: // - id - String - unique id of the item // - value - Any - the data value -// the contents of initData is retained +// each value is retained MaxHeap = function (comparator, initData) { if (! _.isFunction(comparator)) throw new Error('Passed comparator is invalid, should be a comparison function'); @@ -24,12 +24,12 @@ MaxHeap = function (comparator, initData) { // The Heap data-structure implemented as a 0-based contiguous array where // every item on index idx is a node in a complete binary tree. Every node can - // have leaves on indexes idx*2+1 and idx*2+2, except for the lists. Every + // have children on indexes idx*2+1 and idx*2+2, except for the leaves. Every // node has a parent on index (idx-1)/2; self._heap = []; // If the initial array is passed, we can build the heap in linear time - // complexity (O(N)) compared to linearithmetic time complexity (O(nlogn)) if + // complexity (O(N)) compared to linearithmic time complexity (O(nlogn)) if // we push elements one by one. 
if (_.isArray(initData)) self._initFromData(initData); @@ -41,7 +41,7 @@ _.extend(MaxHeap.prototype, { var self = this; self._heap = _.map(data, function (o) { - return { id: EJSON.clone(o.id), value: o.value }; + return { id: o.id, value: o.value }; }); _.each(data, function (o, i) { @@ -64,11 +64,11 @@ _.extend(MaxHeap.prototype, { var right = rightChildIdx(idx); var largest = idx; - if (left < self.size() && self._maxIndex(largest, left) === left) { - largest = left; + if (left < self.size()) { + largest = self._maxIndex(largest, left); } - if (right < self.size() && self._maxIndex(largest, right) === right) { - largest = right; + if (right < self.size()) { + largest = self._maxIndex(largest, right); } if (largest === idx) @@ -133,7 +133,11 @@ _.extend(MaxHeap.prototype, { var idx = self._heapIdx.get(id); self._heap[idx].value = value; + + // Fix the new value's position + // Either bubble new value up if it is greater than its parent self._upHeap(idx); + // or bubble it down if it is smaller than one of its children self._downHeap(idx); } else { self._heapIdx.set(id, self._heap.length); @@ -152,8 +156,10 @@ _.extend(MaxHeap.prototype, { self._swap(idx, last); self._heap.pop(); self._heapIdx.remove(id); - self._downHeap(idx); + + // Fix the swapped value's position self._upHeap(idx); + self._downHeap(idx); } else { self._heap.pop(); self._heapIdx.remove(id); @@ -173,6 +179,7 @@ _.extend(MaxHeap.prototype, { self._heap = []; self._heapIdx.clear(); }, + // iterate over values in no particular order forEach: function (iterator) { var self = this; _.each(self._heap, function (obj) { @@ -206,7 +213,7 @@ _.extend(MaxHeap.prototype, { for (var i = 1; i < self._heap.length; i++) if (self._maxIndex(parentIdx(i), i) !== parentIdx(i)) throw new Error("An item with id " + self._heap[i].id + - " has a parent younger than him: " + + " has a parent younger than it: " + self._heap[parentIdx(i)].id); } }); From 9451416f2767314266e024638bf23db472eec444 Mon Sep 17 00:00:00 2001 
From: Slava Kim Date: Mon, 24 Feb 2014 20:28:45 -0800 Subject: [PATCH 60/93] Use options object in MaxHeap constructor and allow passing a custom IdMap class --- packages/binary-heap/max-heap.js | 26 ++++++++++++++------------ packages/binary-heap/min-max-heap.js | 6 +++--- packages/binary-heap/package.js | 1 - 3 files changed, 17 insertions(+), 16 deletions(-) diff --git a/packages/binary-heap/max-heap.js b/packages/binary-heap/max-heap.js index ddc01733fd..aa08a3fad5 100644 --- a/packages/binary-heap/max-heap.js +++ b/packages/binary-heap/max-heap.js @@ -1,11 +1,14 @@ // Constructor of Heap // - comparator - Function - given two items returns a number -// - initData - Array - the initial data in a format: -// Object: -// - id - String - unique id of the item -// - value - Any - the data value -// each value is retained -MaxHeap = function (comparator, initData) { +// - options: +// - initData - Array - Optional - the initial data in a format: +// Object: +// - id - String - unique id of the item +// - value - Any - the data value +// each value is retained +// - IdMap - Constructor - Optional - custom IdMap class to store id->index +// mappings internally. Standard IdMap is used by default. +MaxHeap = function (comparator, options) { if (! _.isFunction(comparator)) throw new Error('Passed comparator is invalid, should be a comparison function'); var self = this; @@ -15,12 +18,11 @@ MaxHeap = function (comparator, initData) { // value is greater than the first and zero if they are equal. self._comparator = comparator; + options = _.defaults(options || {}, { IdMap: IdMap }); + // _heapIdx maps an id to an index in the Heap array the corresponding value // is located on. 
- if (Package && Package.minimongo) - self._heapIdx = new Package.minimongo.LocalCollection._IdMap; - else - self._heapIdx = new IdMap; + self._heapIdx = new options.IdMap; // The Heap data-structure implemented as a 0-based contiguous array where // every item on index idx is a node in a complete binary tree. Every node can @@ -31,8 +33,8 @@ MaxHeap = function (comparator, initData) { // If the initial array is passed, we can build the heap in linear time // complexity (O(N)) compared to linearithmic time complexity (O(nlogn)) if // we push elements one by one. - if (_.isArray(initData)) - self._initFromData(initData); + if (_.isArray(options.initData)) + self._initFromData(options.initData); }; _.extend(MaxHeap.prototype, { diff --git a/packages/binary-heap/min-max-heap.js b/packages/binary-heap/min-max-heap.js index b491c61bb1..49911b4785 100644 --- a/packages/binary-heap/min-max-heap.js +++ b/packages/binary-heap/min-max-heap.js @@ -10,13 +10,13 @@ // (http://www.cs.otago.ac.nz/staffpriv/mike/Papers/MinMaxHeaps/MinMaxHeaps.pdf) // and Interval Heaps // (http://www.cise.ufl.edu/~sahni/dsaac/enrich/c13/double.htm) -MinMaxHeap = function (comparator, initialData) { +MinMaxHeap = function (comparator, options) { var self = this; - MaxHeap.call(self, comparator, initialData); + MaxHeap.call(self, comparator, options); self._minHeap = new MaxHeap(function (a, b) { return -comparator(a, b); - }, initialData); + }, options); }; var F = function () {}; diff --git a/packages/binary-heap/package.js b/packages/binary-heap/package.js index 5788fc1973..f8c4ae4613 100644 --- a/packages/binary-heap/package.js +++ b/packages/binary-heap/package.js @@ -7,7 +7,6 @@ Package.on_use(function (api) { api.export('MaxHeap'); api.export('MinMaxHeap'); api.use(['underscore', 'id-map']); - api.use(['minimongo'], { weak: true }); api.add_files(['max-heap.js', 'min-max-heap.js']); }); From 6dfd5bfc29c3e30ff6787772c438207dc0f5c72a Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Mon, 24 Feb 
2014 21:25:17 -0800 Subject: [PATCH 61/93] Additional generated tests for min-max-heap --- packages/binary-heap/binary-heap-tests.js | 23 ++++++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) diff --git a/packages/binary-heap/binary-heap-tests.js b/packages/binary-heap/binary-heap-tests.js index 496af9c0c3..0a487b7b2f 100644 --- a/packages/binary-heap/binary-heap-tests.js +++ b/packages/binary-heap/binary-heap-tests.js @@ -90,6 +90,7 @@ Tinytest.add("binary-heap - big test for min-max-heap", function (test) { var heap = new MinMaxHeap(function (a, b) { return a-b; }); var output = []; + var initialSets = _.clone(allNumbers); _.each(allNumbers, function (n) { heap.set(n, n); heap._selfCheck(); @@ -97,6 +98,8 @@ Tinytest.add("binary-heap - big test for min-max-heap", function (test) { }); allNumbers = _.shuffle(allNumbers); + var secondarySets = _.clone(allNumbers); + _.each(allNumbers, function (n) { heap.set(-n, n); heap._selfCheck(); @@ -110,8 +113,26 @@ Tinytest.add("binary-heap - big test for min-max-heap", function (test) { heap._selfCheck(); heap._minHeap._selfCheck(); }); + test.equal(heap.size(), 0); + allNumbers.sort(function (a, b) { return a-b; }); - test.equal(output, allNumbers); + var initialTestText = "initial sets: " + initialSets.toString() + + "; secondary sets: " + secondarySets.toString(); + test.equal(output, allNumbers, initialTestText); + + _.each(initialSets, function (n) { heap.set(n, n); }) + _.each(secondarySets, function (n) { heap.set(-n, n); }); + + allNumbers.sort(function (a, b) { return b-a; }); + output = []; + _.times(positiveNumbers.length + negativeNumbers.length, function () { + var maxId = heap.maxElementId(); + output.push(heap.get(maxId)); + heap.remove(maxId); + heap._selfCheck(); heap._minHeap._selfCheck(); + }); + + test.equal(output, allNumbers, initialTestText); }); From a51bc653727b3f4fdd7e7c4bc7279999aecd6896 Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Mon, 24 Feb 2014 23:55:16 -0800 Subject: 
[PATCH 62/93] Meteor._inherits - prototypical inheritance implementation --- packages/meteor/helpers.js | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/packages/meteor/helpers.js b/packages/meteor/helpers.js index 9b05246669..341df557b9 100644 --- a/packages/meteor/helpers.js +++ b/packages/meteor/helpers.js @@ -111,5 +111,26 @@ _.extend(Meteor, { return fut.wait(); return result; }; + }, + + // Sets child's prototype to a new object whose prototype is parent's + // prototype. Used as: + // Meteor._inherits(ClassB, ClassA). + // _.extend(ClassB.prototype, { ... }) + // Inspired by CoffeeScript's `extend` and Google Closure's `goog.inherits`. + _inherits: function (Child, Parent) { + // copy static fields + _.each(Parent, function (prop, field) { + Child[field] = prop; + }); + + // a middle member of prototype chain: takes the prototype from the Parent + var Middle = function () { + this.constructor = Child; + }; + Middle.prototype = Parent.prototype; + Child.prototype = new Middle(); + Child.__super__ = Parent.prototype; + return Child; } }); From 7bd160192c7d7f2b81bf9c616f50f015a9e83425 Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Mon, 24 Feb 2014 23:55:35 -0800 Subject: [PATCH 63/93] Replace ad-hoc inheritance with Meteor._inherits calls --- packages/binary-heap/min-max-heap.js | 4 +--- packages/minimongo/id_map.js | 2 +- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/packages/binary-heap/min-max-heap.js b/packages/binary-heap/min-max-heap.js index 49911b4785..990a71402d 100644 --- a/packages/binary-heap/min-max-heap.js +++ b/packages/binary-heap/min-max-heap.js @@ -19,9 +19,7 @@ MinMaxHeap = function (comparator, options) { }, options); }; -var F = function () {}; -F.prototype = MaxHeap.prototype; -MinMaxHeap.prototype = new F; +Meteor._inherits(MinMaxHeap, MaxHeap); _.extend(MinMaxHeap.prototype, { set: function (id, value) { diff --git a/packages/minimongo/id_map.js b/packages/minimongo/id_map.js index 
cf098dbee1..ba4880980b 100644 --- a/packages/minimongo/id_map.js +++ b/packages/minimongo/id_map.js @@ -3,5 +3,5 @@ LocalCollection._IdMap = function () { IdMap.call(self, LocalCollection._idStringify, LocalCollection._idParse); }; -LocalCollection._IdMap.prototype = IdMap.prototype; +Meteor._inherits(LocalCollection._IdMap, IdMap); From b8c8d87d5fec8a681ea8e73026551f242dbf0349 Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Thu, 20 Feb 2014 16:37:35 -0800 Subject: [PATCH 64/93] Relax tests and remove a dead piece of code Apparently the implementation of sin/cos changed in the recent Chrome as now the results are off by 1e-6 (compared to a different C++ implementation we generated the tests previously). It is not worth tracking down the 1e-6 error and change implementation for it. --- packages/geojson-utils/geojson-utils.tests.js | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/packages/geojson-utils/geojson-utils.tests.js b/packages/geojson-utils/geojson-utils.tests.js index 73cc5d1554..ea7ab39462 100644 --- a/packages/geojson-utils/geojson-utils.tests.js +++ b/packages/geojson-utils/geojson-utils.tests.js @@ -54,10 +54,6 @@ Tinytest.add("geojson-utils - point distance", function (test) { }); Tinytest.add("geojson-utils - points distance generated tests", function (test) { - var floatEqual = function (a, b) { - test.isTrue(Math.abs(a - b) < 0.000001); - }; - // Pairs of points we will be looking a distance between var tests = [[[-19.416501816827804,-13.442164216190577], [8.694866622798145,-8.511979941977188]], [[151.2841189110186,-56.14564002258703], [167.77983197313733,0.05544793023727834]], @@ -91,8 +87,8 @@ Tinytest.add("geojson-utils - points distance generated tests", function (test) _.each(tests, function (pair, testN) { var distance = GeoJSON.pointDistance.apply(this, _.map(pair, toGeoJSONPoint)); - test.isTrue(Math.abs(distance - answers[testN]) < 0.00000001, - "Wrong distance between points " + JSON.stringify(pair) + ": " + 
distance); + test.isTrue(Math.abs(distance - answers[testN]) < 0.000001, + "Wrong distance between points " + JSON.stringify(pair) + ": " + distance + ", " + Math.abs(distance - answers[testN]) + " differenc"); }); function toGeoJSONPoint (coordinates) { From db28492e6bd99aa138e0c7d4a1ce15fe4661f13b Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Tue, 25 Feb 2014 00:45:22 -0800 Subject: [PATCH 65/93] Tweak the description of id-map --- packages/id-map/package.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/id-map/package.js b/packages/id-map/package.js index d3ef1db346..876d3406a6 100644 --- a/packages/id-map/package.js +++ b/packages/id-map/package.js @@ -1,5 +1,5 @@ Package.describe({ - summary: "Dictionary data structure: a wrapper for a raw object", + summary: "Dictionary data structure allowing non-string keys", internal: true }); From 4657f8c1df75b388003a2aef42754d7a57acb17c Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Tue, 25 Feb 2014 00:51:00 -0800 Subject: [PATCH 66/93] Use binary-heap package in mongo-livedata --- packages/mongo-livedata/dummy-structure.js | 44 ------------------- .../mongo-livedata/oplog_observe_driver.js | 5 ++- packages/mongo-livedata/package.js | 6 ++- 3 files changed, 7 insertions(+), 48 deletions(-) delete mode 100644 packages/mongo-livedata/dummy-structure.js diff --git a/packages/mongo-livedata/dummy-structure.js b/packages/mongo-livedata/dummy-structure.js deleted file mode 100644 index 0b0708c210..0000000000 --- a/packages/mongo-livedata/dummy-structure.js +++ /dev/null @@ -1,44 +0,0 @@ -// Implements the interface of IdMap and knows how to find Min or Max element -DummyStructure = function (comparator) { - var self = this; - self.comparator = comparator; - self.idMap = new LocalCollection._IdMap; -}; - -_.each(['get', 'set', 'remove', 'has', 'empty', 'clear', 'forEach', 'size', 'setDefault'], function (method) { - DummyStructure.prototype[method] = function (/* arguments */) { - var self = this; - 
return self.idMap[method].apply(self.idMap, arguments); - }; -}); - -DummyStructure.prototype.clone = function () { - var self = this; - var clone = new DummyStructure; - clone.comparator = self.comparator; - clone.idMap = self.idMap.clone(); - return clone; -}; - -DummyStructure.prototype.minElementId = function () { - var self = this; - var minElementId = null; - self.idMap.forEach(function (value, key) { - if (minElementId === null) - minElementId = key; - else if (self.comparator(value, self.idMap.get(minElementId)) < 0) - minElementId = key; - }); - - return minElementId; -}; - -DummyStructure.prototype.maxElementId = function () { - var self = this; - var comparator = self.comparator; - self.comparator = function (a, b) { return -comparator(a, b); }; - var maxElementId = self.minElementId(); - self.comparator = comparator; - return maxElementId; -}; - diff --git a/packages/mongo-livedata/oplog_observe_driver.js b/packages/mongo-livedata/oplog_observe_driver.js index 16d5b7f04b..41f58efa3d 100644 --- a/packages/mongo-livedata/oplog_observe_driver.js +++ b/packages/mongo-livedata/oplog_observe_driver.js @@ -38,11 +38,12 @@ OplogObserveDriver = function (options) { // comparator only once in the constructor. 
var sorter = new Minimongo.Sorter(options.cursorDescription.options.sort); var comparator = sorter.getComparator(); + var heapOptions = { IdMap: LocalCollection._IdMap }; self._limit = self._cursorDescription.options.limit; self._comparator = comparator; - self._unpublishedBuffer = new DummyStructure(comparator); + self._unpublishedBuffer = new MinMaxHeap(comparator, heapOptions); // We need something that can find Max value in addition to IdMap interface - self._published = new DummyStructure(comparator); + self._published = new MaxHeap(comparator, heapOptions); } else { self._limit = 0; self._comparator = null; diff --git a/packages/mongo-livedata/package.js b/packages/mongo-livedata/package.js index 898258486a..4f1dfa8866 100644 --- a/packages/mongo-livedata/package.js +++ b/packages/mongo-livedata/package.js @@ -24,6 +24,10 @@ Package.on_use(function (api) { ['client', 'server']); api.use('check', ['client', 'server']); + // Binary Heap data structure is used to optimize oplog observe driver + // performance. + api.use('binary-heap', 'server'); + // Allow us to detect 'insecure'. api.use('insecure', {weak: true}); @@ -47,8 +51,6 @@ Package.on_use(function (api) { // For tests only. 
api.export('MongoTest', 'server', {testOnly: true}); - // xcxc temporary - api.add_files('dummy-structure.js', 'server'); api.add_files(['mongo_driver.js', 'oplog_tailing.js', 'observe_multiplex.js', 'doc_fetcher.js', 'polling_observe_driver.js','oplog_observe_driver.js'], From be7038e1e8a22c77f22943acd1a571af4af28c5f Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Tue, 25 Feb 2014 01:11:35 -0800 Subject: [PATCH 67/93] More white-box tests one test fails so far --- .../mongo-livedata/mongo_livedata_tests.js | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/packages/mongo-livedata/mongo_livedata_tests.js b/packages/mongo-livedata/mongo_livedata_tests.js index 832eb35416..0d36a112e7 100644 --- a/packages/mongo-livedata/mongo_livedata_tests.js +++ b/packages/mongo-livedata/mongo_livedata_tests.js @@ -800,7 +800,7 @@ if (Meteor.isServer) { bufferIds.push(id); }); - test.isTrue(setsEqual(ids, bufferIds)); + test.isTrue(setsEqual(ids, bufferIds), "expected: " + ids + "; got: " + bufferIds); }; // Insert a doc and start observing. @@ -885,8 +885,9 @@ if (Meteor.isServer) { expectedRemoves)); test.equal(_.filter(o.output, function (e){return e.added;}), expectedAdds); clearOutput(o); + usesOplog && testOplogBufferIds([docId4, docId7, docId8]); - // The new arrangement is [3 5 6] 7 17 18] 19 + // The new arrangement is [3 5 6 | 7 17 18] 19 // By ids: [docId3, docId1, docId2] docId4] docId6 docId7 docId8 // Remove first 4 docs (3, 1, 2, 4) forcing buffer to become empty and // schedule a repoll. 
@@ -921,27 +922,33 @@ if (Meteor.isServer) { var docId11 = ins({ foo: 22, bar: 41 }); var docId12 = ins({ foo: 22, bar: 51 }); + // Becomes [17 18 19 | 21 31 41] 51 + usesOplog && testOplogBufferIds([docId9, docId10, docId11]); test.length(o.output, 0); upd({ bar: { $lt: 20 } }, { $inc: { bar: 5 } }, { multi: true }); - // Becomes [21 22 23] 24 31 41] 51 + // Becomes [21 22 23 | 24 31 41] 51 test.length(o.output, 4); test.isTrue(setsEqual(o.output, [{removed: docId6}, {added: docId9}, {changed: docId7}, {changed: docId8}])); clearOutput(o); + usesOplog && testOplogBufferIds([docId6, docId10, docId11]); rem(docId9); - // Becomes [22 23 24] 31 41] 51 + // Becomes [22 23 24 | 31 41] 51 test.length(o.output, 2); test.isTrue(setsEqual(o.output, [{removed: docId9}, {added: docId6}])); clearOutput(o); + usesOplog && testOplogBufferIds([docId10, docId11]); upd({ bar: { $gt: 25 } }, { $inc: { bar: -7.5 } }, { multi: true }); - // Becomes [22 23 23.5] 24 33.5] 43.5 + // Becomes [22 23 23.5 | 24 33.5] 43.5 test.length(o.output, 2); test.isTrue(setsEqual(o.output, [{removed: docId6}, {added: docId10}])); clearOutput(o); + // xcxc this test fails :( + usesOplog && testOplogBufferIds([docId6, docId11]); // Force buffer objects to be moved into published set so we can check them rem(docId7); @@ -957,6 +964,7 @@ if (Meteor.isServer) { test.equal(o.state[docId11], { _id: docId11, foo: 22, bar: 33.5 }); test.equal(o.state[docId12], { _id: docId12, foo: 22, bar: 43.5 }); clearOutput(o); + usesOplog && testOplogBufferIds([]); o.handle.stop(); onComplete(); From b082f1847a77e2a58aa1bbed1e7e9dc765674476 Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Tue, 25 Feb 2014 10:00:27 -0800 Subject: [PATCH 68/93] Another small test missing --- packages/mongo-livedata/mongo_livedata_tests.js | 1 + 1 file changed, 1 insertion(+) diff --git a/packages/mongo-livedata/mongo_livedata_tests.js b/packages/mongo-livedata/mongo_livedata_tests.js index 0d36a112e7..942d26ccb4 100644 --- 
a/packages/mongo-livedata/mongo_livedata_tests.js +++ b/packages/mongo-livedata/mongo_livedata_tests.js @@ -915,6 +915,7 @@ if (Meteor.isServer) { expectedRemoves)); test.equal(_.filter(o.output, function (e) {return e.added;}), expectedAdds); clearOutput(o); + usesOplog && testOplogBufferIds([]); // The new arrangement is [17 18 19] or [docId6 docId7 docId8] var docId9 = ins({ foo: 22, bar: 21 }); From 3355ba7e85db7f504bafb310e760c66a43e66c22 Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Mon, 24 Feb 2014 12:08:41 -0800 Subject: [PATCH 69/93] Fix a check when buffer can be null --- packages/mongo-livedata/oplog_observe_driver.js | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/packages/mongo-livedata/oplog_observe_driver.js b/packages/mongo-livedata/oplog_observe_driver.js index 41f58efa3d..cb125a6c78 100644 --- a/packages/mongo-livedata/oplog_observe_driver.js +++ b/packages/mongo-livedata/oplog_observe_driver.js @@ -481,9 +481,12 @@ _.extend(OplogObserveDriver.prototype, { var canDirectlyModifyDoc = !isReplace && modifierCanBeDirectlyApplied(op.o); + var publishedBefore = self._published.has(id); + var bufferedBefore = self._limit && self._unpublishedBuffer.has(id); + if (isReplace) { self._handleDoc(id, _.extend({_id: id}, op.o)); - } else if ((self._published.has(id) || self._unpublishedBuffer.has(id)) && canDirectlyModifyDoc) { + } else if ((publishedBefore || bufferedBefore) && canDirectlyModifyDoc) { // Oh great, we actually know what the document is, so we can apply // this directly. var newDoc = self._published.has(id) ? 
From 6ad69c52ae6cfc032e8a4032dc872b2ca8162c15 Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Mon, 24 Feb 2014 12:10:15 -0800 Subject: [PATCH 70/93] Explicit test for unsupported cursor with a single skip --- packages/mongo-livedata/oplog_tests.js | 1 + 1 file changed, 1 insertion(+) diff --git a/packages/mongo-livedata/oplog_tests.js b/packages/mongo-livedata/oplog_tests.js index 5d28c01a44..d9bc31709c 100644 --- a/packages/mongo-livedata/oplog_tests.js +++ b/packages/mongo-livedata/oplog_tests.js @@ -43,4 +43,5 @@ Tinytest.add("mongo-livedata - oplog - cursorSupported", function (test) { supported(true, {}, { sort: {x:1}, limit: 5 }); supported(false, {}, { limit: 5 }); supported(false, {}, { skip: 2, limit: 5 }); + supported(false, {}, { skip: 2 }); }); From 76ffc74e0efb96971499ed8c05a88e250b757478 Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Tue, 25 Feb 2014 11:26:38 -0800 Subject: [PATCH 71/93] Tests tests tests --- packages/mongo-livedata/mongo_livedata_tests.js | 1 + 1 file changed, 1 insertion(+) diff --git a/packages/mongo-livedata/mongo_livedata_tests.js b/packages/mongo-livedata/mongo_livedata_tests.js index 942d26ccb4..f747a771b6 100644 --- a/packages/mongo-livedata/mongo_livedata_tests.js +++ b/packages/mongo-livedata/mongo_livedata_tests.js @@ -860,6 +860,7 @@ if (Meteor.isServer) { test.equal(o.output, expected); clearOutput(o); + usesOplog && testOplogBufferIds([docId1, docId2, docId3]); // Now the state is [-3 -2 -1 | 3 5 6] 7 // If we update first 3 docs (increment them by 20), it would be From 370ee125a9252d747a7edd975c7c32f6de12ab87 Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Tue, 25 Feb 2014 11:37:01 -0800 Subject: [PATCH 72/93] Rationalize the test failure --- packages/mongo-livedata/mongo_livedata_tests.js | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/packages/mongo-livedata/mongo_livedata_tests.js b/packages/mongo-livedata/mongo_livedata_tests.js index f747a771b6..d29114c3b3 100644 --- 
a/packages/mongo-livedata/mongo_livedata_tests.js +++ b/packages/mongo-livedata/mongo_livedata_tests.js @@ -945,12 +945,13 @@ if (Meteor.isServer) { usesOplog && testOplogBufferIds([docId10, docId11]); upd({ bar: { $gt: 25 } }, { $inc: { bar: -7.5 } }, { multi: true }); - // Becomes [22 23 23.5 | 24 33.5] 43.5 + // Becomes [22 23 23.5 | 24] 33.5 43.5 - 33.5 doesn't update in-place in + // buffer, because it the driver is not sure it can do it and there is no a + // different doc which is less than 33.5. test.length(o.output, 2); test.isTrue(setsEqual(o.output, [{removed: docId6}, {added: docId10}])); clearOutput(o); - // xcxc this test fails :( - usesOplog && testOplogBufferIds([docId6, docId11]); + usesOplog && testOplogBufferIds([docId6]); // Force buffer objects to be moved into published set so we can check them rem(docId7); From b23a26fbbd1e5449e422661a11e5dfcf23f0b88c Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Tue, 25 Feb 2014 11:49:54 -0800 Subject: [PATCH 73/93] More white-box style tests for _safeAppendToBuffer flag --- packages/mongo-livedata/mongo_livedata_tests.js | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/packages/mongo-livedata/mongo_livedata_tests.js b/packages/mongo-livedata/mongo_livedata_tests.js index d29114c3b3..161ccbf36d 100644 --- a/packages/mongo-livedata/mongo_livedata_tests.js +++ b/packages/mongo-livedata/mongo_livedata_tests.js @@ -802,6 +802,12 @@ if (Meteor.isServer) { test.isTrue(setsEqual(ids, bufferIds), "expected: " + ids + "; got: " + bufferIds); }; + var testSafeAppendToBufferFlag = function (expected) { + if (expected) + test.isTrue(o.handle._multiplexer._observeDriver._safeAppendToBuffer); + else + test.isFalse(o.handle._multiplexer._observeDriver._safeAppendToBuffer); + }; // Insert a doc and start observing. 
var docId1 = ins({foo: 22, bar: 5}); @@ -845,6 +851,7 @@ if (Meteor.isServer) { test.isTrue(setsEqual(o.output, [{removed: docId5}, {added: docId2}])); clearOutput(o); usesOplog && testOplogBufferIds([docId4]); + usesOplog && testSafeAppendToBufferFlag(true); // Current state is [3 5 6 | 7] // Add some negative numbers overflowing the buffer. @@ -861,6 +868,7 @@ if (Meteor.isServer) { test.equal(o.output, expected); clearOutput(o); usesOplog && testOplogBufferIds([docId1, docId2, docId3]); + usesOplog && testSafeAppendToBufferFlag(false); // Now the state is [-3 -2 -1 | 3 5 6] 7 // If we update first 3 docs (increment them by 20), it would be @@ -887,6 +895,7 @@ if (Meteor.isServer) { test.equal(_.filter(o.output, function (e){return e.added;}), expectedAdds); clearOutput(o); usesOplog && testOplogBufferIds([docId4, docId7, docId8]); + usesOplog && testSafeAppendToBufferFlag(false); // The new arrangement is [3 5 6 | 7 17 18] 19 // By ids: [docId3, docId1, docId2] docId4] docId6 docId7 docId8 @@ -917,6 +926,7 @@ if (Meteor.isServer) { test.equal(_.filter(o.output, function (e) {return e.added;}), expectedAdds); clearOutput(o); usesOplog && testOplogBufferIds([]); + usesOplog && testSafeAppendToBufferFlag(true); // The new arrangement is [17 18 19] or [docId6 docId7 docId8] var docId9 = ins({ foo: 22, bar: 21 }); @@ -926,6 +936,7 @@ if (Meteor.isServer) { // Becomes [17 18 19 | 21 31 41] 51 usesOplog && testOplogBufferIds([docId9, docId10, docId11]); + usesOplog && testSafeAppendToBufferFlag(false); test.length(o.output, 0); upd({ bar: { $lt: 20 } }, { $inc: { bar: 5 } }, { multi: true }); // Becomes [21 22 23 | 24 31 41] 51 @@ -936,6 +947,7 @@ if (Meteor.isServer) { {changed: docId8}])); clearOutput(o); usesOplog && testOplogBufferIds([docId6, docId10, docId11]); + usesOplog && testSafeAppendToBufferFlag(false); rem(docId9); // Becomes [22 23 24 | 31 41] 51 @@ -943,6 +955,7 @@ if (Meteor.isServer) { test.isTrue(setsEqual(o.output, [{removed: docId9}, {added: 
docId6}])); clearOutput(o); usesOplog && testOplogBufferIds([docId10, docId11]); + usesOplog && testSafeAppendToBufferFlag(false); upd({ bar: { $gt: 25 } }, { $inc: { bar: -7.5 } }, { multi: true }); // Becomes [22 23 23.5 | 24] 33.5 43.5 - 33.5 doesn't update in-place in @@ -952,6 +965,7 @@ if (Meteor.isServer) { test.isTrue(setsEqual(o.output, [{removed: docId6}, {added: docId10}])); clearOutput(o); usesOplog && testOplogBufferIds([docId6]); + usesOplog && testSafeAppendToBufferFlag(false); // Force buffer objects to be moved into published set so we can check them rem(docId7); @@ -968,6 +982,7 @@ if (Meteor.isServer) { test.equal(o.state[docId12], { _id: docId12, foo: 22, bar: 43.5 }); clearOutput(o); usesOplog && testOplogBufferIds([]); + usesOplog && testSafeAppendToBufferFlag(true); o.handle.stop(); onComplete(); From 4b955b5afc6407c3e05ab67eff3756f03e1c55dc Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Tue, 25 Feb 2014 22:45:29 -0800 Subject: [PATCH 74/93] Relax events order in tests as mongo-livedata doesn't define the order in observe interface --- .../mongo-livedata/mongo_livedata_tests.js | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) diff --git a/packages/mongo-livedata/mongo_livedata_tests.js b/packages/mongo-livedata/mongo_livedata_tests.js index 161ccbf36d..01c5b54b52 100644 --- a/packages/mongo-livedata/mongo_livedata_tests.js +++ b/packages/mongo-livedata/mongo_livedata_tests.js @@ -841,9 +841,9 @@ if (Meteor.isServer) { var docId5 = ins({ foo: 22, bar: -1 }); // We should get an added and a removed events test.length(o.output, 2); - test.equal(o.output.shift(), {added: docId5}); // doc 2 was removed from the published set as it is too big to be in - test.equal(o.output.shift(), {removed: docId2}); + test.isTrue(setsEqual(o.output, [{added: docId5}, {removed: docId2}])); + clearOutput(o); // Now remove something and that doc 2 should be right back rem(docId5); @@ -865,7 +865,7 @@ if (Meteor.isServer) { {added: docId7}, 
{removed: docId1}, {added: docId8}, {removed: docId3}]; - test.equal(o.output, expected); + test.isTrue(setsEqual(o.output, expected)); clearOutput(o); usesOplog && testOplogBufferIds([docId1, docId2, docId3]); usesOplog && testSafeAppendToBufferFlag(false); @@ -887,12 +887,7 @@ if (Meteor.isServer) { {added: docId1}, {added: docId2}]; - // Note: since we are updating multiple things, the order of updates may - // differ from launch to launch. That's why we compare even positions - // (removes) w/o looking at ordering. - test.isTrue(setsEqual(_.filter(o.output, function (e) {return e.removed;}), - expectedRemoves)); - test.equal(_.filter(o.output, function (e){return e.added;}), expectedAdds); + test.isTrue(setsEqual(o.output, expectedAdds.concat(expectedRemoves))); clearOutput(o); usesOplog && testOplogBufferIds([docId4, docId7, docId8]); usesOplog && testSafeAppendToBufferFlag(false); @@ -921,9 +916,7 @@ if (Meteor.isServer) { test.length(o.output, 6); } - test.isTrue(setsEqual(_.filter(o.output, function (e) {return e.removed;}), - expectedRemoves)); - test.equal(_.filter(o.output, function (e) {return e.added;}), expectedAdds); + test.isTrue(setsEqual(o.output, expectedAdds.concat(expectedRemoves))); clearOutput(o); usesOplog && testOplogBufferIds([]); usesOplog && testSafeAppendToBufferFlag(true); @@ -977,6 +970,7 @@ if (Meteor.isServer) { {removed: docId10}, {added: docId6}, {added: docId11}, {added: docId12}])); + test.length(_.keys(o.state), 3); test.equal(o.state[docId6], { _id: docId6, foo: 22, bar: 24 }); test.equal(o.state[docId11], { _id: docId11, foo: 22, bar: 33.5 }); test.equal(o.state[docId12], { _id: docId12, foo: 22, bar: 43.5 }); From afc58bcac6f43f3d1631c682e2eb21e308e759ab Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Tue, 25 Feb 2014 23:15:02 -0800 Subject: [PATCH 75/93] fix typo --- packages/mongo-livedata/oplog_observe_driver.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/packages/mongo-livedata/oplog_observe_driver.js b/packages/mongo-livedata/oplog_observe_driver.js index cb125a6c78..199a91146d 100644 --- a/packages/mongo-livedata/oplog_observe_driver.js +++ b/packages/mongo-livedata/oplog_observe_driver.js @@ -759,7 +759,7 @@ OplogObserveDriver.cursorSupported = function (cursorDescription, matcher) { // skip is not supported: to support it we would need to keep track of all // "skipped" documents or at least their ids. // limit w/o a sort specifier is not supported: current implementation needs a - // determent way to order documents. + // deterministic way to order documents. if (options.skip || (options.limit && !options.sort)) return false; // If a fields projection option is given check if it is supported by From a0cc339b6d5171345493379aefd5d547848d5c8b Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Tue, 25 Feb 2014 23:19:20 -0800 Subject: [PATCH 76/93] toPublish and toBuffer (or staysInBuffer) can't be true at once --- packages/mongo-livedata/oplog_observe_driver.js | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/packages/mongo-livedata/oplog_observe_driver.js b/packages/mongo-livedata/oplog_observe_driver.js index 199a91146d..199fba6265 100644 --- a/packages/mongo-livedata/oplog_observe_driver.js +++ b/packages/mongo-livedata/oplog_observe_driver.js @@ -258,12 +258,13 @@ _.extend(OplogObserveDriver.prototype, { // Otherwise we might need to buffer it (only in case of limited query). // Buffering is allowed if the buffer is not filled up yet and all matching // docs are either in the published set or in the buffer. - var canAppendToBuffer = self._safeAppendToBuffer && + var canAppendToBuffer = !toPublish && self._safeAppendToBuffer && self._unpublishedBuffer.size() < limit; // Or if it is small enough to be safely inserted to the middle or the // beginning of the buffer. 
- var canInsertIntoBuffer = maxBuffered && comparator(maxBuffered, doc) > 0; + var canInsertIntoBuffer = !toPublish && maxBuffered && + comparator(maxBuffered, doc) > 0; var toBuffer = canAppendToBuffer || canInsertIntoBuffer; @@ -358,8 +359,8 @@ _.extend(OplogObserveDriver.prototype, { var toPublish = comparator(newDoc, maxPublished) < 0; // or stays in buffer even after the change - var staysInBuffer = self._safeAppendToBuffer || - (maxBuffered && comparator(newDoc, maxBuffered) < 0); + var staysInBuffer = (! toPublish && self._safeAppendToBuffer) || + (!toPublish && maxBuffered && comparator(newDoc, maxBuffered) < 0); if (toPublish) { self._addPublished(id, newDoc); From 2f4dac064e7895787a4f7cc0d6c9896cb8289a45 Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Tue, 25 Feb 2014 23:22:59 -0800 Subject: [PATCH 77/93] Keep _id in published and buffer --- packages/mongo-livedata/oplog_observe_driver.js | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/packages/mongo-livedata/oplog_observe_driver.js b/packages/mongo-livedata/oplog_observe_driver.js index 199fba6265..8d422ac196 100644 --- a/packages/mongo-livedata/oplog_observe_driver.js +++ b/packages/mongo-livedata/oplog_observe_driver.js @@ -142,8 +142,10 @@ OplogObserveDriver = function (options) { _.extend(OplogObserveDriver.prototype, { _addPublished: function (id, doc) { var self = this; + var fields = _.clone(doc); + delete fields._id; self._published.set(id, self._sharedProjectionFn(doc)); - self._multiplexer.added(id, self._projectionFn(doc)); + self._multiplexer.added(id, self._projectionFn(fields)); // After adding this document, the published set might be overflowed // (exceeding capacity specified by limit). 
If so, push the maximum element @@ -236,8 +238,6 @@ _.extend(OplogObserveDriver.prototype, { _addMatching: function (doc) { var self = this; var id = doc._id; - var doc = _.clone(doc); - delete doc._id; if (self._published.has(id)) throw Error("tried to add something already published " + id); if (self._limit && self._unpublishedBuffer.has(id)) @@ -310,7 +310,6 @@ _.extend(OplogObserveDriver.prototype, { } else if (cachedBefore && !matchesNow) { self._removeMatching(id); } else if (cachedBefore && matchesNow) { - delete newDoc._id; var oldDoc = self._published.get(id); var comparator = self._comparator; var minBuffered = self._limit && self._unpublishedBuffer.size() && @@ -690,7 +689,6 @@ _.extend(OplogObserveDriver.prototype, { // Finally, replace the buffer newBuffer.forEach(function (doc, id) { - delete doc._id; self._addBuffered(id, doc); }); From a94bb6d88310008bbd1c0c31f5a8b36f444f8909 Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Tue, 25 Feb 2014 23:26:29 -0800 Subject: [PATCH 78/93] Allow appending to buffer if new doc is equivalent to the greatest element --- packages/mongo-livedata/oplog_observe_driver.js | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/packages/mongo-livedata/oplog_observe_driver.js b/packages/mongo-livedata/oplog_observe_driver.js index 8d422ac196..8400373390 100644 --- a/packages/mongo-livedata/oplog_observe_driver.js +++ b/packages/mongo-livedata/oplog_observe_driver.js @@ -210,10 +210,6 @@ _.extend(OplogObserveDriver.prototype, { if (self._unpublishedBuffer.size() > self._limit) { var maxBufferedId = self._unpublishedBuffer.maxElementId(); - if (EJSON.equals(maxBufferedId, id)) { - throw new Error("The document just added to buffer is overflowing the buffer"); - } - self._unpublishedBuffer.remove(maxBufferedId); // Since something matching is removed from cache (both published set and @@ -253,7 +249,7 @@ _.extend(OplogObserveDriver.prototype, { // document would fit into published set pushing the 
maximum element out, // then we need to publish the doc. var toPublish = ! limit || self._published.size() < limit || - comparator(maxPublished, doc) > 0; + comparator(doc, maxPublished) < 0; // Otherwise we might need to buffer it (only in case of limited query). // Buffering is allowed if the buffer is not filled up yet and all matching @@ -264,7 +260,7 @@ _.extend(OplogObserveDriver.prototype, { // Or if it is small enough to be safely inserted to the middle or the // beginning of the buffer. var canInsertIntoBuffer = !toPublish && maxBuffered && - comparator(maxBuffered, doc) > 0; + comparator(doc, maxBuffered) <= 0; var toBuffer = canAppendToBuffer || canInsertIntoBuffer; @@ -336,7 +332,7 @@ _.extend(OplogObserveDriver.prototype, { var maxBuffered = self._unpublishedBuffer.get(self._unpublishedBuffer.maxElementId()); var toBuffer = self._safeAppendToBuffer || - (maxBuffered && comparator(newDoc, maxBuffered) < 0); + (maxBuffered && comparator(newDoc, maxBuffered) <= 0); if (toBuffer) { self._addBuffered(id, newDoc); @@ -359,7 +355,7 @@ _.extend(OplogObserveDriver.prototype, { // or stays in buffer even after the change var staysInBuffer = (! toPublish && self._safeAppendToBuffer) || - (!toPublish && maxBuffered && comparator(newDoc, maxBuffered) < 0); + (!toPublish && maxBuffered && comparator(newDoc, maxBuffered) <= 0); if (toPublish) { self._addPublished(id, newDoc); From de9bb4bfc4e6051988099bcd2356e72cf407582a Mon Sep 17 00:00:00 2001 From: David Glasser Date: Wed, 26 Feb 2014 15:53:48 -0800 Subject: [PATCH 79/93] Change how we sanity-check _publishNewResults The old version was just checking that docs *matched*, but we want the stronger result that docs are *published*. 
--- .../mongo-livedata/oplog_observe_driver.js | 21 ++++++++++++------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/packages/mongo-livedata/oplog_observe_driver.js b/packages/mongo-livedata/oplog_observe_driver.js index 8400373390..5fb94ec4b1 100644 --- a/packages/mongo-livedata/oplog_observe_driver.js +++ b/packages/mongo-livedata/oplog_observe_driver.js @@ -287,15 +287,11 @@ _.extend(OplogObserveDriver.prototype, { self._removeBuffered(id); } }, - _handleDoc: function (id, newDoc, mustMatchNow) { + _handleDoc: function (id, newDoc) { var self = this; newDoc = _.clone(newDoc); var matchesNow = newDoc && self._matcher.documentMatches(newDoc).result; - if (mustMatchNow && !matchesNow) { - throw Error("expected " + EJSON.stringify(newDoc) + " to match " - + EJSON.stringify(self._cursorDescription)); - } var publishedBefore = self._published.has(id); var bufferedBefore = self._limit && self._unpublishedBuffer.has(id); @@ -678,9 +674,18 @@ _.extend(OplogObserveDriver.prototype, { // If self has a buffer and limit, the new fetched result will be // limited correctly as the query has sort specifier. newResults.forEach(function (doc, id) { - // "true" here means to throw if we think this doc doesn't match the - // selector. - self._handleDoc(id, doc, true); + self._handleDoc(id, doc); + }); + + // Sanity-check that everything we tried to put into _published ended up + // there. 
+ // XXX if this is slow, remove it later + if (self._published.size() !== newResults.size()) { + throw Error("failed to copy newResults into _published!"); + } + self._published.forEach(function (doc, id) { + if (!newResults.has(id)) + throw Error("_published has a doc that newResults doesn't; " + id); }); // Finally, replace the buffer From 078c00563ab212e2454b7c64b45378d9636b4413 Mon Sep 17 00:00:00 2001 From: David Glasser Date: Wed, 26 Feb 2014 15:54:37 -0800 Subject: [PATCH 80/93] Refactor an assertion in _removePublished --- .../mongo-livedata/oplog_observe_driver.js | 48 ++++++++++++++----- 1 file changed, 37 insertions(+), 11 deletions(-) diff --git a/packages/mongo-livedata/oplog_observe_driver.js b/packages/mongo-livedata/oplog_observe_driver.js index 5fb94ec4b1..e91689002a 100644 --- a/packages/mongo-livedata/oplog_observe_driver.js +++ b/packages/mongo-livedata/oplog_observe_driver.js @@ -175,24 +175,50 @@ _.extend(OplogObserveDriver.prototype, { var self = this; self._published.remove(id); self._multiplexer.removed(id); - if (! self._limit) + if (! self._limit || self._published.size() === self._limit) return; - if (self._published.size() < self._limit) { - // The unpublished buffer is empty iff published contains the whole - // matching set, i.e. number of matching documents is less or equal to the - // queries limit. - if (! self._unpublishedBuffer.size()) { - // Assertion of the statement above - if (! self._safeAppendToBuffer && self._phase !== PHASE.QUERYING) - throw new Error("At this phase, buffer can be empty only if published contains the whole matching set"); - return; - } + if (self._published.size() > self._limit) + throw Error("self._published got too big"); + + // OK, we are publishing less than the limit. Maybe we should look in the + // buffer to find the next element past what we were publishing before. + + if (!self._unpublishedBuffer.empty()) { + // There's something in the buffer; move the first thing in it to + // _published. 
var newDocId = self._unpublishedBuffer.minElementId(); var newDoc = self._unpublishedBuffer.get(newDocId); self._removeBuffered(newDocId); self._addPublished(newDocId, newDoc); + return; } + + // There's nothing in the buffer. This could mean one of a few things. + + // (a) We could be in the middle of re-running the query (specifically, we + // could be in _publishNewResults). In that case, _unpublishedBuffer is + // empty because we clear it at the beginning of _publishNewResults. In this + // case, our caller already knows the entire answer to the query and we + // don't need to do anything fancy here. Just return. + if (self._phase === PHASE.QUERYING) + return; + + // (b) We're pretty confident that the union of _published and + // _unpublishedBuffer contain all documents that match selector. Because + // _unpublishedBuffer is empty, that means we're confident that _published + // contains all documents that match selector. So we have nothing to do. + if (self._safeAppendToBuffer) + return; + + // (c) Maybe there are other documents out there that should be in our + // buffer. But in that case, when we emptied _unpublishedBuffer in + // _removeBuffered, we should have called _needToPollQuery, which will + // either put something in _unpublishedBuffer or set _safeAppendToBuffer (or + // both), and it will put us in QUERYING for that whole time. So in fact, we + // shouldnt' be able to get here. 
+ + throw new Error("Buffer inexplicably empty"); }, _changePublished: function (id, oldDoc, newDoc) { var self = this; From 15a4cac27c6bf41f24fecec0d4f764a56135009c Mon Sep 17 00:00:00 2001 From: David Glasser Date: Wed, 26 Feb 2014 15:58:08 -0800 Subject: [PATCH 81/93] typo --- packages/mongo-livedata/oplog_observe_driver.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/mongo-livedata/oplog_observe_driver.js b/packages/mongo-livedata/oplog_observe_driver.js index e91689002a..b7f1c26db6 100644 --- a/packages/mongo-livedata/oplog_observe_driver.js +++ b/packages/mongo-livedata/oplog_observe_driver.js @@ -216,7 +216,7 @@ _.extend(OplogObserveDriver.prototype, { // _removeBuffered, we should have called _needToPollQuery, which will // either put something in _unpublishedBuffer or set _safeAppendToBuffer (or // both), and it will put us in QUERYING for that whole time. So in fact, we - // shouldnt' be able to get here. + // shouldn't be able to get here. throw new Error("Buffer inexplicably empty"); }, From af08231656987d32699d9dfb1305930e373224c7 Mon Sep 17 00:00:00 2001 From: David Glasser Date: Wed, 26 Feb 2014 16:32:19 -0800 Subject: [PATCH 82/93] When switching to QUERYING, stop what you're doing We don't think there was a bug before but this should make things a little more firm. --- .../mongo-livedata/oplog_observe_driver.js | 80 ++++++++++++------- 1 file changed, 53 insertions(+), 27 deletions(-) diff --git a/packages/mongo-livedata/oplog_observe_driver.js b/packages/mongo-livedata/oplog_observe_driver.js index b7f1c26db6..701c9bde84 100644 --- a/packages/mongo-livedata/oplog_observe_driver.js +++ b/packages/mongo-livedata/oplog_observe_driver.js @@ -7,6 +7,20 @@ var PHASE = { STEADY: "STEADY" }; +// Exception thrown by _needToPollQuery which unrolls the stack up to the +// enclosing call to finishIfNeedToPollQuery. 
+var SwitchedToQuery = function () {}; +var finishIfNeedToPollQuery = function (f) { + return function () { + try { + f.apply(this, arguments); + } catch (e) { + if (!(e instanceof SwitchedToQuery)) + throw e; + } + }; +}; + // OplogObserveDriver is an alternative to PollingObserveDriver which follows // the Mongo operation log instead of just re-polling the query. It obeys the // same simple interface: constructing it starts sending observeChanges @@ -84,7 +98,7 @@ OplogObserveDriver = function (options) { forEachTrigger(self._cursorDescription, function (trigger) { self._stopHandles.push(self._mongoHandle._oplogHandle.onOplogEntry( trigger, function (notification) { - Meteor._noYieldsAllowed(function () { + Meteor._noYieldsAllowed(finishIfNeedToPollQuery(function () { var op = notification.op; if (notification.dropCollection) { // Note: this call is not allowed to block on anything (especially @@ -98,7 +112,7 @@ OplogObserveDriver = function (options) { else self._handleOplogEntrySteadyOrFetching(op); } - }); + })); } )); }); @@ -134,9 +148,9 @@ OplogObserveDriver = function (options) { // Give _observeChanges a chance to add the new ObserveHandle to our // multiplexer, so that the added calls get streamed. - Meteor.defer(function () { + Meteor.defer(finishIfNeedToPollQuery(function () { self._runInitialQuery(); - }); + })); }; _.extend(OplogObserveDriver.prototype, { @@ -398,7 +412,7 @@ _.extend(OplogObserveDriver.prototype, { self._registerPhaseChange(PHASE.FETCHING); // Defer, because nothing called from the oplog entry handler may yield, but // fetch() yields. 
- Meteor.defer(function () { + Meteor.defer(finishIfNeedToPollQuery(function () { while (!self._stopped && !self._needToFetch.empty()) { if (self._phase !== PHASE.FETCHING) throw new Error("phase in fetchModifiedDocuments: " + self._phase); @@ -415,36 +429,40 @@ _.extend(OplogObserveDriver.prototype, { waiting++; self._mongoHandle._docFetcher.fetch( self._cursorDescription.collectionName, id, cacheKey, - function (err, doc) { - if (err) { - if (!anyError) - anyError = err; - } else if (!self._stopped && self._phase === PHASE.FETCHING - && self._fetchGeneration === thisGeneration) { - // We re-check the generation in case we've had an explicit - // _pollQuery call which should effectively cancel this round of - // fetches. (_pollQuery increments the generation.) - self._handleDoc(id, doc); + finishIfNeedToPollQuery(function (err, doc) { + try { + if (err) { + if (!anyError) + anyError = err; + } else if (!self._stopped && self._phase === PHASE.FETCHING + && self._fetchGeneration === thisGeneration) { + // We re-check the generation in case we've had an explicit + // _pollQuery call (eg, in another fiber) which should + // effectively cancel this round of fetches. (_pollQuery + // increments the generation.) + self._handleDoc(id, doc); + } + } finally { + waiting--; + // Because fetch() never calls its callback synchronously, this + // is safe (ie, we won't call fut.return() before the forEach is + // done). + if (waiting === 0) + fut.return(); } - waiting--; - // Because fetch() never calls its callback synchronously, this is - // safe (ie, we won't call fut.return() before the forEach is - // done). - if (waiting === 0) - fut.return(); - }); + })); }); fut.wait(); // XXX do this even if we've switched to PHASE.QUERYING? if (anyError) throw anyError; - // Exit now if we've had a _pollQuery call. + // Exit now if we've had a _pollQuery call (here or in another fiber). 
if (self._phase === PHASE.QUERYING) return; self._currentlyFetching = null; } self._beSteady(); - }); + })); }, _beSteady: function () { var self = this; @@ -579,7 +597,8 @@ _.extend(OplogObserveDriver.prototype, { ++self._fetchGeneration; // ignore any in-flight fetches self._registerPhaseChange(PHASE.QUERYING); - // Defer so that we don't block. + // Defer so that we don't block. We don't need finishIfNeedToPollQuery here + // because SwitchedToQuery is not called in QUERYING mode. Meteor.defer(function () { // subtle note: _published does not contain _id fields, but newResults // does @@ -604,7 +623,14 @@ _.extend(OplogObserveDriver.prototype, { // ensures that we will query again later. // // This function may not block, because it is called from an oplog entry - // handler. + // handler. However, if we were not already in the QUERYING phase, it throws + // an exception that is caught by the closest surrounding + // finishIfNeedToPollQuery call; this ensures that we don't continue running + // close that was designed for another phase inside PHASE.QUERYING. + // + // (It's also necessary whenever logic in this file yields to check that other + // phases haven't put us into QUERYING mode, though; eg, + // _fetchModifiedDocuments does this.) _needToPollQuery: function () { var self = this; if (self._stopped) @@ -614,7 +640,7 @@ _.extend(OplogObserveDriver.prototype, { // pausing FETCHING). if (self._phase !== PHASE.QUERYING) { self._pollQuery(); - return; + throw new SwitchedToQuery; } // We're currently in QUERYING. 
Set a flag to ensure that we run another From 7948c0f38b65cab02fae0f3369449d30d74e1b05 Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Wed, 26 Feb 2014 17:09:16 -0800 Subject: [PATCH 83/93] Test for preserving fields needed for sort --- .../mongo-livedata/mongo_livedata_tests.js | 70 +++++++++++++++++-- 1 file changed, 63 insertions(+), 7 deletions(-) diff --git a/packages/mongo-livedata/mongo_livedata_tests.js b/packages/mongo-livedata/mongo_livedata_tests.js index 01c5b54b52..f3ee3dcac6 100644 --- a/packages/mongo-livedata/mongo_livedata_tests.js +++ b/packages/mongo-livedata/mongo_livedata_tests.js @@ -746,6 +746,13 @@ if (Meteor.isServer) { x++; }); + // compares arrays a and b w/o looking at order + var setsEqual = function (a, b) { + a = _.map(a, EJSON.stringify); + b = _.map(b, EJSON.stringify); + return _.isEmpty(_.difference(a, b)) && _.isEmpty(_.difference(b, a)); + }; + // This test mainly checks the correctness of oplog code dealing with limited // queries. Compitablity with poll-diff is added as well. 
Tinytest.addAsync("mongo-livedata - observe sorted, limited " + idGeneration, function (test, onComplete) { @@ -786,13 +793,6 @@ if (Meteor.isServer) { coll.update(sel, mod, opt); }); }; - // compares arrays a and b w/o looking at order - var setsEqual = function (a, b) { - a = _.map(a, EJSON.stringify); - b = _.map(b, EJSON.stringify); - return _.isEmpty(_.difference(a, b)) && _.isEmpty(_.difference(b, a)); - }; - // tests '_id' subfields for all documents in oplog buffer var testOplogBufferIds = function (ids) { var bufferIds = []; @@ -981,6 +981,62 @@ if (Meteor.isServer) { o.handle.stop(); onComplete(); }); + + Tinytest.addAsync("mongo-livedata - observe sorted, limited, sort fields " + idGeneration, function (test, onComplete) { + var run = test.runId(); + var coll = new Meteor.Collection("observeLimit-"+run, collectionOptions); + + var observer = function () { + var state = {}; + var output = []; + var callbacks = { + changed: function (newDoc) { + output.push({changed: newDoc._id}); + state[newDoc._id] = newDoc; + }, + added: function (newDoc) { + output.push({added: newDoc._id}); + state[newDoc._id] = newDoc; + }, + removed: function (oldDoc) { + output.push({removed: oldDoc._id}); + delete state[oldDoc._id]; + } + }; + var handle = coll.find({}, {sort: {x: 1}, + limit: 2, + fields: {y: 1}}).observe(callbacks); + + return {output: output, handle: handle, state: state}; + }; + var clearOutput = function (o) { o.output.splice(0, o.output.length); }; + var ins = function (doc) { + var id; runInFence(function () { id = coll.insert(doc); }); + return id; + }; + + var o = observer(); + + var docId1 = ins({ x: 1, y: 1222 }); + var docId2 = ins({ x: 5, y: 5222 }); + + test.length(o.output, 2); + test.equal(o.output, [{added: docId1}, {added: docId2}]); + clearOutput(o); + + var docId3 = ins({ x: 7, y: 7222 }); + var docId4 = ins({ x: -1, y: -1222 }); + + // Becomes [docId4 docId1 | docId2 docId3] + test.length(o.output, 2); + test.isTrue(setsEqual(o.output, 
[{added: docId4}, {removed: docId2}])); + + test.equal(_.size(o.state), 2); + test.equal(o.state[docId4], {_id: docId4, y: -1222}); + test.equal(o.state[docId1], {_id: docId1, y: 1222}); + + onComplete(); + }); } From b3548e54378433ff67e5dbf2b7cfc922290bda1b Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Wed, 26 Feb 2014 18:44:24 -0800 Subject: [PATCH 84/93] Combine sorter spec with projection --- packages/minimongo/package.js | 3 ++- packages/minimongo/selector_projection.js | 20 ++++++++++++-------- packages/minimongo/sort.js | 2 +- packages/minimongo/sorter_projection.js | 19 +++++++++++++++++++ 4 files changed, 34 insertions(+), 10 deletions(-) create mode 100644 packages/minimongo/sorter_projection.js diff --git a/packages/minimongo/package.js b/packages/minimongo/package.js index 58783070fb..4dca1e8433 100644 --- a/packages/minimongo/package.js +++ b/packages/minimongo/package.js @@ -28,7 +28,8 @@ Package.on_use(function (api) { // Functionality used only by oplog tailing on the server side api.add_files([ 'selector_projection.js', - 'selector_modifier.js' + 'selector_modifier.js', + 'sorter_projection.js' ], 'server'); }); diff --git a/packages/minimongo/selector_projection.js b/packages/minimongo/selector_projection.js index 73a2727a8e..e7d7135587 100644 --- a/packages/minimongo/selector_projection.js +++ b/packages/minimongo/selector_projection.js @@ -12,12 +12,23 @@ Minimongo.Matcher.prototype.combineIntoProjection = function (projection) { if (_.contains(selectorPaths, '')) return {}; + return combineImportantPathsIntoProjection(selectorPaths, projection); +}; + +Minimongo.Matcher.prototype._getPathsElidingNumericKeys = function () { + var self = this; + return _.map(self._getPaths(), function (path) { + return _.reject(path.split('.'), isNumericKey).join('.'); + }); +}; + +combineImportantPathsIntoProjection = function (paths, projection) { var prjDetails = projectionDetails(projection); var tree = prjDetails.tree; var mergedProjection = {}; // merge 
the paths to include - tree = pathsToTree(selectorPaths, + tree = pathsToTree(paths, function (path) { return true; }, function (node, path, fullPath) { return true; }, tree); @@ -40,13 +51,6 @@ Minimongo.Matcher.prototype.combineIntoProjection = function (projection) { } }; -Minimongo.Matcher.prototype._getPathsElidingNumericKeys = function () { - var self = this; - return _.map(self._getPaths(), function (path) { - return _.reject(path.split('.'), isNumericKey).join('.'); - }); -}; - // Returns a set of key paths similar to // { 'foo.bar': 1, 'a.b.c': 1 } var treeToPaths = function (tree, prefix) { diff --git a/packages/minimongo/sort.js b/packages/minimongo/sort.js index bc74bd327f..e55e2af86f 100644 --- a/packages/minimongo/sort.js +++ b/packages/minimongo/sort.js @@ -38,7 +38,7 @@ Sorter = function (spec) { }); } } else { - throw Error("Bad sort specification: ", JSON.stringify(spec)); + throw Error("Bad sort specification: " + JSON.stringify(spec)); } // reduceValue takes in all the possible values for the sort key along various diff --git a/packages/minimongo/sorter_projection.js b/packages/minimongo/sorter_projection.js new file mode 100644 index 0000000000..c9cad84862 --- /dev/null +++ b/packages/minimongo/sorter_projection.js @@ -0,0 +1,19 @@ +Sorter.combineSpecIntoProjection = function (spec, projection) { + var self = this; + var specPaths = getSortSpecPaths(spec); + + return combineImportantPathsIntoProjection(specPaths, projection); +}; + +var getSortSpecPaths = function (spec) { + if (_.isArray(spec)) + return _.map(spec, function (fieldSpec) { + return _.isArray(fieldSpec) ? 
fieldSpec[0] : fieldSpec; + }); + + if (_.isObject(spec)) + return _.keys(spec); + + throw new Error("Bad sort specification: " + JSON.stringify(spec)); +}; + From 85ba085c78c3f3b185af063642a459bcab9c9007 Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Wed, 26 Feb 2014 18:45:25 -0800 Subject: [PATCH 85/93] Oplog code takes sorter fields into shared projection --- packages/mongo-livedata/oplog_observe_driver.js | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/packages/mongo-livedata/oplog_observe_driver.js b/packages/mongo-livedata/oplog_observe_driver.js index 701c9bde84..7834a4cbb8 100644 --- a/packages/mongo-livedata/oplog_observe_driver.js +++ b/packages/mongo-livedata/oplog_observe_driver.js @@ -38,6 +38,8 @@ OplogObserveDriver = function (options) { throw Error("OplogObserveDriver only supports unordered observeChanges"); } + var sortSpec = options.cursorDescription.options.sort; + if (options.cursorDescription.options.limit) { // There are several properties ordered driver implements: // - _limit is a positive number @@ -50,7 +52,7 @@ OplogObserveDriver = function (options) { // We don't support $near and other geo-queries so it's OK to initialize the // comparator only once in the constructor. 
- var sorter = new Minimongo.Sorter(options.cursorDescription.options.sort); + var sorter = new Minimongo.Sorter(sortSpec); var comparator = sorter.getComparator(); var heapOptions = { IdMap: LocalCollection._IdMap }; self._limit = self._cursorDescription.options.limit; @@ -85,6 +87,8 @@ OplogObserveDriver = function (options) { // Projection function, result of combining important fields for selector and // existing fields projection self._sharedProjection = self._matcher.combineIntoProjection(projection); + if (sortSpec) + self._sharedProjection = Minimongo.Sorter.combineSpecIntoProjection(sortSpec, self._sharedProjection); self._sharedProjectionFn = LocalCollection._compileProjection( self._sharedProjection); From a96243e746682559d8b03597108ebdaf4e3c43f1 Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Fri, 28 Feb 2014 14:01:19 -0800 Subject: [PATCH 86/93] Sorter.combineIntoProjection is an instance method Reuse existing sort spec parsing, store sort spec explicitly with key-paths. Elide numeric key in Sorter._getPaths --- packages/minimongo/selector_projection.js | 6 +++--- packages/minimongo/sort.js | 10 +++++++++- packages/minimongo/sorter_projection.js | 17 ++--------------- 3 files changed, 14 insertions(+), 19 deletions(-) diff --git a/packages/minimongo/selector_projection.js b/packages/minimongo/selector_projection.js index e7d7135587..5f6e101b5b 100644 --- a/packages/minimongo/selector_projection.js +++ b/packages/minimongo/selector_projection.js @@ -3,7 +3,7 @@ // @returns Object - projection object (same as fields option of mongo cursor) Minimongo.Matcher.prototype.combineIntoProjection = function (projection) { var self = this; - var selectorPaths = self._getPathsElidingNumericKeys(); + var selectorPaths = Minimongo._pathsElidingNumericKeys(self._getPaths()); // Special case for $where operator in the selector - projection should depend // on all fields of the document. 
getSelectorPaths returns a list of paths @@ -15,9 +15,9 @@ Minimongo.Matcher.prototype.combineIntoProjection = function (projection) { return combineImportantPathsIntoProjection(selectorPaths, projection); }; -Minimongo.Matcher.prototype._getPathsElidingNumericKeys = function () { +Minimongo._pathsElidingNumericKeys = function (paths) { var self = this; - return _.map(self._getPaths(), function (path) { + return _.map(paths, function (path) { return _.reject(path.split('.'), isNumericKey).join('.'); }); }; diff --git a/packages/minimongo/sort.js b/packages/minimongo/sort.js index e55e2af86f..879d0e48c4 100644 --- a/packages/minimongo/sort.js +++ b/packages/minimongo/sort.js @@ -14,17 +14,19 @@ Sorter = function (spec) { var self = this; - var sortSpecParts = []; + var sortSpecParts = self._sortSpecParts = []; if (spec instanceof Array) { for (var i = 0; i < spec.length; i++) { if (typeof spec[i] === "string") { sortSpecParts.push({ + path: spec[i], lookup: makeLookupFunction(spec[i]), ascending: true }); } else { sortSpecParts.push({ + path: spec[i][0], lookup: makeLookupFunction(spec[i][0]), ascending: spec[i][1] !== "desc" }); @@ -33,6 +35,7 @@ Sorter = function (spec) { } else if (typeof spec === "object") { for (var key in spec) { sortSpecParts.push({ + path: key, lookup: makeLookupFunction(key), ascending: spec[key] >= 0 }); @@ -118,6 +121,11 @@ Sorter.prototype.getComparator = function (options) { }]); }; +Sorter.prototype._getPaths = function () { + var self = this; + return _.pluck(self._sortSpecParts, 'path'); +}; + Minimongo.Sorter = Sorter; // Given an array of comparators diff --git a/packages/minimongo/sorter_projection.js b/packages/minimongo/sorter_projection.js index c9cad84862..f02f388f7e 100644 --- a/packages/minimongo/sorter_projection.js +++ b/packages/minimongo/sorter_projection.js @@ -1,19 +1,6 @@ -Sorter.combineSpecIntoProjection = function (spec, projection) { +Sorter.prototype.combineIntoProjection = function (projection) { var self = this; 
- var specPaths = getSortSpecPaths(spec); - + var specPaths = Minimongo._pathsElidingNumericKeys(self._getPaths()); return combineImportantPathsIntoProjection(specPaths, projection); }; -var getSortSpecPaths = function (spec) { - if (_.isArray(spec)) - return _.map(spec, function (fieldSpec) { - return _.isArray(fieldSpec) ? fieldSpec[0] : fieldSpec; - }); - - if (_.isObject(spec)) - return _.keys(spec); - - throw new Error("Bad sort specification: " + JSON.stringify(spec)); -}; - From c14e41a0d7440fd4f41f3b2201d8b5b6900d56b1 Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Fri, 28 Feb 2014 14:22:30 -0800 Subject: [PATCH 87/93] Simple tests on Sorter.combineIntoProjection --- packages/minimongo/minimongo_server_tests.js | 25 ++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/packages/minimongo/minimongo_server_tests.js b/packages/minimongo/minimongo_server_tests.js index efd7808b4c..127e30a8e6 100644 --- a/packages/minimongo/minimongo_server_tests.js +++ b/packages/minimongo/minimongo_server_tests.js @@ -332,6 +332,31 @@ Tinytest.add("minimongo - selector and projection combination", function (test) }); +Tinytest.add("minimongo - sorter and projection combination", function (test) { + function testSorterProjectionComb (sortSpec, proj, expected, desc) { + var sorter = new Minimongo.Sorter(sortSpec); + test.equal(sorter.combineIntoProjection(proj), expected, desc); + } + + // Test with inclusive projection + testSorterProjectionComb({ a: 1, b: 1 }, { b: 1, c: 1, d: 1 }, { a: true, b: true, c: true, d: true }, "simplest incl"); + testSorterProjectionComb({ a: 1, b: -1 }, { b: 1, c: 1, d: 1 }, { a: true, b: true, c: true, d: true }, "simplest incl"); + testSorterProjectionComb({ 'a.c': 1 }, { b: 1 }, { 'a.c': true, b: true }, "dot path incl"); + testSorterProjectionComb({ 'a.1.c': 1 }, { b: 1 }, { 'a.c': true, b: true }, "dot num path incl"); + testSorterProjectionComb({ 'a.1.c': 1 }, { b: 1, a: 1 }, { a: true, b: true }, "dot num path incl overlap"); + 
testSorterProjectionComb({ 'a.1.c': 1, 'a.2.b': -1 }, { b: 1 }, { 'a.c': true, 'a.b': true, b: true }, "dot num path incl"); + testSorterProjectionComb({ 'a.1.c': 1, 'a.2.b': -1 }, {}, {}, "dot num path with empty incl"); + + // Test with exclusive projection + testSorterProjectionComb({ a: 1, b: 1 }, { b: 0, c: 0, d: 0 }, { c: false, d: false }, "simplest excl"); + testSorterProjectionComb({ a: 1, b: -1 }, { b: 0, c: 0, d: 0 }, { c: false, d: false }, "simplest excl"); + testSorterProjectionComb({ 'a.c': 1 }, { b: 0 }, { b: false }, "dot path excl"); + testSorterProjectionComb({ 'a.1.c': 1 }, { b: 0 }, { b: false }, "dot num path excl"); + testSorterProjectionComb({ 'a.1.c': 1 }, { b: 0, a: 0 }, { b: false }, "dot num path excl overlap"); + testSorterProjectionComb({ 'a.1.c': 1, 'a.2.b': -1 }, { b: 0 }, { b: false }, "dot num path excl"); +}); + + (function () { // TODO: Tests for "can selector become true by modifier" are incomplete, // absent or test the functionality of "not ideal" implementation (test checks From 571ba01f16cb554e04bd54c538b591ad374e34b9 Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Fri, 28 Feb 2014 14:25:33 -0800 Subject: [PATCH 88/93] Use new sorter + projection api in oplog code --- packages/mongo-livedata/oplog_observe_driver.js | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/mongo-livedata/oplog_observe_driver.js b/packages/mongo-livedata/oplog_observe_driver.js index 7834a4cbb8..3a12d5a5e1 100644 --- a/packages/mongo-livedata/oplog_observe_driver.js +++ b/packages/mongo-livedata/oplog_observe_driver.js @@ -39,6 +39,8 @@ OplogObserveDriver = function (options) { } var sortSpec = options.cursorDescription.options.sort; + var sorter = sortSpec && new Minimongo.Sorter(sortSpec); + var comparator = sorter && sorter.getComparator(); if (options.cursorDescription.options.limit) { // There are several properties ordered driver implements: @@ -52,8 +54,6 @@ OplogObserveDriver = function (options) { // We don't 
support $near and other geo-queries so it's OK to initialize the // comparator only once in the constructor. - var sorter = new Minimongo.Sorter(sortSpec); - var comparator = sorter.getComparator(); var heapOptions = { IdMap: LocalCollection._IdMap }; self._limit = self._cursorDescription.options.limit; self._comparator = comparator; @@ -87,8 +87,8 @@ OplogObserveDriver = function (options) { // Projection function, result of combining important fields for selector and // existing fields projection self._sharedProjection = self._matcher.combineIntoProjection(projection); - if (sortSpec) - self._sharedProjection = Minimongo.Sorter.combineSpecIntoProjection(sortSpec, self._sharedProjection); + if (sorter) + self._sharedProjection = sorter.combineIntoProjection(self._sharedProjection); self._sharedProjectionFn = LocalCollection._compileProjection( self._sharedProjection); From 29ebdbfeae0fef98ceb0e563da11b66384eaced1 Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Fri, 28 Feb 2014 14:34:44 -0800 Subject: [PATCH 89/93] Sophisticate the tests for oplog + limits + sorter --- .../mongo-livedata/mongo_livedata_tests.js | 20 +++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/packages/mongo-livedata/mongo_livedata_tests.js b/packages/mongo-livedata/mongo_livedata_tests.js index f3ee3dcac6..ea0349b1d8 100644 --- a/packages/mongo-livedata/mongo_livedata_tests.js +++ b/packages/mongo-livedata/mongo_livedata_tests.js @@ -1014,6 +1014,9 @@ if (Meteor.isServer) { var id; runInFence(function () { id = coll.insert(doc); }); return id; }; + var rem = function (id) { + runInFence(function () { coll.remove(id); }); + }; var o = observer(); @@ -1025,6 +1028,8 @@ if (Meteor.isServer) { clearOutput(o); var docId3 = ins({ x: 7, y: 7222 }); + test.length(o.output, 0); + var docId4 = ins({ x: -1, y: -1222 }); // Becomes [docId4 docId1 | docId2 docId3] @@ -1034,6 +1039,21 @@ if (Meteor.isServer) { test.equal(_.size(o.state), 2); test.equal(o.state[docId4], {_id: docId4, y: 
-1222}); test.equal(o.state[docId1], {_id: docId1, y: 1222}); + clearOutput(o); + + rem(docId2); + // Becomes [docId4 docId1 | docId3] + test.length(o.output, 0); + + rem(docId4); + // Becomes [docId1 docId3] + test.length(o.output, 2); + test.isTrue(setsEqual(o.output, [{added: docId3}, {removed: docId4}])); + + test.equal(_.size(o.state), 2); + test.equal(o.state[docId3], {_id: docId3, y: 7222}); + test.equal(o.state[docId1], {_id: docId1, y: 1222}); + clearOutput(o); onComplete(); }); From c615ec11efef3dd78c29b31339284fa48fc989fb Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Fri, 28 Feb 2014 16:51:09 -0800 Subject: [PATCH 90/93] Move the comment --- packages/mongo-livedata/oplog_observe_driver.js | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/mongo-livedata/oplog_observe_driver.js b/packages/mongo-livedata/oplog_observe_driver.js index 3a12d5a5e1..1743902156 100644 --- a/packages/mongo-livedata/oplog_observe_driver.js +++ b/packages/mongo-livedata/oplog_observe_driver.js @@ -40,6 +40,8 @@ OplogObserveDriver = function (options) { var sortSpec = options.cursorDescription.options.sort; var sorter = sortSpec && new Minimongo.Sorter(sortSpec); + // We don't support $near and other geo-queries so it's OK to initialize the + // comparator only once in the constructor. var comparator = sorter && sorter.getComparator(); if (options.cursorDescription.options.limit) { @@ -52,8 +54,6 @@ OplogObserveDriver = function (options) { // into published set. // - _published - Min Heap (also implements IdMap methods) - // We don't support $near and other geo-queries so it's OK to initialize the - // comparator only once in the constructor. 
var heapOptions = { IdMap: LocalCollection._IdMap }; self._limit = self._cursorDescription.options.limit; self._comparator = comparator; From 1fef6e5662c556bf6a294465b6583aa231060817 Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Fri, 28 Feb 2014 18:18:09 -0800 Subject: [PATCH 91/93] Factor out published/buffer renewal into _runQuery Fixes the bug when the buffer is empty on the first run if the size of query is more than limit*2 --- .../mongo-livedata/oplog_observe_driver.js | 52 +++++++++---------- 1 file changed, 24 insertions(+), 28 deletions(-) diff --git a/packages/mongo-livedata/oplog_observe_driver.js b/packages/mongo-livedata/oplog_observe_driver.js index 1743902156..7512fe74f4 100644 --- a/packages/mongo-livedata/oplog_observe_driver.js +++ b/packages/mongo-livedata/oplog_observe_driver.js @@ -552,19 +552,7 @@ _.extend(OplogObserveDriver.prototype, { if (self._stopped) throw new Error("oplog stopped surprisingly early"); - // Query 2x documents as the half excluded from the original query will go - // into unpublished buffer to reduce additional Mongo lookups in cases when - // documents are removed from the published set and need a replacement. - // XXX needs more thought on non-zero skip - // XXX "2" here is a "magic number" - var initialCursor = self._cursorForQuery({ limit: self._limit * 2 }); - var fetchedDocsCount = 0; - initialCursor.forEach(function (initialDoc) { - self._addMatching(initialDoc); - fetchedDocsCount++; - }); - - self._safeAppendToBuffer = fetchedDocsCount < self._limit * 2; + self._runQuery(); if (self._stopped) throw new Error("oplog stopped quite early"); @@ -604,25 +592,33 @@ _.extend(OplogObserveDriver.prototype, { // Defer so that we don't block. We don't need finishIfNeedToPollQuery here // because SwitchedToQuery is not called in QUERYING mode. 
Meteor.defer(function () { - // subtle note: _published does not contain _id fields, but newResults - // does - var newResults = new LocalCollection._IdMap; - var newBuffer = new LocalCollection._IdMap; - // XXX 2 is a "magic number" meaning there is an extra chunk of docs for - // buffer if such is needed. - var cursor = self._cursorForQuery({ limit: self._limit * 2 }); - cursor.forEach(function (doc, i) { - if (!self._limit || i < self._limit) - newResults.set(doc._id, doc); - else - newBuffer.set(doc._id, doc); - }); - - self._publishNewResults(newResults, newBuffer); + self._runQuery(); self._doneQuerying(); }); }, + _runQuery: function () { + var self = this; + var newResults = new LocalCollection._IdMap; + var newBuffer = new LocalCollection._IdMap; + + // Query 2x documents as the half excluded from the original query will go + // into unpublished buffer to reduce additional Mongo lookups in cases when + // documents are removed from the published set and need a replacement. + // XXX needs more thought on non-zero skip + // XXX 2 is a "magic number" meaning there is an extra chunk of docs for + // buffer if such is needed. + var cursor = self._cursorForQuery({ limit: self._limit * 2 }); + cursor.forEach(function (doc, i) { + if (!self._limit || i < self._limit) + newResults.set(doc._id, doc); + else + newBuffer.set(doc._id, doc); + }); + + self._publishNewResults(newResults, newBuffer); + }, + // Transitions to QUERYING and runs another query, or (if already in QUERYING) // ensures that we will query again later. 
// From 6c595e7b8d206bad7b4b3f482be5f1023d43904c Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Wed, 26 Feb 2014 21:53:07 -0800 Subject: [PATCH 92/93] A remove a redundant clone --- packages/mongo-livedata/oplog_observe_driver.js | 2 -- 1 file changed, 2 deletions(-) diff --git a/packages/mongo-livedata/oplog_observe_driver.js b/packages/mongo-livedata/oplog_observe_driver.js index 7512fe74f4..5c83afdd6e 100644 --- a/packages/mongo-livedata/oplog_observe_driver.js +++ b/packages/mongo-livedata/oplog_observe_driver.js @@ -333,8 +333,6 @@ _.extend(OplogObserveDriver.prototype, { }, _handleDoc: function (id, newDoc) { var self = this; - newDoc = _.clone(newDoc); - var matchesNow = newDoc && self._matcher.documentMatches(newDoc).result; var publishedBefore = self._published.has(id); From e4c5f6ac97580fbb269c63549efd23a8e9b12447 Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Sat, 1 Mar 2014 01:13:00 -0800 Subject: [PATCH 93/93] Oplog limits test: big initial set, repolls, safeToAppend flag --- .../mongo-livedata/mongo_livedata_tests.js | 107 ++++++++++++++++++ 1 file changed, 107 insertions(+) diff --git a/packages/mongo-livedata/mongo_livedata_tests.js b/packages/mongo-livedata/mongo_livedata_tests.js index ea0349b1d8..f155b85fc5 100644 --- a/packages/mongo-livedata/mongo_livedata_tests.js +++ b/packages/mongo-livedata/mongo_livedata_tests.js @@ -1055,6 +1055,113 @@ if (Meteor.isServer) { test.equal(o.state[docId1], {_id: docId1, y: 1222}); clearOutput(o); + onComplete(); + }); + + Tinytest.addAsync("mongo-livedata - observe sorted, limited, big initial set" + idGeneration, function (test, onComplete) { + var run = test.runId(); + var coll = new Meteor.Collection("observeLimit-"+run, collectionOptions); + + var observer = function () { + var state = {}; + var output = []; + var callbacks = { + changed: function (newDoc) { + output.push({changed: newDoc._id}); + state[newDoc._id] = newDoc; + }, + added: function (newDoc) { + output.push({added: newDoc._id}); + 
state[newDoc._id] = newDoc; + }, + removed: function (oldDoc) { + output.push({removed: oldDoc._id}); + delete state[oldDoc._id]; + } + }; + var handle = coll.find({}, {sort: {x: 1, y: 1}, limit: 3}) + .observe(callbacks); + + return {output: output, handle: handle, state: state}; + }; + var clearOutput = function (o) { o.output.splice(0, o.output.length); }; + var ins = function (doc) { + var id; runInFence(function () { id = coll.insert(doc); }); + return id; + }; + var rem = function (id) { + runInFence(function () { coll.remove(id); }); + }; + // tests '_id' subfields for all documents in oplog buffer + var testOplogBufferIds = function (ids) { + var bufferIds = []; + o.handle._multiplexer._observeDriver._unpublishedBuffer.forEach(function (x, id) { + bufferIds.push(id); + }); + + test.isTrue(setsEqual(ids, bufferIds), "expected: " + ids + "; got: " + bufferIds); + }; + var testSafeAppendToBufferFlag = function (expected) { + if (expected) + test.isTrue(o.handle._multiplexer._observeDriver._safeAppendToBuffer); + else + test.isFalse(o.handle._multiplexer._observeDriver._safeAppendToBuffer); + }; + + var ids = {}; + _.each([2, 4, 1, 3, 5, 5, 9, 1, 3, 2, 5], function (x, i) { + ids[i] = ins({ x: x, y: i }); + }); + + var o = observer(); + var usesOplog = o.handle._multiplexer._observeDriver._usesOplog; + // x: [1 1 2 | 2 3 3] 4 5 5 5 9 + // id: [2 7 0 | 9 3 8] 1 4 5 10 6 + + test.length(o.output, 3); + test.isTrue(setsEqual([{added: ids[2]}, {added: ids[7]}, {added: ids[0]}], o.output)); + usesOplog && testOplogBufferIds([ids[9], ids[3], ids[8]]); + usesOplog && testSafeAppendToBufferFlag(false); + clearOutput(o); + + rem(ids[0]); + // x: [1 1 2 | 3 3] 4 5 5 5 9 + // id: [2 7 9 | 3 8] 1 4 5 10 6 + test.length(o.output, 2); + test.isTrue(setsEqual([{removed: ids[0]}, {added: ids[9]}], o.output)); + usesOplog && testOplogBufferIds([ids[3], ids[8]]); + usesOplog && testSafeAppendToBufferFlag(false); + clearOutput(o); + + rem(ids[7]); + // x: [1 2 3 | 3] 4 5 5 5 9 + 
// id: [2 9 3 | 8] 1 4 5 10 6 + test.length(o.output, 2); + test.isTrue(setsEqual([{removed: ids[7]}, {added: ids[3]}], o.output)); + usesOplog && testOplogBufferIds([ids[8]]); + usesOplog && testSafeAppendToBufferFlag(false); + clearOutput(o); + + rem(ids[3]); + // x: [1 2 3 | 4 5 5] 5 9 + // id: [2 9 8 | 1 4 5] 10 6 + test.length(o.output, 2); + test.isTrue(setsEqual([{removed: ids[3]}, {added: ids[8]}], o.output)); + usesOplog && testOplogBufferIds([ids[1], ids[4], ids[5]]); + usesOplog && testSafeAppendToBufferFlag(false); + clearOutput(o); + + rem({ x: {$lt: 4} }); + // x: [4 5 5 | 5 9] + // id: [1 4 5 | 10 6] + test.length(o.output, 6); + test.isTrue(setsEqual([{removed: ids[2]}, {removed: ids[9]}, {removed: ids[8]}, + {added: ids[5]}, {added: ids[4]}, {added: ids[1]}], o.output)); + usesOplog && testOplogBufferIds([ids[10], ids[6]]); + usesOplog && testSafeAppendToBufferFlag(true); + clearOutput(o); + + onComplete(); }); }