From 4d2097912d3034d416e540ea44cb5af79e374569 Mon Sep 17 00:00:00 2001 From: David Glasser Date: Wed, 14 Aug 2013 13:36:44 -0700 Subject: [PATCH 001/190] Add cursorSupportedByOplogTailing. --- packages/mongo-livedata/mongo_driver.js | 39 +++++++++++++++++++++++++ packages/mongo-livedata/oplog_tests.js | 32 ++++++++++++++++++++ packages/mongo-livedata/package.js | 3 ++ 3 files changed, 74 insertions(+) create mode 100644 packages/mongo-livedata/oplog_tests.js diff --git a/packages/mongo-livedata/mongo_driver.js b/packages/mongo-livedata/mongo_driver.js index aa68b16ba6..e0d9b676bc 100644 --- a/packages/mongo-livedata/mongo_driver.js +++ b/packages/mongo-livedata/mongo_driver.js @@ -1264,9 +1264,48 @@ MongoConnection.prototype._observeChangesTailable = function ( }; }; +// Does our oplog tailing code support this cursor? For now, we are being very +// conservative and allowing only simple queries with simple options. +var cursorSupportedByOplogTailing = function (cursorDescription) { + // First, check the options. + var options = cursorDescription.options; + + // We don't yet implement field filtering for oplog tailing (just because it's + // not implemented, not because there's a deep problem with implementing it). + if (options.fields) return false; + + // This option (which are mostly used for sorted cursors) require us to figure + // out where a given document fits in an order to know if it's included or + // not, and we don't track that information when doing oplog tailing. + if (options.limit || options.skip) return false; + + // For now, we're just dealing with equality queries: no $operators, regexps, + // or $and/$or/$where/etc clauses. We can expand the scope of what we're + // comfortable processing later. + return _.all(cursorDescription.selector, function (value, field) { + // No logical operators like $and. + if (field.substr(0, 1) === '$') + return false; + // We only allow scalars, not sub-documents or $operators or RegExp. 
+ // XXX Date would be easy too, though I doubt anyone is doing equality + // lookups on dates + return typeof value === "string" || + typeof value === "number" || + typeof value === "boolean" || + value === null || + value instanceof Meteor.Collection.ObjectID; + }); +}; + + + // XXX We probably need to find a better way to expose this. Right now // it's only used by tests, but in fact you need it in normal // operation to interact with capped collections (eg, Galaxy uses it). MongoInternals.MongoTimestamp = MongoDB.Timestamp; MongoInternals.Connection = MongoConnection; + +MongoTest = { + cursorSupportedByOplogTailing: cursorSupportedByOplogTailing +}; diff --git a/packages/mongo-livedata/oplog_tests.js b/packages/mongo-livedata/oplog_tests.js new file mode 100644 index 0000000000..d8f27c7727 --- /dev/null +++ b/packages/mongo-livedata/oplog_tests.js @@ -0,0 +1,32 @@ +var OplogCollection = new Meteor.Collection("oplog-" + Random.id()); + +Tinytest.add("mongo-livedata - oplog - cursorSupportedByOplogTailing", function (test) { + var supported = function (expected, selector) { + var cursor = OplogCollection.find(selector); + test.equal( + MongoTest.cursorSupportedByOplogTailing(cursor._cursorDescription), + expected); + }; + + supported(true, "asdf"); + supported(true, 1234); + supported(true, new Meteor.Collection.ObjectID()); + + supported(true, {_id: "asdf"}); + supported(true, {_id: 1234}); + supported(true, {_id: new Meteor.Collection.ObjectID()}); + + supported(true, {foo: "asdf", + bar: 1234, + baz: new Meteor.Collection.ObjectID(), + eeney: true, + miney: false, + moe: null}); + + supported(true, {}); + + supported(false, {$and: [{foo: "asdf"}, {bar: "baz"}]}); + supported(false, {foo: {x: 1}}); + supported(false, {foo: {$gt: 1}}); + supported(false, {foo: [1, 2, 3]}); +}); diff --git a/packages/mongo-livedata/package.js b/packages/mongo-livedata/package.js index 400bf7de31..194ac21192 100644 --- a/packages/mongo-livedata/package.js +++ 
b/packages/mongo-livedata/package.js @@ -35,6 +35,8 @@ Package.on_use(function (api) { // Stuff that should be exposed via a real API, but we haven't yet. api.export('MongoInternals', 'server'); + // For tests only. + api.export('MongoTest', 'server'); api.add_files('mongo_driver.js', 'server'); api.add_files('local_collection_driver.js', ['client', 'server']); @@ -53,4 +55,5 @@ Package.on_test(function (api) { api.add_files('allow_tests.js', ['client', 'server']); api.add_files('collection_tests.js', ['client', 'server']); api.add_files('observe_changes_tests.js', ['client', 'server']); + api.add_files('oplog_tests.js', 'server'); }); From 878dfe9a1f4badae84feefba389ac2c9ac245118 Mon Sep 17 00:00:00 2001 From: David Glasser Date: Wed, 14 Aug 2013 19:08:47 -0700 Subject: [PATCH 002/190] In "meteor run", run mongo in repl-set mode, so that oplog is available. Note: this is pretty slow. Before merging this branch, should cache the fact that replset has been initiated on port N so that on the next run, if the port hasn't changed, we don't need to re-initiate. 
--- packages/mongo-livedata/mongo_driver.js | 1 + tools/mongo_runner.js | 84 +++++++++++++++++----- tools/run.js | 94 ++++++++++++------------- 3 files changed, 116 insertions(+), 63 deletions(-) diff --git a/packages/mongo-livedata/mongo_driver.js b/packages/mongo-livedata/mongo_driver.js index e0d9b676bc..788afe70d7 100644 --- a/packages/mongo-livedata/mongo_driver.js +++ b/packages/mongo-livedata/mongo_driver.js @@ -1305,6 +1305,7 @@ var cursorSupportedByOplogTailing = function (cursorDescription) { MongoInternals.MongoTimestamp = MongoDB.Timestamp; MongoInternals.Connection = MongoConnection; +MongoInternals.NpmModule = MongoDB; MongoTest = { cursorSupportedByOplogTailing: cursorSupportedByOplogTailing diff --git a/tools/mongo_runner.js b/tools/mongo_runner.js index 10745d9141..56a77f574a 100644 --- a/tools/mongo_runner.js +++ b/tools/mongo_runner.js @@ -4,6 +4,7 @@ var path = require("path"); var files = require('./files.js'); var _ = require('underscore'); +var unipackage = require('./unipackage.js'); /** Internal. @@ -24,7 +25,7 @@ var find_mongo_pids = function (app_dir, port, callback) { _.each(stdout.split('\n'), function (ps_line) { // matches mongos we start. 
- var m = ps_line.match(/^\s*(\d+).+mongod .+--port (\d+) --dbpath (.+)(?:\/|\\)\.meteor(?:\/|\\)local(?:\/|\\)db\s*$/); + var m = ps_line.match(/^\s*(\d+).+mongod .+--port (\d+) --dbpath (.+)(?:\/|\\)\.meteor(?:\/|\\)local(?:\/|\\)db --replSet /); if (m && m.length === 4) { var found_pid = parseInt(m[1]); var found_port = parseInt(m[2]); @@ -125,10 +126,10 @@ var find_mongo_and_kill_it_dead = function (port, callback) { }); }; -exports.launch_mongo = function (app_dir, port, launch_callback, on_exit_callback) { +exports.launchMongo = function (options) { var handle = {stop: function (callback) { callback(); } }; - launch_callback = launch_callback || function () {}; - on_exit_callback = on_exit_callback || function () {}; + var onListen = options.onListen || function () {}; + var onExit = options.onExit || function () {}; // If we are passed an external mongo, assume it is launched and never // exits. Matches code in run.js:exports.run. @@ -136,7 +137,7 @@ exports.launch_mongo = function (app_dir, port, launch_callback, on_exit_callbac // Since it is externally managed, asking it to actually stop would be // impolite, so our stoppable handle is a noop if (process.env.MONGO_URL) { - launch_callback(); + onListen(); return handle; } @@ -146,24 +147,54 @@ exports.launch_mongo = function (app_dir, port, launch_callback, on_exit_callbac 'mongod'); // store data in app_dir - var data_path = path.join(app_dir, '.meteor', 'local', 'db'); - files.mkdir_p(data_path, 0755); + var dbPath = path.join(options.context.appDir, '.meteor', 'local', 'db'); + files.mkdir_p(dbPath, 0755); // add .gitignore if needed. - files.add_to_gitignore(path.join(app_dir, '.meteor'), 'local'); + files.add_to_gitignore(path.join(options.context.appDir, '.meteor'), 'local'); - find_mongo_and_kill_it_dead(port, function (err) { + // Load mongo-livedata so we'll be able to talk to it. 
+ var mongoNpmModule = unipackage.load({ + library: options.context.library, + packages: [ 'mongo-livedata' ], + release: options.context.releaseVersion + })['mongo-livedata'].MongoInternals.NpmModule; + + find_mongo_and_kill_it_dead(options.port, function (err) { if (err) { - launch_callback({reason: "Can't kill running mongo: " + err.reason}); - return; + // XXX this was being passed to onListen and ignored before. should do + // something better. + throw {reason: "Can't kill running mongo: " + err.reason}; } + // Delete the "local" database. This removes any memory that this was part + // of a replSet; we will start one from scratch (easier and faster than + // falling over from one on a different port). + // XXX It's slow to do this every time! We should cache the port number on + // disk and only run this the first time or when the port number + // changes. + try { + var dbFiles = fs.readdirSync(dbPath); + } catch (e) { + if (!e || e.code !== 'ENOENT') + throw e; + } + _.each(dbFiles, function (dbFile) { + if (/^local\./.test(dbFile)) + fs.unlinkSync(path.join(dbPath, dbFile)); + }); + + // Start mongod with a dummy replSet and wait for it to listen. 
var child_process = require('child_process'); + var replSetName = 'dummy'; var proc = child_process.spawn(mongod_path, [ + // nb: cli-test.sh and find_mongo_pids assume that the next four arguments + // exist in this order without anything in between '--bind_ip', '127.0.0.1', '--smallfiles', '--nohttpinterface', - '--port', port, - '--dbpath', data_path + '--port', options.port, + '--dbpath', dbPath, + '--replSet', replSetName ]); var callOnExit = function (code, signal) { on_exit_callback(code, signal, stderrOutput); @@ -187,9 +218,30 @@ exports.launch_mongo = function (app_dir, port, launch_callback, on_exit_callbac proc.stdout.setEncoding('utf8'); proc.stdout.on('data', function (data) { - // process.stdout.write(data); - if (/ \[initandlisten\] waiting for connections on port/.test(data)) - launch_callback(); + // process.stdout.write("MONGO SAYS: " + data); + + if (/ \[rsMgr\] replSet PRIMARY/.test(data)) + onListen(); + + if (/ \[initandlisten\] waiting for connections on port/.test(data)) { + // Connect to it and start a replset. + var db = new mongoNpmModule.Db( + 'meteor', new mongoNpmModule.Server('127.0.0.1', options.port), + {safe: true}); + db.open(function(err, db) { + if (err) + throw err; + db.admin().command({ + replSetInitiate: { + _id: replSetName, + members: [{_id : 0, host: '127.0.0.1:' + options.port}] + } + }, function (err, result) { + if (err) + throw err; + }); + }); + } }); }); return handle; diff --git a/tools/run.js b/tools/run.js index da487b19f0..b9fc91e1f8 100644 --- a/tools/run.js +++ b/tools/run.js @@ -20,6 +20,7 @@ var unipackage = require('./unipackage.js'); var _ = require('underscore'); var inFiber = require('./fiber-helpers.js').inFiber; var Future = require('fibers/future'); +var Fiber = require('fibers'); ////////// Globals ////////// //XXX: Refactor to not have globals anymore? 
@@ -598,55 +599,54 @@ exports.run = function (context, options) { var mongoErrorTimer; var mongoStartupPrintTimer; var launch = function () { - Status.mongoHandle = mongo_runner.launch_mongo( - context.appDir, - mongoPort, - function () { // On Mongo startup complete - // don't print mongo startup is slow warning. - if (mongoStartupPrintTimer) { - clearTimeout(mongoStartupPrintTimer); - mongoStartupPrintTimer = null; + Fiber(function () { + Status.mongoHandle = mongo_runner.launchMongo({ + context: context, + port: mongoPort, + onListen: function () { // On Mongo startup complete + // don't print mongo startup is slow warning. + if (mongoStartupPrintTimer) { + clearTimeout(mongoStartupPrintTimer); + mongoStartupPrintTimer = null; + } + restartServer(); + }, + onExit: function (code, signal, stderr) { // On Mongo dead + if (Status.shuttingDown) { + return; + } + + // Print only last 20 lines of stderr. + stderr = stderr.split('\n').slice(-20).join('\n'); + + console.log( + stderr + "Unexpected mongo exit code " + code + ". Restarting.\n"); + + // if mongo dies 3 times with less than 5 seconds between each, + // declare it failed and die. + mongoErrorCount += 1; + if (mongoErrorCount >= 3) { + var explanation = mongoExitCodes.Codes[code]; + console.log("Can't start mongod\n"); + if (explanation) + console.log(explanation.longText); + if (explanation === mongoExitCodes.EXIT_NET_ERROR) { + console.log( + "\nCheck for other processes listening on port " + mongoPort + + "\nor other meteors running in the same project."); + } + if (!explanation && /GLIBC/i.test(stderr)) { + console.log( + "\nLooks like you are trying to run Meteor on an old Linux " + + "distribution. Meteor on Linux requires glibc version 2.9 " + + "or above. 
Try upgrading your distribution to the latest " + + "version."); + } + process.exit(1); + } } - restartServer(); - }, - function (code, signal, stderr) { // On Mongo dead - if (Status.shuttingDown) { - return; - } - - // Print only last 20 lines of stderr. - stderr = stderr.split('\n').slice(-20).join('\n'); - - console.log(stderr + "Unexpected mongo exit code " + code + ". Restarting.\n"); - - // if mongo dies 3 times with less than 5 seconds between each, - // declare it failed and die. - mongoErrorCount += 1; - if (mongoErrorCount >= 3) { - var explanation = mongoExitCodes.Codes[code]; - console.log("Can't start mongod\n"); - if (explanation) - console.log(explanation.longText); - if (explanation === mongoExitCodes.EXIT_NET_ERROR) - console.log("\nCheck for other processes listening on port " + mongoPort + - "\nor other meteors running in the same project."); - if (!explanation && /GLIBC/i.test(stderr)) - console.log("\nLooks like you are trying to run Meteor on an old Linux " + - "distribution. Meteor on Linux only supports Linux with glibc " + - "version 2.9 and above. Try upgrading your distribution " + - "to the latest version."); - process.exit(1); - } - if (mongoErrorTimer) - clearTimeout(mongoErrorTimer); - mongoErrorTimer = setTimeout(function () { - mongoErrorCount = 0; - mongoErrorTimer = null; - }, 5000); - - // Wait a sec to restart. - setTimeout(launch, 1000); }); + }).run(); }; startProxy(outerPort, innerPort, function () { From a832b11211cf7bc6c2148ce29e14b0c8248d2be9 Mon Sep 17 00:00:00 2001 From: David Glasser Date: Wed, 14 Aug 2013 19:49:18 -0700 Subject: [PATCH 003/190] Factor tailing code out of observe-changes code. 
--- packages/mongo-livedata/mongo_driver.js | 111 +++++++++++++----------- 1 file changed, 61 insertions(+), 50 deletions(-) diff --git a/packages/mongo-livedata/mongo_driver.js b/packages/mongo-livedata/mongo_driver.js index 788afe70d7..841057ef88 100644 --- a/packages/mongo-livedata/mongo_driver.js +++ b/packages/mongo-livedata/mongo_driver.js @@ -804,6 +804,58 @@ _.extend(SynchronousCursor.prototype, { } }); +MongoConnection.prototype.tail = function (cursorDescription, docCallback) { + var self = this; + if (!cursorDescription.options.tailable) + throw new Error("Can only tail a tailable cursor"); + + var cursor = self._createSynchronousCursor(cursorDescription); + + var stopped = false; + var lastTS = undefined; + Meteor.defer(function () { + while (true) { + if (stopped) + return; + try { + var doc = cursor._nextObject(); + } catch (err) { + // There's no good way to figure out if this was actually an error + // from Mongo. Ah well. But either way, we need to retry the cursor + // (unless the failure was because the observe got stopped). + doc = null; + } + if (stopped) + return; + if (doc) { + // If a tailable cursor contains a "ts" field, use it to recreate the + // cursor on error. ("ts" is a standard that Mongo uses internally for + // the oplog, and there's a special flag that lets you do binary search + // on it instead of needing to use an index.) 
+ lastTS = doc.ts; + docCallback(doc); + } else { + var newSelector = _.clone(cursorDescription.selector); + if (lastTS) { + newSelector.ts = {$gt: lastTS}; + } + // XXX maybe set replay flag + cursor = self._createSynchronousCursor(new CursorDescription( + cursorDescription.collectionName, + newSelector, + cursorDescription.options)); + } + } + }); + + return { + stop: function () { + stopped = true; + cursor.close(); + } + }; +}; + var nextObserveHandleId = 1; var ObserveHandle = function (liveResultsSet, callbacks) { var self = this; @@ -1209,59 +1261,18 @@ MongoConnection.prototype._observeChangesTailable = function ( + " tailable cursor without a " + (ordered ? "addedBefore" : "added") + " callback"); } - var cursor = self._createSynchronousCursor(cursorDescription); - var stopped = false; - var lastTS = undefined; - Meteor.defer(function () { - while (true) { - if (stopped) - return; - try { - var doc = cursor._nextObject(); - } catch (err) { - // There's no good way to figure out if this was actually an error from - // Mongo. Ah well. But either way, we need to retry the cursor (unless - // the failure was because the observe got stopped). - doc = null; - } - if (stopped) - return; - if (doc) { - var id = doc._id; - delete doc._id; - // If a tailable cursor contains a "ts" field, use it to recreate the - // cursor on error, and don't publish the field. ("ts" is a standard - // that Mongo uses internally for the oplog, and there's a special flag - // that lets you do binary search on it instead of needing to use an - // index.) 
- lastTS = doc.ts; - delete doc.ts; - if (ordered) { - callbacks.addedBefore(id, doc, null); - } else { - callbacks.added(id, doc); - } - } else { - var newSelector = _.clone(cursorDescription.selector); - if (lastTS) { - newSelector.ts = {$gt: lastTS}; - } - // XXX maybe set replay flag - cursor = self._createSynchronousCursor(new CursorDescription( - cursorDescription.collectionName, - newSelector, - cursorDescription.options)); - } + return self.tail(cursorDescription, function (doc) { + var id = doc._id; + delete doc._id; + // The ts is an implementation detail. Hide it. + delete doc.ts; + if (ordered) { + callbacks.addedBefore(id, doc, null); + } else { + callbacks.added(id, doc); } }); - - return { - stop: function () { - stopped = true; - cursor.close(); - } - }; }; // Does our oplog tailing code support this cursor? For now, we are being very From 66376562612c99b0ed303affdd714e42fd590d4b Mon Sep 17 00:00:00 2001 From: David Glasser Date: Wed, 14 Aug 2013 20:36:53 -0700 Subject: [PATCH 004/190] Don't reset the replset unless the port changes. (Resetting the replset is slow!) --- tools/mongo_runner.js | 203 +++++++++++++++++++++++------------------- 1 file changed, 113 insertions(+), 90 deletions(-) diff --git a/tools/mongo_runner.js b/tools/mongo_runner.js index 56a77f574a..bb901ac202 100644 --- a/tools/mongo_runner.js +++ b/tools/mongo_runner.js @@ -5,7 +5,7 @@ var files = require('./files.js'); var _ = require('underscore'); var unipackage = require('./unipackage.js'); - +var Fiber = require('fibers'); /** Internal. * @@ -152,97 +152,120 @@ exports.launchMongo = function (options) { // add .gitignore if needed. files.add_to_gitignore(path.join(options.context.appDir, '.meteor'), 'local'); - // Load mongo-livedata so we'll be able to talk to it. 
- var mongoNpmModule = unipackage.load({ - library: options.context.library, - packages: [ 'mongo-livedata' ], - release: options.context.releaseVersion - })['mongo-livedata'].MongoInternals.NpmModule; - find_mongo_and_kill_it_dead(options.port, function (err) { - if (err) { - // XXX this was being passed to onListen and ignored before. should do - // something better. - throw {reason: "Can't kill running mongo: " + err.reason}; - } - - // Delete the "local" database. This removes any memory that this was part - // of a replSet; we will start one from scratch (easier and faster than - // falling over from one on a different port). - // XXX It's slow to do this every time! We should cache the port number on - // disk and only run this the first time or when the port number - // changes. - try { - var dbFiles = fs.readdirSync(dbPath); - } catch (e) { - if (!e || e.code !== 'ENOENT') - throw e; - } - _.each(dbFiles, function (dbFile) { - if (/^local\./.test(dbFile)) - fs.unlinkSync(path.join(dbPath, dbFile)); - }); - - // Start mongod with a dummy replSet and wait for it to listen. 
- var child_process = require('child_process'); - var replSetName = 'dummy'; - var proc = child_process.spawn(mongod_path, [ - // nb: cli-test.sh and find_mongo_pids assume that the next four arguments - // exist in this order without anything in between - '--bind_ip', '127.0.0.1', - '--smallfiles', - '--nohttpinterface', - '--port', options.port, - '--dbpath', dbPath, - '--replSet', replSetName - ]); - var callOnExit = function (code, signal) { - on_exit_callback(code, signal, stderrOutput); - }; - handle.stop = function (callback) { - var tries = 0; - var exited = false; - proc.removeListener('exit', callOnExit); - proc.kill('SIGINT'); - callback && callback(err); - }; - - var stderrOutput = ''; - - proc.stderr.setEncoding('utf8'); - proc.stderr.on('data', function (data) { - stderrOutput += data; - }); - - proc.on('exit', callOnExit); - - proc.stdout.setEncoding('utf8'); - proc.stdout.on('data', function (data) { - // process.stdout.write("MONGO SAYS: " + data); - - if (/ \[rsMgr\] replSet PRIMARY/.test(data)) - onListen(); - - if (/ \[initandlisten\] waiting for connections on port/.test(data)) { - // Connect to it and start a replset. - var db = new mongoNpmModule.Db( - 'meteor', new mongoNpmModule.Server('127.0.0.1', options.port), - {safe: true}); - db.open(function(err, db) { - if (err) - throw err; - db.admin().command({ - replSetInitiate: { - _id: replSetName, - members: [{_id : 0, host: '127.0.0.1:' + options.port}] - } - }, function (err, result) { - if (err) - throw err; - }); - }); + Fiber(function (){ + if (err) { + // XXX this was being passed to onListen and ignored before. should do + // something better. 
+ throw {reason: "Can't kill running mongo: " + err.reason}; } - }); + + var portFile = path.join(dbPath, 'METEOR-PORT'); + var createReplSet = true; + try { + createReplSet = +(fs.readFileSync(portFile)) !== options.port; + } catch (e) { + if (!e || e.code !== 'ENOENT') + throw e; + } + + // If this is the first time we're using this DB, or we changed port since + // the last time, then we want to destroying any existing replSet + // configuration and create a new one. First we delete the "local" database + // if it exists. (It's a pain and slow to change the port in an existing + // replSet configuration. It's also a little slow to initiate a new replSet, + // thus the attempt to not do it unless the port changes.) + if (createReplSet) { + try { + var dbFiles = fs.readdirSync(dbPath); + } catch (e) { + if (!e || e.code !== 'ENOENT') + throw e; + } + _.each(dbFiles, function (dbFile) { + if (/^local\./.test(dbFile)) + fs.unlinkSync(path.join(dbPath, dbFile)); + }); + + // Load mongo-livedata so we'll be able to talk to it. + var mongoNpmModule = unipackage.load({ + library: options.context.library, + packages: [ 'mongo-livedata' ], + release: options.context.releaseVersion + })['mongo-livedata'].MongoInternals.NpmModule; + } + + // Start mongod with a dummy replSet and wait for it to listen. 
+ var child_process = require('child_process'); + var replSetName = 'dummy'; + var proc = child_process.spawn(mongod_path, [ + // nb: cli-test.sh and find_mongo_pids assume that the next four arguments + // exist in this order without anything in between + '--bind_ip', '127.0.0.1', + '--smallfiles', + '--nohttpinterface', + '--port', options.port, + '--dbpath', dbPath, + '--replSet', replSetName + ]); + + var stderrOutput = ''; + proc.stderr.setEncoding('utf8'); + proc.stderr.on('data', function (data) { + stderrOutput += data; + }); + + var callOnExit = function (code, signal) { + onExit(code, signal, stderrOutput); + }; + proc.on('exit', callOnExit); + + handle.stop = function (callback) { + var tries = 0; + var exited = false; + proc.removeListener('exit', callOnExit); + proc.kill('SIGINT'); + callback && callback(err); + }; + + proc.stdout.setEncoding('utf8'); + var readyMessages = 2; + proc.stdout.on('data', function (data) { + // process.stdout.write("MONGO SAYS: " + data); + + if (/ \[rsMgr\] replSet PRIMARY/.test(data)) { + if (--readyMessages === 0) { + if (createReplSet) + fs.writeFileSync(portFile, options.port); + onListen(); + } + } + + if (/ \[initandlisten\] waiting for connections on port/.test(data)) { + if (createReplSet) { + // Connect to it and start a replset. 
+ var db = new mongoNpmModule.Db( + 'meteor', new mongoNpmModule.Server('127.0.0.1', options.port), + {safe: true}); + db.open(function(err, db) { + if (err) + throw err; + db.admin().command({ + replSetInitiate: { + _id: replSetName, + members: [{_id : 0, host: '127.0.0.1:' + options.port}] + } + }, function (err, result) { + if (err) + throw err; + }); + }); + } + if (--readyMessages === 0) + onListen(); + } + }); + }).run(); }); return handle; }; From 123c06ac3f6fe2d6337ba78b190a3ab21e5f22a5 Mon Sep 17 00:00:00 2001 From: David Glasser Date: Sun, 18 Aug 2013 11:51:45 -0700 Subject: [PATCH 005/190] oplog checkpoint from friday --- packages/mongo-livedata/mongo_driver.js | 63 +++++++++++++++++++++++-- tools/run.js | 4 ++ 2 files changed, 63 insertions(+), 4 deletions(-) diff --git a/packages/mongo-livedata/mongo_driver.js b/packages/mongo-livedata/mongo_driver.js index 841057ef88..d43b3e82c9 100644 --- a/packages/mongo-livedata/mongo_driver.js +++ b/packages/mongo-livedata/mongo_driver.js @@ -84,8 +84,9 @@ var replaceTypes = function (document, atomTransformer) { }; -MongoConnection = function (url) { +MongoConnection = function (url, connectionOptions) { var self = this; + connectionOptions = connectionOptions || {}; self._connectCallbacks = []; self._liveResultsSets = {}; @@ -122,6 +123,18 @@ MongoConnection = function (url) { }); }).run(); }); + + self._oplogHandle = null; + // XXX we should NOT be reading directly from the env here (this should be an + // argument to MongoConnection eg) but I want to wait for the AppConfig API to + // settle a little before thinking too hard about this + if (process.env.XXX_OPLOG_URL && !connectionOptions.isOplog) { + var dbName = Npm.require('url').parse(url).pathname.substr(1); + // Defer this, because it blocks. If we start observing cursors before the + // oplog handle is ready, they just don't get to use the oplog. 
+ Meteor.defer(_.bind(self._startOplogTailing, + self, process.env.XXX_OPLOG_URL, dbName)); + } }; MongoConnection.prototype.close = function() { @@ -177,6 +190,44 @@ MongoConnection.prototype._maybeBeginWrite = function () { return {committed: function () {}}; }; +var OPLOG_COLLECTION = 'oplog.rs'; + +// Like Perl's quotemeta: quotes all regexp metacharacters. See +// https://github.com/substack/quotemeta/blob/master/index.js +var quotemeta = function (str) { + return String(str).replace(/(\W)/g, '\\$1'); +}; + +MongoConnection.prototype._startOplogTailing = function (oplogUrl, dbName) { + var self = this; + + var oplogConnection = new MongoConnection(oplogUrl, {isOplog: true}); + // Find the last oplog entry. Blocks until the connection is ready. + + var lastOplogEntry = oplogConnection.findOne( + OPLOG_COLLECTION, {}, {sort: {$natural: -1}}); + + var oplogSelector = { + ns: new RegExp('^' + quotemeta(dbName) + '\\.'), + $or: [ + {op: {$in: ['i', 'u', 'd']}}, + {op: 'c', 'o.drop': {$exists: true}} + ] + }; + if (lastOplogEntry) + oplogSelector.ts = {$gt: lastOplogEntry.ts}; + + var cursorDescription = new CursorDescription( + OPLOG_COLLECTION, oplogSelector, {tailable: true}); + var handle = oplogConnection.tail(cursorDescription, function (doc) { + // Don't register the handle until after we've gotten one doc. 
+ if (!self._oplogHandle) + self._oplogHandle = handle; + + console.log("OPLOG TAILING SEZ:", doc); + }); +}; + //////////// Public API ////////// // The write methods block until the database has confirmed the write (it may @@ -717,16 +768,20 @@ var SynchronousCursor = function (dbCursor, cursorDescription, options) { _.extend(SynchronousCursor.prototype, { _nextObject: function () { var self = this; + while (true) { var doc = self._synchronousNextObject().wait(); - if (!doc || typeof doc._id === 'undefined') return null; + + if (!doc) return null; doc = replaceTypes(doc, replaceMongoAtomWithMeteor); - if (!self._cursorDescription.options.tailable) { + if (!self._cursorDescription.options.tailable && _.has(doc, '_id')) { // Did Mongo give us duplicate documents in the same cursor? If so, // ignore this one. (Do this before the transform, since transform might // return some unrelated value.) We don't do this for tailable cursors, - // because we want to maintain O(1) memory usage. + // because we want to maintain O(1) memory usage. And if there isn't _id + // for some reason (maybe it's the oplog), then we don't do this either. + // (Be careful to do this for falsey but existing _id, though.) var strId = LocalCollection._idStringify(doc._id); if (self._visitedIds[strId]) continue; self._visitedIds[strId] = true; diff --git a/tools/run.js b/tools/run.js index b9fc91e1f8..4776c204d3 100644 --- a/tools/run.js +++ b/tools/run.js @@ -243,6 +243,7 @@ var startServer = function (options) { env.PORT = options.innerPort; env.MONGO_URL = options.mongoUrl; + env.XXX_OPLOG_URL = options.oplogUrl; env.ROOT_URL = options.rootUrl; if (options.settings) env.METEOR_SETTINGS = options.settings; @@ -413,6 +414,8 @@ exports.run = function (context, options) { // Allow override and use of external mongo. Matches code in launch_mongo. var mongoUrl = process.env.MONGO_URL || ("mongodb://127.0.0.1:" + mongoPort + "/meteor"); + var oplogUrl = process.env.MONGO_URL ? 
undefined + : "mongodb://127.0.01:" + mongoPort + "/local"; var firstRun = true; var serverHandle; @@ -565,6 +568,7 @@ exports.run = function (context, options) { outerPort: outerPort, innerPort: innerPort, mongoUrl: mongoUrl, + oplogUrl: oplogUrl, rootUrl: rootUrl, library: context.library, rawLogs: options.rawLogs, From ba63548d4d6be25ac203dc4cd02f7c579e51cfa2 Mon Sep 17 00:00:00 2001 From: David Glasser Date: Mon, 19 Aug 2013 16:24:05 -0700 Subject: [PATCH 006/190] checkpoint monday morning --- packages/mongo-livedata/mongo_driver.js | 70 ++++++++++++++++++++++++- 1 file changed, 69 insertions(+), 1 deletion(-) diff --git a/packages/mongo-livedata/mongo_driver.js b/packages/mongo-livedata/mongo_driver.js index d43b3e82c9..9cbb4082d2 100644 --- a/packages/mongo-livedata/mongo_driver.js +++ b/packages/mongo-livedata/mongo_driver.js @@ -139,6 +139,13 @@ MongoConnection = function (url, connectionOptions) { MongoConnection.prototype.close = function() { var self = this; + + // XXX probably untested + var oplogHandle = self._oplogHandle; + self._oplogHandle = null; + if (oplogHandle) + oplogHandle.stop(); + // Use Future.wrap so that errors get thrown. This happens to // work even outside a fiber since the 'close' method is not // actually asynchronous. @@ -219,15 +226,70 @@ MongoConnection.prototype._startOplogTailing = function (oplogUrl, dbName) { var cursorDescription = new CursorDescription( OPLOG_COLLECTION, oplogSelector, {tailable: true}); + + var callbacksByCollection = {}; + var handle = oplogConnection.tail(cursorDescription, function (doc) { // Don't register the handle until after we've gotten one doc. + // XXX do we want to actually process this doc? 
if (!self._oplogHandle) self._oplogHandle = handle; - console.log("OPLOG TAILING SEZ:", doc); + if (!doc.ns && doc.ns.length > dbName.length + 1 && + doc.ns.substr(0, dbName.length + 1) === (dbName + '.')) + throw new Error("Unexpected ns"); + + var collectionName = doc.ns.substr(dbName.length + 1); + + _.each(callbacksByCollection[collectionName], function (callback) { + callback(doc); + }); }); + + var nextId = 0; + handle.onOplogEntry = function (collectionName, callback) { + if (!_.has(callbacksByCollection, collectionName)) + callbacksByCollection[collectionName] = {}; + var callbackId = nextId++; + callbacksByCollection[collectionName][callbackId] = callback; + return { + stop: function () { + delete callbacksByCollection[collectionName][callbackId]; + } + }; + }; }; +MongoConnection.prototype._observeChangesWithOplog = function ( + cursorDescription, callbacks) { + var self = this; + var oplogHandle = self._oplogHandle.onOplogEntry(cursorDescription.collectionName, function (op) { + console.log("A CHANGE TO THE DOC", op); + }); + + // XXX let's do this with race conditions first! + + var idSet = {}; + + if (callbacks.added) { + var initialCursor = new Cursor(self, cursorDescription); + initialCursor.forEach(function (initialDoc) { + var id = initialDoc._id; + delete initialDoc._id; + idSet[id] = true; + callbacks.added(id, initialDoc); + }); + } + + var observeHandle = { + stop: function () { + oplogHandle.stop(); + } + }; + return observeHandle; +}; + + //////////// Public API ////////// // The write methods block until the database has confirmed the write (it may @@ -937,6 +999,12 @@ MongoConnection.prototype._observeChanges = function ( return self._observeChangesTailable(cursorDescription, ordered, callbacks); } + // XXX maybe this should actually use deduping too? 
+ if (self._oplogHandle && !ordered + && cursorSupportedByOplogTailing(cursorDescription)) { + return self._observeChangesWithOplog(cursorDescription, callbacks); + } + var observeKey = JSON.stringify( _.extend({ordered: ordered}, cursorDescription)); From cae886b2f0d245d3f8888ccfb0b42eff0345ee5a Mon Sep 17 00:00:00 2001 From: David Glasser Date: Mon, 19 Aug 2013 20:05:15 -0700 Subject: [PATCH 007/190] remove processing works. --- packages/mongo-livedata/mongo_driver.js | 26 +++++++++++++++---------- 1 file changed, 16 insertions(+), 10 deletions(-) diff --git a/packages/mongo-livedata/mongo_driver.js b/packages/mongo-livedata/mongo_driver.js index 9cbb4082d2..d84c486e3a 100644 --- a/packages/mongo-livedata/mongo_driver.js +++ b/packages/mongo-livedata/mongo_driver.js @@ -229,12 +229,7 @@ MongoConnection.prototype._startOplogTailing = function (oplogUrl, dbName) { var callbacksByCollection = {}; - var handle = oplogConnection.tail(cursorDescription, function (doc) { - // Don't register the handle until after we've gotten one doc. - // XXX do we want to actually process this doc? 
- if (!self._oplogHandle) - self._oplogHandle = handle; - + self._oplogHandle = oplogConnection.tail(cursorDescription, function (doc) { if (!doc.ns && doc.ns.length > dbName.length + 1 && doc.ns.substr(0, dbName.length + 1) === (dbName + '.')) throw new Error("Unexpected ns"); @@ -247,7 +242,7 @@ MongoConnection.prototype._startOplogTailing = function (oplogUrl, dbName) { }); var nextId = 0; - handle.onOplogEntry = function (collectionName, callback) { + self._oplogHandle.onOplogEntry = function (collectionName, callback) { if (!_.has(callbacksByCollection, collectionName)) callbacksByCollection[collectionName] = {}; var callbackId = nextId++; @@ -263,14 +258,25 @@ MongoConnection.prototype._startOplogTailing = function (oplogUrl, dbName) { MongoConnection.prototype._observeChangesWithOplog = function ( cursorDescription, callbacks) { var self = this; - var oplogHandle = self._oplogHandle.onOplogEntry(cursorDescription.collectionName, function (op) { - console.log("A CHANGE TO THE DOC", op); - }); // XXX let's do this with race conditions first! var idSet = {}; + var oplogHandle = self._oplogHandle.onOplogEntry(cursorDescription.collectionName, function (op) { + if (op.op === 'd') { + // XXX check that ObjectId works here. 
(ie use idStringify or something) + var id = op.o._id; + if (_.has(idSet, id)) { + delete idSet[id]; + if (callbacks.removed) + callbacks.removed(id); + } + } else { + console.log("A CHANGE TO THE DOC", op); + } + }); + if (callbacks.added) { var initialCursor = new Cursor(self, cursorDescription); initialCursor.forEach(function (initialDoc) { From 1c8c7d171cf4fa32c1aa63edfc71ee89481db871 Mon Sep 17 00:00:00 2001 From: David Glasser Date: Mon, 19 Aug 2013 20:23:52 -0700 Subject: [PATCH 008/190] very basic insert support --- packages/mongo-livedata/id_map.js | 27 ++++++++++++++ packages/mongo-livedata/mongo_driver.js | 48 +++++++++++++++++++++---- packages/mongo-livedata/package.js | 1 + 3 files changed, 69 insertions(+), 7 deletions(-) create mode 100644 packages/mongo-livedata/id_map.js diff --git a/packages/mongo-livedata/id_map.js b/packages/mongo-livedata/id_map.js new file mode 100644 index 0000000000..fa093c305e --- /dev/null +++ b/packages/mongo-livedata/id_map.js @@ -0,0 +1,27 @@ +IdMap = function () { + var self = this; + self.map = {}; +}; + +_.extend(IdMap.prototype, { + get: function (id) { + var self = this; + var key = LocalCollection._idStringify(id); + return self.map[key]; + }, + set: function (id, value) { + var self = this; + var key = LocalCollection._idStringify(id); + self.map[key] = value; + }, + remove: function(id) { + var self = this; + var key = LocalCollection._idStringify(id); + delete self.map[key]; + }, + has: function(id) { + var self = this; + var key = LocalCollection._idStringify(id); + return _.has(self.map, key); + } +}); diff --git a/packages/mongo-livedata/mongo_driver.js b/packages/mongo-livedata/mongo_driver.js index d84c486e3a..5d73a2dcc0 100644 --- a/packages/mongo-livedata/mongo_driver.js +++ b/packages/mongo-livedata/mongo_driver.js @@ -237,7 +237,7 @@ MongoConnection.prototype._startOplogTailing = function (oplogUrl, dbName) { var collectionName = doc.ns.substr(dbName.length + 1); 
_.each(callbacksByCollection[collectionName], function (callback) { - callback(doc); + callback(EJSON.clone(doc)); }); }); @@ -260,18 +260,52 @@ MongoConnection.prototype._observeChangesWithOplog = function ( var self = this; // XXX let's do this with race conditions first! + // + // the real way will involve special oplog handling during the initial cursor + // read. specifically: + // + // 1) start reading the oplog. for every document that could conceivably be + // relevant, cache a bit of information about what we saw. (eg, cache + // document for inserts, removal fact for removes, "needs poll" for updates. + // most recent overrides.) + // + // 2) read the initial set and send added messages. + // + // 3) write a sentinel to some field. + // + // 4) wait until that sentinel comes up through the oplog. + // + // 5) use the cached information (compared to what we already know) to send + // messages about things that changed right about then + // + // 6) now that we're in the "steady state", process ops more directly - var idSet = {}; + var idSet = new IdMap; + + var selector = LocalCollection._compileSelector(cursorDescription.selector); var oplogHandle = self._oplogHandle.onOplogEntry(cursorDescription.collectionName, function (op) { + var id; if (op.op === 'd') { - // XXX check that ObjectId works here. 
(ie use idStringify or something) - var id = op.o._id; - if (_.has(idSet, id)) { - delete idSet[id]; + // XXX check that ObjectId works here + id = op.o._id; + if (idSet.has(id)) { + idSet.remove(id); if (callbacks.removed) callbacks.removed(id); } + } else if (op.op ==='i') { + id = op.o._id; + if (idSet.has(id)) + throw new Error("insert found for already-existing ID"); + + if (selector(op.o)) { + idSet.set(id, true); + if (callbacks.added) { + delete op.o._id; + callbacks.added(id, op.o); + } + } } else { console.log("A CHANGE TO THE DOC", op); } @@ -282,7 +316,7 @@ MongoConnection.prototype._observeChangesWithOplog = function ( initialCursor.forEach(function (initialDoc) { var id = initialDoc._id; delete initialDoc._id; - idSet[id] = true; + idSet.set(id, true); callbacks.added(id, initialDoc); }); } diff --git a/packages/mongo-livedata/package.js b/packages/mongo-livedata/package.js index 194ac21192..3827dbb929 100644 --- a/packages/mongo-livedata/package.js +++ b/packages/mongo-livedata/package.js @@ -38,6 +38,7 @@ Package.on_use(function (api) { // For tests only. 
api.export('MongoTest', 'server'); + api.add_files('id_map.js', 'server'); api.add_files('mongo_driver.js', 'server'); api.add_files('local_collection_driver.js', ['client', 'server']); api.add_files('remote_collection_driver.js', 'server'); From abcc7d9bb92b8c7299c96fe3d22047883d917c75 Mon Sep 17 00:00:00 2001 From: David Glasser Date: Mon, 19 Aug 2013 21:22:08 -0700 Subject: [PATCH 009/190] very basic update support --- packages/mongo-livedata/id_map.js | 13 ++- packages/mongo-livedata/mongo_driver.js | 104 +++++++++++++++++++----- 2 files changed, 94 insertions(+), 23 deletions(-) diff --git a/packages/mongo-livedata/id_map.js b/packages/mongo-livedata/id_map.js index fa093c305e..160ee505c4 100644 --- a/packages/mongo-livedata/id_map.js +++ b/packages/mongo-livedata/id_map.js @@ -14,14 +14,23 @@ _.extend(IdMap.prototype, { var key = LocalCollection._idStringify(id); self.map[key] = value; }, - remove: function(id) { + remove: function (id) { var self = this; var key = LocalCollection._idStringify(id); delete self.map[key]; }, - has: function(id) { + has: function (id) { var self = this; var key = LocalCollection._idStringify(id); return _.has(self.map, key); + }, + // XXX used? 
+ setDefault: function (id, def) { + var self = this; + var key = LocalCollection._idStringify(id); + if (_.has(self.map, key)) + return self.map[key]; + self.map[key] = def; + return def; } }); diff --git a/packages/mongo-livedata/mongo_driver.js b/packages/mongo-livedata/mongo_driver.js index 5d73a2dcc0..4bd26adcf3 100644 --- a/packages/mongo-livedata/mongo_driver.js +++ b/packages/mongo-livedata/mongo_driver.js @@ -255,6 +255,18 @@ MongoConnection.prototype._startOplogTailing = function (oplogUrl, dbName) { }; }; +var modifierTopLevelFields = function (mod) { + var fields = {}; + _.each(mod, function (mapping, op) { + if (op !== '$set' && op != '$unset') + throw new Error("Unknown oplog operation " + op); + _.each(mapping, function (value, field) { + fields[field.split('.')[0]] = true; + }); + }); + return _.keys(fields); +}; + MongoConnection.prototype._observeChangesWithOplog = function ( cursorDescription, callbacks) { var self = this; @@ -282,44 +294,94 @@ MongoConnection.prototype._observeChangesWithOplog = function ( var idSet = new IdMap; + var changedFields = new IdMap; + var selector = LocalCollection._compileSelector(cursorDescription.selector); + var add = function (doc) { + var id = doc._id; + idSet.set(id, true); + if (callbacks.added) { + delete doc._id; + callbacks.added(id, doc); + } + }; + + var remove = function (id) { + idSet.remove(id); + changedFields.remove(id); + if (callbacks.removed) { + callbacks.removed(id); + } + }; + var oplogHandle = self._oplogHandle.onOplogEntry(cursorDescription.collectionName, function (op) { var id; if (op.op === 'd') { // XXX check that ObjectId works here id = op.o._id; - if (idSet.has(id)) { - idSet.remove(id); - if (callbacks.removed) - callbacks.removed(id); - } - } else if (op.op ==='i') { + if (idSet.has(id)) + remove(id); + } else if (op.op === 'i') { id = op.o._id; if (idSet.has(id)) throw new Error("insert found for already-existing ID"); - if (selector(op.o)) { - idSet.set(id, true); - if 
(callbacks.added) { - delete op.o._id; - callbacks.added(id, op.o); - } + if (selector(op.o)) + add(op.o); + } else if (op.op === 'u') { + id = op.o2._id; + var fields = changedFields.get(id); + if (!fields) { + fields = {}; + changedFields.set(id, fields); + Fiber(function (){ + // XXX problem is, the result of this findOne is delivered at a random + // time, not necessarily synced with other stuff that may be coming + // down the oplog. how much does this matter? + var updatedDoc = self.findOne( + cursorDescription.collectionName, {_id: id}); + + // XXX in what circumstances does this !== fields? + var myChangedFields = changedFields.get(id); + // Did we process a remove while we were waiting? + if (!myChangedFields) + return; + + // Delete this record from myChangedFields atomically before anything + // that might yield (even selector might yield if it has $where!) + changedFields.remove(id); + + var matchesNow = updatedDoc && selector(updatedDoc); + var matchedBefore = idSet.has(id); + + if (matchesNow && !matchedBefore) { + add(updatedDoc); + } else if (matchedBefore && !matchesNow) { + remove(id); + } else if (matchesNow) { + if (callbacks.changed) { + // XXX this assumes that every field we saw a set/unset on + // actually changed. otherwise we may send out something + // redundant. 
+ callbacks.changed( + id, _.pick(updatedDoc, _.keys(myChangedFields))); + } + } + }).run(); } + _.each(modifierTopLevelFields(op.o), function (field) { + fields[field] = true; + }); } else { console.log("A CHANGE TO THE DOC", op); } }); - if (callbacks.added) { - var initialCursor = new Cursor(self, cursorDescription); - initialCursor.forEach(function (initialDoc) { - var id = initialDoc._id; - delete initialDoc._id; - idSet.set(id, true); - callbacks.added(id, initialDoc); - }); - } + var initialCursor = new Cursor(self, cursorDescription); + initialCursor.forEach(function (initialDoc) { + add(initialDoc); + }); var observeHandle = { stop: function () { From c4c98371943cfaa373cd0c54a480ca579714d32b Mon Sep 17 00:00:00 2001 From: David Glasser Date: Mon, 19 Aug 2013 22:29:08 -0700 Subject: [PATCH 010/190] fix $unset --- packages/mongo-livedata/mongo_driver.js | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/packages/mongo-livedata/mongo_driver.js b/packages/mongo-livedata/mongo_driver.js index 4bd26adcf3..c9943e861c 100644 --- a/packages/mongo-livedata/mongo_driver.js +++ b/packages/mongo-livedata/mongo_driver.js @@ -364,8 +364,12 @@ MongoConnection.prototype._observeChangesWithOplog = function ( // XXX this assumes that every field we saw a set/unset on // actually changed. otherwise we may send out something // redundant. - callbacks.changed( - id, _.pick(updatedDoc, _.keys(myChangedFields))); + var changed = {}; + _.each(myChangedFields, function (unused, fieldName) { + changed[fieldName] = _.has(updatedDoc, fieldName) + ? 
updatedDoc[fieldName] : undefined; + }); + callbacks.changed(id, changed); } } }).run(); From d3d285426d54b20facc6c6c9994fe20544d4e60a Mon Sep 17 00:00:00 2001 From: David Glasser Date: Tue, 10 Sep 2013 17:20:40 -0700 Subject: [PATCH 011/190] somewhat implement write fence (latency compensation) --- packages/mongo-livedata/mongo_driver.js | 137 ++++++++++++++++++------ 1 file changed, 106 insertions(+), 31 deletions(-) diff --git a/packages/mongo-livedata/mongo_driver.js b/packages/mongo-livedata/mongo_driver.js index c9943e861c..9c4ab6e051 100644 --- a/packages/mongo-livedata/mongo_driver.js +++ b/packages/mongo-livedata/mongo_driver.js @@ -199,6 +199,13 @@ MongoConnection.prototype._maybeBeginWrite = function () { var OPLOG_COLLECTION = 'oplog.rs'; +var WRITE_COLLECTION = 'meteor_livedata_Writes'; +// XXX This is problematic if our RNG isn't seeded well enough. +var myServerId = Random.id(); +var nextWriteId = 1; +// XXX doc +var outstandingWrites = []; + // Like Perl's quotemeta: quotes all regexp metacharacters. See // https://github.com/substack/quotemeta/blob/master/index.js var quotemeta = function (str) { @@ -229,6 +236,24 @@ MongoConnection.prototype._startOplogTailing = function (oplogUrl, dbName) { var callbacksByCollection = {}; + var processFence = function (doc) { + if (doc.op !== 'i' && doc.op !== 'u') + return; + var serverId = (doc.op === 'i' ? doc.o._id : doc.o2._id); + if (serverId !== myServerId) + return; + var writeId = + (doc.op === 'i' ? doc.o.write : (doc.o.$set && doc.o.$set.write)); + if (typeof writeId !== 'number') + return; + // Process all writes up to this point. 
+ while (!_.isEmpty(outstandingWrites) + && outstandingWrites[0].writeId <= writeId) { + var write = outstandingWrites.shift(); + write.write.committed(); + } + }; + self._oplogHandle = oplogConnection.tail(cursorDescription, function (doc) { if (!doc.ns && doc.ns.length > dbName.length + 1 && doc.ns.substr(0, dbName.length + 1) === (dbName + '.')) @@ -236,6 +261,11 @@ MongoConnection.prototype._startOplogTailing = function (oplogUrl, dbName) { var collectionName = doc.ns.substr(dbName.length + 1); + if (collectionName === WRITE_COLLECTION) { + processFence(doc); + return; + } + _.each(callbacksByCollection[collectionName], function (callback) { callback(EJSON.clone(doc)); }); @@ -255,6 +285,9 @@ MongoConnection.prototype._startOplogTailing = function (oplogUrl, dbName) { }; }; +// XXX you can actually get a replacement doc instead of $set/$unset! this +// completely messes with the attempt to do a non-ID-polling process of +// updates... var modifierTopLevelFields = function (mod) { var fields = {}; _.each(mod, function (mapping, op) { @@ -382,6 +415,30 @@ MongoConnection.prototype._observeChangesWithOplog = function ( } }); + // XXX ordering w.r.t. everything else? + var listenersHandle = listenAll( + cursorDescription, function (notification, complete) { + // If we're not in a write fence, we don't have to do anything. That's + // because + var fence = DDPServer._CurrentWriteFence.get(); + if (!fence) { + complete(); + return; + } + var writeId = nextWriteId++; + var write = fence.beginWrite(); + outstandingWrites.push({writeId: writeId, write: write}); + + // Use direct write to Node Mongo driver so we don't end up with recursive + // fence stuff. Need to disable 'safe' because we aren't providing a + // callback. 
+ var writeCollection = self._getCollection(WRITE_COLLECTION); + writeCollection.update({_id: myServerId}, {$set: {write: writeId}}, + {upsert: true, safe: false}); + complete(); + } + ); + var initialCursor = new Cursor(self, cursorDescription); initialCursor.forEach(function (initialDoc) { add(initialDoc); @@ -1156,6 +1213,38 @@ MongoConnection.prototype._observeChanges = function ( return observeHandle; }; +// Listen for the invalidation messages that will trigger us to poll the +// database for changes. If this selector specifies specific IDs, specify them +// here, so that updates to different specific IDs don't cause us to poll. +// listenCallback is the same kind of (notification, complete) callback passed +// to InvalidationCrossbar.listen. +var listenAll = function (cursorDescription, listenCallback) { + var listeners = []; + var listenOnTrigger = function (trigger) { + listeners.push(DDPServer._InvalidationCrossbar.listen( + trigger, listenCallback)); + }; + + var key = {collection: cursorDescription.collectionName}; + var specificIds = LocalCollection._idsMatchedBySelector( + cursorDescription.selector); + if (specificIds) { + _.each(specificIds, function (id) { + listenOnTrigger(_.extend({id: id}, key)); + }); + } else { + listenOnTrigger(key); + } + + return { + stop: function () { + _.each(listeners, function (listener) { + listener.stop(); + }); + } + }; +}; + var LiveResultsSet = function (cursorDescription, mongoHandle, ordered, stopCallback, testOnlyPollCallback) { var self = this; @@ -1194,37 +1283,23 @@ var LiveResultsSet = function (cursorDescription, mongoHandle, ordered, self._taskQueue = new Meteor._SynchronousQueue(); - // Listen for the invalidation messages that will trigger us to poll the - // database for changes. If this selector specifies specific IDs, specify them - // here, so that updates to different specific IDs don't cause us to poll. 
- var listenOnTrigger = function (trigger) { - var listener = DDPServer._InvalidationCrossbar.listen( - trigger, function (notification, complete) { - // When someone does a transaction that might affect us, schedule a poll - // of the database. If that transaction happens inside of a write fence, - // block the fence until we've polled and notified observers. - var fence = DDPServer._CurrentWriteFence.get(); - if (fence) - self._pendingWrites.push(fence.beginWrite()); - // Ensure a poll is scheduled... but if we already know that one is, - // don't hit the throttled _ensurePollIsScheduled function (which might - // lead to us calling it unnecessarily in 50ms). - if (self._pollsScheduledButNotStarted === 0) - self._ensurePollIsScheduled(); - complete(); - }); - self._stopCallbacks.push(function () { listener.stop(); }); - }; - var key = {collection: cursorDescription.collectionName}; - var specificIds = LocalCollection._idsMatchedBySelector( - cursorDescription.selector); - if (specificIds) { - _.each(specificIds, function (id) { - listenOnTrigger(_.extend({id: id}, key)); - }); - } else { - listenOnTrigger(key); - } + var listenersHandle = listenAll( + cursorDescription, function (notification, complete) { + // When someone does a transaction that might affect us, schedule a poll + // of the database. If that transaction happens inside of a write fence, + // block the fence until we've polled and notified observers. + var fence = DDPServer._CurrentWriteFence.get(); + if (fence) + self._pendingWrites.push(fence.beginWrite()); + // Ensure a poll is scheduled... but if we already know that one is, + // don't hit the throttled _ensurePollIsScheduled function (which might + // lead to us calling it unnecessarily in 50ms). + if (self._pollsScheduledButNotStarted === 0) + self._ensurePollIsScheduled(); + complete(); + } + ); + self._stopCallbacks.push(function () { listenersHandle.stop(); }); // Map from handle ID to ObserveHandle. 
self._observeHandles = {}; From ffc70c1611335382029644916c7b93048ccf4c01 Mon Sep 17 00:00:00 2001 From: David Glasser Date: Wed, 11 Sep 2013 11:17:37 -0700 Subject: [PATCH 012/190] xxx now --- packages/mongo-livedata/mongo_driver.js | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/packages/mongo-livedata/mongo_driver.js b/packages/mongo-livedata/mongo_driver.js index 9c4ab6e051..a39485a2d1 100644 --- a/packages/mongo-livedata/mongo_driver.js +++ b/packages/mongo-livedata/mongo_driver.js @@ -325,6 +325,10 @@ MongoConnection.prototype._observeChangesWithOplog = function ( // // 6) now that we're in the "steady state", process ops more directly + // XXX NOW: replace idSet/changedFields with simply currently published + // results, ok??? that should simplify things, and allow the implementation of + // "replace" (noodles) + var idSet = new IdMap; var changedFields = new IdMap; From 0344e946fd898321218764e1500da5019eee2c1b Mon Sep 17 00:00:00 2001 From: David Glasser Date: Thu, 12 Sep 2013 11:18:11 -0700 Subject: [PATCH 013/190] more progress --- packages/mongo-livedata/mongo_driver.js | 28 +++++++++++++------------ 1 file changed, 15 insertions(+), 13 deletions(-) diff --git a/packages/mongo-livedata/mongo_driver.js b/packages/mongo-livedata/mongo_driver.js index a39485a2d1..933ab7a460 100644 --- a/packages/mongo-livedata/mongo_driver.js +++ b/packages/mongo-livedata/mongo_driver.js @@ -329,27 +329,27 @@ MongoConnection.prototype._observeChangesWithOplog = function ( // results, ok??? 
that should simplify things, and allow the implementation of // "replace" (noodles) - var idSet = new IdMap; + // XXX DOC: map id -> currently published fields + // (which of course is also the same as what is tracked in merge box, + // ah well) + var published = new IdMap; + // XXX KILL THESE + var idSet = new IdMap; var changedFields = new IdMap; var selector = LocalCollection._compileSelector(cursorDescription.selector); var add = function (doc) { var id = doc._id; - idSet.set(id, true); - if (callbacks.added) { - delete doc._id; - callbacks.added(id, doc); - } + delete doc._id; + published.set(id, doc); + callbacks.added && callbacks.added(id, doc); }; var remove = function (id) { - idSet.remove(id); - changedFields.remove(id); - if (callbacks.removed) { - callbacks.removed(id); - } + published.remove(id); + callbacks.removed && callbacks.removed(id); }; var oplogHandle = self._oplogHandle.onOplogEntry(cursorDescription.collectionName, function (op) { @@ -357,13 +357,15 @@ MongoConnection.prototype._observeChangesWithOplog = function ( if (op.op === 'd') { // XXX check that ObjectId works here id = op.o._id; - if (idSet.has(id)) + if (published.has(id)) remove(id); } else if (op.op === 'i') { id = op.o._id; - if (idSet.has(id)) + if (published.has(id)) throw new Error("insert found for already-existing ID"); + // XXX what if selector yields? for now it can't but later it could have + // $where if (selector(op.o)) add(op.o); } else if (op.op === 'u') { From a93f742b3aaeb8f4af3cd1c149fa126393bea712 Mon Sep 17 00:00:00 2001 From: David Glasser Date: Thu, 12 Sep 2013 20:49:37 -0700 Subject: [PATCH 014/190] ok, rewrite is done. now "replace" updates work too. also give up on idea of knowing what fields changed (more correct, does require an in memory diff). many tests pass. 
--- packages/mongo-livedata/mongo_driver.js | 107 ++++++++++-------------- 1 file changed, 43 insertions(+), 64 deletions(-) diff --git a/packages/mongo-livedata/mongo_driver.js b/packages/mongo-livedata/mongo_driver.js index 933ab7a460..531ee16254 100644 --- a/packages/mongo-livedata/mongo_driver.js +++ b/packages/mongo-livedata/mongo_driver.js @@ -273,6 +273,9 @@ MongoConnection.prototype._startOplogTailing = function (oplogUrl, dbName) { var nextId = 0; self._oplogHandle.onOplogEntry = function (collectionName, callback) { + callback = Meteor.bindEnvironment(callback, function (err) { + Meteor._debug("Error in oplog callback", err.stack); + }); if (!_.has(callbacksByCollection, collectionName)) callbacksByCollection[collectionName] = {}; var callbackId = nextId++; @@ -285,21 +288,6 @@ MongoConnection.prototype._startOplogTailing = function (oplogUrl, dbName) { }; }; -// XXX you can actually get a replacement doc instead of $set/$unset! this -// completely messes with the attempt to do a non-ID-polling process of -// updates... 
-var modifierTopLevelFields = function (mod) { - var fields = {}; - _.each(mod, function (mapping, op) { - if (op !== '$set' && op != '$unset') - throw new Error("Unknown oplog operation " + op); - _.each(mapping, function (value, field) { - fields[field.split('.')[0]] = true; - }); - }); - return _.keys(fields); -}; - MongoConnection.prototype._observeChangesWithOplog = function ( cursorDescription, callbacks) { var self = this; @@ -334,12 +322,9 @@ MongoConnection.prototype._observeChangesWithOplog = function ( // ah well) var published = new IdMap; - // XXX KILL THESE - var idSet = new IdMap; - var changedFields = new IdMap; - var selector = LocalCollection._compileSelector(cursorDescription.selector); + // XXX add mutates its argument, which could get confusing var add = function (doc) { var id = doc._id; delete doc._id; @@ -359,6 +344,8 @@ MongoConnection.prototype._observeChangesWithOplog = function ( id = op.o._id; if (published.has(id)) remove(id); + + // XXX this needs to cancel any in-progress "ID lookup" for the document } else if (op.op === 'i') { id = op.o._id; if (published.has(id)) @@ -370,54 +357,45 @@ MongoConnection.prototype._observeChangesWithOplog = function ( add(op.o); } else if (op.op === 'u') { id = op.o2._id; - var fields = changedFields.get(id); - if (!fields) { - fields = {}; - changedFields.set(id, fields); - Fiber(function (){ - // XXX problem is, the result of this findOne is delivered at a random - // time, not necessarily synced with other stuff that may be coming - // down the oplog. how much does this matter? - var updatedDoc = self.findOne( - cursorDescription.collectionName, {_id: id}); - // XXX in what circumstances does this !== fields? - var myChangedFields = changedFields.get(id); - // Did we process a remove while we were waiting? 
- if (!myChangedFields) - return; + // Is this a modifier ($set/$unset, which may require us to poll the + // database to figure out if the whole document matches the selector) or a + // replacement (in which case we can just directly re-evaluate the + // selector)? + var isModifier = _.has(op.o, '$set') || _.has(op.o, '$unset'); - // Delete this record from myChangedFields atomically before anything - // that might yield (even selector might yield if it has $where!) - changedFields.remove(id); - - var matchesNow = updatedDoc && selector(updatedDoc); - var matchedBefore = idSet.has(id); - - if (matchesNow && !matchedBefore) { - add(updatedDoc); - } else if (matchedBefore && !matchesNow) { - remove(id); - } else if (matchesNow) { - if (callbacks.changed) { - // XXX this assumes that every field we saw a set/unset on - // actually changed. otherwise we may send out something - // redundant. - var changed = {}; - _.each(myChangedFields, function (unused, fieldName) { - changed[fieldName] = _.has(updatedDoc, fieldName) - ? updatedDoc[fieldName] : undefined; - }); - callbacks.changed(id, changed); - } - } - }).run(); + var newDoc; + if (isModifier) { + // XXX problem is, the result of this findOne is delivered at a random + // time, not necessarily synced with other stuff that may be coming down + // the oplog. also, we should coalesce multiple pings of the same + // document ("ID queue"). also, we shouldn't read fields that aren't + // necessary to evaluate selector or to publish. 
+ newDoc = self.findOne(cursorDescription.collectionName, {_id: id}); + } else { + newDoc = op.o; + } + + var matchesNow = newDoc && selector(newDoc); + var matchedBefore = published.has(id); + if (matchesNow && !matchedBefore) { + add(newDoc); + } else if (matchedBefore && !matchesNow) { + remove(id); + } else if (matchesNow) { + var oldDoc = published.get(id); + if (!oldDoc) + throw Error("thought that " + id + " was there!"); + published.set(id, newDoc); + if (callbacks.changed) { + var changed = LocalCollection._makeChangedFields(newDoc, oldDoc); + if (!_.isEmpty(changed)) { + callbacks.changed(id, changed); + } + } } - _.each(modifierTopLevelFields(op.o), function (field) { - fields[field] = true; - }); } else { - console.log("A CHANGE TO THE DOC", op); + console.log("SURPRISING FOR NOW OPERATION (eg drop collection)", op); } }); @@ -1602,7 +1580,8 @@ var cursorSupportedByOplogTailing = function (cursorDescription) { // For now, we're just dealing with equality queries: no $operators, regexps, // or $and/$or/$where/etc clauses. We can expand the scope of what we're - // comfortable processing later. + // comfortable processing later. ($where will get pretty scary since it will + // allow selector processing to yield!) return _.all(cursorDescription.selector, function (value, field) { // No logical operators like $and. if (field.substr(0, 1) === '$') From 5a4ae936aa06e3d48aa5e170f73cd462fa8d1ccf Mon Sep 17 00:00:00 2001 From: David Glasser Date: Thu, 12 Sep 2013 21:17:26 -0700 Subject: [PATCH 015/190] Until I do the right thing, it's slightly more right to do the query before listening. 
--- packages/mongo-livedata/mongo_driver.js | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/packages/mongo-livedata/mongo_driver.js b/packages/mongo-livedata/mongo_driver.js index 531ee16254..25a0e50df4 100644 --- a/packages/mongo-livedata/mongo_driver.js +++ b/packages/mongo-livedata/mongo_driver.js @@ -337,6 +337,12 @@ MongoConnection.prototype._observeChangesWithOplog = function ( callbacks.removed && callbacks.removed(id); }; + // XXX the ordering here is wrong + var initialCursor = new Cursor(self, cursorDescription); + initialCursor.forEach(function (initialDoc) { + add(initialDoc); + }); + var oplogHandle = self._oplogHandle.onOplogEntry(cursorDescription.collectionName, function (op) { var id; if (op.op === 'd') { @@ -353,8 +359,9 @@ MongoConnection.prototype._observeChangesWithOplog = function ( // XXX what if selector yields? for now it can't but later it could have // $where - if (selector(op.o)) + if (selector(op.o)) { add(op.o); + } } else if (op.op === 'u') { id = op.o2._id; @@ -423,11 +430,6 @@ MongoConnection.prototype._observeChangesWithOplog = function ( } ); - var initialCursor = new Cursor(self, cursorDescription); - initialCursor.forEach(function (initialDoc) { - add(initialDoc); - }); - var observeHandle = { stop: function () { oplogHandle.stop(); From 5ffb4a9a1552fe1b50fa6e2837575e02ae5f96a3 Mon Sep 17 00:00:00 2001 From: David Glasser Date: Thu, 12 Sep 2013 21:25:58 -0700 Subject: [PATCH 016/190] handle _id properly in replace. many tests pass. --- packages/mongo-livedata/mongo_driver.js | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/packages/mongo-livedata/mongo_driver.js b/packages/mongo-livedata/mongo_driver.js index 25a0e50df4..6506dcb0b6 100644 --- a/packages/mongo-livedata/mongo_driver.js +++ b/packages/mongo-livedata/mongo_driver.js @@ -380,7 +380,7 @@ MongoConnection.prototype._observeChangesWithOplog = function ( // necessary to evaluate selector or to publish. 
newDoc = self.findOne(cursorDescription.collectionName, {_id: id}); } else { - newDoc = op.o; + newDoc = _.extend({_id: id}, op.o); } var matchesNow = newDoc && selector(newDoc); @@ -393,6 +393,7 @@ MongoConnection.prototype._observeChangesWithOplog = function ( var oldDoc = published.get(id); if (!oldDoc) throw Error("thought that " + id + " was there!"); + delete newDoc._id; published.set(id, newDoc); if (callbacks.changed) { var changed = LocalCollection._makeChangedFields(newDoc, oldDoc); From c9c465bcfb7a3d4ca4f8219da8a636d7c665cfe1 Mon Sep 17 00:00:00 2001 From: David Glasser Date: Thu, 12 Sep 2013 22:09:04 -0700 Subject: [PATCH 017/190] all tests now pass (mostly because I made sure the failing tests avoid oplog, but they are testing basically implementation details) there's a "insert found for already-existing ID" somewhere though trasnform: null, interesting... --- packages/mongo-livedata/mongo_driver.js | 4 +++- packages/mongo-livedata/mongo_livedata_tests.js | 10 ++++++---- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/packages/mongo-livedata/mongo_driver.js b/packages/mongo-livedata/mongo_driver.js index 6506dcb0b6..fd12982bbf 100644 --- a/packages/mongo-livedata/mongo_driver.js +++ b/packages/mongo-livedata/mongo_driver.js @@ -1150,7 +1150,9 @@ MongoConnection.prototype._observeChanges = function ( } // XXX maybe this should actually use deduping too? 
- if (self._oplogHandle && !ordered + if (self._oplogHandle && !ordered && !callbacks._testOnlyPollCallback + // XXX remove this when oplog does de-duping + && !cursorDescription.options._dontUseOplog && cursorSupportedByOplogTailing(cursorDescription)) { return self._observeChangesWithOplog(cursorDescription, callbacks); } diff --git a/packages/mongo-livedata/mongo_livedata_tests.js b/packages/mongo-livedata/mongo_livedata_tests.js index aee9670447..35f4f1c127 100644 --- a/packages/mongo-livedata/mongo_livedata_tests.js +++ b/packages/mongo-livedata/mongo_livedata_tests.js @@ -572,11 +572,12 @@ if (Meteor.isServer) { var coll = new Meteor.Collection("observeInCallback-"+run, collectionOptions); var callbackCalled = false; - var handle = coll.find().observe({ + // oplog doesn't do de-duping yet, so it doesn't throw on recursive observe + var handle = coll.find({}, {_dontUseOplog: true}).observe({ added: function (newDoc) { callbackCalled = true; test.throws(function () { - coll.find().observe({}); + coll.find({}, {_dontUseOplog: true}).observe(); }); } }); @@ -966,8 +967,9 @@ if (Meteor.isServer) { var handlesToStop = []; var observe = function (name, query) { var handle = coll.find(query).observeChanges({ - // Make sure that we only poll on invalidation, not due to time, - // and keep track of when we do. + // Make sure that we only poll on invalidation, not due to time, and + // keep track of when we do. Note: this option disables the use of + // oplogs (which admittedly is somewhat irrelevant to this feature). _testOnlyPollCallback: function () { polls[name] = (name in polls ? 
polls[name] + 1 : 1); } From 4f23be316855517ee538465378e3af2d443e9c01 Mon Sep 17 00:00:00 2001 From: David Glasser Date: Fri, 13 Sep 2013 17:13:06 -0700 Subject: [PATCH 018/190] oops, stop listenersHandle --- packages/mongo-livedata/mongo_driver.js | 1 + 1 file changed, 1 insertion(+) diff --git a/packages/mongo-livedata/mongo_driver.js b/packages/mongo-livedata/mongo_driver.js index fd12982bbf..4f61a1e7a5 100644 --- a/packages/mongo-livedata/mongo_driver.js +++ b/packages/mongo-livedata/mongo_driver.js @@ -433,6 +433,7 @@ MongoConnection.prototype._observeChangesWithOplog = function ( var observeHandle = { stop: function () { + listenersHandle.stop(); oplogHandle.stop(); } }; From 6905807375d6eb7a56e6b30a3a182a2a39177623 Mon Sep 17 00:00:00 2001 From: David Glasser Date: Fri, 13 Sep 2013 17:14:12 -0700 Subject: [PATCH 019/190] rename writes collection to sequencers. will be used for observe catchup too --- packages/mongo-livedata/mongo_driver.js | 39 +++++++++++++------------ 1 file changed, 21 insertions(+), 18 deletions(-) diff --git a/packages/mongo-livedata/mongo_driver.js b/packages/mongo-livedata/mongo_driver.js index 4f61a1e7a5..925cff8b53 100644 --- a/packages/mongo-livedata/mongo_driver.js +++ b/packages/mongo-livedata/mongo_driver.js @@ -199,12 +199,12 @@ MongoConnection.prototype._maybeBeginWrite = function () { var OPLOG_COLLECTION = 'oplog.rs'; -var WRITE_COLLECTION = 'meteor_livedata_Writes'; +var SEQUENCE_COLLECTION = 'meteor_livedata_Sequencer'; // XXX This is problematic if our RNG isn't seeded well enough. var myServerId = Random.id(); -var nextWriteId = 1; +var nextSequenceId = 1; // XXX doc -var outstandingWrites = []; +var pendingSequences = []; // Like Perl's quotemeta: quotes all regexp metacharacters. 
See // https://github.com/substack/quotemeta/blob/master/index.js @@ -236,21 +236,21 @@ MongoConnection.prototype._startOplogTailing = function (oplogUrl, dbName) { var callbacksByCollection = {}; - var processFence = function (doc) { + var processSequence = function (doc) { if (doc.op !== 'i' && doc.op !== 'u') return; var serverId = (doc.op === 'i' ? doc.o._id : doc.o2._id); if (serverId !== myServerId) return; - var writeId = - (doc.op === 'i' ? doc.o.write : (doc.o.$set && doc.o.$set.write)); - if (typeof writeId !== 'number') + var sequenceId = + (doc.op === 'i' ? doc.o.sequence : (doc.o.$set && doc.o.$set.sequence)); + if (typeof sequenceId !== 'number') return; - // Process all writes up to this point. - while (!_.isEmpty(outstandingWrites) - && outstandingWrites[0].writeId <= writeId) { - var write = outstandingWrites.shift(); - write.write.committed(); + // Process all sequence points up to this point. + while (!_.isEmpty(pendingSequences) + && pendingSequences[0].sequenceId <= sequenceId) { + var sequence = pendingSequences.shift(); + sequence.callback(); } }; @@ -261,8 +261,8 @@ MongoConnection.prototype._startOplogTailing = function (oplogUrl, dbName) { var collectionName = doc.ns.substr(dbName.length + 1); - if (collectionName === WRITE_COLLECTION) { - processFence(doc); + if (collectionName === SEQUENCE_COLLECTION) { + processSequence(doc); return; } @@ -417,15 +417,18 @@ MongoConnection.prototype._observeChangesWithOplog = function ( complete(); return; } - var writeId = nextWriteId++; + var sequenceId = nextSequenceId++; var write = fence.beginWrite(); - outstandingWrites.push({writeId: writeId, write: write}); + pendingSequences.push({sequenceId: sequenceId, + callback: function () { + write.committed(); + }}); // Use direct write to Node Mongo driver so we don't end up with recursive // fence stuff. Need to disable 'safe' because we aren't providing a // callback. 
- var writeCollection = self._getCollection(WRITE_COLLECTION); - writeCollection.update({_id: myServerId}, {$set: {write: writeId}}, + var writeCollection = self._getCollection(SEQUENCE_COLLECTION); + writeCollection.update({_id: myServerId}, {$set: {sequence: sequenceId}}, {upsert: true, safe: false}); complete(); } From 85ada6a874d6ec4ce145240fcc75d74f094e04a8 Mon Sep 17 00:00:00 2001 From: David Glasser Date: Tue, 17 Sep 2013 18:34:57 -0700 Subject: [PATCH 020/190] doc fetcher! --- packages/mongo-livedata/doc_fetcher.js | 60 ++++++++++++++++++++++++++ packages/mongo-livedata/id_map.js | 16 +++---- packages/mongo-livedata/package.js | 2 +- 3 files changed, 69 insertions(+), 9 deletions(-) create mode 100644 packages/mongo-livedata/doc_fetcher.js diff --git a/packages/mongo-livedata/doc_fetcher.js b/packages/mongo-livedata/doc_fetcher.js new file mode 100644 index 0000000000..11051b6129 --- /dev/null +++ b/packages/mongo-livedata/doc_fetcher.js @@ -0,0 +1,60 @@ +var Future = Npm.require('fibers/future'); + +DocFetcher = function (mongoConnection) { + var self = this; + self._mongoConnection = mongoConnection; + // Map from cache key -> [Future] + self._futuresForCacheKey = {}; +}; + +_.extend(DocFetcher.prototype, { + // Fetches document "id" from collectionName, returning it or null if not + // found. Throws other errors. Can yield. + // + // If you make multiple calls to fetch() with the same cacheKey (a string), + // DocFetcher may assume that they all return the same document. (It does + // not check to see if collectionName/id match.) + fetch: function (collectionName, id, cacheKey) { + var self = this; + + check(collectionName, String); + // id is some sort of scalar + check(cacheKey, String); + + // If there's already an in-progress fetch for this cache key, yield until + // it's done and return whatever it returns. 
+ if (_.has(self._futuresForCacheKey, cacheKey)) { + var f = new Future; + self._futuresForCacheKey.push(f); + return f.wait(); + } + + var futures = self._futuresForCacheKey[cacheKey] = []; + + try { + var doc = self._mongoConnection.findOne( + collectionName, {_id: id}) || null; + // Return doc to all fibers that are blocking on us. Note that this array + // can continue to grow during calls to Future.return. + while (!_.isEmpty(futures)) { + // Clone the document so that the various calls to fetch don't return + // objects that are intertwingled with each other. Clone before popping + // the future, so that if clone throws, the error gets thrown to the + // next future instead of that fiber hanging. + var clonedDoc = EJSON.clone(doc); + futures.pop().return(clonedDoc); + } + } catch (e) { + while (!_.isEmpty(futures)) { + futures.pop().throw(e); + } + throw e; + } finally { + // XXX consider keeping the doc around for a period of time before + // removing from the cache + delete self._futuresForCacheKey[cacheKey]; + } + + return doc; + } +}); diff --git a/packages/mongo-livedata/id_map.js b/packages/mongo-livedata/id_map.js index 160ee505c4..57ee9cd8d9 100644 --- a/packages/mongo-livedata/id_map.js +++ b/packages/mongo-livedata/id_map.js @@ -1,36 +1,36 @@ IdMap = function () { var self = this; - self.map = {}; + self._map = {}; }; _.extend(IdMap.prototype, { get: function (id) { var self = this; var key = LocalCollection._idStringify(id); - return self.map[key]; + return self._map[key]; }, set: function (id, value) { var self = this; var key = LocalCollection._idStringify(id); - self.map[key] = value; + self._map[key] = value; }, remove: function (id) { var self = this; var key = LocalCollection._idStringify(id); - delete self.map[key]; + delete self._map[key]; }, has: function (id) { var self = this; var key = LocalCollection._idStringify(id); - return _.has(self.map, key); + return _.has(self._map, key); }, // XXX used? 
setDefault: function (id, def) { var self = this; var key = LocalCollection._idStringify(id); - if (_.has(self.map, key)) - return self.map[key]; - self.map[key] = def; + if (_.has(self._map, key)) + return self._map[key]; + self._map[key] = def; return def; } }); diff --git a/packages/mongo-livedata/package.js b/packages/mongo-livedata/package.js index 3827dbb929..5389cd1157 100644 --- a/packages/mongo-livedata/package.js +++ b/packages/mongo-livedata/package.js @@ -38,7 +38,7 @@ Package.on_use(function (api) { // For tests only. api.export('MongoTest', 'server'); - api.add_files('id_map.js', 'server'); + api.add_files(['id_map.js', 'doc_fetcher.js'], 'server'); api.add_files('mongo_driver.js', 'server'); api.add_files('local_collection_driver.js', ['client', 'server']); api.add_files('remote_collection_driver.js', 'server'); From 33528ddbc388e2147a7e7f0089fa8a67253238d0 Mon Sep 17 00:00:00 2001 From: David Glasser Date: Wed, 18 Sep 2013 13:41:24 -0700 Subject: [PATCH 021/190] test doc fetcher --- packages/mongo-livedata/doc_fetcher.js | 2 +- packages/mongo-livedata/doc_fetcher_tests.js | 38 ++++++++++++++++++++ packages/mongo-livedata/mongo_driver.js | 3 +- packages/mongo-livedata/package.js | 3 +- 4 files changed, 43 insertions(+), 3 deletions(-) create mode 100644 packages/mongo-livedata/doc_fetcher_tests.js diff --git a/packages/mongo-livedata/doc_fetcher.js b/packages/mongo-livedata/doc_fetcher.js index 11051b6129..a33eabc755 100644 --- a/packages/mongo-livedata/doc_fetcher.js +++ b/packages/mongo-livedata/doc_fetcher.js @@ -25,7 +25,7 @@ _.extend(DocFetcher.prototype, { // it's done and return whatever it returns. 
if (_.has(self._futuresForCacheKey, cacheKey)) { var f = new Future; - self._futuresForCacheKey.push(f); + self._futuresForCacheKey[cacheKey].push(f); return f.wait(); } diff --git a/packages/mongo-livedata/doc_fetcher_tests.js b/packages/mongo-livedata/doc_fetcher_tests.js new file mode 100644 index 0000000000..cf4e05a8d0 --- /dev/null +++ b/packages/mongo-livedata/doc_fetcher_tests.js @@ -0,0 +1,38 @@ +var Fiber = Npm.require('fibers'); +var Future = Npm.require('fibers/future'); + +Tinytest.add("mongo-livedata - doc fetcher", function (test) { + var collName = "docfetcher-" + Random.id(); + var collection = new Meteor.Collection(collName); + var id1 = collection.insert({x: 1}); + var id2 = collection.insert({y: 2}); + + var fetcher = new MongoTest.DocFetcher( + MongoInternals.defaultRemoteCollectionDriver().mongo); + + // Test basic operation. + test.equal(fetcher.fetch(collName, id1, Random.id()), + {_id: id1, x: 1}); + test.equal(fetcher.fetch(collName, "nonexistent!", Random.id()), null); + + var future = new Future; + var fetched = false; + var cacheKey = Random.id(); + Fiber(function () { + var d = fetcher.fetch(collName, id2, cacheKey); + fetched = true; + future.return(d); + }).run(); + // The fetcher yields: + test.isFalse(fetched); + + // Now ask for another document with the same cache key. Because a fetch for + // that cache key is in flight, we will get the other fetch's document, not + // this random document. 
+ var doc2a = fetcher.fetch(collName, Random.id(), cacheKey); + // Finally, wait for the original fetch to return: + var doc2b = future.wait(); + var expected = {_id: id2, y: 2}; + test.equal(doc2a, expected); + test.equal(doc2b, expected); +}); diff --git a/packages/mongo-livedata/mongo_driver.js b/packages/mongo-livedata/mongo_driver.js index 925cff8b53..19f604b8d0 100644 --- a/packages/mongo-livedata/mongo_driver.js +++ b/packages/mongo-livedata/mongo_driver.js @@ -1617,5 +1617,6 @@ MongoInternals.Connection = MongoConnection; MongoInternals.NpmModule = MongoDB; MongoTest = { - cursorSupportedByOplogTailing: cursorSupportedByOplogTailing + cursorSupportedByOplogTailing: cursorSupportedByOplogTailing, + DocFetcher: DocFetcher }; diff --git a/packages/mongo-livedata/package.js b/packages/mongo-livedata/package.js index 5389cd1157..b9179eb05e 100644 --- a/packages/mongo-livedata/package.js +++ b/packages/mongo-livedata/package.js @@ -36,7 +36,7 @@ Package.on_use(function (api) { // Stuff that should be exposed via a real API, but we haven't yet. api.export('MongoInternals', 'server'); // For tests only. 
- api.export('MongoTest', 'server'); + api.export('MongoTest', 'server', {testOnly: true}); api.add_files(['id_map.js', 'doc_fetcher.js'], 'server'); api.add_files('mongo_driver.js', 'server'); @@ -57,4 +57,5 @@ Package.on_test(function (api) { api.add_files('collection_tests.js', ['client', 'server']); api.add_files('observe_changes_tests.js', ['client', 'server']); api.add_files('oplog_tests.js', 'server'); + api.add_files('doc_fetcher_tests.js', 'server'); }); From 0017eacbe91d776499c19007df858e208bf52b12 Mon Sep 17 00:00:00 2001 From: David Glasser Date: Wed, 18 Sep 2013 13:58:13 -0700 Subject: [PATCH 022/190] use docfetcher --- packages/mongo-livedata/mongo_driver.js | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/packages/mongo-livedata/mongo_driver.js b/packages/mongo-livedata/mongo_driver.js index 19f604b8d0..3e9113d8a9 100644 --- a/packages/mongo-livedata/mongo_driver.js +++ b/packages/mongo-livedata/mongo_driver.js @@ -124,6 +124,7 @@ MongoConnection = function (url, connectionOptions) { }).run(); }); + self._docFetcher = new DocFetcher(self); self._oplogHandle = null; // XXX we should NOT be reading directly from the env here (this should be an // argument to MongoConnection eg) but I want to wait for the AppConfig API to @@ -375,10 +376,10 @@ MongoConnection.prototype._observeChangesWithOplog = function ( if (isModifier) { // XXX problem is, the result of this findOne is delivered at a random // time, not necessarily synced with other stuff that may be coming down - // the oplog. also, we should coalesce multiple pings of the same - // document ("ID queue"). also, we shouldn't read fields that aren't + // the oplog. also, we shouldn't read fields that aren't // necessary to evaluate selector or to publish. 
- newDoc = self.findOne(cursorDescription.collectionName, {_id: id}); + newDoc = self._docFetcher.fetch(cursorDescription.collectionName, id, + op.ts.toString()); } else { newDoc = _.extend({_id: id}, op.o); } From c5c19114ab3d05e571c958a3a6f47eff8b2c8cf5 Mon Sep 17 00:00:00 2001 From: David Glasser Date: Wed, 18 Sep 2013 17:18:34 -0700 Subject: [PATCH 023/190] refactor oplog stuff into its own file --- packages/mongo-livedata/mongo_driver.js | 179 +++--------------------- packages/mongo-livedata/oplog.js | 145 +++++++++++++++++++ packages/mongo-livedata/package.js | 4 +- 3 files changed, 169 insertions(+), 159 deletions(-) create mode 100644 packages/mongo-livedata/oplog.js diff --git a/packages/mongo-livedata/mongo_driver.js b/packages/mongo-livedata/mongo_driver.js index 3e9113d8a9..65d1aa9335 100644 --- a/packages/mongo-livedata/mongo_driver.js +++ b/packages/mongo-livedata/mongo_driver.js @@ -213,6 +213,26 @@ var quotemeta = function (str) { return String(str).replace(/(\W)/g, '\\$1'); }; +// Calls `callback` once the oplog has been processed up to a point that is +// roughly "now". Specifically, it does a dummy write which is then detected +// by the connection's oplog tailer. +// XXX This could be a read instead of a write, getting the last `ts` +// in oplog? +MongoConnection.prototype._callWhenOplogProcessed = function (callback) { + var self = this; + + var sequenceId = nextSequenceId++; + pendingSequences.push({sequenceId: sequenceId, + callback: callback}); + + // Use direct write to Node Mongo driver so we don't end up with recursive + // fence stuff. Need to disable 'safe' because we aren't providing a callback. 
+ var writeCollection = self._getCollection(SEQUENCE_COLLECTION); + writeCollection.update({_id: myServerId}, {$set: {sequence: sequenceId}}, + {upsert: true, safe: false}); +}; + + MongoConnection.prototype._startOplogTailing = function (oplogUrl, dbName) { var self = this; @@ -289,161 +309,6 @@ MongoConnection.prototype._startOplogTailing = function (oplogUrl, dbName) { }; }; -MongoConnection.prototype._observeChangesWithOplog = function ( - cursorDescription, callbacks) { - var self = this; - - // XXX let's do this with race conditions first! - // - // the real way will involve special oplog handling during the initial cursor - // read. specifically: - // - // 1) start reading the oplog. for every document that could conceivably be - // relevant, cache a bit of information about what we saw. (eg, cache - // document for inserts, removal fact for removes, "needs poll" for updates. - // most recent overrides.) - // - // 2) read the initial set and send added messages. - // - // 3) write a sentinel to some field. - // - // 4) wait until that sentinel comes up through the oplog. - // - // 5) use the cached information (compared to what we already know) to send - // messages about things that changed right about then - // - // 6) now that we're in the "steady state", process ops more directly - - // XXX NOW: replace idSet/changedFields with simply currently published - // results, ok??? 
that should simplify things, and allow the implementation of - // "replace" (noodles) - - // XXX DOC: map id -> currently published fields - // (which of course is also the same as what is tracked in merge box, - // ah well) - var published = new IdMap; - - var selector = LocalCollection._compileSelector(cursorDescription.selector); - - // XXX add mutates its argument, which could get confusing - var add = function (doc) { - var id = doc._id; - delete doc._id; - published.set(id, doc); - callbacks.added && callbacks.added(id, doc); - }; - - var remove = function (id) { - published.remove(id); - callbacks.removed && callbacks.removed(id); - }; - - // XXX the ordering here is wrong - var initialCursor = new Cursor(self, cursorDescription); - initialCursor.forEach(function (initialDoc) { - add(initialDoc); - }); - - var oplogHandle = self._oplogHandle.onOplogEntry(cursorDescription.collectionName, function (op) { - var id; - if (op.op === 'd') { - // XXX check that ObjectId works here - id = op.o._id; - if (published.has(id)) - remove(id); - - // XXX this needs to cancel any in-progress "ID lookup" for the document - } else if (op.op === 'i') { - id = op.o._id; - if (published.has(id)) - throw new Error("insert found for already-existing ID"); - - // XXX what if selector yields? for now it can't but later it could have - // $where - if (selector(op.o)) { - add(op.o); - } - } else if (op.op === 'u') { - id = op.o2._id; - - // Is this a modifier ($set/$unset, which may require us to poll the - // database to figure out if the whole document matches the selector) or a - // replacement (in which case we can just directly re-evaluate the - // selector)? - var isModifier = _.has(op.o, '$set') || _.has(op.o, '$unset'); - - var newDoc; - if (isModifier) { - // XXX problem is, the result of this findOne is delivered at a random - // time, not necessarily synced with other stuff that may be coming down - // the oplog. 
also, we shouldn't read fields that aren't - // necessary to evaluate selector or to publish. - newDoc = self._docFetcher.fetch(cursorDescription.collectionName, id, - op.ts.toString()); - } else { - newDoc = _.extend({_id: id}, op.o); - } - - var matchesNow = newDoc && selector(newDoc); - var matchedBefore = published.has(id); - if (matchesNow && !matchedBefore) { - add(newDoc); - } else if (matchedBefore && !matchesNow) { - remove(id); - } else if (matchesNow) { - var oldDoc = published.get(id); - if (!oldDoc) - throw Error("thought that " + id + " was there!"); - delete newDoc._id; - published.set(id, newDoc); - if (callbacks.changed) { - var changed = LocalCollection._makeChangedFields(newDoc, oldDoc); - if (!_.isEmpty(changed)) { - callbacks.changed(id, changed); - } - } - } - } else { - console.log("SURPRISING FOR NOW OPERATION (eg drop collection)", op); - } - }); - - // XXX ordering w.r.t. everything else? - var listenersHandle = listenAll( - cursorDescription, function (notification, complete) { - // If we're not in a write fence, we don't have to do anything. That's - // because - var fence = DDPServer._CurrentWriteFence.get(); - if (!fence) { - complete(); - return; - } - var sequenceId = nextSequenceId++; - var write = fence.beginWrite(); - pendingSequences.push({sequenceId: sequenceId, - callback: function () { - write.committed(); - }}); - - // Use direct write to Node Mongo driver so we don't end up with recursive - // fence stuff. Need to disable 'safe' because we aren't providing a - // callback. 
- var writeCollection = self._getCollection(SEQUENCE_COLLECTION); - writeCollection.update({_id: myServerId}, {$set: {sequence: sequenceId}}, - {upsert: true, safe: false}); - complete(); - } - ); - - var observeHandle = { - stop: function () { - listenersHandle.stop(); - oplogHandle.stop(); - } - }; - return observeHandle; -}; - //////////// Public API ////////// @@ -858,7 +723,7 @@ var CursorDescription = function (collectionName, selector, options) { self.options = options || {}; }; -var Cursor = function (mongo, cursorDescription) { +Cursor = function (mongo, cursorDescription) { var self = this; self._mongo = mongo; @@ -1212,7 +1077,7 @@ MongoConnection.prototype._observeChanges = function ( // here, so that updates to different specific IDs don't cause us to poll. // listenCallback is the same kind of (notification, complete) callback passed // to InvalidationCrossbar.listen. -var listenAll = function (cursorDescription, listenCallback) { +listenAll = function (cursorDescription, listenCallback) { var listeners = []; var listenOnTrigger = function (trigger) { listeners.push(DDPServer._InvalidationCrossbar.listen( diff --git a/packages/mongo-livedata/oplog.js b/packages/mongo-livedata/oplog.js new file mode 100644 index 0000000000..11ef7018db --- /dev/null +++ b/packages/mongo-livedata/oplog.js @@ -0,0 +1,145 @@ +MongoConnection.prototype._observeChangesWithOplog = function ( + cursorDescription, callbacks) { + var self = this; + + // XXX let's do this with race conditions first! + // + // the real way will involve special oplog handling during the initial cursor + // read. specifically: + // + // 1) start reading the oplog. for every document that could conceivably be + // relevant, cache a bit of information about what we saw. (eg, cache + // document for inserts, removal fact for removes, "needs poll" for updates. + // most recent overrides.) + // + // 2) read the initial set and send added messages. + // + // 3) write a sentinel to some field. 
+ // + // 4) wait until that sentinel comes up through the oplog. + // + // 5) use the cached information (compared to what we already know) to send + // messages about things that changed right about then + // + // 6) now that we're in the "steady state", process ops more directly + + // XXX NOW: replace idSet/changedFields with simply currently published + // results, ok??? that should simplify things, and allow the implementation of + // "replace" (noodles) + + // XXX DOC: map id -> currently published fields + // (which of course is also the same as what is tracked in merge box, + // ah well) + var published = new IdMap; + + var selector = LocalCollection._compileSelector(cursorDescription.selector); + + // XXX add mutates its argument, which could get confusing + var add = function (doc) { + var id = doc._id; + delete doc._id; + published.set(id, doc); + callbacks.added && callbacks.added(id, doc); + }; + + var remove = function (id) { + published.remove(id); + callbacks.removed && callbacks.removed(id); + }; + + // XXX the ordering here is wrong + var initialCursor = new Cursor(self, cursorDescription); + initialCursor.forEach(function (initialDoc) { + add(initialDoc); + }); + + var oplogHandle = self._oplogHandle.onOplogEntry(cursorDescription.collectionName, function (op) { + var id; + if (op.op === 'd') { + // XXX check that ObjectId works here + id = op.o._id; + if (published.has(id)) + remove(id); + + // XXX this needs to cancel any in-progress "ID lookup" for the document + } else if (op.op === 'i') { + id = op.o._id; + if (published.has(id)) + throw new Error("insert found for already-existing ID"); + + // XXX what if selector yields? 
for now it can't but later it could have + // $where + if (selector(op.o)) { + add(op.o); + } + } else if (op.op === 'u') { + id = op.o2._id; + + // Is this a modifier ($set/$unset, which may require us to poll the + // database to figure out if the whole document matches the selector) or a + // replacement (in which case we can just directly re-evaluate the + // selector)? + var isModifier = _.has(op.o, '$set') || _.has(op.o, '$unset'); + + var newDoc; + if (isModifier) { + // XXX problem is, the result of this findOne is delivered at a random + // time, not necessarily synced with other stuff that may be coming down + // the oplog. also, we shouldn't read fields that aren't + // necessary to evaluate selector or to publish. + newDoc = self._docFetcher.fetch(cursorDescription.collectionName, id, + op.ts.toString()); + } else { + newDoc = _.extend({_id: id}, op.o); + } + + var matchesNow = newDoc && selector(newDoc); + var matchedBefore = published.has(id); + if (matchesNow && !matchedBefore) { + add(newDoc); + } else if (matchedBefore && !matchesNow) { + remove(id); + } else if (matchesNow) { + var oldDoc = published.get(id); + if (!oldDoc) + throw Error("thought that " + id + " was there!"); + delete newDoc._id; + published.set(id, newDoc); + if (callbacks.changed) { + var changed = LocalCollection._makeChangedFields(newDoc, oldDoc); + if (!_.isEmpty(changed)) { + callbacks.changed(id, changed); + } + } + } + } else { + console.log("SURPRISING FOR NOW OPERATION (eg drop collection)", op); + } + }); + + // XXX ordering w.r.t. everything else? + var listenersHandle = listenAll( + cursorDescription, function (notification, complete) { + // If we're not in a write fence, we don't have to do anything. 
That's + // because + var fence = DDPServer._CurrentWriteFence.get(); + if (!fence) { + complete(); + return; + } + var write = fence.beginWrite(); + self._callWhenOplogProcessed(function () { + write.committed(); + }); + complete(); + } + ); + + var observeHandle = { + stop: function () { + listenersHandle.stop(); + oplogHandle.stop(); + } + }; + return observeHandle; +}; diff --git a/packages/mongo-livedata/package.js b/packages/mongo-livedata/package.js index b9179eb05e..4fbf18d2a4 100644 --- a/packages/mongo-livedata/package.js +++ b/packages/mongo-livedata/package.js @@ -38,8 +38,8 @@ Package.on_use(function (api) { // For tests only. api.export('MongoTest', 'server', {testOnly: true}); - api.add_files(['id_map.js', 'doc_fetcher.js'], 'server'); - api.add_files('mongo_driver.js', 'server'); + api.add_files(['id_map.js', 'doc_fetcher.js', 'mongo_driver.js', + 'oplog.js'], 'server'); api.add_files('local_collection_driver.js', ['client', 'server']); api.add_files('remote_collection_driver.js', 'server'); api.add_files('collection.js', ['client', 'server']); From fbfda21dfbe5778f1e4813e54eac69afb597f67e Mon Sep 17 00:00:00 2001 From: David Glasser Date: Wed, 18 Sep 2013 18:51:35 -0700 Subject: [PATCH 024/190] whoa, we're halfway there --- packages/mongo-livedata/id_map.js | 4 + packages/mongo-livedata/mongo_driver.js | 6 +- packages/mongo-livedata/oplog.js | 139 +++++++++++++----------- 3 files changed, 81 insertions(+), 68 deletions(-) diff --git a/packages/mongo-livedata/id_map.js b/packages/mongo-livedata/id_map.js index 57ee9cd8d9..9510797731 100644 --- a/packages/mongo-livedata/id_map.js +++ b/packages/mongo-livedata/id_map.js @@ -24,6 +24,10 @@ _.extend(IdMap.prototype, { var key = LocalCollection._idStringify(id); return _.has(self._map, key); }, + isEmpty: function () { + var self = this; + return _.isEmpty(self._map); + }, // XXX used? 
setDefault: function (id, def) { var self = this; diff --git a/packages/mongo-livedata/mongo_driver.js b/packages/mongo-livedata/mongo_driver.js index 65d1aa9335..2610ee4fc4 100644 --- a/packages/mongo-livedata/mongo_driver.js +++ b/packages/mongo-livedata/mongo_driver.js @@ -244,10 +244,8 @@ MongoConnection.prototype._startOplogTailing = function (oplogUrl, dbName) { var oplogSelector = { ns: new RegExp('^' + quotemeta(dbName) + '\\.'), - $or: [ - {op: {$in: ['i', 'u', 'd']}}, - {op: 'c', 'o.drop': {$exists: true}} - ] + // XXX also handle drop collection, etc + op: {$in: ['i', 'u', 'd']} }; if (lastOplogEntry) oplogSelector.ts = {$gt: lastOplogEntry.ts}; diff --git a/packages/mongo-livedata/oplog.js b/packages/mongo-livedata/oplog.js index 11ef7018db..f22814fcb5 100644 --- a/packages/mongo-livedata/oplog.js +++ b/packages/mongo-livedata/oplog.js @@ -1,45 +1,39 @@ +var Future = Npm.require('fibers/future'); + +var PHASE = { + INITIALIZING: 1, + FETCHING: 2, + STEADY: 3 +}; + +var idForOp = function (op) { + if (op.op === 'd') + return op.o._id; + else if (op.op === 'i') + return op.o._id; + else if (op.op === 'u') + return op.o2._id; + else + throw Error("Unknown op: " + EJSON.stringify(op)); +}; + MongoConnection.prototype._observeChangesWithOplog = function ( cursorDescription, callbacks) { var self = this; - // XXX let's do this with race conditions first! - // - // the real way will involve special oplog handling during the initial cursor - // read. specifically: - // - // 1) start reading the oplog. for every document that could conceivably be - // relevant, cache a bit of information about what we saw. (eg, cache - // document for inserts, removal fact for removes, "needs poll" for updates. - // most recent overrides.) - // - // 2) read the initial set and send added messages. - // - // 3) write a sentinel to some field. - // - // 4) wait until that sentinel comes up through the oplog. 
- // - // 5) use the cached information (compared to what we already know) to send - // messages about things that changed right about then - // - // 6) now that we're in the "steady state", process ops more directly + var phase = PHASE.INITIALIZING; - // XXX NOW: replace idSet/changedFields with simply currently published - // results, ok??? that should simplify things, and allow the implementation of - // "replace" (noodles) - - // XXX DOC: map id -> currently published fields - // (which of course is also the same as what is tracked in merge box, - // ah well) var published = new IdMap; - var selector = LocalCollection._compileSelector(cursorDescription.selector); - // XXX add mutates its argument, which could get confusing + var curiousity = new IdMap; + var add = function (doc) { var id = doc._id; - delete doc._id; - published.set(id, doc); - callbacks.added && callbacks.added(id, doc); + var fields = EJSON.clone(doc); + delete fields._id; + published.set(id, fields); + callbacks.added && callbacks.added(id, EJSON.clone(fields)); }; var remove = function (id) { @@ -47,53 +41,46 @@ MongoConnection.prototype._observeChangesWithOplog = function ( callbacks.removed && callbacks.removed(id); }; - // XXX the ordering here is wrong - var initialCursor = new Cursor(self, cursorDescription); - initialCursor.forEach(function (initialDoc) { - add(initialDoc); - }); + var beCurious = function () { + throw Error("I AM CURIOUS") + }; - var oplogHandle = self._oplogHandle.onOplogEntry(cursorDescription.collectionName, function (op) { - var id; + var oplogEntryHandlers = {}; + oplogEntryHandlers[PHASE.INITIALIZING] = function (op) { + curiousity.set(idForOp(op), op.ts.toString()); + }; + oplogEntryHandlers[PHASE.FETCHING] = function (op) { + // XXX now + }; + oplogEntryHandlers[PHASE.STEADY] = function (op) { + var id = idForOp(op); if (op.op === 'd') { - // XXX check that ObjectId works here - id = op.o._id; if (published.has(id)) remove(id); - - // XXX this needs to cancel 
any in-progress "ID lookup" for the document } else if (op.op === 'i') { - id = op.o._id; if (published.has(id)) throw new Error("insert found for already-existing ID"); // XXX what if selector yields? for now it can't but later it could have // $where - if (selector(op.o)) { + if (selector(op.o)) add(op.o); - } } else if (op.op === 'u') { - id = op.o2._id; - // Is this a modifier ($set/$unset, which may require us to poll the // database to figure out if the whole document matches the selector) or a // replacement (in which case we can just directly re-evaluate the // selector)? var isModifier = _.has(op.o, '$set') || _.has(op.o, '$unset'); - var newDoc; if (isModifier) { - // XXX problem is, the result of this findOne is delivered at a random - // time, not necessarily synced with other stuff that may be coming down - // the oplog. also, we shouldn't read fields that aren't - // necessary to evaluate selector or to publish. - newDoc = self._docFetcher.fetch(cursorDescription.collectionName, id, - op.ts.toString()); - } else { - newDoc = _.extend({_id: id}, op.o); + curiousity.set(id, op.ts.toString()); + phase = PHASE.FETCHING; + beCurious(); + return; } - var matchesNow = newDoc && selector(newDoc); + var newDoc = _.extend({_id: id}, op.o); + var matchesNow = selector(newDoc); var matchedBefore = published.has(id); if (matchesNow && !matchedBefore) { add(newDoc); @@ -107,15 +94,21 @@ MongoConnection.prototype._observeChangesWithOplog = function ( published.set(id, newDoc); if (callbacks.changed) { var changed = LocalCollection._makeChangedFields(newDoc, oldDoc); - if (!_.isEmpty(changed)) { + if (!_.isEmpty(changed)) callbacks.changed(id, changed); - } } } } else { - console.log("SURPRISING FOR NOW OPERATION (eg drop collection)", op); + throw Error("XXX SURPRISING OPERATION: " + op); } - }); + }; + + + var oplogHandle = self._oplogHandle.onOplogEntry( + cursorDescription.collectionName, function (op) { + oplogEntryHandlers[phase](op); + } + ); // XXX ordering 
w.r.t. everything else? var listenersHandle = listenAll( @@ -135,11 +128,29 @@ MongoConnection.prototype._observeChangesWithOplog = function ( } ); - var observeHandle = { + var initialCursor = new Cursor(self, cursorDescription); + initialCursor.forEach(function (initialDoc) { + add(initialDoc); + }); + + var catchUpFuture = new Future; + self._callWhenOplogProcessed(catchUpFuture.resolver()); + catchUpFuture.wait(); + + if (phase !== PHASE.INITIALIZING) + throw Error("Phase unexpectedly " + phase); + + if (curiousity.isEmpty()) { + phase = PHASE.STEADY; + } else { + phase = PHASE.FETCHING; + Meteor.defer(beCurious); + } + + return { stop: function () { listenersHandle.stop(); oplogHandle.stop(); } }; - return observeHandle; }; From ff000110a01aa72c183f14f739e6e2806f96f2e1 Mon Sep 17 00:00:00 2001 From: David Glasser Date: Thu, 19 Sep 2013 10:23:43 -0700 Subject: [PATCH 025/190] only one mongo-livedata test fails --- packages/mongo-livedata/id_map.js | 11 +++++ packages/mongo-livedata/oplog.js | 79 ++++++++++++++++++++++--------- 2 files changed, 68 insertions(+), 22 deletions(-) diff --git a/packages/mongo-livedata/id_map.js b/packages/mongo-livedata/id_map.js index 9510797731..cf513fa49c 100644 --- a/packages/mongo-livedata/id_map.js +++ b/packages/mongo-livedata/id_map.js @@ -28,6 +28,17 @@ _.extend(IdMap.prototype, { var self = this; return _.isEmpty(self._map); }, + clear: function () { + var self = this; + self._map = {}; + }, + each: function (iterator) { + var self = this; + _.each(self._map, function (value, key, obj) { + var context = this; + iterator.call(context, value, LocalCollection._idParse(key), obj); + }); + }, // XXX used? 
setDefault: function (id, def) { var self = this; diff --git a/packages/mongo-livedata/oplog.js b/packages/mongo-livedata/oplog.js index f22814fcb5..0ffe9739bd 100644 --- a/packages/mongo-livedata/oplog.js +++ b/packages/mongo-livedata/oplog.js @@ -1,3 +1,4 @@ +var Fiber = Npm.require('fibers'); var Future = Npm.require('fibers/future'); var PHASE = { @@ -41,8 +42,57 @@ MongoConnection.prototype._observeChangesWithOplog = function ( callbacks.removed && callbacks.removed(id); }; + // XXX mutates newDoc, that's weird + var handleDoc = function (id, newDoc) { + var matchesNow = newDoc && selector(newDoc); + var matchedBefore = published.has(id); + if (matchesNow && !matchedBefore) { + add(newDoc); + } else if (matchedBefore && !matchesNow) { + remove(id); + } else if (matchesNow) { + var oldDoc = published.get(id); + if (!oldDoc) + throw Error("thought that " + id + " was there!"); + delete newDoc._id; + published.set(id, newDoc); + if (callbacks.changed) { + var changed = LocalCollection._makeChangedFields( + EJSON.clone(newDoc), oldDoc); + if (!_.isEmpty(changed)) + callbacks.changed(id, changed); + } + } + }; + var beCurious = function () { - throw Error("I AM CURIOUS") + phase = PHASE.FETCHING; + while (!curiousity.isEmpty()) { + if (phase !== PHASE.FETCHING) + throw new Error("Surprising phase in beCurious: " + phase); + + var futures = []; + curiousity.each(function (cacheKey, id) { + // Run each until they yield. This implies that curiousity should not be + // updated during this loop. + Fiber(function () { + var f = new Future; + futures.push(f); + var doc = self._docFetcher.fetch(cursorDescription.collectionName, id, + cacheKey); + handleDoc(id, doc); + f.return(); + }).run(); + }); + curiousity.clear(); + Future.wait(futures); + // Throw if any throw. 
+ // XXX this means the observe will now be stalled + _.each(futures, function (f) { + f.get(); + }); + } + phase = PHASE.STEADY; }; var oplogEntryHandlers = {}; @@ -50,7 +100,9 @@ MongoConnection.prototype._observeChangesWithOplog = function ( curiousity.set(idForOp(op), op.ts.toString()); }; oplogEntryHandlers[PHASE.FETCHING] = function (op) { - // XXX now + // XXX we can probably actually handle some operations directly (eg, + // insert/remove/replace if they don't conflict with "outstanding" fetches) + curiousity.set(idForOp(op), op.ts.toString()); }; oplogEntryHandlers[PHASE.STEADY] = function (op) { var id = idForOp(op); @@ -74,30 +126,12 @@ MongoConnection.prototype._observeChangesWithOplog = function ( if (isModifier) { curiousity.set(id, op.ts.toString()); - phase = PHASE.FETCHING; beCurious(); return; } - var newDoc = _.extend({_id: id}, op.o); - var matchesNow = selector(newDoc); - var matchedBefore = published.has(id); - if (matchesNow && !matchedBefore) { - add(newDoc); - } else if (matchedBefore && !matchesNow) { - remove(id); - } else if (matchesNow) { - var oldDoc = published.get(id); - if (!oldDoc) - throw Error("thought that " + id + " was there!"); - delete newDoc._id; - published.set(id, newDoc); - if (callbacks.changed) { - var changed = LocalCollection._makeChangedFields(newDoc, oldDoc); - if (!_.isEmpty(changed)) - callbacks.changed(id, changed); - } - } + + handleDoc(id, newDoc); } else { throw Error("XXX SURPRISING OPERATION: " + op); } @@ -121,6 +155,7 @@ MongoConnection.prototype._observeChangesWithOplog = function ( return; } var write = fence.beginWrite(); + // XXX this also has to wait for steady!!! 
self._callWhenOplogProcessed(function () { write.committed(); }); From ab34a277fc1fdb438f551a5b415ace0b919c1d3d Mon Sep 17 00:00:00 2001 From: David Glasser Date: Thu, 19 Sep 2013 10:26:55 -0700 Subject: [PATCH 026/190] make test do what it says --- packages/mongo-livedata/observe_changes_tests.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/mongo-livedata/observe_changes_tests.js b/packages/mongo-livedata/observe_changes_tests.js index 94efe37af8..2b01302728 100644 --- a/packages/mongo-livedata/observe_changes_tests.js +++ b/packages/mongo-livedata/observe_changes_tests.js @@ -20,7 +20,7 @@ _.each ([{added:'added', forceOrdered: true}, if (forceOrdered) callbacks.push("movedBefore"); withCallbackLogger(test, - [added, "changed", "removed"], + callbacks, Meteor.isServer, function (logger) { var barid = c.insert({thing: "stuff"}); From cd0bdecaecdbccb775d1721bc7115ca8517db086 Mon Sep 17 00:00:00 2001 From: David Glasser Date: Thu, 19 Sep 2013 10:59:11 -0700 Subject: [PATCH 027/190] keep prototype on timestamps when they are cloned (all tests pass) --- packages/mongo-livedata/mongo_driver.js | 25 ++++++++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) diff --git a/packages/mongo-livedata/mongo_driver.js b/packages/mongo-livedata/mongo_driver.js index 2610ee4fc4..aca94e98dc 100644 --- a/packages/mongo-livedata/mongo_driver.js +++ b/packages/mongo-livedata/mongo_driver.js @@ -28,6 +28,14 @@ var replaceNames = function (filter, thing) { return thing; }; +// Ensure that EJSON.clone keeps a Timestamp as a Timestamp (instead of just +// doing a structural clone). +// XXX how ok is this? what if there are multiple copies of MongoDB loaded? +MongoDB.Timestamp.prototype.clone = function () { + // Timestamps should be immutable. 
+ return this; +}; + var makeMongoLegal = function (name) { return "EJSON" + name; }; var unmakeMongoLegal = function (name) { return name.substr(5); }; @@ -42,6 +50,13 @@ var replaceMongoAtomWithMeteor = function (document) { if (document["EJSON$type"] && document["EJSON$value"]) { return EJSON.fromJSONValue(replaceNames(unmakeMongoLegal, document)); } + if (document instanceof MongoDB.Timestamp) { + // For now, the Meteor representation of a Mongo timestamp type (not a date! + // this is a weird internal thing used in the oplog!) is the same as the + // Mongo representation. We need to do this explicitly or else we would do a + // structural clone and lose the prototype. + return document; + } return undefined; }; @@ -54,7 +69,15 @@ var replaceMeteorAtomWithMongo = function (document) { } if (document instanceof Meteor.Collection.ObjectID) { return new MongoDB.ObjectID(document.toHexString()); - } else if (EJSON._isCustomType(document)) { + } + if (document instanceof MongoDB.Timestamp) { + // For now, the Meteor representation of a Mongo timestamp type (not a date! + // this is a weird internal thing used in the oplog!) is the same as the + // Mongo representation. We need to do this explicitly or else we would do a + // structural clone and lose the prototype. 
+ return document; + } + if (EJSON._isCustomType(document)) { return replaceNames(makeMongoLegal, EJSON.toJSONValue(document)); } // It is not ordinarily possible to stick dollar-sign keys into mongo From 40befb802366b8cfe5550746ccd1183d4f74c82b Mon Sep 17 00:00:00 2001 From: David Glasser Date: Fri, 20 Sep 2013 16:20:23 -0700 Subject: [PATCH 028/190] minor improvements --- packages/mongo-livedata/oplog.js | 15 ++++++++++----- tools/mongo_runner.js | 29 ++++++++++++++++------------- 2 files changed, 26 insertions(+), 18 deletions(-) diff --git a/packages/mongo-livedata/oplog.js b/packages/mongo-livedata/oplog.js index 0ffe9739bd..96f70c34d0 100644 --- a/packages/mongo-livedata/oplog.js +++ b/packages/mongo-livedata/oplog.js @@ -13,7 +13,7 @@ var idForOp = function (op) { else if (op.op === 'i') return op.o._id; else if (op.op === 'u') - return op.o2._id; + return op.o2._id; else throw Error("Unknown op: " + EJSON.stringify(op)); }; @@ -27,6 +27,7 @@ MongoConnection.prototype._observeChangesWithOplog = function ( var published = new IdMap; var selector = LocalCollection._compileSelector(cursorDescription.selector); + // XXX eliminate "curious" name var curiousity = new IdMap; var add = function (doc) { @@ -72,7 +73,9 @@ MongoConnection.prototype._observeChangesWithOplog = function ( throw new Error("Surprising phase in beCurious: " + phase); var futures = []; - curiousity.each(function (cacheKey, id) { + var currentlyFetching = curiousity; + curiousity = new IdMap; + currentlyFetching.each(function (cacheKey, id) { // Run each until they yield. This implies that curiousity should not be // updated during this loop. Fiber(function () { @@ -84,7 +87,6 @@ MongoConnection.prototype._observeChangesWithOplog = function ( f.return(); }).run(); }); - curiousity.clear(); Future.wait(futures); // Throw if any throw. 
// XXX this means the observe will now be stalled @@ -125,13 +127,16 @@ MongoConnection.prototype._observeChangesWithOplog = function ( var isModifier = _.has(op.o, '$set') || _.has(op.o, '$unset'); if (isModifier) { + // XXX in many cases, we can just apply the modifier directly (eg, if we + // are already publishing the document). + // XXX for not-currently-published docs, if we can guarantee the + // irrelevance of the change, we can skip it curiousity.set(id, op.ts.toString()); beCurious(); return; } - var newDoc = _.extend({_id: id}, op.o); - handleDoc(id, newDoc); + handleDoc(id, _.extend({_id: id}, op.o)); } else { throw Error("XXX SURPRISING OPERATION: " + op); } diff --git a/tools/mongo_runner.js b/tools/mongo_runner.js index bb901ac202..4581d28c5f 100644 --- a/tools/mongo_runner.js +++ b/tools/mongo_runner.js @@ -229,18 +229,16 @@ exports.launchMongo = function (options) { }; proc.stdout.setEncoding('utf8'); - var readyMessages = 2; - proc.stdout.on('data', function (data) { - // process.stdout.write("MONGO SAYS: " + data); - - if (/ \[rsMgr\] replSet PRIMARY/.test(data)) { - if (--readyMessages === 0) { - if (createReplSet) - fs.writeFileSync(portFile, options.port); - onListen(); - } + var listening = false; + var replSetReady = false; + var maybeCallOnListen = function () { + if (listening && replSetReady) { + if (createReplSet) + fs.writeFileSync(portFile, options.port); + onListen(); } - + }; + proc.stdout.on('data', function (data) { if (/ \[initandlisten\] waiting for connections on port/.test(data)) { if (createReplSet) { // Connect to it and start a replset. 
@@ -261,8 +259,13 @@ exports.launchMongo = function (options) { }); }); } - if (--readyMessages === 0) - onListen(); + listening = true; + maybeCallOnListen(); + } + + if (/ \[rsMgr\] replSet PRIMARY/.test(data)) { + replSetReady = true; + maybeCallOnListen(); } }); }).run(); From 9cbb5946c4f43aa40b5d433645c861a9216a8c3c Mon Sep 17 00:00:00 2001 From: David Glasser Date: Fri, 20 Sep 2013 16:34:07 -0700 Subject: [PATCH 029/190] don't poll the database if we can just execute the modifier directly --- packages/mongo-livedata/oplog.js | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/packages/mongo-livedata/oplog.js b/packages/mongo-livedata/oplog.js index 96f70c34d0..e3ed2841e5 100644 --- a/packages/mongo-livedata/oplog.js +++ b/packages/mongo-livedata/oplog.js @@ -124,19 +124,25 @@ MongoConnection.prototype._observeChangesWithOplog = function ( // database to figure out if the whole document matches the selector) or a // replacement (in which case we can just directly re-evaluate the // selector)? - var isModifier = _.has(op.o, '$set') || _.has(op.o, '$unset'); + var isReplace = !_.has(op.o, '$set') && !_.has(op.o, '$unset'); - if (isModifier) { - // XXX in many cases, we can just apply the modifier directly (eg, if we - // are already publishing the document). + if (isReplace) { + handleDoc(id, _.extend({_id: id}, op.o)); + } else if (published.has(id)) { + // Oh great, we actually know what the document is, so we can apply + // this directly. 
+ // XXX this assumes no field filtering + var newDoc = EJSON.clone(published.get(id)); + newDoc._id = id; + LocalCollection._modify(newDoc, op.o); + handleDoc(id, newDoc); + } else { // XXX for not-currently-published docs, if we can guarantee the // irrelevance of the change, we can skip it curiousity.set(id, op.ts.toString()); beCurious(); return; } - - handleDoc(id, _.extend({_id: id}, op.o)); } else { throw Error("XXX SURPRISING OPERATION: " + op); } From 7f85f38110e2b53347e332e7d2b4af3adbd2d831 Mon Sep 17 00:00:00 2001 From: David Glasser Date: Thu, 3 Oct 2013 14:56:38 -0700 Subject: [PATCH 030/190] Change repl set name from dummy to meteor. This name ends up in the "meteor mongo" prompt, and a prompt of "dummy:PRIMARY>" is way less welcoming than "meteor:PRIMARY>". --- tools/mongo_runner.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/mongo_runner.js b/tools/mongo_runner.js index 4581d28c5f..cc56f4e369 100644 --- a/tools/mongo_runner.js +++ b/tools/mongo_runner.js @@ -197,7 +197,7 @@ exports.launchMongo = function (options) { // Start mongod with a dummy replSet and wait for it to listen. var child_process = require('child_process'); - var replSetName = 'dummy'; + var replSetName = 'meteor'; var proc = child_process.spawn(mongod_path, [ // nb: cli-test.sh and find_mongo_pids assume that the next four arguments // exist in this order without anything in between From dbbda54c682e0952a1f2b1edde36f402d0ca607c Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Thu, 3 Oct 2013 16:12:27 -0700 Subject: [PATCH 031/190] First implementation of _isSelectorAffectedByModifier. 
--- packages/minimongo/selector.js | 37 ++++++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/packages/minimongo/selector.js b/packages/minimongo/selector.js index 3a1e5394a8..4bdead6640 100644 --- a/packages/minimongo/selector.js +++ b/packages/minimongo/selector.js @@ -786,3 +786,40 @@ LocalCollection._compileSort = function (spec, cursor) { }; }; +// Returns true if the modifier applied to some document may change the result +// of matching the document by selector +// The modifier is always in a form of Object: +// - $set +// - 'a.b.22.z': value +// - 'foo.bar': 42 +// - $unset +// - 'abc.d': 1 +LocalCollection._isSelectorAffectedByModifier = function (selector, modifier) { + // safe check for $set/$unset being objects + modifier = _.extend({ $set: {}, $unset: {} }, modifier); + var modifiedPaths = _.keys(modifier.$set).concat(_.keys(modifier.$unset)); + var meaningfulPaths = getPaths(selector); + return _.any(modifiedPaths, function (path) { + path = removeNumericsKeys(path); + return _.any(meaningfulPaths, function (meaningfulPath) { + // It's full prefix + return path.indexOf(meaningfulPath) === 0; + }); + }); + + function removeNumericsKeys (path) { + return _.filter(path.split('.'), isNaN).join('.'); + } + + function getPaths (sel, parentKeys) { + parentKeys = parentKeys || []; + return _.chain(sel).map(function (v, k) { + // we don't know how to handle $where because it can be anything + if (k === "$where") + return ''; // matches everything + if (_.has(LOGICAL_OPERATORS, k)) + return getPaths(v, parentKeys.concat(k)); + return parentKeys.concat(k).join('.'); + }).flatten().uniq().value(); + } +}; From c3c79a94eae44175a96f36dc3b8a05041e47570b Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Thu, 3 Oct 2013 17:28:04 -0700 Subject: [PATCH 032/190] Tests for getPaths for selector and fix for a bug these tests caught --- packages/minimongo/minimongo_tests.js | 34 +++++++++++++++++++++++++++ packages/minimongo/selector.js | 24 
++++++++++--------- 2 files changed, 47 insertions(+), 11 deletions(-) diff --git a/packages/minimongo/minimongo_tests.js b/packages/minimongo/minimongo_tests.js index 3a2d2260eb..5b3e1a5dd0 100644 --- a/packages/minimongo/minimongo_tests.js +++ b/packages/minimongo/minimongo_tests.js @@ -2380,3 +2380,37 @@ Tinytest.add("minimongo - $near operator tests", function (test) { }); }); +Tinytest.add("minimongo - modifier affects selector", function (test) { + function testSelectorPaths (sel, paths, desc) { + test.isTrue(_.isEqual(LocalCollection._getSelectorPaths(sel), paths), desc); + } + + testSelectorPaths({ + foo: { + bar: 3, + baz: 42 + } + }, ['foo'], "literal"); + + testSelectorPaths({ + foo: 42, + bar: 33 + }, ['foo', 'bar'], "literal"); + + testSelectorPaths({ + foo: [ 'something' ], + bar: "asdf" + }, ['foo', 'bar'], "literal"); + + testSelectorPaths({ + a: { $lt: 3 }, + b: "you know, literal", + 'path.is.complicated': { $not: { $regex: 'acme.*corp' } } + }, ['a', 'b', 'path.is.complicated'], "literal + operators"); + + testSelectorPaths({ + $or: [{ 'a.b': 1 }, { 'a.b.c': { $lt: 22 } }, + {$and: [{ 'x.d': { $ne: 5, $gte: 433 } }, { 'a.b': 234 }]}] + }, ['a.b', 'a.b.c', 'x.d'], 'group operators + duplicates'); +}); + diff --git a/packages/minimongo/selector.js b/packages/minimongo/selector.js index 4bdead6640..74729b7152 100644 --- a/packages/minimongo/selector.js +++ b/packages/minimongo/selector.js @@ -811,15 +811,17 @@ LocalCollection._isSelectorAffectedByModifier = function (selector, modifier) { return _.filter(path.split('.'), isNaN).join('.'); } - function getPaths (sel, parentKeys) { - parentKeys = parentKeys || []; - return _.chain(sel).map(function (v, k) { - // we don't know how to handle $where because it can be anything - if (k === "$where") - return ''; // matches everything - if (_.has(LOGICAL_OPERATORS, k)) - return getPaths(v, parentKeys.concat(k)); - return parentKeys.concat(k).join('.'); - }).flatten().uniq().value(); - } +}; + +// Returns a 
list of key paths the given selector is looking for +var getPaths = LocalCollection._getSelectorPaths = function (sel, parentKeys) { + parentKeys = parentKeys || []; + return _.chain(sel).map(function (v, k) { + // we don't know how to handle $where because it can be anything + if (k === "$where") + return ''; // matches everything + if (_.has(LOGICAL_OPERATORS, k)) + return _.map(v, function (x) { return getPaths(x, parentKeys); }); + return parentKeys.concat(k).join('.'); + }).flatten().uniq().value(); }; From 480c353c65dd3e1c5516ec8b1967ac22b481f25e Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Thu, 3 Oct 2013 17:43:49 -0700 Subject: [PATCH 033/190] Some simplest tests for isSelectorAffectedByModifier and some tests fail :( --- packages/minimongo/minimongo_tests.js | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/packages/minimongo/minimongo_tests.js b/packages/minimongo/minimongo_tests.js index 5b3e1a5dd0..8288bc8baa 100644 --- a/packages/minimongo/minimongo_tests.js +++ b/packages/minimongo/minimongo_tests.js @@ -2412,5 +2412,26 @@ Tinytest.add("minimongo - modifier affects selector", function (test) { $or: [{ 'a.b': 1 }, { 'a.b.c': { $lt: 22 } }, {$and: [{ 'x.d': { $ne: 5, $gte: 433 } }, { 'a.b': 234 }]}] }, ['a.b', 'a.b.c', 'x.d'], 'group operators + duplicates'); + + function testSelectorAffectedByModifier (sel, mod, yes, desc) { + if (yes) + test.isTrue(LocalCollection._isSelectorAffectedByModifier(sel, mod, desc)); + else + test.isFalse(LocalCollection._isSelectorAffectedByModifier(sel, mod, desc)); + } + + function affected(sel, mod, desc) { + testSelectorAffectedByModifier(sel, mod, 1, desc); + } + function notAffected(sel, mod, desc) { + testSelectorAffectedByModifier(sel, mod, 0, desc); + } + + notAffected({ foo: 0 }, { $set: { bar: 1 } }, "simplest"); + affected({ foo: 0 }, { $set: { foo: 1 } }, "simplest"); + affected({ foo: 0 }, { $set: { 'foo.bar': 1 } }, "simplest"); + notAffected({ 'foo.bar': 0 }, { $set: { 'foo.baz': 1 
} }, "simplest"); + affected({ 'foo.bar': 0 }, { $set: { 'foo.1': 1 } }, "simplest"); + affected({ 'foo.bar': 0 }, { $set: { 'foo.2.bar': 1 } }, "simplest"); }); From 11bf813f90c0629900bc44e2a959b333fd9214cf Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Thu, 3 Oct 2013 19:38:33 -0700 Subject: [PATCH 034/190] If foo changes, than foo.bar observation result may change. --- packages/minimongo/selector.js | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/packages/minimongo/selector.js b/packages/minimongo/selector.js index 74729b7152..a49385127b 100644 --- a/packages/minimongo/selector.js +++ b/packages/minimongo/selector.js @@ -803,7 +803,8 @@ LocalCollection._isSelectorAffectedByModifier = function (selector, modifier) { path = removeNumericsKeys(path); return _.any(meaningfulPaths, function (meaningfulPath) { // It's full prefix - return path.indexOf(meaningfulPath) === 0; + return path.indexOf(meaningfulPath) === 0 + || meaningfulPath.indexOf(path) === 0; }); }); From a8201cc17e64f083bd35a4e53c05b9c4dff03c48 Mon Sep 17 00:00:00 2001 From: David Glasser Date: Mon, 7 Oct 2013 18:09:53 -0700 Subject: [PATCH 035/190] Block on first call to observeChanges if there is an oplog handle that is still initializing. --- packages/mongo-livedata/mongo_driver.js | 145 ++++++++++++++---------- 1 file changed, 83 insertions(+), 62 deletions(-) diff --git a/packages/mongo-livedata/mongo_driver.js b/packages/mongo-livedata/mongo_driver.js index aca94e98dc..815c4a1636 100644 --- a/packages/mongo-livedata/mongo_driver.js +++ b/packages/mongo-livedata/mongo_driver.js @@ -154,10 +154,7 @@ MongoConnection = function (url, connectionOptions) { // settle a little before thinking too hard about this if (process.env.XXX_OPLOG_URL && !connectionOptions.isOplog) { var dbName = Npm.require('url').parse(url).pathname.substr(1); - // Defer this, because it blocks. If we start observing cursors before the - // oplog handle is ready, they just don't get to use the oplog. 
- Meteor.defer(_.bind(self._startOplogTailing, - self, process.env.XXX_OPLOG_URL, dbName)); + self._startOplogTailing(process.env.XXX_OPLOG_URL, dbName); } }; @@ -259,75 +256,99 @@ MongoConnection.prototype._callWhenOplogProcessed = function (callback) { MongoConnection.prototype._startOplogTailing = function (oplogUrl, dbName) { var self = this; - var oplogConnection = new MongoConnection(oplogUrl, {isOplog: true}); - // Find the last oplog entry. Blocks until the connection is ready. - - var lastOplogEntry = oplogConnection.findOne( - OPLOG_COLLECTION, {}, {sort: {$natural: -1}}); - - var oplogSelector = { - ns: new RegExp('^' + quotemeta(dbName) + '\\.'), - // XXX also handle drop collection, etc - op: {$in: ['i', 'u', 'd']} - }; - if (lastOplogEntry) - oplogSelector.ts = {$gt: lastOplogEntry.ts}; - - var cursorDescription = new CursorDescription( - OPLOG_COLLECTION, oplogSelector, {tailable: true}); - + var stopped = false; + var tailHandle = null; + var readyFuture = new Future(); + var nextId = 0; var callbacksByCollection = {}; - var processSequence = function (doc) { - if (doc.op !== 'i' && doc.op !== 'u') - return; - var serverId = (doc.op === 'i' ? doc.o._id : doc.o2._id); - if (serverId !== myServerId) - return; - var sequenceId = - (doc.op === 'i' ? doc.o.sequence : (doc.o.$set && doc.o.$set.sequence)); - if (typeof sequenceId !== 'number') - return; - // Process all sequence points up to this point. - while (!_.isEmpty(pendingSequences) - && pendingSequences[0].sequenceId <= sequenceId) { - var sequence = pendingSequences.shift(); - sequence.callback(); + self._oplogHandle = { + stop: function () { + if (stopped) + return; + stopped = true; + if (tailHandle) + tailHandle.stop(); + }, + + onOplogEntry: function (collectionName, callback) { + if (stopped) + throw new Error("Called onOplogEntry on stopped handle!"); + + // Calling onOplogEntry requires us to wait for the tailing to be ready. 
+ readyFuture.wait(); + + callback = Meteor.bindEnvironment(callback, function (err) { + Meteor._debug("Error in oplog callback", err.stack); + }); + if (!_.has(callbacksByCollection, collectionName)) + callbacksByCollection[collectionName] = {}; + var callbackId = nextId++; + callbacksByCollection[collectionName][callbackId] = callback; + return { + stop: function () { + delete callbacksByCollection[collectionName][callbackId]; + } + }; } }; - self._oplogHandle = oplogConnection.tail(cursorDescription, function (doc) { - if (!doc.ns && doc.ns.length > dbName.length + 1 && - doc.ns.substr(0, dbName.length + 1) === (dbName + '.')) - throw new Error("Unexpected ns"); + // Actually setting up the connection and tail blocks, so we do it "later". + Meteor.defer(function () { + var oplogConnection = new MongoConnection(oplogUrl, {isOplog: true}); - var collectionName = doc.ns.substr(dbName.length + 1); + // Find the last oplog entry. Blocks until the connection is ready. + var lastOplogEntry = oplogConnection.findOne( + OPLOG_COLLECTION, {}, {sort: {$natural: -1}}); - if (collectionName === SEQUENCE_COLLECTION) { - processSequence(doc); - return; - } + var oplogSelector = { + ns: new RegExp('^' + quotemeta(dbName) + '\\.'), + // XXX also handle drop collection, etc + op: {$in: ['i', 'u', 'd']} + }; + if (lastOplogEntry) + oplogSelector.ts = {$gt: lastOplogEntry.ts}; - _.each(callbacksByCollection[collectionName], function (callback) { - callback(EJSON.clone(doc)); - }); - }); + var cursorDescription = new CursorDescription( + OPLOG_COLLECTION, oplogSelector, {tailable: true}); - var nextId = 0; - self._oplogHandle.onOplogEntry = function (collectionName, callback) { - callback = Meteor.bindEnvironment(callback, function (err) { - Meteor._debug("Error in oplog callback", err.stack); - }); - if (!_.has(callbacksByCollection, collectionName)) - callbacksByCollection[collectionName] = {}; - var callbackId = nextId++; - callbacksByCollection[collectionName][callbackId] = 
callback; - return { - stop: function () { - delete callbacksByCollection[collectionName][callbackId]; + var processSequence = function (doc) { + if (doc.op !== 'i' && doc.op !== 'u') + return; + var serverId = (doc.op === 'i' ? doc.o._id : doc.o2._id); + if (serverId !== myServerId) + return; + var sequenceId = + (doc.op === 'i' ? doc.o.sequence : + (doc.o.$set && doc.o.$set.sequence)); + if (typeof sequenceId !== 'number') + return; + // Process all sequence points up to this point. + while (!_.isEmpty(pendingSequences) + && pendingSequences[0].sequenceId <= sequenceId) { + var sequence = pendingSequences.shift(); + sequence.callback(); } }; - }; + + tailHandle = oplogConnection.tail(cursorDescription, function (doc) { + if (!doc.ns && doc.ns.length > dbName.length + 1 && + doc.ns.substr(0, dbName.length + 1) === (dbName + '.')) + throw new Error("Unexpected ns"); + + var collectionName = doc.ns.substr(dbName.length + 1); + + if (collectionName === SEQUENCE_COLLECTION) { + processSequence(doc); + return; + } + + _.each(callbacksByCollection[collectionName], function (callback) { + callback(EJSON.clone(doc)); + }); + }); + readyFuture.return(); + }); }; From 441279eb131806bd01e7a816cb52c6c9604800b8 Mon Sep 17 00:00:00 2001 From: David Glasser Date: Mon, 7 Oct 2013 18:13:17 -0700 Subject: [PATCH 036/190] rename: 'options' should always be *our* options --- packages/mongo-livedata/mongo_driver.js | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/packages/mongo-livedata/mongo_driver.js b/packages/mongo-livedata/mongo_driver.js index 815c4a1636..2ccbc154a3 100644 --- a/packages/mongo-livedata/mongo_driver.js +++ b/packages/mongo-livedata/mongo_driver.js @@ -107,19 +107,19 @@ var replaceTypes = function (document, atomTransformer) { }; -MongoConnection = function (url, connectionOptions) { +MongoConnection = function (url, options) { var self = this; - connectionOptions = connectionOptions || {}; + options = options || {}; 
self._connectCallbacks = []; self._liveResultsSets = {}; - var options = {db: {safe: true}}; + var mongoOptions = {db: {safe: true}}; // Set autoReconnect to true, unless passed on the URL. Why someone // would want to set autoReconnect to false, I'm not really sure, but // keeping this for backwards compatibility for now. if (!(/[\?&]auto_?[rR]econnect=/.test(url))) { - options.server = {auto_reconnect: true}; + mongoOptions.server = {auto_reconnect: true}; } // Disable the native parser by default, unless specifically enabled @@ -131,10 +131,10 @@ MongoConnection = function (url, connectionOptions) { // to a different platform (aka deploy) // We should revisit this after binary npm module support lands. if (!(/[\?&]native_?[pP]arser=/.test(url))) { - options.db.native_parser = false; + mongoOptions.db.native_parser = false; } - MongoDB.connect(url, options, function(err, db) { + MongoDB.connect(url, mongoOptions, function(err, db) { if (err) throw err; self.db = db; @@ -152,7 +152,7 @@ MongoConnection = function (url, connectionOptions) { // XXX we should NOT be reading directly from the env here (this should be an // argument to MongoConnection eg) but I want to wait for the AppConfig API to // settle a little before thinking too hard about this - if (process.env.XXX_OPLOG_URL && !connectionOptions.isOplog) { + if (process.env.XXX_OPLOG_URL && !options.isOplog) { var dbName = Npm.require('url').parse(url).pathname.substr(1); self._startOplogTailing(process.env.XXX_OPLOG_URL, dbName); } From 7d1ab347886b7ca6509aecd7eb4365cbcfb2faf8 Mon Sep 17 00:00:00 2001 From: David Glasser Date: Mon, 7 Oct 2013 18:17:09 -0700 Subject: [PATCH 037/190] Refactor MongoConnection options: oplogUrl, not isOplog Move hacky use of env var to a slightly more appropriate place --- packages/mongo-livedata/mongo_driver.js | 11 +++++------ packages/mongo-livedata/remote_collection_driver.js | 13 ++++++++++--- 2 files changed, 15 insertions(+), 9 deletions(-) diff --git 
a/packages/mongo-livedata/mongo_driver.js b/packages/mongo-livedata/mongo_driver.js index 2ccbc154a3..aaa9bc0b6c 100644 --- a/packages/mongo-livedata/mongo_driver.js +++ b/packages/mongo-livedata/mongo_driver.js @@ -149,12 +149,11 @@ MongoConnection = function (url, options) { self._docFetcher = new DocFetcher(self); self._oplogHandle = null; - // XXX we should NOT be reading directly from the env here (this should be an - // argument to MongoConnection eg) but I want to wait for the AppConfig API to - // settle a little before thinking too hard about this - if (process.env.XXX_OPLOG_URL && !options.isOplog) { + + if (options.oplogUrl) { + // XXX this parse fails on mongo URLs with commas! var dbName = Npm.require('url').parse(url).pathname.substr(1); - self._startOplogTailing(process.env.XXX_OPLOG_URL, dbName); + self._startOplogTailing(options.oplogUrl, dbName); } }; @@ -295,7 +294,7 @@ MongoConnection.prototype._startOplogTailing = function (oplogUrl, dbName) { // Actually setting up the connection and tail blocks, so we do it "later". Meteor.defer(function () { - var oplogConnection = new MongoConnection(oplogUrl, {isOplog: true}); + var oplogConnection = new MongoConnection(oplogUrl); // Find the last oplog entry. Blocks until the connection is ready. 
var lastOplogEntry = oplogConnection.findOne( diff --git a/packages/mongo-livedata/remote_collection_driver.js b/packages/mongo-livedata/remote_collection_driver.js index 41502c17eb..552974aa5d 100644 --- a/packages/mongo-livedata/remote_collection_driver.js +++ b/packages/mongo-livedata/remote_collection_driver.js @@ -1,6 +1,7 @@ -MongoInternals.RemoteCollectionDriver = function (mongo_url) { +MongoInternals.RemoteCollectionDriver = function ( + mongo_url, options) { var self = this; - self.mongo = new MongoConnection(mongo_url); + self.mongo = new MongoConnection(mongo_url, options); }; _.extend(MongoInternals.RemoteCollectionDriver.prototype, { @@ -32,5 +33,11 @@ MongoInternals.defaultRemoteCollectionDriver = _.once(function () { if (! mongoUrl) throw new Error("MONGO_URL must be set in environment"); - return new MongoInternals.RemoteCollectionDriver(mongoUrl); + var connectionOptions = {}; + // XXX we should NOT be reading directly from the env here; need to consult + // with naomi re: AppConfig + if (process.env.XXX_OPLOG_URL) + connectionOptions.oplogUrl = process.env.XXX_OPLOG_URL; + + return new MongoInternals.RemoteCollectionDriver(mongoUrl, connectionOptions); }); From badf20c35aeda2eeec20306fbfa76993a65e80a9 Mon Sep 17 00:00:00 2001 From: David Glasser Date: Mon, 7 Oct 2013 18:19:10 -0700 Subject: [PATCH 038/190] LICENSE for quotemeta, add comment about duplication --- LICENSE.txt | 1 + packages/mongo-livedata/mongo_driver.js | 1 + 2 files changed, 2 insertions(+) diff --git a/LICENSE.txt b/LICENSE.txt index 8facf5235d..67bba7d36c 100644 --- a/LICENSE.txt +++ b/LICENSE.txt @@ -288,6 +288,7 @@ shell-quote: https://github.com/substack/node-shell-quote deep-equal: https://github.com/substack/node-deep-equal editor: https://github.com/substack/node-editor minimist: https://github.com/substack/node-minimist +quotemeta: https://github.com/substack/quotemeta ---------- Copyright 2010, 2011, 2012, 2013 James Halliday (mail@substack.net) diff --git 
a/packages/mongo-livedata/mongo_driver.js b/packages/mongo-livedata/mongo_driver.js index aaa9bc0b6c..548856a41e 100644 --- a/packages/mongo-livedata/mongo_driver.js +++ b/packages/mongo-livedata/mongo_driver.js @@ -228,6 +228,7 @@ var pendingSequences = []; // Like Perl's quotemeta: quotes all regexp metacharacters. See // https://github.com/substack/quotemeta/blob/master/index.js +// XXX this is duplicated with accounts_server.js var quotemeta = function (str) { return String(str).replace(/(\W)/g, '\\$1'); }; From 16ac0d7ba27756b3d3978b006db42c3658baf317 Mon Sep 17 00:00:00 2001 From: David Glasser Date: Mon, 7 Oct 2013 18:20:47 -0700 Subject: [PATCH 039/190] Fix bad error check. --- packages/mongo-livedata/mongo_driver.js | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/mongo-livedata/mongo_driver.js b/packages/mongo-livedata/mongo_driver.js index 548856a41e..cffb9099ac 100644 --- a/packages/mongo-livedata/mongo_driver.js +++ b/packages/mongo-livedata/mongo_driver.js @@ -332,8 +332,8 @@ MongoConnection.prototype._startOplogTailing = function (oplogUrl, dbName) { }; tailHandle = oplogConnection.tail(cursorDescription, function (doc) { - if (!doc.ns && doc.ns.length > dbName.length + 1 && - doc.ns.substr(0, dbName.length + 1) === (dbName + '.')) + if (!(doc.ns && doc.ns.length > dbName.length + 1 && + doc.ns.substr(0, dbName.length + 1) === (dbName + '.'))) throw new Error("Unexpected ns"); var collectionName = doc.ns.substr(dbName.length + 1); From 10f3e9c90ffa589870f371d0efa8e09f204e59df Mon Sep 17 00:00:00 2001 From: David Glasser Date: Mon, 7 Oct 2013 20:51:47 -0700 Subject: [PATCH 040/190] Add a comment about a non-redundant stopped check. 
--- packages/mongo-livedata/mongo_driver.js | 2 ++ 1 file changed, 2 insertions(+) diff --git a/packages/mongo-livedata/mongo_driver.js b/packages/mongo-livedata/mongo_driver.js index cffb9099ac..86d58ffb48 100644 --- a/packages/mongo-livedata/mongo_driver.js +++ b/packages/mongo-livedata/mongo_driver.js @@ -1004,6 +1004,8 @@ MongoConnection.prototype.tail = function (cursorDescription, docCallback) { // (unless the failure was because the observe got stopped). doc = null; } + // Since cursor._nextObject can yield, we need to check again to see if + // we've been stopped before calling the callback. if (stopped) return; if (doc) { From df73dca22293c06ba28fbce36a3ce511b72f9b6a Mon Sep 17 00:00:00 2001 From: David Glasser Date: Mon, 7 Oct 2013 20:56:20 -0700 Subject: [PATCH 041/190] Make a comment about something important into an XXX comment. --- packages/mongo-livedata/mongo_driver.js | 1 + 1 file changed, 1 insertion(+) diff --git a/packages/mongo-livedata/mongo_driver.js b/packages/mongo-livedata/mongo_driver.js index 86d58ffb48..79611a3385 100644 --- a/packages/mongo-livedata/mongo_driver.js +++ b/packages/mongo-livedata/mongo_driver.js @@ -1490,6 +1490,7 @@ var cursorSupportedByOplogTailing = function (cursorDescription) { // We don't yet implement field filtering for oplog tailing (just because it's // not implemented, not because there's a deep problem with implementing it). + // XXX Implementing field filtering should be a priority. if (options.fields) return false; // This option (which are mostly used for sorted cursors) require us to figure From 2dc25d77aed7b759ebf76a2e1adbac300f936577 Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Wed, 9 Oct 2013 15:02:02 -0700 Subject: [PATCH 042/190] Fix prefix check. 
Remove all numeric keys from sel --- packages/minimongo/minimongo_tests.js | 12 ++++++++++++ packages/minimongo/selector.js | 11 ++++++++--- 2 files changed, 20 insertions(+), 3 deletions(-) diff --git a/packages/minimongo/minimongo_tests.js b/packages/minimongo/minimongo_tests.js index 8288bc8baa..1899f87501 100644 --- a/packages/minimongo/minimongo_tests.js +++ b/packages/minimongo/minimongo_tests.js @@ -2433,5 +2433,17 @@ Tinytest.add("minimongo - modifier affects selector", function (test) { notAffected({ 'foo.bar': 0 }, { $set: { 'foo.baz': 1 } }, "simplest"); affected({ 'foo.bar': 0 }, { $set: { 'foo.1': 1 } }, "simplest"); affected({ 'foo.bar': 0 }, { $set: { 'foo.2.bar': 1 } }, "simplest"); + + notAffected({ 'foo': 0 }, { $set: { 'foobaz': 1 } }, "correct prefix check"); + notAffected({ 'foobar': 0 }, { $unset: { 'foo': 1 } }, "correct prefix check"); + notAffected({ 'foo.bar': 0 }, { $unset: { 'foob': 1 } }, "correct prefix check"); + + // XXX once we consider all the array/non-array operators separately, this + // should become notAffected. Until then it's fine to let it "match" and + // affect. 
+ //notAffected({ 'foo.3.bar': 0 }, { $set: { 'foo.2.bar': 1 } }, "observe for an array element"); + affected({ 'foo.3.bar': 0 }, { $set: { 'foo.2.bar': 1 } }, "observe for an array element"); + + affected({ 'foo.3.bar': 0 }, { $set: { 'foo.3.bar': 1 } }, "observe for an array element"); }); diff --git a/packages/minimongo/selector.js b/packages/minimongo/selector.js index a49385127b..b37b2cd2e7 100644 --- a/packages/minimongo/selector.js +++ b/packages/minimongo/selector.js @@ -802,9 +802,9 @@ LocalCollection._isSelectorAffectedByModifier = function (selector, modifier) { return _.any(modifiedPaths, function (path) { path = removeNumericsKeys(path); return _.any(meaningfulPaths, function (meaningfulPath) { - // It's full prefix - return path.indexOf(meaningfulPath) === 0 - || meaningfulPath.indexOf(path) === 0; + meaningfulPath = removeNumericsKeys(meaningfulPath); + return isPathPrefix(path, meaningfulPath) + || isPathPrefix(meaningfulPath, path); }); }); @@ -812,6 +812,11 @@ LocalCollection._isSelectorAffectedByModifier = function (selector, modifier) { return _.filter(path.split('.'), isNaN).join('.'); } + function isPathPrefix (s, t) { + var pos = t.indexOf(s); + return pos === 0 + && (pos + s.length === t.length || t[pos + s.length] === '.'); + } }; // Returns a list of key paths the given selector is looking for From a75470bcf310890c2d6edbf8c2720763ed4feac4 Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Wed, 9 Oct 2013 15:06:55 -0700 Subject: [PATCH 043/190] Fix the way we remove integer fields --- packages/minimongo/minimongo_tests.js | 3 +++ packages/minimongo/selector.js | 7 ++++++- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/packages/minimongo/minimongo_tests.js b/packages/minimongo/minimongo_tests.js index 1899f87501..e553a0af4a 100644 --- a/packages/minimongo/minimongo_tests.js +++ b/packages/minimongo/minimongo_tests.js @@ -2438,6 +2438,9 @@ Tinytest.add("minimongo - modifier affects selector", function (test) { notAffected({ 
'foobar': 0 }, { $unset: { 'foo': 1 } }, "correct prefix check"); notAffected({ 'foo.bar': 0 }, { $unset: { 'foob': 1 } }, "correct prefix check"); + notAffected({ 'foo.Infinity.x': 0 }, { $unset: { 'foo.x': 1 } }, "we convert integer fields correctly"); + notAffected({ 'foo.1e3.x': 0 }, { $unset: { 'foo.x': 1 } }, "we convert integer fields correctly"); + // XXX once we consider all the array/non-array operators separately, this // should become notAffected. Until then it's fine to let it "match" and // affect. diff --git a/packages/minimongo/selector.js b/packages/minimongo/selector.js index b37b2cd2e7..c227b2009a 100644 --- a/packages/minimongo/selector.js +++ b/packages/minimongo/selector.js @@ -809,7 +809,7 @@ LocalCollection._isSelectorAffectedByModifier = function (selector, modifier) { }); function removeNumericsKeys (path) { - return _.filter(path.split('.'), isNaN).join('.'); + return _.filter(path.split('.'), notNumber).join('.'); } function isPathPrefix (s, t) { @@ -817,6 +817,11 @@ LocalCollection._isSelectorAffectedByModifier = function (selector, modifier) { return pos === 0 && (pos + s.length === t.length || t[pos + s.length] === '.'); } + + // returns true if string can't be converted to integer + function notNumber (s) { + return !/^[0-9]+$/.test(s); + } }; // Returns a list of key paths the given selector is looking for From b5d9df66c1a265b64d49f3c16367a7dbd76b7bee Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Wed, 9 Oct 2013 15:24:00 -0700 Subject: [PATCH 044/190] Tests for "observe array element" case ex.: collection.find({ 'foo.0.bar': 2 }) // => find docs with bar of first element of array foo being 2 --- packages/minimongo/minimongo_tests.js | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/packages/minimongo/minimongo_tests.js b/packages/minimongo/minimongo_tests.js index e553a0af4a..8996efc487 100644 --- a/packages/minimongo/minimongo_tests.js +++ b/packages/minimongo/minimongo_tests.js @@ -2441,12 +2441,11 @@ 
Tinytest.add("minimongo - modifier affects selector", function (test) { notAffected({ 'foo.Infinity.x': 0 }, { $unset: { 'foo.x': 1 } }, "we convert integer fields correctly"); notAffected({ 'foo.1e3.x': 0 }, { $unset: { 'foo.x': 1 } }, "we convert integer fields correctly"); - // XXX once we consider all the array/non-array operators separately, this - // should become notAffected. Until then it's fine to let it "match" and - // affect. - //notAffected({ 'foo.3.bar': 0 }, { $set: { 'foo.2.bar': 1 } }, "observe for an array element"); - affected({ 'foo.3.bar': 0 }, { $set: { 'foo.2.bar': 1 } }, "observe for an array element"); - affected({ 'foo.3.bar': 0 }, { $set: { 'foo.3.bar': 1 } }, "observe for an array element"); + + notAffected({ 'foo.4.bar.baz': 0 }, { $unset: { 'foo.3.bar': 1 } }, "delicate work with numeric fields in selector"); + notAffected({ 'foo.4.bar.baz': 0 }, { $unset: { 'foo.bar': 1 } }, "delicate work with numeric fields in selector"); + affected({ 'foo.4.bar.baz': 0 }, { $unset: { 'foo.4.bar': 1 } }, "delicate work with numeric fields in selector"); + affected({ 'foo.bar.baz': 0 }, { $unset: { 'foo.3.bar': 1 } }, "delicate work with numeric fields in selector"); }); From 1c17d8af44819e77c22c7e4faa170ea2ac59a237 Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Wed, 9 Oct 2013 15:52:38 -0700 Subject: [PATCH 045/190] More work with numeric keys to be more precise answering if modifier affects selector. 
--- packages/minimongo/selector.js | 45 +++++++++++++++++++++------------- 1 file changed, 28 insertions(+), 17 deletions(-) diff --git a/packages/minimongo/selector.js b/packages/minimongo/selector.js index c227b2009a..4887bbcf9e 100644 --- a/packages/minimongo/selector.js +++ b/packages/minimongo/selector.js @@ -800,27 +800,38 @@ LocalCollection._isSelectorAffectedByModifier = function (selector, modifier) { var modifiedPaths = _.keys(modifier.$set).concat(_.keys(modifier.$unset)); var meaningfulPaths = getPaths(selector); return _.any(modifiedPaths, function (path) { - path = removeNumericsKeys(path); + var mod = path.split('.'); return _.any(meaningfulPaths, function (meaningfulPath) { - meaningfulPath = removeNumericsKeys(meaningfulPath); - return isPathPrefix(path, meaningfulPath) - || isPathPrefix(meaningfulPath, path); + var sel = meaningfulPath.split('.'); + var i = 0, j = 0; + + while (i < sel.length && j < mod.length) { + if (numericKey(sel[i]) && numericKey(mod[j])) { + // foo.4.bar, foo.4 => good + // foo.3.bar, foo.4 => bad + if (sel[i] == mod[j]) + i++, j++; + else + return false; + } else if (numericKey(sel[i])) { + // foo.4.bar, foo.bar => bad + return false; + } else if (numericKey(mod[j])) { + j++; + } else if (sel[i] == mod[j]) + i++, j++; + else + return false; + } + + // One is a prefix of another, taking numeric fields into account + return true; }); }); - function removeNumericsKeys (path) { - return _.filter(path.split('.'), notNumber).join('.'); - } - - function isPathPrefix (s, t) { - var pos = t.indexOf(s); - return pos === 0 - && (pos + s.length === t.length || t[pos + s.length] === '.'); - } - - // returns true if string can't be converted to integer - function notNumber (s) { - return !/^[0-9]+$/.test(s); + // string can be converted to integer + function numericKey (s) { + return /^[0-9]+$/.test(s); } }; From 7fc451da3ec1f8bbef33169182ae8e8b5bc49809 Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Thu, 10 Oct 2013 13:54:56 -0700 
Subject: [PATCH 046/190] Glasser's comments --- packages/minimongo/selector.js | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/packages/minimongo/selector.js b/packages/minimongo/selector.js index 4887bbcf9e..f79cf2c362 100644 --- a/packages/minimongo/selector.js +++ b/packages/minimongo/selector.js @@ -807,18 +807,18 @@ LocalCollection._isSelectorAffectedByModifier = function (selector, modifier) { while (i < sel.length && j < mod.length) { if (numericKey(sel[i]) && numericKey(mod[j])) { - // foo.4.bar, foo.4 => good - // foo.3.bar, foo.4 => bad - if (sel[i] == mod[j]) + // foo.4.bar selector affected by foo.4 modifier + // foo.3.bar selector unaffected by foo.4 modifier + if (sel[i] === mod[j]) i++, j++; else return false; } else if (numericKey(sel[i])) { - // foo.4.bar, foo.bar => bad + // foo.4.bar selector unaffected by foo.bar modifier return false; } else if (numericKey(mod[j])) { j++; - } else if (sel[i] == mod[j]) + } else if (sel[i] === mod[j]) i++, j++; else return false; From 37120d186c2aacb5ab83f80c4e110e770901cb76 Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Thu, 10 Oct 2013 14:28:56 -0700 Subject: [PATCH 047/190] One more test --- packages/minimongo/minimongo_tests.js | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/packages/minimongo/minimongo_tests.js b/packages/minimongo/minimongo_tests.js index 8996efc487..5e8ae4c7f6 100644 --- a/packages/minimongo/minimongo_tests.js +++ b/packages/minimongo/minimongo_tests.js @@ -2419,7 +2419,7 @@ Tinytest.add("minimongo - modifier affects selector", function (test) { else test.isFalse(LocalCollection._isSelectorAffectedByModifier(sel, mod, desc)); } - + function affected(sel, mod, desc) { testSelectorAffectedByModifier(sel, mod, 1, desc); } @@ -2447,5 +2447,7 @@ Tinytest.add("minimongo - modifier affects selector", function (test) { notAffected({ 'foo.4.bar.baz': 0 }, { $unset: { 'foo.bar': 1 } }, "delicate work with numeric fields in selector"); affected({ 
'foo.4.bar.baz': 0 }, { $unset: { 'foo.4.bar': 1 } }, "delicate work with numeric fields in selector"); affected({ 'foo.bar.baz': 0 }, { $unset: { 'foo.3.bar': 1 } }, "delicate work with numeric fields in selector"); + + affected({ 'foo.0.bar': 0 }, { $set: { 'foo.0.0.bar' } }, "delicate work with nested arrays and selectors by indecies"); }); From 3c39614e50a3eb3a130c015a52186fc315713540 Mon Sep 17 00:00:00 2001 From: David Glasser Date: Thu, 10 Oct 2013 15:08:32 -0700 Subject: [PATCH 048/190] Fix typo in test. --- packages/minimongo/minimongo_tests.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/minimongo/minimongo_tests.js b/packages/minimongo/minimongo_tests.js index 5e8ae4c7f6..975cef503c 100644 --- a/packages/minimongo/minimongo_tests.js +++ b/packages/minimongo/minimongo_tests.js @@ -2448,6 +2448,6 @@ Tinytest.add("minimongo - modifier affects selector", function (test) { affected({ 'foo.4.bar.baz': 0 }, { $unset: { 'foo.4.bar': 1 } }, "delicate work with numeric fields in selector"); affected({ 'foo.bar.baz': 0 }, { $unset: { 'foo.3.bar': 1 } }, "delicate work with numeric fields in selector"); - affected({ 'foo.0.bar': 0 }, { $set: { 'foo.0.0.bar' } }, "delicate work with nested arrays and selectors by indecies"); + affected({ 'foo.0.bar': 0 }, { $set: { 'foo.0.0.bar': 1 } }, "delicate work with nested arrays and selectors by indecies"); }); From 75f60372037dc260b1b9cfcef512686c3a18516a Mon Sep 17 00:00:00 2001 From: David Glasser Date: Thu, 10 Oct 2013 15:01:30 -0700 Subject: [PATCH 049/190] Set internal Mongo replay flag when tailing oplog. 
--- packages/mongo-livedata/.npm/package/npm-shrinkwrap.json | 2 +- packages/mongo-livedata/mongo_driver.js | 5 +++++ packages/mongo-livedata/package.js | 5 ++++- 3 files changed, 10 insertions(+), 2 deletions(-) diff --git a/packages/mongo-livedata/.npm/package/npm-shrinkwrap.json b/packages/mongo-livedata/.npm/package/npm-shrinkwrap.json index fbddd059bb..b9467c7006 100644 --- a/packages/mongo-livedata/.npm/package/npm-shrinkwrap.json +++ b/packages/mongo-livedata/.npm/package/npm-shrinkwrap.json @@ -1,7 +1,7 @@ { "dependencies": { "mongodb": { - "version": "1.3.19", + "from": "https://github.com/meteor/node-mongodb-native/tarball/ab633e288bf0b77b7b7444897f42a76cb09aea0e", "dependencies": { "bson": { "version": "0.2.2" diff --git a/packages/mongo-livedata/mongo_driver.js b/packages/mongo-livedata/mongo_driver.js index 79611a3385..00c2c96e89 100644 --- a/packages/mongo-livedata/mongo_driver.js +++ b/packages/mongo-livedata/mongo_driver.js @@ -854,6 +854,11 @@ MongoConnection.prototype._createSynchronousCursor = function( // ... and to keep querying the server indefinitely rather than just 5 times // if there's no more data. mongoOptions.numberOfRetries = -1; + // And if this cursor specifies a 'ts', then set the undocumented oplog + // replay flag, which does a special scan to find the first document + // (instead of creating an index on ts). + if (cursorDescription.selector.ts) + mongoOptions.oplogReplay = true; } var dbCursor = collection.find( diff --git a/packages/mongo-livedata/package.js b/packages/mongo-livedata/package.js index 4fbf18d2a4..d3c7acbf2c 100644 --- a/packages/mongo-livedata/package.js +++ b/packages/mongo-livedata/package.js @@ -12,7 +12,10 @@ Package.describe({ internal: true }); -Npm.depends({mongodb: "1.3.19"}); +Npm.depends({ + // 1.3.19, plus a patch to add oplogReplay flag. 
+ mongodb: "https://github.com/meteor/node-mongodb-native/tarball/ab633e288bf0b77b7b7444897f42a76cb09aea0e" +}); Package.on_use(function (api) { api.use(['random', 'ejson', 'json', 'underscore', 'minimongo', 'logging', From 12f5f4e87ecd5cddb65f80f5265cd5b78945f2ab Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Thu, 10 Oct 2013 15:12:21 -0700 Subject: [PATCH 050/190] Another test to ensure correct observation of object literals. --- packages/minimongo/minimongo_tests.js | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/packages/minimongo/minimongo_tests.js b/packages/minimongo/minimongo_tests.js index 975cef503c..40aedf2c4b 100644 --- a/packages/minimongo/minimongo_tests.js +++ b/packages/minimongo/minimongo_tests.js @@ -2413,6 +2413,24 @@ Tinytest.add("minimongo - modifier affects selector", function (test) { {$and: [{ 'x.d': { $ne: 5, $gte: 433 } }, { 'a.b': 234 }]}] }, ['a.b', 'a.b.c', 'x.d'], 'group operators + duplicates'); + // When top-level value is an object, it is treated as a literal, + // so when you query col.find({ a: { foo: 1, bar: 2 } }) + // it doesn't mean you are looking for anything that has 'a.foo' to be 1 and + // 'a.bar' to be 2, instead you are looking for 'a' to be exatly that object + // with exatly that order of keys. { a: { foo: 1, bar: 2, baz: 3 } } wouldn't + // match it. That's why in this selector 'a' would be important key, not a.foo + // and a.bar. + testSelectorPaths({ + a: { + foo: 1, + bar: 2 + }, + 'b.c': { + literal: "object", + but: "we still observe any changes in 'b.c'" + } + }, ['a', 'b.c'], "literal object"); + function testSelectorAffectedByModifier (sel, mod, yes, desc) { if (yes) test.isTrue(LocalCollection._isSelectorAffectedByModifier(sel, mod, desc)); From 3dc3ad9997775c87200c03015ba6f833b640a1d9 Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Thu, 10 Oct 2013 15:30:51 -0700 Subject: [PATCH 051/190] Simplify getPaths - method for getting important key paths from a selector object. 
--- packages/minimongo/selector.js | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/packages/minimongo/selector.js b/packages/minimongo/selector.js index f79cf2c362..8bd74efb8d 100644 --- a/packages/minimongo/selector.js +++ b/packages/minimongo/selector.js @@ -836,14 +836,15 @@ LocalCollection._isSelectorAffectedByModifier = function (selector, modifier) { }; // Returns a list of key paths the given selector is looking for -var getPaths = LocalCollection._getSelectorPaths = function (sel, parentKeys) { - parentKeys = parentKeys || []; +var getPaths = LocalCollection._getSelectorPaths = function (sel) { return _.chain(sel).map(function (v, k) { // we don't know how to handle $where because it can be anything if (k === "$where") return ''; // matches everything + // we branch from $or/$and/$nor operator if (_.has(LOGICAL_OPERATORS, k)) - return _.map(v, function (x) { return getPaths(x, parentKeys); }); - return parentKeys.concat(k).join('.'); + return _.map(v, getPaths); + // the value is a literal or some comparison operator + return k; }).flatten().uniq().value(); }; From 0ddf232773eb887c6af9f54707806b0c2eb09153 Mon Sep 17 00:00:00 2001 From: David Glasser Date: Thu, 10 Oct 2013 16:09:02 -0700 Subject: [PATCH 052/190] Restore accidentally-deleted mongo restart code. Thanks, cli-test.sh! --- tools/run.js | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/tools/run.js b/tools/run.js index 4776c204d3..fb8506b3f6 100644 --- a/tools/run.js +++ b/tools/run.js @@ -648,6 +648,16 @@ exports.run = function (context, options) { } process.exit(1); } + + if (mongoErrorTimer) + clearTimeout(mongoErrorTimer); + mongoErrorTimer = setTimeout(function () { + mongoErrorCount = 0; + mongoErrorTimer = null; + }, 5000); + + // Wait a sec to restart. 
+ setTimeout(launch, 1000); } }); }).run(); From acb6decb6a2b2383fd2db17ff8aa350a5741f053 Mon Sep 17 00:00:00 2001 From: David Glasser Date: Thu, 10 Oct 2013 16:59:44 -0700 Subject: [PATCH 053/190] Make run-tools-tests.sh pass. --- scripts/cli-test.sh | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/scripts/cli-test.sh b/scripts/cli-test.sh index ee8ed9af06..8351bcd89e 100755 --- a/scripts/cli-test.sh +++ b/scripts/cli-test.sh @@ -41,7 +41,9 @@ elif [ "$METEOR_WAREHOUSE_DIR" ]; then INSTALLED_METEOR=t export METEOR_TEST_NO_SPRINGBOARD=t if [ -z "$TEST_RELEASE" ]; then - TEST_RELEASE="0.6.5-rc12" + # We need a release whose mongo-livedata exports + # MongoInternals.NpmModule. + TEST_RELEASE="oplog-alpha1" fi METEOR="$METEOR --release=$TEST_RELEASE" # some random non-official release From 81d77f916ba1d0b8c017db205eb5c675f6eb2d1f Mon Sep 17 00:00:00 2001 From: David Glasser Date: Thu, 10 Oct 2013 17:09:43 -0700 Subject: [PATCH 054/190] Link to a PR and use latest version. 
--- packages/mongo-livedata/.npm/package/npm-shrinkwrap.json | 2 +- packages/mongo-livedata/package.js | 5 +++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/packages/mongo-livedata/.npm/package/npm-shrinkwrap.json b/packages/mongo-livedata/.npm/package/npm-shrinkwrap.json index b9467c7006..e6d581a1fd 100644 --- a/packages/mongo-livedata/.npm/package/npm-shrinkwrap.json +++ b/packages/mongo-livedata/.npm/package/npm-shrinkwrap.json @@ -1,7 +1,7 @@ { "dependencies": { "mongodb": { - "from": "https://github.com/meteor/node-mongodb-native/tarball/ab633e288bf0b77b7b7444897f42a76cb09aea0e", + "from": "https://github.com/meteor/node-mongodb-native/tarball/779bbac916a751f305d84c727a6cc7dfddab7924", "dependencies": { "bson": { "version": "0.2.2" diff --git a/packages/mongo-livedata/package.js b/packages/mongo-livedata/package.js index d3c7acbf2c..f0ad107cdb 100644 --- a/packages/mongo-livedata/package.js +++ b/packages/mongo-livedata/package.js @@ -13,8 +13,9 @@ Package.describe({ }); Npm.depends({ - // 1.3.19, plus a patch to add oplogReplay flag. - mongodb: "https://github.com/meteor/node-mongodb-native/tarball/ab633e288bf0b77b7b7444897f42a76cb09aea0e" + // 1.3.19, plus a patch to add oplogReplay flag: + // https://github.com/mongodb/node-mongodb-native/pull/1108 + mongodb: "https://github.com/meteor/node-mongodb-native/tarball/779bbac916a751f305d84c727a6cc7dfddab7924" }); Package.on_use(function (api) { From 73b45f2fa2b4abcb2e88a0e319d722b50a319bda Mon Sep 17 00:00:00 2001 From: David Glasser Date: Thu, 10 Oct 2013 17:16:36 -0700 Subject: [PATCH 055/190] Actually use Slava's function in oplog tailing. Tests pass, but I have not confirmed that it actually has the desired end-to-end effect. 
--- packages/mongo-livedata/oplog.js | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/packages/mongo-livedata/oplog.js b/packages/mongo-livedata/oplog.js index e3ed2841e5..a182a9f442 100644 --- a/packages/mongo-livedata/oplog.js +++ b/packages/mongo-livedata/oplog.js @@ -137,8 +137,13 @@ MongoConnection.prototype._observeChangesWithOplog = function ( LocalCollection._modify(newDoc, op.o); handleDoc(id, newDoc); } else { - // XXX for not-currently-published docs, if we can guarantee the - // irrelevance of the change, we can skip it + // If the selector is not affected by the modifier, no need to do + // anything! + if (!LocalCollection._isSelectorAffectedByModifier( + cursorDescription.selector, op.o)) { + return; + } + curiousity.set(id, op.ts.toString()); beCurious(); return; From f24be0684f5b7cc2b209b59a82277a5792d970c7 Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Fri, 11 Oct 2013 15:35:26 -0700 Subject: [PATCH 056/190] Use MinimongoTest object to export private functions for unit tests --- packages/minimongo/minimongo.js | 3 +++ packages/minimongo/minimongo_tests.js | 4 ++-- packages/minimongo/package.js | 1 + packages/minimongo/selector.js | 5 +++-- 4 files changed, 9 insertions(+), 4 deletions(-) diff --git a/packages/minimongo/minimongo.js b/packages/minimongo/minimongo.js index 2ddb61b84d..86d9c2045c 100644 --- a/packages/minimongo/minimongo.js +++ b/packages/minimongo/minimongo.js @@ -32,6 +32,9 @@ LocalCollection = function (name) { this.paused = false; }; +// Object exported only for unit testing. +// Use it to export private functions to test in Tinytest. 
+MinimongoTest = {}; LocalCollection._applyChanges = function (doc, changeFields) { _.each(changeFields, function (value, key) { diff --git a/packages/minimongo/minimongo_tests.js b/packages/minimongo/minimongo_tests.js index 40aedf2c4b..2a5aca1854 100644 --- a/packages/minimongo/minimongo_tests.js +++ b/packages/minimongo/minimongo_tests.js @@ -264,7 +264,7 @@ Tinytest.add("minimongo - lookup", function (test) { Tinytest.add("minimongo - selector_compiler", function (test) { var matches = function (should_match, selector, doc) { - var does_match = LocalCollection._matches(selector, doc); + var does_match = MinimongoTest.matches(selector, doc); if (does_match != should_match) { // XXX super janky test.fail({type: "minimongo-ordering", @@ -2382,7 +2382,7 @@ Tinytest.add("minimongo - $near operator tests", function (test) { Tinytest.add("minimongo - modifier affects selector", function (test) { function testSelectorPaths (sel, paths, desc) { - test.isTrue(_.isEqual(LocalCollection._getSelectorPaths(sel), paths), desc); + test.isTrue(_.isEqual(MinimongoTest.getSelectorPaths(sel), paths), desc); } testSelectorPaths({ diff --git a/packages/minimongo/package.js b/packages/minimongo/package.js index dec89d4788..f70ccddabb 100644 --- a/packages/minimongo/package.js +++ b/packages/minimongo/package.js @@ -5,6 +5,7 @@ Package.describe({ Package.on_use(function (api) { api.export('LocalCollection'); + api.export('MinimongoTest', { testOnly: true }); api.use(['underscore', 'json', 'ejson', 'ordered-dict', 'deps', 'random', 'ordered-dict']); // This package is used for geo-location queries such as $near diff --git a/packages/minimongo/selector.js b/packages/minimongo/selector.js index 8bd74efb8d..6a1f9766f4 100644 --- a/packages/minimongo/selector.js +++ b/packages/minimongo/selector.js @@ -540,7 +540,7 @@ LocalCollection._f = { // For unit tests. True if the given document matches the given // selector. 
-LocalCollection._matches = function (selector, doc) { +MinimongoTest.matches = function (selector, doc) { return (LocalCollection._compileSelector(selector))(doc); }; @@ -799,6 +799,7 @@ LocalCollection._isSelectorAffectedByModifier = function (selector, modifier) { modifier = _.extend({ $set: {}, $unset: {} }, modifier); var modifiedPaths = _.keys(modifier.$set).concat(_.keys(modifier.$unset)); var meaningfulPaths = getPaths(selector); + return _.any(modifiedPaths, function (path) { var mod = path.split('.'); return _.any(meaningfulPaths, function (meaningfulPath) { @@ -836,7 +837,7 @@ LocalCollection._isSelectorAffectedByModifier = function (selector, modifier) { }; // Returns a list of key paths the given selector is looking for -var getPaths = LocalCollection._getSelectorPaths = function (sel) { +var getPaths = MinimongoTest.getSelectorPaths = function (sel) { return _.chain(sel).map(function (v, k) { // we don't know how to handle $where because it can be anything if (k === "$where") From aae9fc23db74663fc2a7411888a50b21da12db61 Mon Sep 17 00:00:00 2001 From: David Glasser Date: Wed, 9 Oct 2013 18:13:11 -0700 Subject: [PATCH 057/190] replace sequencer writes with reads. --- packages/mongo-livedata/mongo_driver.js | 131 +++++++++++++----------- packages/mongo-livedata/oplog.js | 4 +- 2 files changed, 75 insertions(+), 60 deletions(-) diff --git a/packages/mongo-livedata/mongo_driver.js b/packages/mongo-livedata/mongo_driver.js index 00c2c96e89..53f58f8a0e 100644 --- a/packages/mongo-livedata/mongo_driver.js +++ b/packages/mongo-livedata/mongo_driver.js @@ -219,13 +219,6 @@ MongoConnection.prototype._maybeBeginWrite = function () { var OPLOG_COLLECTION = 'oplog.rs'; -var SEQUENCE_COLLECTION = 'meteor_livedata_Sequencer'; -// XXX This is problematic if our RNG isn't seeded well enough. -var myServerId = Random.id(); -var nextSequenceId = 1; -// XXX doc -var pendingSequences = []; - // Like Perl's quotemeta: quotes all regexp metacharacters. 
See // https://github.com/substack/quotemeta/blob/master/index.js // XXX this is duplicated with accounts_server.js @@ -233,34 +226,23 @@ var quotemeta = function (str) { return String(str).replace(/(\W)/g, '\\$1'); }; -// Calls `callback` once the oplog has been processed up to a point that is -// roughly "now". Specifically, it does a dummy write which is then detected -// by the connection's oplog tailer. -// XXX This could be a read instead of a write, getting the last `ts` -// in oplog? -MongoConnection.prototype._callWhenOplogProcessed = function (callback) { - var self = this; - - var sequenceId = nextSequenceId++; - pendingSequences.push({sequenceId: sequenceId, - callback: callback}); - - // Use direct write to Node Mongo driver so we don't end up with recursive - // fence stuff. Need to disable 'safe' because we aren't providing a callback. - var writeCollection = self._getCollection(SEQUENCE_COLLECTION); - writeCollection.update({_id: myServerId}, {$set: {sequence: sequenceId}}, - {upsert: true, safe: false}); -}; - - MongoConnection.prototype._startOplogTailing = function (oplogUrl, dbName) { var self = this; var stopped = false; + var oplogConnection = null; var tailHandle = null; var readyFuture = new Future(); var nextId = 0; var callbacksByCollection = {}; + var lastProcessedTS = null; + var baseOplogSelector = { + ns: new RegExp('^' + quotemeta(dbName) + '\\.'), + // XXX also handle drop collection, etc + op: {$in: ['i', 'u', 'd']} + }; + // XXX doc + var pendingSequencers = []; self._oplogHandle = { stop: function () { @@ -290,47 +272,75 @@ MongoConnection.prototype._startOplogTailing = function (oplogUrl, dbName) { delete callbacksByCollection[collectionName][callbackId]; } }; + }, + + // Calls `callback` once the oplog has been processed up to a point that is + // roughly "now": specifically, once we've processed all ops that are + // currently visible. 
+ // XXX become convinced that this is actually safe even if oplogConnection + // is some kind of pool + callWhenProcessedLatest: function (callback) { + if (stopped) + throw new Error("Called callWhenProcessedLatest on stopped handle!"); + + // Calling onOplogEntry requires us to wait for the oplog connection to be + // ready. + readyFuture.wait(); + + // Except for during startup, we DON'T block. + Fiber(function () { + // We need to make the selector at least as restrictive as the actual + // tailing selector (ie, we need to specify the DB name) or else we + // might find a TS that won't show up in the actual tail stream. + var lastEntry = oplogConnection.findOne( + OPLOG_COLLECTION, baseOplogSelector, {sort: {$natural: -1}}); + if (!lastEntry) { + // Really, nothing in the oplog? Well, we've processed everything. + callback(); + return; + } + var ts = lastEntry.ts; + if (!ts) + throw Error("oplog entry without ts: " + EJSON.stringify(lastEntry)); + + if (lastProcessedTS && ts.lessThanOrEqual(lastProcessedTS)) { + // We've already caught up to here. + callback(); + return; + } + + if (!_.isEmpty(pendingSequencers) + && _.last(pendingSequencers).ts.greaterThan(ts)) { + throw Error("found misordered oplog"); + } + + pendingSequencers.push({ts: ts, + callback: callback}); + }).run(); + } + }; // Actually setting up the connection and tail blocks, so we do it "later". Meteor.defer(function () { - var oplogConnection = new MongoConnection(oplogUrl); + oplogConnection = new MongoConnection(oplogUrl); // Find the last oplog entry. Blocks until the connection is ready. var lastOplogEntry = oplogConnection.findOne( OPLOG_COLLECTION, {}, {sort: {$natural: -1}}); - var oplogSelector = { - ns: new RegExp('^' + quotemeta(dbName) + '\\.'), - // XXX also handle drop collection, etc - op: {$in: ['i', 'u', 'd']} - }; - if (lastOplogEntry) + var oplogSelector = _.clone(baseOplogSelector); + if (lastOplogEntry) { + // Start after the last entry that currently exists. 
oplogSelector.ts = {$gt: lastOplogEntry.ts}; + // If there are any calls to callWhenProcessedLatest before any other + // oplog entries show up, allow callWhenProcessedLatest to call its + // callback immediately. + lastProcessedTS = lastOplogEntry.ts; + } var cursorDescription = new CursorDescription( OPLOG_COLLECTION, oplogSelector, {tailable: true}); - var processSequence = function (doc) { - if (doc.op !== 'i' && doc.op !== 'u') - return; - var serverId = (doc.op === 'i' ? doc.o._id : doc.o2._id); - if (serverId !== myServerId) - return; - var sequenceId = - (doc.op === 'i' ? doc.o.sequence : - (doc.o.$set && doc.o.$set.sequence)); - if (typeof sequenceId !== 'number') - return; - // Process all sequence points up to this point. - while (!_.isEmpty(pendingSequences) - && pendingSequences[0].sequenceId <= sequenceId) { - var sequence = pendingSequences.shift(); - sequence.callback(); - } - }; - tailHandle = oplogConnection.tail(cursorDescription, function (doc) { if (!(doc.ns && doc.ns.length > dbName.length + 1 && doc.ns.substr(0, dbName.length + 1) === (dbName + '.'))) @@ -338,14 +348,19 @@ MongoConnection.prototype._startOplogTailing = function (oplogUrl, dbName) { var collectionName = doc.ns.substr(dbName.length + 1); - if (collectionName === SEQUENCE_COLLECTION) { - processSequence(doc); - return; - } - _.each(callbacksByCollection[collectionName], function (callback) { callback(EJSON.clone(doc)); }); + + // Now that we've processed this operation, process pending sequencers. 
+ if (!doc.ts) + throw Error("oplog entry without ts: " + EJSON.stringify(doc)); + lastProcessedTS = doc.ts; + while (!_.isEmpty(pendingSequencers) + && pendingSequencers[0].ts.lessThanOrEqual(lastProcessedTS)) { + var sequencer = pendingSequencers.shift(); + sequencer.callback(); + } }); readyFuture.return(); }); diff --git a/packages/mongo-livedata/oplog.js b/packages/mongo-livedata/oplog.js index a182a9f442..5786b0fd6c 100644 --- a/packages/mongo-livedata/oplog.js +++ b/packages/mongo-livedata/oplog.js @@ -172,7 +172,7 @@ MongoConnection.prototype._observeChangesWithOplog = function ( } var write = fence.beginWrite(); // XXX this also has to wait for steady!!! - self._callWhenOplogProcessed(function () { + self._oplogHandle.callWhenProcessedLatest(function () { write.committed(); }); complete(); @@ -185,7 +185,7 @@ MongoConnection.prototype._observeChangesWithOplog = function ( }); var catchUpFuture = new Future; - self._callWhenOplogProcessed(catchUpFuture.resolver()); + self._oplogHandle.callWhenProcessedLatest(catchUpFuture.resolver()); catchUpFuture.wait(); if (phase !== PHASE.INITIALIZING) From 599a2f928e3793dd65b8d3c079aed84d3db62a71 Mon Sep 17 00:00:00 2001 From: David Glasser Date: Wed, 16 Oct 2013 14:12:07 -0700 Subject: [PATCH 058/190] Separate oplog tail queries from other operations. This prevents the "look up last oplog entry" queries from taking several seconds 1/5 of the time when the query is on the same pooled connection as the awaitdata tail query. 
--- packages/mongo-livedata/mongo_driver.js | 24 ++++++++++++++++++------ 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/packages/mongo-livedata/mongo_driver.js b/packages/mongo-livedata/mongo_driver.js index 53f58f8a0e..3c1ccbe8ce 100644 --- a/packages/mongo-livedata/mongo_driver.js +++ b/packages/mongo-livedata/mongo_driver.js @@ -229,8 +229,9 @@ var quotemeta = function (str) { MongoConnection.prototype._startOplogTailing = function (oplogUrl, dbName) { var self = this; + var oplogQueryConnection = null; + var oplogTailConnection = null; var stopped = false; - var oplogConnection = null; var tailHandle = null; var readyFuture = new Future(); var nextId = 0; @@ -292,7 +293,7 @@ MongoConnection.prototype._startOplogTailing = function (oplogUrl, dbName) { // We need to make the selector at least as restrictive as the actual // tailing selector (ie, we need to specify the DB name) or else we // might find a TS that won't show up in the actual tail stream. - var lastEntry = oplogConnection.findOne( + var lastEntry = oplogQueryConnection.findOne( OPLOG_COLLECTION, baseOplogSelector, {sort: {$natural: -1}}); if (!lastEntry) { // Really, nothing in the oplog? Well, we've processed everything. @@ -320,12 +321,23 @@ MongoConnection.prototype._startOplogTailing = function (oplogUrl, dbName) { } }; - // Actually setting up the connection and tail blocks, so we do it "later". + // Setting up the connections and tail handler is a blocking operation, so we + // do it "later". Meteor.defer(function () { - oplogConnection = new MongoConnection(oplogUrl); + // We make two separate connections to Mongo. The Node Mongo driver + // implements a naive round-robin connection pool: each "connection" is a + // pool of several (5 by default) TCP connections, and each request is + // rotated through the pools. Tailable cursor queries block on the server + // until there is some data to return (or until a few seconds have + // passed). 
So if the connection pool used for tailing cursors is the same + // pool used for other queries, the other queries will be delayed by seconds + // 1/5 of the time. + // XXX set the pool size for oplogTailConnection to 1 + oplogTailConnection = new MongoConnection(oplogUrl); + oplogQueryConnection = new MongoConnection(oplogUrl); // Find the last oplog entry. Blocks until the connection is ready. - var lastOplogEntry = oplogConnection.findOne( + var lastOplogEntry = oplogQueryConnection.findOne( OPLOG_COLLECTION, {}, {sort: {$natural: -1}}); var oplogSelector = _.clone(baseOplogSelector); @@ -341,7 +353,7 @@ MongoConnection.prototype._startOplogTailing = function (oplogUrl, dbName) { var cursorDescription = new CursorDescription( OPLOG_COLLECTION, oplogSelector, {tailable: true}); - tailHandle = oplogConnection.tail(cursorDescription, function (doc) { + tailHandle = oplogTailConnection.tail(cursorDescription, function (doc) { if (!(doc.ns && doc.ns.length > dbName.length + 1 && doc.ns.substr(0, dbName.length + 1) === (dbName + '.'))) throw new Error("Unexpected ns"); From 19e41af82a42d04e9bb987ef3aa545e6174c21b1 Mon Sep 17 00:00:00 2001 From: David Glasser Date: Wed, 16 Oct 2013 14:21:01 -0700 Subject: [PATCH 059/190] Only use one connection for oplog tailing. --- packages/mongo-livedata/mongo_driver.js | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/packages/mongo-livedata/mongo_driver.js b/packages/mongo-livedata/mongo_driver.js index 3c1ccbe8ce..b4b908df21 100644 --- a/packages/mongo-livedata/mongo_driver.js +++ b/packages/mongo-livedata/mongo_driver.js @@ -113,13 +113,13 @@ MongoConnection = function (url, options) { self._connectCallbacks = []; self._liveResultsSets = {}; - var mongoOptions = {db: {safe: true}}; + var mongoOptions = {db: {safe: true}, server: {}}; // Set autoReconnect to true, unless passed on the URL. 
Why someone // would want to set autoReconnect to false, I'm not really sure, but // keeping this for backwards compatibility for now. if (!(/[\?&]auto_?[rR]econnect=/.test(url))) { - mongoOptions.server = {auto_reconnect: true}; + mongoOptions.server.auto_reconnect = true; } // Disable the native parser by default, unless specifically enabled @@ -134,6 +134,12 @@ MongoConnection = function (url, options) { mongoOptions.db.native_parser = false; } + // XXX maybe we should have a better way of allowing users to configure the + // underlying Mongo driver + if (_.has(options, 'poolSize')) { + mongoOptions.server.poolSize = 1; + } + MongoDB.connect(url, mongoOptions, function(err, db) { if (err) throw err; @@ -332,8 +338,10 @@ MongoConnection.prototype._startOplogTailing = function (oplogUrl, dbName) { // passed). So if the connection pool used for tailing cursors is the same // pool used for other queries, the other queries will be delayed by seconds // 1/5 of the time. - // XXX set the pool size for oplogTailConnection to 1 - oplogTailConnection = new MongoConnection(oplogUrl); + // + // The tail connection will only ever be running a single tail command, so + // it only needs to make one underlying TCP connection. + oplogTailConnection = new MongoConnection(oplogUrl, {poolSize: 1}); oplogQueryConnection = new MongoConnection(oplogUrl); // Find the last oplog entry. Blocks until the connection is ready. From 0d45c5f0098c48133d184f2072a2fc240c716146 Mon Sep 17 00:00:00 2001 From: David Glasser Date: Fri, 18 Oct 2013 15:10:36 -0700 Subject: [PATCH 060/190] Limit poolsize for "last oplog entry" to 1. This ensures that we allocate sequencers in increasing order. 
--- packages/mongo-livedata/mongo_driver.js | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/packages/mongo-livedata/mongo_driver.js b/packages/mongo-livedata/mongo_driver.js index b4b908df21..c7c0e438ed 100644 --- a/packages/mongo-livedata/mongo_driver.js +++ b/packages/mongo-livedata/mongo_driver.js @@ -235,7 +235,7 @@ var quotemeta = function (str) { MongoConnection.prototype._startOplogTailing = function (oplogUrl, dbName) { var self = this; - var oplogQueryConnection = null; + var oplogLastEntryConnection = null; var oplogTailConnection = null; var stopped = false; var tailHandle = null; @@ -299,7 +299,7 @@ MongoConnection.prototype._startOplogTailing = function (oplogUrl, dbName) { // We need to make the selector at least as restrictive as the actual // tailing selector (ie, we need to specify the DB name) or else we // might find a TS that won't show up in the actual tail stream. - var lastEntry = oplogQueryConnection.findOne( + var lastEntry = oplogLastEntryConnection.findOne( OPLOG_COLLECTION, baseOplogSelector, {sort: {$natural: -1}}); if (!lastEntry) { // Really, nothing in the oplog? Well, we've processed everything. @@ -342,10 +342,13 @@ MongoConnection.prototype._startOplogTailing = function (oplogUrl, dbName) { // The tail connection will only ever be running a single tail command, so // it only needs to make one underlying TCP connection. oplogTailConnection = new MongoConnection(oplogUrl, {poolSize: 1}); - oplogQueryConnection = new MongoConnection(oplogUrl); + // XXX better docs, but: it's to get monotonic results + // XXX is it safe to say "if there's an in flight query, just use its + // results"? I don't think so but should consider that + oplogLastEntryConnection = new MongoConnection(oplogUrl, {poolSize: 1}); // Find the last oplog entry. Blocks until the connection is ready. 
- var lastOplogEntry = oplogQueryConnection.findOne( + var lastOplogEntry = oplogLastEntryConnection.findOne( OPLOG_COLLECTION, {}, {sort: {$natural: -1}}); var oplogSelector = _.clone(baseOplogSelector); From bcbc73bc4add9f5a61b678981c160055c6113056 Mon Sep 17 00:00:00 2001 From: David Glasser Date: Fri, 18 Oct 2013 15:53:19 -0700 Subject: [PATCH 061/190] Add more info to the misordered oplog errors. --- packages/mongo-livedata/mongo_driver.js | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/packages/mongo-livedata/mongo_driver.js b/packages/mongo-livedata/mongo_driver.js index c7c0e438ed..34f43a335f 100644 --- a/packages/mongo-livedata/mongo_driver.js +++ b/packages/mongo-livedata/mongo_driver.js @@ -318,7 +318,9 @@ MongoConnection.prototype._startOplogTailing = function (oplogUrl, dbName) { if (!_.isEmpty(pendingSequencers) && _.last(pendingSequencers).ts.greaterThan(ts)) { - throw Error("found misordered oplog"); + throw Error("found misordered oplog: " + + _.last(pendingSequencers).ts.toString() + " vs " + + ts.toString()); } pendingSequencers.push({ts: ts, From f797a49beb1837939133da6af553a39d0ce2136f Mon Sep 17 00:00:00 2001 From: David Glasser Date: Sat, 19 Oct 2013 00:01:17 -0700 Subject: [PATCH 062/190] Support oplog when MONGO_URL is a replset. Node's URL parser doesn't support Mongo's "URLs" which contain commas in the hostname part. Instead of trying to parse, just extract the database name from ... the database! --- packages/mongo-livedata/mongo_driver.js | 31 ++++++++++++++++--------- 1 file changed, 20 insertions(+), 11 deletions(-) diff --git a/packages/mongo-livedata/mongo_driver.js b/packages/mongo-livedata/mongo_driver.js index 34f43a335f..79d57128fd 100644 --- a/packages/mongo-livedata/mongo_driver.js +++ b/packages/mongo-livedata/mongo_driver.js @@ -157,9 +157,11 @@ MongoConnection = function (url, options) { self._oplogHandle = null; if (options.oplogUrl) { - // XXX this parse fails on mongo URLs with commas! 
- var dbName = Npm.require('url').parse(url).pathname.substr(1); - self._startOplogTailing(options.oplogUrl, dbName); + var dbNameFuture = new Future; + self._withDb(function (db) { + dbNameFuture.return(db.databaseName); + }); + self._startOplogTailing(options.oplogUrl, dbNameFuture); } }; @@ -232,7 +234,8 @@ var quotemeta = function (str) { return String(str).replace(/(\W)/g, '\\$1'); }; -MongoConnection.prototype._startOplogTailing = function (oplogUrl, dbName) { +MongoConnection.prototype._startOplogTailing = function (oplogUrl, + dbNameFuture) { var self = this; var oplogLastEntryConnection = null; @@ -243,11 +246,15 @@ MongoConnection.prototype._startOplogTailing = function (oplogUrl, dbName) { var nextId = 0; var callbacksByCollection = {}; var lastProcessedTS = null; - var baseOplogSelector = { - ns: new RegExp('^' + quotemeta(dbName) + '\\.'), - // XXX also handle drop collection, etc - op: {$in: ['i', 'u', 'd']} - }; + // Lazily calculate the basic selector. Don't call baseOplogSelector() at the + // top level of this function, because we don't want this function to block. + var baseOplogSelector = _.once(function () { + return { + ns: new RegExp('^' + quotemeta(dbNameFuture.wait()) + '\\.'), + // XXX also handle drop collection, etc + op: {$in: ['i', 'u', 'd']} + }; + }); // XXX doc var pendingSequencers = []; @@ -300,7 +307,7 @@ MongoConnection.prototype._startOplogTailing = function (oplogUrl, dbName) { // tailing selector (ie, we need to specify the DB name) or else we // might find a TS that won't show up in the actual tail stream. var lastEntry = oplogLastEntryConnection.findOne( - OPLOG_COLLECTION, baseOplogSelector, {sort: {$natural: -1}}); + OPLOG_COLLECTION, baseOplogSelector(), {sort: {$natural: -1}}); if (!lastEntry) { // Really, nothing in the oplog? Well, we've processed everything. 
callback(); @@ -353,7 +360,9 @@ MongoConnection.prototype._startOplogTailing = function (oplogUrl, dbName) { var lastOplogEntry = oplogLastEntryConnection.findOne( OPLOG_COLLECTION, {}, {sort: {$natural: -1}}); - var oplogSelector = _.clone(baseOplogSelector); + var dbName = dbNameFuture.wait(); + + var oplogSelector = _.clone(baseOplogSelector()); if (lastOplogEntry) { // Start after the last entry that currently exists. oplogSelector.ts = {$gt: lastOplogEntry.ts}; From 9cc8ca6c2bbebabcaac7fff901d32ee230662c26 Mon Sep 17 00:00:00 2001 From: David Glasser Date: Sat, 19 Oct 2013 00:24:12 -0700 Subject: [PATCH 063/190] Don't fire write fence until observes are steady. --- packages/mongo-livedata/oplog.js | 23 ++++++++++++++++++----- 1 file changed, 18 insertions(+), 5 deletions(-) diff --git a/packages/mongo-livedata/oplog.js b/packages/mongo-livedata/oplog.js index 5786b0fd6c..7f5b857b5c 100644 --- a/packages/mongo-livedata/oplog.js +++ b/packages/mongo-livedata/oplog.js @@ -94,7 +94,17 @@ MongoConnection.prototype._observeChangesWithOplog = function ( f.get(); }); } + beSteady(); + }; + + var writesToCommitWhenWeReachSteady = []; + var beSteady = function () { phase = PHASE.STEADY; + var writes = writesToCommitWhenWeReachSteady; + writesToCommitWhenWeReachSteady = []; + _.each(writes, function (w) { + w.committed(); + }); }; var oplogEntryHandlers = {}; @@ -163,17 +173,20 @@ MongoConnection.prototype._observeChangesWithOplog = function ( // XXX ordering w.r.t. everything else? var listenersHandle = listenAll( cursorDescription, function (notification, complete) { - // If we're not in a write fence, we don't have to do anything. That's - // because + // If we're not in a write fence, we don't have to do anything. var fence = DDPServer._CurrentWriteFence.get(); if (!fence) { complete(); return; } var write = fence.beginWrite(); - // XXX this also has to wait for steady!!! 
+ // This write cannot complete until we've caught up to "this point" in the + // oplog, and then made it back to the steady state. self._oplogHandle.callWhenProcessedLatest(function () { - write.committed(); + if (phase === PHASE.STEADY) + write.committed(); + else + writesToCommitWhenWeReachSteady.push(write); }); complete(); } @@ -192,7 +205,7 @@ MongoConnection.prototype._observeChangesWithOplog = function ( throw Error("Phase unexpectedly " + phase); if (curiousity.isEmpty()) { - phase = PHASE.STEADY; + beSteady(); } else { phase = PHASE.FETCHING; Meteor.defer(beCurious); From b4a598b3e523149a30bed1377369d69dbf5122eb Mon Sep 17 00:00:00 2001 From: David Glasser Date: Mon, 21 Oct 2013 17:53:15 -0700 Subject: [PATCH 064/190] Close connection to DB used to set up oplog. --- tools/mongo_runner.js | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/mongo_runner.js b/tools/mongo_runner.js index cc56f4e369..69500088d3 100644 --- a/tools/mongo_runner.js +++ b/tools/mongo_runner.js @@ -256,6 +256,7 @@ exports.launchMongo = function (options) { }, function (err, result) { if (err) throw err; + db.close(true); }); }); } From 6ff2c1c9ce656aec32e4e264ccf07a742bd9801d Mon Sep 17 00:00:00 2001 From: David Glasser Date: Mon, 21 Oct 2013 18:59:34 -0700 Subject: [PATCH 065/190] Set Mongo connection pool size better. Fixes (for now) misordered oplog errors. --- packages/mongo-livedata/mongo_driver.js | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/packages/mongo-livedata/mongo_driver.js b/packages/mongo-livedata/mongo_driver.js index 79d57128fd..84933c7f6b 100644 --- a/packages/mongo-livedata/mongo_driver.js +++ b/packages/mongo-livedata/mongo_driver.js @@ -113,7 +113,7 @@ MongoConnection = function (url, options) { self._connectCallbacks = []; self._liveResultsSets = {}; - var mongoOptions = {db: {safe: true}, server: {}}; + var mongoOptions = {db: {safe: true}, server: {}, replSet: {}}; // Set autoReconnect to true, unless passed on the URL. 
Why someone // would want to set autoReconnect to false, I'm not really sure, but @@ -137,7 +137,10 @@ MongoConnection = function (url, options) { // XXX maybe we should have a better way of allowing users to configure the // underlying Mongo driver if (_.has(options, 'poolSize')) { - mongoOptions.server.poolSize = 1; + // If we just set this for "server", replSet will override it. If we just + // set it for replSet, it will be ignored if we're not using a replSet. + mongoOptions.server.poolSize = options.poolSize; + mongoOptions.replSet.poolSize = options.poolSize; } MongoDB.connect(url, mongoOptions, function(err, db) { From 3960fcc96393266bf6eeed998ff0e197ac42a338 Mon Sep 17 00:00:00 2001 From: David Glasser Date: Mon, 21 Oct 2013 19:00:02 -0700 Subject: [PATCH 066/190] More usable error messages for misordered oplog. --- packages/mongo-livedata/mongo_driver.js | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/packages/mongo-livedata/mongo_driver.js b/packages/mongo-livedata/mongo_driver.js index 84933c7f6b..dd36270299 100644 --- a/packages/mongo-livedata/mongo_driver.js +++ b/packages/mongo-livedata/mongo_driver.js @@ -237,6 +237,10 @@ var quotemeta = function (str) { return String(str).replace(/(\W)/g, '\\$1'); }; +var showTS = function (ts) { + return "Timestamp(" + ts.getHighBits() + ", " + ts.getLowBits() + ")"; +}; + MongoConnection.prototype._startOplogTailing = function (oplogUrl, dbNameFuture) { var self = this; @@ -329,8 +333,8 @@ MongoConnection.prototype._startOplogTailing = function (oplogUrl, if (!_.isEmpty(pendingSequencers) && _.last(pendingSequencers).ts.greaterThan(ts)) { throw Error("found misordered oplog: " - + _.last(pendingSequencers).ts.toString() + " vs " - + ts.toString()); + + showTS(_.last(pendingSequencers).ts) + " vs " + + showTS(ts)); } pendingSequencers.push({ts: ts, From 2f1ebbbcc7c82857d9ece9039902e9faa3196678 Mon Sep 17 00:00:00 2001 From: David Glasser Date: Tue, 22 Oct 2013 00:27:08 -0700 Subject: 
[PATCH 067/190] Restructure code to "allow" misordered sequencers. Throw in an assertion that they aren't misordered anyway. This will be removed before merge. But it should never fire for a singleton replset. --- packages/mongo-livedata/mongo_driver.js | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/packages/mongo-livedata/mongo_driver.js b/packages/mongo-livedata/mongo_driver.js index dd36270299..d1f9ea0516 100644 --- a/packages/mongo-livedata/mongo_driver.js +++ b/packages/mongo-livedata/mongo_driver.js @@ -330,15 +330,25 @@ MongoConnection.prototype._startOplogTailing = function (oplogUrl, return; } - if (!_.isEmpty(pendingSequencers) - && _.last(pendingSequencers).ts.greaterThan(ts)) { + var insertAfter = pendingSequencers.length; + while (insertAfter - 1 > 0 + && pendingSequencers[insertAfter - 1].ts.greaterThan(ts)) { + insertAfter--; + } + + // XXX this can occur if we fail over from one primary to another. so + // this check needs to be removed before we merge oplog. that said, it + // has been helpful so far at proving that we are properly using + // poolSize 1. Also, we could keep something like it if we could + // actually detect failover; see + // https://github.com/mongodb/node-mongodb-native/issues/1120 + if (insertAfter !== pendingSequencers.length) { throw Error("found misordered oplog: " + showTS(_.last(pendingSequencers).ts) + " vs " + showTS(ts)); } - pendingSequencers.push({ts: ts, - callback: callback}); + pendingSequencers.splice(insertAfter, 0, {ts: ts, callback: callback}); }).run(); } }; From 24913f862b3d03024feef8c5905d3a1dd6d5ba95 Mon Sep 17 00:00:00 2001 From: David Glasser Date: Tue, 22 Oct 2013 00:28:35 -0700 Subject: [PATCH 068/190] When FETCHING, process add/delete/replace directly Unless they are for one of the currently-fetching IDs. Note that this can reorder observes! But so can poll-based observe. That said, I think it can reorder them in a more noticable way. 
Specifically, with poll-based observe, if you observe two operations A1 and A2 on a doc A, then you are guaranteed to have seen any operation on any document that occurred before A1. This is not the case after this commit for oplog-based observe. (Not sure if it was before this commit...) --- packages/mongo-livedata/oplog.js | 25 ++++++++++++++++++++----- 1 file changed, 20 insertions(+), 5 deletions(-) diff --git a/packages/mongo-livedata/oplog.js b/packages/mongo-livedata/oplog.js index 7f5b857b5c..1bf3f67fd0 100644 --- a/packages/mongo-livedata/oplog.js +++ b/packages/mongo-livedata/oplog.js @@ -29,6 +29,7 @@ MongoConnection.prototype._observeChangesWithOplog = function ( // XXX eliminate "curious" name var curiousity = new IdMap; + var currentlyFetching = new IdMap; var add = function (doc) { var id = doc._id; @@ -73,7 +74,7 @@ MongoConnection.prototype._observeChangesWithOplog = function ( throw new Error("Surprising phase in beCurious: " + phase); var futures = []; - var currentlyFetching = curiousity; + currentlyFetching = curiousity; curiousity = new IdMap; currentlyFetching.each(function (cacheKey, id) { // Run each until they yield. This implies that curiousity should not be @@ -93,6 +94,7 @@ MongoConnection.prototype._observeChangesWithOplog = function ( _.each(futures, function (f) { f.get(); }); + currentlyFetching = new IdMap; } beSteady(); }; @@ -112,12 +114,23 @@ MongoConnection.prototype._observeChangesWithOplog = function ( curiousity.set(idForOp(op), op.ts.toString()); }; oplogEntryHandlers[PHASE.FETCHING] = function (op) { - // XXX we can probably actually handle some operations directly (eg, - // insert/remove/replace if they don't conflict with "outstanding" fetches) - curiousity.set(idForOp(op), op.ts.toString()); + var id = idForOp(op); + // We can handle non-modify changes to things that we aren't fetching, + // directly. 
}; + // We can use the same handler for STEADY and FETCHING; the main difference is + // that FETCHING has non-empty currentlyFetching and/or curiousity. oplogEntryHandlers[PHASE.STEADY] = function (op) { var id = idForOp(op); + // If we're already fetching this one, or about to, we can't optimize; make + // sure that we fetch it again if necessary. + if (currentlyFetching.has(id) || curiousity.has(id)) { + if (phase !== PHASE.FETCHING) + throw Error("map not empty during steady phase"); + curiousity.set(id, op.ts.toString()); + return; + } + if (op.op === 'd') { if (published.has(id)) remove(id); @@ -155,13 +168,15 @@ MongoConnection.prototype._observeChangesWithOplog = function ( } curiousity.set(id, op.ts.toString()); - beCurious(); + if (phase === PHASE.STEADY) + beCurious(); return; } } else { throw Error("XXX SURPRISING OPERATION: " + op); } }; + oplogEntryHandlers[PHASE.FETCHING] = oplogEntryHandlers[PHASE.STEADY]; var oplogHandle = self._oplogHandle.onOplogEntry( From 2f5ed9a3d336a24a7ae5fa6199c4b892f12c9eb9 Mon Sep 17 00:00:00 2001 From: David Glasser Date: Tue, 22 Oct 2013 00:42:14 -0700 Subject: [PATCH 069/190] Stop using weird "curiousity" nomenclature. 
--- packages/mongo-livedata/oplog.js | 31 +++++++++++++++---------------- 1 file changed, 15 insertions(+), 16 deletions(-) diff --git a/packages/mongo-livedata/oplog.js b/packages/mongo-livedata/oplog.js index 1bf3f67fd0..795b5e32ea 100644 --- a/packages/mongo-livedata/oplog.js +++ b/packages/mongo-livedata/oplog.js @@ -27,8 +27,7 @@ MongoConnection.prototype._observeChangesWithOplog = function ( var published = new IdMap; var selector = LocalCollection._compileSelector(cursorDescription.selector); - // XXX eliminate "curious" name - var curiousity = new IdMap; + var needToFetch = new IdMap; var currentlyFetching = new IdMap; var add = function (doc) { @@ -67,17 +66,17 @@ MongoConnection.prototype._observeChangesWithOplog = function ( } }; - var beCurious = function () { + var fetchModifiedDocuments = function () { phase = PHASE.FETCHING; - while (!curiousity.isEmpty()) { + while (!needToFetch.isEmpty()) { if (phase !== PHASE.FETCHING) - throw new Error("Surprising phase in beCurious: " + phase); + throw new Error("Surprising phase in fetchModifiedDocuments: " + phase); var futures = []; - currentlyFetching = curiousity; - curiousity = new IdMap; + currentlyFetching = needToFetch; + needToFetch = new IdMap; currentlyFetching.each(function (cacheKey, id) { - // Run each until they yield. This implies that curiousity should not be + // Run each until they yield. This implies that needToFetch will not be // updated during this loop. Fiber(function () { var f = new Future; @@ -111,7 +110,7 @@ MongoConnection.prototype._observeChangesWithOplog = function ( var oplogEntryHandlers = {}; oplogEntryHandlers[PHASE.INITIALIZING] = function (op) { - curiousity.set(idForOp(op), op.ts.toString()); + needToFetch.set(idForOp(op), op.ts.toString()); }; oplogEntryHandlers[PHASE.FETCHING] = function (op) { var id = idForOp(op); @@ -119,15 +118,15 @@ MongoConnection.prototype._observeChangesWithOplog = function ( // directly. 
}; // We can use the same handler for STEADY and FETCHING; the main difference is - // that FETCHING has non-empty currentlyFetching and/or curiousity. + // that FETCHING has non-empty currentlyFetching and/or needToFetch. oplogEntryHandlers[PHASE.STEADY] = function (op) { var id = idForOp(op); // If we're already fetching this one, or about to, we can't optimize; make // sure that we fetch it again if necessary. - if (currentlyFetching.has(id) || curiousity.has(id)) { + if (currentlyFetching.has(id) || needToFetch.has(id)) { if (phase !== PHASE.FETCHING) throw Error("map not empty during steady phase"); - curiousity.set(id, op.ts.toString()); + needToFetch.set(id, op.ts.toString()); return; } @@ -167,9 +166,9 @@ MongoConnection.prototype._observeChangesWithOplog = function ( return; } - curiousity.set(id, op.ts.toString()); + needToFetch.set(id, op.ts.toString()); if (phase === PHASE.STEADY) - beCurious(); + fetchModifiedDocuments(); return; } } else { @@ -219,11 +218,11 @@ MongoConnection.prototype._observeChangesWithOplog = function ( if (phase !== PHASE.INITIALIZING) throw Error("Phase unexpectedly " + phase); - if (curiousity.isEmpty()) { + if (needToFetch.isEmpty()) { beSteady(); } else { phase = PHASE.FETCHING; - Meteor.defer(beCurious); + Meteor.defer(fetchModifiedDocuments); } return { From c7329ff617356e73e8847e3bca39031f414a1a83 Mon Sep 17 00:00:00 2001 From: David Glasser Date: Tue, 22 Oct 2013 16:24:54 -0700 Subject: [PATCH 070/190] Extra error checking around calling callbacks. 
--- packages/mongo-livedata/oplog.js | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/packages/mongo-livedata/oplog.js b/packages/mongo-livedata/oplog.js index 795b5e32ea..f021b0ec81 100644 --- a/packages/mongo-livedata/oplog.js +++ b/packages/mongo-livedata/oplog.js @@ -34,11 +34,15 @@ MongoConnection.prototype._observeChangesWithOplog = function ( var id = doc._id; var fields = EJSON.clone(doc); delete fields._id; + if (published.has(id)) + throw Error("tried to add something already published " + id); published.set(id, fields); callbacks.added && callbacks.added(id, EJSON.clone(fields)); }; var remove = function (id) { + if (!published.has(id)) + throw Error("tried to remove something unpublished " + id); published.remove(id); callbacks.removed && callbacks.removed(id); }; From 2d170a09712942cbff3ed6071b6c8ea859989f16 Mon Sep 17 00:00:00 2001 From: David Glasser Date: Tue, 22 Oct 2013 23:16:52 -0700 Subject: [PATCH 071/190] Add some facts to oplog. --- packages/mongo-livedata/mongo_driver.js | 4 ++++ packages/mongo-livedata/oplog.js | 5 +++++ 2 files changed, 9 insertions(+) diff --git a/packages/mongo-livedata/mongo_driver.js b/packages/mongo-livedata/mongo_driver.js index d1f9ea0516..ef4f0796d3 100644 --- a/packages/mongo-livedata/mongo_driver.js +++ b/packages/mongo-livedata/mongo_driver.js @@ -287,10 +287,14 @@ MongoConnection.prototype._startOplogTailing = function (oplogUrl, if (!_.has(callbacksByCollection, collectionName)) callbacksByCollection[collectionName] = {}; var callbackId = nextId++; + Package.facts && Package.facts.Facts.incrementServerFact( + "mongo-livedata", "oplog-watchers", 1); callbacksByCollection[collectionName][callbackId] = callback; return { stop: function () { delete callbacksByCollection[collectionName][callbackId]; + Package.facts && Package.facts.Facts.incrementServerFact( + "mongo-livedata", "oplog-watchers", -1); } }; }, diff --git a/packages/mongo-livedata/oplog.js b/packages/mongo-livedata/oplog.js index 
f021b0ec81..69de5899c5 100644 --- a/packages/mongo-livedata/oplog.js +++ b/packages/mongo-livedata/oplog.js @@ -22,6 +22,9 @@ MongoConnection.prototype._observeChangesWithOplog = function ( cursorDescription, callbacks) { var self = this; + Package.facts && Package.facts.Facts.incrementServerFact( + "mongo-livedata", "oplog-observers", 1); + var phase = PHASE.INITIALIZING; var published = new IdMap; @@ -233,6 +236,8 @@ MongoConnection.prototype._observeChangesWithOplog = function ( stop: function () { listenersHandle.stop(); oplogHandle.stop(); + Package.facts && Package.facts.Facts.incrementServerFact( + "mongo-livedata", "oplog-observers", -1); } }; }; From 5aecf9cbf4629367b45b1fa1a29a1cd61de3ddc0 Mon Sep 17 00:00:00 2001 From: David Glasser Date: Thu, 24 Oct 2013 20:09:30 -0700 Subject: [PATCH 072/190] don't use fibers in callWhenProcessedLatest --- packages/mongo-livedata/mongo_driver.js | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/packages/mongo-livedata/mongo_driver.js b/packages/mongo-livedata/mongo_driver.js index ef4f0796d3..13b2fe5ad3 100644 --- a/packages/mongo-livedata/mongo_driver.js +++ b/packages/mongo-livedata/mongo_driver.js @@ -312,13 +312,16 @@ MongoConnection.prototype._startOplogTailing = function (oplogUrl, // ready. readyFuture.wait(); - // Except for during startup, we DON'T block. - Fiber(function () { - // We need to make the selector at least as restrictive as the actual - // tailing selector (ie, we need to specify the DB name) or else we - // might find a TS that won't show up in the actual tail stream. - var lastEntry = oplogLastEntryConnection.findOne( - OPLOG_COLLECTION, baseOplogSelector(), {sort: {$natural: -1}}); + var coll = oplogLastEntryConnection._getCollection(OPLOG_COLLECTION); + // We need to make the selector at least as restrictive as the actual + // tailing selector (ie, we need to specify the DB name) or else we + // might find a TS that won't show up in the actual tail stream. 
+ coll.findOne(baseOplogSelector(), {sort: {$natural: -1}}, function (err, lastEntry) { + if (err) { + console.log("OH NO ERROR", err); + return; + } + if (!lastEntry) { // Really, nothing in the oplog? Well, we've processed everything. callback(); @@ -353,7 +356,7 @@ MongoConnection.prototype._startOplogTailing = function (oplogUrl, } pendingSequencers.splice(insertAfter, 0, {ts: ts, callback: callback}); - }).run(); + }); } }; From 2e5f96a8675623817d4ee18607eee988f24b29f7 Mon Sep 17 00:00:00 2001 From: David Glasser Date: Thu, 24 Oct 2013 17:04:04 -0700 Subject: [PATCH 073/190] clear lots of vars on oplog stop also don't publish facts by default --- packages/facts/facts.js | 2 +- packages/mongo-livedata/oplog.js | 25 +++++++++++++++++++++++-- 2 files changed, 24 insertions(+), 3 deletions(-) diff --git a/packages/facts/facts.js b/packages/facts/facts.js index 07acf1a5c4..ca1791bb74 100644 --- a/packages/facts/facts.js +++ b/packages/facts/facts.js @@ -63,7 +63,7 @@ if (Meteor.isServer) { }); } else { Facts.server = new Meteor.Collection(serverFactsCollection); - Meteor.subscribe("facts"); + // Meteor.subscribe("facts"); Template.serverFacts.factsByPackage = function () { return Facts.server.find(); diff --git a/packages/mongo-livedata/oplog.js b/packages/mongo-livedata/oplog.js index 69de5899c5..4ac5e5a46c 100644 --- a/packages/mongo-livedata/oplog.js +++ b/packages/mongo-livedata/oplog.js @@ -22,6 +22,8 @@ MongoConnection.prototype._observeChangesWithOplog = function ( cursorDescription, callbacks) { var self = this; + var stopped = false; + Package.facts && Package.facts.Facts.incrementServerFact( "mongo-livedata", "oplog-observers", 1); @@ -75,7 +77,7 @@ MongoConnection.prototype._observeChangesWithOplog = function ( var fetchModifiedDocuments = function () { phase = PHASE.FETCHING; - while (!needToFetch.isEmpty()) { + while (!stopped && !needToFetch.isEmpty()) { if (phase !== PHASE.FETCHING) throw new Error("Surprising phase in fetchModifiedDocuments: " + 
phase); @@ -90,7 +92,8 @@ MongoConnection.prototype._observeChangesWithOplog = function ( futures.push(f); var doc = self._docFetcher.fetch(cursorDescription.collectionName, id, cacheKey); - handleDoc(id, doc); + if (!stopped) + handleDoc(id, doc); f.return(); }).run(); }); @@ -234,8 +237,26 @@ MongoConnection.prototype._observeChangesWithOplog = function ( return { stop: function () { + if (stopped) + return; + stopped = true; listenersHandle.stop(); oplogHandle.stop(); + + published = null; + selector = null; + needToFetch = null; + currentlyFetching = null; + + _.each(writesToCommitWhenWeReachSteady, function (w) { + w.committed(); + }); + writesToCommitWhenWeReachSteady = null; + + oplogHandle = null; + listenersHandle = null; + initialCursor = null; + Package.facts && Package.facts.Facts.incrementServerFact( "mongo-livedata", "oplog-observers", -1); } From 062b475134c69f6878e8348a5fd37328bbc32311 Mon Sep 17 00:00:00 2001 From: David Glasser Date: Thu, 24 Oct 2013 20:09:21 -0700 Subject: [PATCH 074/190] don't die --- packages/mongo-livedata/oplog.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/mongo-livedata/oplog.js b/packages/mongo-livedata/oplog.js index 4ac5e5a46c..01b34e520c 100644 --- a/packages/mongo-livedata/oplog.js +++ b/packages/mongo-livedata/oplog.js @@ -207,7 +207,7 @@ MongoConnection.prototype._observeChangesWithOplog = function ( // This write cannot complete until we've caught up to "this point" in the // oplog, and then made it back to the steady state. 
self._oplogHandle.callWhenProcessedLatest(function () { - if (phase === PHASE.STEADY) + if (stopped || phase === PHASE.STEADY) write.committed(); else writesToCommitWhenWeReachSteady.push(write); From 60f7aa75cde8d968f3707e9cc3c876935bfd239f Mon Sep 17 00:00:00 2001 From: David Glasser Date: Fri, 25 Oct 2013 00:20:34 -0700 Subject: [PATCH 075/190] a few random notes --- packages/mongo-livedata/mongo_driver.js | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/packages/mongo-livedata/mongo_driver.js b/packages/mongo-livedata/mongo_driver.js index 13b2fe5ad3..851e7c05be 100644 --- a/packages/mongo-livedata/mongo_driver.js +++ b/packages/mongo-livedata/mongo_driver.js @@ -272,6 +272,7 @@ MongoConnection.prototype._startOplogTailing = function (oplogUrl, stopped = true; if (tailHandle) tailHandle.stop(); + // XXX should close connections too }, onOplogEntry: function (collectionName, callback) { @@ -316,9 +317,11 @@ MongoConnection.prototype._startOplogTailing = function (oplogUrl, // We need to make the selector at least as restrictive as the actual // tailing selector (ie, we need to specify the DB name) or else we // might find a TS that won't show up in the actual tail stream. 
- coll.findOne(baseOplogSelector(), {sort: {$natural: -1}}, function (err, lastEntry) { + coll.findOne(baseOplogSelector(), {fields: {ts: 1}, sort: {$natural: -1}}, function (err, lastEntry) { if (err) { - console.log("OH NO ERROR", err); + console.log("OH NO ERROR", err) + // call callback anyway, I guess + callback(); return; } From 70a28a62290adfd305cf4fa09d548bf2a9091d42 Mon Sep 17 00:00:00 2001 From: David Glasser Date: Fri, 25 Oct 2013 00:29:37 -0700 Subject: [PATCH 076/190] better way of not using more fibers --- packages/mongo-livedata/mongo_driver.js | 81 ++++++++++++------------- packages/mongo-livedata/oplog.js | 17 +++--- 2 files changed, 46 insertions(+), 52 deletions(-) diff --git a/packages/mongo-livedata/mongo_driver.js b/packages/mongo-livedata/mongo_driver.js index 851e7c05be..16bc502549 100644 --- a/packages/mongo-livedata/mongo_driver.js +++ b/packages/mongo-livedata/mongo_driver.js @@ -305,61 +305,58 @@ MongoConnection.prototype._startOplogTailing = function (oplogUrl, // currently visible. // XXX become convinced that this is actually safe even if oplogConnection // is some kind of pool - callWhenProcessedLatest: function (callback) { + waitUntilProcessedLatest: function () { if (stopped) - throw new Error("Called callWhenProcessedLatest on stopped handle!"); + throw new Error("Called waitUntilProcessedLatest on stopped handle!"); // Calling onOplogEntry requries us to wait for the oplog connection to be // ready. readyFuture.wait(); - var coll = oplogLastEntryConnection._getCollection(OPLOG_COLLECTION); // We need to make the selector at least as restrictive as the actual // tailing selector (ie, we need to specify the DB name) or else we // might find a TS that won't show up in the actual tail stream. 
- coll.findOne(baseOplogSelector(), {fields: {ts: 1}, sort: {$natural: -1}}, function (err, lastEntry) { - if (err) { - console.log("OH NO ERROR", err) - // call callback anyway, I guess - callback(); - return; - } + // + // We don't want to block here: the whole point is to call callback + // asynchronously! + var lastEntry = oplogLastEntryConnection.findOne( + OPLOG_COLLECTION, baseOplogSelector(), + {fields: {ts: 1}, sort: {$natural: -1}}); - if (!lastEntry) { - // Really, nothing in the oplog? Well, we've processed everything. - callback(); - return; - } - var ts = lastEntry.ts; - if (!ts) - throw Error("oplog entry without ts: " + EJSON.stringify(lastEntry)); + if (!lastEntry) { + // Really, nothing in the oplog? Well, we've processed everything. + return; + } - if (lastProcessedTS && ts.lessThanOrEqual(lastProcessedTS)) { - // We've already caught up to here. - callback(); - return; - } + var ts = lastEntry.ts; + if (!ts) + throw Error("oplog entry without ts: " + EJSON.stringify(lastEntry)); - var insertAfter = pendingSequencers.length; - while (insertAfter - 1 > 0 - && pendingSequencers[insertAfter - 1].ts.greaterThan(ts)) { - insertAfter--; - } + if (lastProcessedTS && ts.lessThanOrEqual(lastProcessedTS)) { + // We've already caught up to here. + return; + } - // XXX this can occur if we fail over from one primary to another. so - // this check needs to be removed before we merge oplog. that said, it - // has been helpful so far at proving that we are properly using - // poolSize 1. 
Also, we could keep something like it if we could - // actually detect failover; see - // https://github.com/mongodb/node-mongodb-native/issues/1120 - if (insertAfter !== pendingSequencers.length) { - throw Error("found misordered oplog: " - + showTS(_.last(pendingSequencers).ts) + " vs " - + showTS(ts)); - } + var insertAfter = pendingSequencers.length; + while (insertAfter - 1 > 0 + && pendingSequencers[insertAfter - 1].ts.greaterThan(ts)) { + insertAfter--; + } - pendingSequencers.splice(insertAfter, 0, {ts: ts, callback: callback}); - }); + // XXX this can occur if we fail over from one primary to another. so + // this check needs to be removed before we merge oplog. that said, it + // has been helpful so far at proving that we are properly using + // poolSize 1. Also, we could keep something like it if we could + // actually detect failover; see + // https://github.com/mongodb/node-mongodb-native/issues/1120 + if (insertAfter !== pendingSequencers.length) { + throw Error("found misordered oplog: " + + showTS(_.last(pendingSequencers).ts) + " vs " + + showTS(ts)); + } + var f = new Future; + pendingSequencers.splice(insertAfter, 0, {ts: ts, future: f}); + f.wait(); } }; @@ -420,7 +417,7 @@ MongoConnection.prototype._startOplogTailing = function (oplogUrl, while (!_.isEmpty(pendingSequencers) && pendingSequencers[0].ts.lessThanOrEqual(lastProcessedTS)) { var sequencer = pendingSequencers.shift(); - sequencer.callback(); + sequencer.future.return(); } }); readyFuture.return(); diff --git a/packages/mongo-livedata/oplog.js b/packages/mongo-livedata/oplog.js index 01b34e520c..5796600c66 100644 --- a/packages/mongo-livedata/oplog.js +++ b/packages/mongo-livedata/oplog.js @@ -206,13 +206,12 @@ MongoConnection.prototype._observeChangesWithOplog = function ( var write = fence.beginWrite(); // This write cannot complete until we've caught up to "this point" in the // oplog, and then made it back to the steady state. 
- self._oplogHandle.callWhenProcessedLatest(function () { - if (stopped || phase === PHASE.STEADY) - write.committed(); - else - writesToCommitWhenWeReachSteady.push(write); - }); - complete(); + Meteor.defer(complete); + self._oplogHandle.waitUntilProcessedLatest(); + if (stopped || phase === PHASE.STEADY) + write.committed(); + else + writesToCommitWhenWeReachSteady.push(write); } ); @@ -221,9 +220,7 @@ MongoConnection.prototype._observeChangesWithOplog = function ( add(initialDoc); }); - var catchUpFuture = new Future; - self._oplogHandle.callWhenProcessedLatest(catchUpFuture.resolver()); - catchUpFuture.wait(); + self._oplogHandle.waitUntilProcessedLatest(); if (phase !== PHASE.INITIALIZING) throw Error("Phase unexpectedly " + phase); From a5d805e63307227ad880d95b37addf32f68805fd Mon Sep 17 00:00:00 2001 From: David Glasser Date: Fri, 25 Oct 2013 14:54:00 -0700 Subject: [PATCH 077/190] delete dead code --- packages/mongo-livedata/oplog.js | 5 ----- 1 file changed, 5 deletions(-) diff --git a/packages/mongo-livedata/oplog.js b/packages/mongo-livedata/oplog.js index 5796600c66..26155b8a1a 100644 --- a/packages/mongo-livedata/oplog.js +++ b/packages/mongo-livedata/oplog.js @@ -122,11 +122,6 @@ MongoConnection.prototype._observeChangesWithOplog = function ( oplogEntryHandlers[PHASE.INITIALIZING] = function (op) { needToFetch.set(idForOp(op), op.ts.toString()); }; - oplogEntryHandlers[PHASE.FETCHING] = function (op) { - var id = idForOp(op); - // We can handle non-modify changes to things that we aren't fetching, - // directly. - }; // We can use the same handler for STEADY and FETCHING; the main difference is // that FETCHING has non-empty currentlyFetching and/or needToFetch. oplogEntryHandlers[PHASE.STEADY] = function (op) { From 2fb5e6601065b9b79f041c5fea999da3c546391c Mon Sep 17 00:00:00 2001 From: David Glasser Date: Fri, 25 Oct 2013 15:45:51 -0700 Subject: [PATCH 078/190] Some renames and minor fixes. 
--- packages/mongo-livedata/mongo_driver.js | 26 +++++++++++-------------- packages/mongo-livedata/oplog.js | 10 +++++----- 2 files changed, 16 insertions(+), 20 deletions(-) diff --git a/packages/mongo-livedata/mongo_driver.js b/packages/mongo-livedata/mongo_driver.js index 16bc502549..3a9f2d7945 100644 --- a/packages/mongo-livedata/mongo_driver.js +++ b/packages/mongo-livedata/mongo_driver.js @@ -263,7 +263,7 @@ MongoConnection.prototype._startOplogTailing = function (oplogUrl, }; }); // XXX doc - var pendingSequencers = []; + var catchingUpFutures = []; self._oplogHandle = { stop: function () { @@ -305,9 +305,9 @@ MongoConnection.prototype._startOplogTailing = function (oplogUrl, // currently visible. // XXX become convinced that this is actually safe even if oplogConnection // is some kind of pool - waitUntilProcessedLatest: function () { + waitUntilCaughtUp: function () { if (stopped) - throw new Error("Called waitUntilProcessedLatest on stopped handle!"); + throw new Error("Called waitUntilCaughtUp on stopped handle!"); // Calling onOplogEntry requries us to wait for the oplog connection to be // ready. @@ -316,9 +316,6 @@ MongoConnection.prototype._startOplogTailing = function (oplogUrl, // We need to make the selector at least as restrictive as the actual // tailing selector (ie, we need to specify the DB name) or else we // might find a TS that won't show up in the actual tail stream. - // - // We don't want to block here: the whole point is to call callback - // asynchronously! 
var lastEntry = oplogLastEntryConnection.findOne( OPLOG_COLLECTION, baseOplogSelector(), {fields: {ts: 1}, sort: {$natural: -1}}); @@ -337,9 +334,9 @@ MongoConnection.prototype._startOplogTailing = function (oplogUrl, return; } - var insertAfter = pendingSequencers.length; + var insertAfter = catchingUpFutures.length; while (insertAfter - 1 > 0 - && pendingSequencers[insertAfter - 1].ts.greaterThan(ts)) { + && catchingUpFutures[insertAfter - 1].ts.greaterThan(ts)) { insertAfter--; } @@ -349,13 +346,13 @@ MongoConnection.prototype._startOplogTailing = function (oplogUrl, // poolSize 1. Also, we could keep something like it if we could // actually detect failover; see // https://github.com/mongodb/node-mongodb-native/issues/1120 - if (insertAfter !== pendingSequencers.length) { + if (insertAfter !== catchingUpFutures.length) { throw Error("found misordered oplog: " - + showTS(_.last(pendingSequencers).ts) + " vs " + + showTS(_.last(catchingUpFutures).ts) + " vs " + showTS(ts)); } var f = new Future; - pendingSequencers.splice(insertAfter, 0, {ts: ts, future: f}); + catchingUpFutures.splice(insertAfter, 0, {ts: ts, future: f}); f.wait(); } }; @@ -414,9 +411,9 @@ MongoConnection.prototype._startOplogTailing = function (oplogUrl, if (!doc.ts) throw Error("oplog entry without ts: " + EJSON.stringify(doc)); lastProcessedTS = doc.ts; - while (!_.isEmpty(pendingSequencers) - && pendingSequencers[0].ts.lessThanOrEqual(lastProcessedTS)) { - var sequencer = pendingSequencers.shift(); + while (!_.isEmpty(catchingUpFutures) + && catchingUpFutures[0].ts.lessThanOrEqual(lastProcessedTS)) { + var sequencer = catchingUpFutures.shift(); sequencer.future.return(); } }); @@ -1098,7 +1095,6 @@ MongoConnection.prototype.tail = function (cursorDescription, docCallback) { if (lastTS) { newSelector.ts = {$gt: lastTS}; } - // XXX maybe set replay flag cursor = self._createSynchronousCursor(new CursorDescription( cursorDescription.collectionName, newSelector, diff --git 
a/packages/mongo-livedata/oplog.js b/packages/mongo-livedata/oplog.js index 26155b8a1a..523e3bb813 100644 --- a/packages/mongo-livedata/oplog.js +++ b/packages/mongo-livedata/oplog.js @@ -183,7 +183,7 @@ MongoConnection.prototype._observeChangesWithOplog = function ( oplogEntryHandlers[PHASE.FETCHING] = oplogEntryHandlers[PHASE.STEADY]; - var oplogHandle = self._oplogHandle.onOplogEntry( + var oplogEntryHandle = self._oplogHandle.onOplogEntry( cursorDescription.collectionName, function (op) { oplogEntryHandlers[phase](op); } @@ -202,7 +202,7 @@ MongoConnection.prototype._observeChangesWithOplog = function ( // This write cannot complete until we've caught up to "this point" in the // oplog, and then made it back to the steady state. Meteor.defer(complete); - self._oplogHandle.waitUntilProcessedLatest(); + self._oplogHandle.waitUntilCaughtUp(); if (stopped || phase === PHASE.STEADY) write.committed(); else @@ -215,7 +215,7 @@ MongoConnection.prototype._observeChangesWithOplog = function ( add(initialDoc); }); - self._oplogHandle.waitUntilProcessedLatest(); + self._oplogHandle.waitUntilCaughtUp(); if (phase !== PHASE.INITIALIZING) throw Error("Phase unexpectedly " + phase); @@ -233,7 +233,7 @@ MongoConnection.prototype._observeChangesWithOplog = function ( return; stopped = true; listenersHandle.stop(); - oplogHandle.stop(); + oplogEntryHandle.stop(); published = null; selector = null; @@ -245,7 +245,7 @@ MongoConnection.prototype._observeChangesWithOplog = function ( }); writesToCommitWhenWeReachSteady = null; - oplogHandle = null; + oplogEntryHandle = null; listenersHandle = null; initialCursor = null; From 81c23f4baab1a09833391a1c8821948a6099f248 Mon Sep 17 00:00:00 2001 From: David Glasser Date: Fri, 25 Oct 2013 17:25:19 -0700 Subject: [PATCH 079/190] minor refactor and add XXX suggestion --- packages/mongo-livedata/oplog.js | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/packages/mongo-livedata/oplog.js 
b/packages/mongo-livedata/oplog.js index 523e3bb813..b6c00717e7 100644 --- a/packages/mongo-livedata/oplog.js +++ b/packages/mongo-livedata/oplog.js @@ -163,14 +163,13 @@ MongoConnection.prototype._observeChangesWithOplog = function ( newDoc._id = id; LocalCollection._modify(newDoc, op.o); handleDoc(id, newDoc); - } else { - // If the selector is not affected by the modifier, no need to do - // anything! - if (!LocalCollection._isSelectorAffectedByModifier( + } else if (LocalCollection._isSelectorAffectedByModifier( cursorDescription.selector, op.o)) { - return; - } - + // XXX _isSelectorAffectedByModifier should actually be + // _canModifierChangeSelectorToTrue. because {x: 9} is affected by + // {$set: {x: 7}} but not in a way that is relevant here, because either + // x was already 9 (and this was handled by the previous clause), or x + // was not 9 and this isn't going to affect the selector needToFetch.set(id, op.ts.toString()); if (phase === PHASE.STEADY) fetchModifiedDocuments(); From 74b4bd2bd6ecded6de90ed4f6ce2f2938ddc40c9 Mon Sep 17 00:00:00 2001 From: David Glasser Date: Fri, 25 Oct 2013 17:26:11 -0700 Subject: [PATCH 080/190] Make DocFetcher more async. This should use fewer fibers. nim, can you benchmark? 
--- packages/mongo-livedata/doc_fetcher.js | 65 ++++++++++---------- packages/mongo-livedata/doc_fetcher_tests.js | 62 +++++++++---------- packages/mongo-livedata/oplog.js | 43 +++++++------ 3 files changed, 86 insertions(+), 84 deletions(-) diff --git a/packages/mongo-livedata/doc_fetcher.js b/packages/mongo-livedata/doc_fetcher.js index a33eabc755..cdcbfb9cd2 100644 --- a/packages/mongo-livedata/doc_fetcher.js +++ b/packages/mongo-livedata/doc_fetcher.js @@ -1,20 +1,21 @@ +var Fiber = Npm.require('fibers'); var Future = Npm.require('fibers/future'); DocFetcher = function (mongoConnection) { var self = this; self._mongoConnection = mongoConnection; - // Map from cache key -> [Future] - self._futuresForCacheKey = {}; + // Map from cache key -> [callback] + self._callbacksForCacheKey = {}; }; _.extend(DocFetcher.prototype, { // Fetches document "id" from collectionName, returning it or null if not - // found. Throws other errors. Can yield. + // found. // // If you make multiple calls to fetch() with the same cacheKey (a string), // DocFetcher may assume that they all return the same document. (It does // not check to see if collectionName/id match.) - fetch: function (collectionName, id, cacheKey) { + fetch: function (collectionName, id, cacheKey, callback) { var self = this; check(collectionName, String); @@ -23,38 +24,36 @@ _.extend(DocFetcher.prototype, { // If there's already an in-progress fetch for this cache key, yield until // it's done and return whatever it returns. 
- if (_.has(self._futuresForCacheKey, cacheKey)) { - var f = new Future; - self._futuresForCacheKey[cacheKey].push(f); - return f.wait(); + if (_.has(self._callbacksForCacheKey, cacheKey)) { + self._callbacksForCacheKey[cacheKey].push(callback); + return; } - var futures = self._futuresForCacheKey[cacheKey] = []; + var callbacks = self._callbacksForCacheKey[cacheKey] = [callback]; - try { - var doc = self._mongoConnection.findOne( - collectionName, {_id: id}) || null; - // Return doc to all fibers that are blocking on us. Note that this array - // can continue to grow during calls to Future.return. - while (!_.isEmpty(futures)) { - // Clone the document so that the various calls to fetch don't return - // objects that are intertwingled with each other. Clone before popping - // the future, so that if clone throws, the error gets thrown to the - // next future instead of that fiber hanging. - var clonedDoc = EJSON.clone(doc); - futures.pop().return(clonedDoc); + Fiber(function () { + try { + var doc = self._mongoConnection.findOne( + collectionName, {_id: id}) || null; + // Return doc to all relevant callbacks. Note that this array can + // continue to grow during callback excecution. + while (!_.isEmpty(callbacks)) { + // Clone the document so that the various calls to fetch don't return + // objects that are intertwingled with each other. Clone before + // popping the future, so that if clone throws, the error gets passed + // to the next callback. 
+ var clonedDoc = EJSON.clone(doc); + callbacks.pop()(null, clonedDoc); + } + } catch (e) { + while (!_.isEmpty(callbacks)) { + callbacks.pop()(e); + } + } finally { + // XXX consider keeping the doc around for a period of time before + // removing from the cache + delete self._callbacksForCacheKey[cacheKey]; } - } catch (e) { - while (!_.isEmpty(futures)) { - futures.pop().throw(e); - } - throw e; - } finally { - // XXX consider keeping the doc around for a period of time before - // removing from the cache - delete self._futuresForCacheKey[cacheKey]; - } - - return doc; + }).run(); } }); diff --git a/packages/mongo-livedata/doc_fetcher_tests.js b/packages/mongo-livedata/doc_fetcher_tests.js index cf4e05a8d0..c2affe7b17 100644 --- a/packages/mongo-livedata/doc_fetcher_tests.js +++ b/packages/mongo-livedata/doc_fetcher_tests.js @@ -1,38 +1,38 @@ var Fiber = Npm.require('fibers'); var Future = Npm.require('fibers/future'); -Tinytest.add("mongo-livedata - doc fetcher", function (test) { - var collName = "docfetcher-" + Random.id(); - var collection = new Meteor.Collection(collName); - var id1 = collection.insert({x: 1}); - var id2 = collection.insert({y: 2}); +testAsyncMulti("mongo-livedata - doc fetcher", [ + function (test, expect) { + var self = this; + var collName = "docfetcher-" + Random.id(); + var collection = new Meteor.Collection(collName); + var id1 = collection.insert({x: 1}); + var id2 = collection.insert({y: 2}); - var fetcher = new MongoTest.DocFetcher( - MongoInternals.defaultRemoteCollectionDriver().mongo); + var fetcher = new MongoTest.DocFetcher( + MongoInternals.defaultRemoteCollectionDriver().mongo); - // Test basic operation. - test.equal(fetcher.fetch(collName, id1, Random.id()), - {_id: id1, x: 1}); - test.equal(fetcher.fetch(collName, "nonexistent!", Random.id()), null); + // Test basic operation. 
+ fetcher.fetch(collName, id1, Random.id(), expect(null, {_id: id1, x: 1})); + fetcher.fetch(collName, "nonexistent!", Random.id(), expect(null, null)); - var future = new Future; - var fetched = false; - var cacheKey = Random.id(); - Fiber(function () { - var d = fetcher.fetch(collName, id2, cacheKey); - fetched = true; - future.return(d); - }).run(); - // The fetcher yields: - test.isFalse(fetched); + var fetched = false; + var cacheKey = Random.id(); + var expected = {_id: id2, y: 2}; + fetcher.fetch(collName, id2, cacheKey, expect(function (e, d) { + fetched = true; + test.isFalse(e); + test.equal(d, expected); + })); + // The fetcher yields. + test.isFalse(fetched); - // Now ask for another document with the same cache key. Because a fetch for - // that cache key is in flight, we will get the other fetch's document, not - // this random document. - var doc2a = fetcher.fetch(collName, Random.id(), cacheKey); - // Finally, wait for the original fetch to return: - var doc2b = future.wait(); - var expected = {_id: id2, y: 2}; - test.equal(doc2a, expected); - test.equal(doc2b, expected); -}); + // Now ask for another document with the same cache key. Because a fetch for + // that cache key is in flight, we will get the other fetch's document, not + // this random document. + fetcher.fetch(collName, Random.id(), cacheKey, expect(function (e, d) { + test.isFalse(e); + test.equal(d, expected); + })); + } +]); diff --git a/packages/mongo-livedata/oplog.js b/packages/mongo-livedata/oplog.js index b6c00717e7..37e200b6da 100644 --- a/packages/mongo-livedata/oplog.js +++ b/packages/mongo-livedata/oplog.js @@ -81,28 +81,31 @@ MongoConnection.prototype._observeChangesWithOplog = function ( if (phase !== PHASE.FETCHING) throw new Error("Surprising phase in fetchModifiedDocuments: " + phase); - var futures = []; currentlyFetching = needToFetch; needToFetch = new IdMap; - currentlyFetching.each(function (cacheKey, id) { - // Run each until they yield. 
This implies that needToFetch will not be - // updated during this loop. - Fiber(function () { - var f = new Future; - futures.push(f); - var doc = self._docFetcher.fetch(cursorDescription.collectionName, id, - cacheKey); - if (!stopped) - handleDoc(id, doc); - f.return(); - }).run(); - }); - Future.wait(futures); - // Throw if any throw. - // XXX this means the observe will now be stalled - _.each(futures, function (f) { - f.get(); - }); + var waiting = 0; + var error = null; + var fut = new Future; + Fiber(function () { + currentlyFetching.each(function (cacheKey, id) { + // currentlyFetching will not be updated during this loop. + waiting++; + self._docFetcher.fetch(cursorDescription.collectionName, id, cacheKey, function (err, doc) { + if (err) { + if (!error) + error = err; + } else if (!stopped) { + handleDoc(id, doc); + } + waiting--; + if (waiting == 0) + fut.return(); + }); + }); + }).run(); + fut.wait(); + if (error) + throw error; currentlyFetching = new IdMap; } beSteady(); From adb46face09b979e4a0c72e236ec1b7baad8bc56 Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Mon, 28 Oct 2013 14:44:27 -0700 Subject: [PATCH 081/190] Basic handling for fields in oplog Just run through projection before calling callbacks or keeping fields in published IdMap, or diffing. --- packages/mongo-livedata/mongo_driver.js | 5 ----- packages/mongo-livedata/oplog.js | 9 +++++++-- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/packages/mongo-livedata/mongo_driver.js b/packages/mongo-livedata/mongo_driver.js index 3a9f2d7945..b749682988 100644 --- a/packages/mongo-livedata/mongo_driver.js +++ b/packages/mongo-livedata/mongo_driver.js @@ -1562,11 +1562,6 @@ var cursorSupportedByOplogTailing = function (cursorDescription) { // First, check the options. var options = cursorDescription.options; - // We don't yet implement field filtering for oplog tailing (just because it's - // not implemented, not because there's a deep problem with implementing it). 
- // XXX Implementing field filtering should be a priority. - if (options.fields) return false; - // This option (which are mostly used for sorted cursors) require us to figure // out where a given document fits in an order to know if it's included or // not, and we don't track that information when doing oplog tailing. diff --git a/packages/mongo-livedata/oplog.js b/packages/mongo-livedata/oplog.js index 37e200b6da..07706cd1dd 100644 --- a/packages/mongo-livedata/oplog.js +++ b/packages/mongo-livedata/oplog.js @@ -31,13 +31,16 @@ MongoConnection.prototype._observeChangesWithOplog = function ( var published = new IdMap; var selector = LocalCollection._compileSelector(cursorDescription.selector); + var projection = cursorDescription.options.fields ? + LocalCollection._compileProjection(cursorDescription.options.fields) : + EJSON.clone; var needToFetch = new IdMap; var currentlyFetching = new IdMap; var add = function (doc) { var id = doc._id; - var fields = EJSON.clone(doc); + var fields = projection(doc); delete fields._id; if (published.has(id)) throw Error("tried to add something already published " + id); @@ -52,8 +55,10 @@ MongoConnection.prototype._observeChangesWithOplog = function ( callbacks.removed && callbacks.removed(id); }; - // XXX mutates newDoc, that's weird + // XXX it doesn't mutate newDoc anymore since we apply projection function but + // be careful refactoring and moving out projection. 
var handleDoc = function (id, newDoc) { + newDoc = projection(newDoc); var matchesNow = newDoc && selector(newDoc); var matchedBefore = published.has(id); if (matchesNow && !matchedBefore) { From 111de8d803ac416f392ab350975f13dcb593eaf4 Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Mon, 28 Oct 2013 14:45:29 -0700 Subject: [PATCH 082/190] Oplog observe handles 'drop collection' or db.c.drop() --- packages/mongo-livedata/mongo_driver.js | 11 +++++++++-- packages/mongo-livedata/oplog.js | 15 ++++++++++++++- 2 files changed, 23 insertions(+), 3 deletions(-) diff --git a/packages/mongo-livedata/mongo_driver.js b/packages/mongo-livedata/mongo_driver.js index b749682988..a9f549f923 100644 --- a/packages/mongo-livedata/mongo_driver.js +++ b/packages/mongo-livedata/mongo_driver.js @@ -258,8 +258,10 @@ MongoConnection.prototype._startOplogTailing = function (oplogUrl, var baseOplogSelector = _.once(function () { return { ns: new RegExp('^' + quotemeta(dbNameFuture.wait()) + '\\.'), - // XXX also handle drop collection, etc - op: {$in: ['i', 'u', 'd']} + $or: [ + { op: {$in: ['i', 'u', 'd']} }, + // If it is not db.collection.drop(), ignore it + { op: 'c', 'o.drop': { $exists: true } }] }; }); // XXX doc @@ -403,6 +405,11 @@ MongoConnection.prototype._startOplogTailing = function (oplogUrl, var collectionName = doc.ns.substr(dbName.length + 1); + // Is it a special command and the collection name is hidden somewhere in + // operator? 
+ if (collectionName === "$cmd") + collectionName = doc.o.drop; + _.each(callbacksByCollection[collectionName], function (callback) { callback(EJSON.clone(doc)); }); diff --git a/packages/mongo-livedata/oplog.js b/packages/mongo-livedata/oplog.js index 07706cd1dd..163e01590b 100644 --- a/packages/mongo-livedata/oplog.js +++ b/packages/mongo-livedata/oplog.js @@ -14,6 +14,9 @@ var idForOp = function (op) { return op.o._id; else if (op.op === 'u') return op.o2._id; + else if (op.op === 'c') + throw Error("Operator 'c' doesn't supply an object with id: " + + EJSON.stringify(op)); else throw Error("Unknown op: " + EJSON.stringify(op)); }; @@ -192,7 +195,17 @@ MongoConnection.prototype._observeChangesWithOplog = function ( var oplogEntryHandle = self._oplogHandle.onOplogEntry( cursorDescription.collectionName, function (op) { - oplogEntryHandlers[phase](op); + if (op.op === 'c') { + // If it is not db.collection.drop(), ignore it + if (op.o && _.isEqual(_.keys(op.o), ['drop'])) { + published.each(function (fields, id) { + remove(id); + }); + } + } else { + // All other operators should be handled depending on phase + oplogEntryHandlers[phase](op); + } } ); From 008c74df4b2aeaec62e0a3869136b647068d919c Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Mon, 28 Oct 2013 15:45:58 -0700 Subject: [PATCH 083/190] OPLOG_URL is accessed only by appConfig. 
--- packages/application-configuration/config.js | 3 ++- packages/mongo-livedata/remote_collection_driver.js | 11 ++++++----- tools/run.js | 2 +- 3 files changed, 9 insertions(+), 7 deletions(-) diff --git a/packages/application-configuration/config.js b/packages/application-configuration/config.js index f7054ab3ac..831b4cfe0e 100644 --- a/packages/application-configuration/config.js +++ b/packages/application-configuration/config.js @@ -57,7 +57,8 @@ try { settings: settings, packages: { 'mongo-livedata': { - url: process.env.MONGO_URL + url: process.env.MONGO_URL, + oplog: process.env.OPLOG_URL }, 'email': { url: process.env.MAIL_URL diff --git a/packages/mongo-livedata/remote_collection_driver.js b/packages/mongo-livedata/remote_collection_driver.js index 552974aa5d..b56607e9f8 100644 --- a/packages/mongo-livedata/remote_collection_driver.js +++ b/packages/mongo-livedata/remote_collection_driver.js @@ -24,20 +24,21 @@ _.extend(MongoInternals.RemoteCollectionDriver.prototype, { // you're only trying to receive data from a remote DDP server.) MongoInternals.defaultRemoteCollectionDriver = _.once(function () { var mongoUrl; + var connectionOptions = {}; + AppConfig.configurePackage("mongo-livedata", function (config) { // This will keep running if mongo gets reconfigured. That's not ideal, but // should be ok for now. mongoUrl = config.url; + + if (config.oplog) + connectionOptions.oplogUrl = config.oplog; }); + // XXX bad error since it could also be set directly in METEOR_DEPLOY_CONFIG if (! 
mongoUrl) throw new Error("MONGO_URL must be set in environment"); - var connectionOptions = {}; - // XXX we should NOT be reading directly from the env here; need to consult - // with naomi re: AppConfig - if (process.env.XXX_OPLOG_URL) - connectionOptions.oplogUrl = process.env.XXX_OPLOG_URL; return new MongoInternals.RemoteCollectionDriver(mongoUrl, connectionOptions); }); diff --git a/tools/run.js b/tools/run.js index fb8506b3f6..39893ddd67 100644 --- a/tools/run.js +++ b/tools/run.js @@ -243,7 +243,7 @@ var startServer = function (options) { env.PORT = options.innerPort; env.MONGO_URL = options.mongoUrl; - env.XXX_OPLOG_URL = options.oplogUrl; + env.OPLOG_URL = options.oplogUrl; env.ROOT_URL = options.rootUrl; if (options.settings) env.METEOR_SETTINGS = options.settings; From e793c9d948f13eb0a95a1512dd4de8a0c787eebe Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Tue, 29 Oct 2013 17:14:16 -0700 Subject: [PATCH 084/190] Remove unnecessary check. This removed check will never be false as we pass the oplog selector that looks for 'o.drop' and we assume that you can get only one command per oplog record. 
--- packages/mongo-livedata/oplog.js | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/packages/mongo-livedata/oplog.js b/packages/mongo-livedata/oplog.js index 163e01590b..566bff264d 100644 --- a/packages/mongo-livedata/oplog.js +++ b/packages/mongo-livedata/oplog.js @@ -196,12 +196,9 @@ MongoConnection.prototype._observeChangesWithOplog = function ( var oplogEntryHandle = self._oplogHandle.onOplogEntry( cursorDescription.collectionName, function (op) { if (op.op === 'c') { - // If it is not db.collection.drop(), ignore it - if (op.o && _.isEqual(_.keys(op.o), ['drop'])) { - published.each(function (fields, id) { - remove(id); - }); - } + published.each(function (fields, id) { + remove(id); + }); } else { // All other operators should be handled depending on phase oplogEntryHandlers[phase](op); From 2bbd69ad2ce6469300034563413e9d1b36f22002 Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Tue, 29 Oct 2013 17:16:05 -0700 Subject: [PATCH 085/190] Tests to prove the basic fields implementation was wrong. 
--- .../mongo-livedata/observe_changes_tests.js | 30 +++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/packages/mongo-livedata/observe_changes_tests.js b/packages/mongo-livedata/observe_changes_tests.js index 2b01302728..3ea7ab33f4 100644 --- a/packages/mongo-livedata/observe_changes_tests.js +++ b/packages/mongo-livedata/observe_changes_tests.js @@ -168,6 +168,36 @@ if (Meteor.isServer) { onComplete(); }); }); + + Tinytest.addAsync("observeChanges - unordered - specific fields + selector on excluded fields", function (test, onComplete) { + var c = makeCollection(); + withCallbackLogger(test, ["added", "changed", "removed"], Meteor.isServer, function (logger) { + var handle = c.find({ mac: 1, cheese: 2 }, + {fields:{noodles: 1, bacon: 1}}).observeChanges(logger); + var barid = c.insert({thing: "stuff", mac: 1, cheese: 2}); + logger.expectResultOnly("added", [barid, {}]); + + var fooid = c.insert({noodles: "good", bacon: "bad", apples: "ok", mac: 1, cheese: 2}); + + logger.expectResultOnly("added", [fooid, {noodles: "good", bacon: "bad"}]); + + c.update(fooid, {noodles: "alright", potatoes: "tasty", apples: "ok", mac: 1, cheese: 2}); + logger.expectResultOnly("changed", + [fooid, {noodles: "alright", bacon: undefined}]); + c.update(fooid, {noodles: "alright", potatoes: "meh", apples: "ok", mac: 1, cheese: 2}); + c.remove(fooid); + logger.expectResultOnly("removed", [fooid]); + c.remove(barid); + logger.expectResultOnly("removed", [barid]); + + fooid = c.insert({noodles: "good", bacon: "bad", mac: 1, cheese: 2}); + + logger.expectResult("added", [fooid, {noodles: "good", bacon: "bad"}]); + logger.expectNoResult(); + handle.stop(); + onComplete(); + }); + }); } From 8a00f1d713e8a67d6da453e257aee0ccdf55821f Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Tue, 29 Oct 2013 21:37:06 -0700 Subject: [PATCH 086/190] Fix basic fields projection. 
--- packages/mongo-livedata/oplog.js | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/packages/mongo-livedata/oplog.js b/packages/mongo-livedata/oplog.js index 566bff264d..aa0a597a1f 100644 --- a/packages/mongo-livedata/oplog.js +++ b/packages/mongo-livedata/oplog.js @@ -43,12 +43,12 @@ MongoConnection.prototype._observeChangesWithOplog = function ( var add = function (doc) { var id = doc._id; - var fields = projection(doc); + var fields = _.clone(doc); delete fields._id; if (published.has(id)) throw Error("tried to add something already published " + id); published.set(id, fields); - callbacks.added && callbacks.added(id, EJSON.clone(fields)); + callbacks.added && callbacks.added(id, projection(fields)); }; var remove = function (id) { @@ -58,10 +58,8 @@ MongoConnection.prototype._observeChangesWithOplog = function ( callbacks.removed && callbacks.removed(id); }; - // XXX it doesn't mutate newDoc anymore since we apply projection function but - // be careful refactoring and moving out projection. var handleDoc = function (id, newDoc) { - newDoc = projection(newDoc); + newDoc = _.clone(newDoc); var matchesNow = newDoc && selector(newDoc); var matchedBefore = published.has(id); if (matchesNow && !matchedBefore) { @@ -77,6 +75,7 @@ MongoConnection.prototype._observeChangesWithOplog = function ( if (callbacks.changed) { var changed = LocalCollection._makeChangedFields( EJSON.clone(newDoc), oldDoc); + changed = projection(changed); if (!_.isEmpty(changed)) callbacks.changed(id, changed); } From 77b5631f4256d41e1b19fba8e3bcaa2ef3b74881 Mon Sep 17 00:00:00 2001 From: David Glasser Date: Thu, 7 Nov 2013 16:31:14 -0800 Subject: [PATCH 087/190] Separate projection functions into separate file. 
--- packages/minimongo/minimongo.js | 95 -------------------------------- packages/minimongo/package.js | 1 + packages/minimongo/projection.js | 94 +++++++++++++++++++++++++++++++ 3 files changed, 95 insertions(+), 95 deletions(-) create mode 100644 packages/minimongo/projection.js diff --git a/packages/minimongo/minimongo.js b/packages/minimongo/minimongo.js index 86d9c2045c..6e95109f12 100644 --- a/packages/minimongo/minimongo.js +++ b/packages/minimongo/minimongo.js @@ -1090,101 +1090,6 @@ LocalCollection._observeOrderedFromObserveChanges = return handle; }; -LocalCollection._compileProjection = function (fields) { - if (!_.isObject(fields)) - throw MinimongoError("fields option must be an object"); - - if (_.any(_.values(fields), function (x) { - return _.indexOf([1, 0, true, false], x) === -1; })) - throw MinimongoError("Projection values should be one of 1, 0, true, or false"); - - var _idProjection = _.isUndefined(fields._id) ? true : fields._id; - // Find the non-_id keys (_id is handled specially because it is included unless - // explicitly excluded). Sort the keys, so that our code to detect overlaps - // like 'foo' and 'foo.bar' can assume that 'foo' comes first. 
- var fieldsKeys = _.reject(_.keys(fields).sort(), function (key) { return key === '_id'; }); - var including = null; // Unknown - var projectionRulesTree = {}; // Tree represented as nested objects - - _.each(fieldsKeys, function (keyPath) { - var rule = !!fields[keyPath]; - if (including === null) - including = rule; - if (including !== rule) - // This error message is copies from MongoDB shell - throw MinimongoError("You cannot currently mix including and excluding fields."); - var treePos = projectionRulesTree; - keyPath = keyPath.split('.'); - - _.each(keyPath.slice(0, -1), function (key, idx) { - if (!_.has(treePos, key)) - treePos[key] = {}; - else if (_.isBoolean(treePos[key])) { - // Check passed projection fields' keys: If you have two rules such as - // 'foo.bar' and 'foo.bar.baz', then the result becomes ambiguous. If - // that happens, there is a probability you are doing something wrong, - // framework should notify you about such mistake earlier on cursor - // compilation step than later during runtime. Note, that real mongo - // doesn't do anything about it and the later rule appears in projection - // project, more priority it takes. - // - // Example, assume following in mongo shell: - // > db.coll.insert({ a: { b: 23, c: 44 } }) - // > db.coll.find({}, { 'a': 1, 'a.b': 1 }) - // { "_id" : ObjectId("520bfe456024608e8ef24af3"), "a" : { "b" : 23 } } - // > db.coll.find({}, { 'a.b': 1, 'a': 1 }) - // { "_id" : ObjectId("520bfe456024608e8ef24af3"), "a" : { "b" : 23, "c" : 44 } } - // - // Note, how second time the return set of keys is different. - - var currentPath = keyPath.join('.'); - var anotherPath = keyPath.slice(0, idx + 1).join('.'); - throw MinimongoError("both " + currentPath + " and " + anotherPath + - " found in fields option, using both of them may trigger " + - "unexpected behavior. 
Did you mean to use only one of them?"); - } - - treePos = treePos[key]; - }); - - treePos[_.last(keyPath)] = including; - }); - - // returns transformed doc according to ruleTree - var transform = function (doc, ruleTree) { - // Special case for "sets" - if (_.isArray(doc)) - return _.map(doc, function (subdoc) { return transform(subdoc, ruleTree); }); - - var res = including ? {} : EJSON.clone(doc); - _.each(ruleTree, function (rule, key) { - if (!_.has(doc, key)) - return; - if (_.isObject(rule)) { - // For sub-objects/subsets we branch - if (_.isObject(doc[key])) - res[key] = transform(doc[key], rule); - // Otherwise we don't even touch this subfield - } else if (including) - res[key] = doc[key]; - else - delete res[key]; - }); - - return res; - }; - - return function (obj) { - var res = transform(obj, projectionRulesTree); - - if (_idProjection && _.has(obj, '_id')) - res._id = obj._id; - if (!_idProjection && _.has(res, '_id')) - delete res._id; - return res; - }; -}; - // Searches $near operator in the selector recursively // (including all $or/$and/$nor/$not branches) var isGeoQuery = function (selector) { diff --git a/packages/minimongo/package.js b/packages/minimongo/package.js index f70ccddabb..58cc889784 100644 --- a/packages/minimongo/package.js +++ b/packages/minimongo/package.js @@ -13,6 +13,7 @@ Package.on_use(function (api) { api.add_files([ 'minimongo.js', 'selector.js', + 'projection.js', 'modify.js', 'diff.js', 'objectid.js' diff --git a/packages/minimongo/projection.js b/packages/minimongo/projection.js new file mode 100644 index 0000000000..87e02a8520 --- /dev/null +++ b/packages/minimongo/projection.js @@ -0,0 +1,94 @@ +LocalCollection._compileProjection = function (fields) { + if (!_.isObject(fields)) + throw MinimongoError("fields option must be an object"); + + if (_.any(_.values(fields), function (x) { + return _.indexOf([1, 0, true, false], x) === -1; })) + throw MinimongoError("Projection values should be one of 1, 0, true, or false"); 
+ + var _idProjection = _.isUndefined(fields._id) ? true : fields._id; + // Find the non-_id keys (_id is handled specially because it is included unless + // explicitly excluded). Sort the keys, so that our code to detect overlaps + // like 'foo' and 'foo.bar' can assume that 'foo' comes first. + var fieldsKeys = _.reject(_.keys(fields).sort(), function (key) { return key === '_id'; }); + var including = null; // Unknown + var projectionRulesTree = {}; // Tree represented as nested objects + + _.each(fieldsKeys, function (keyPath) { + var rule = !!fields[keyPath]; + if (including === null) + including = rule; + if (including !== rule) + // This error message is copies from MongoDB shell + throw MinimongoError("You cannot currently mix including and excluding fields."); + var treePos = projectionRulesTree; + keyPath = keyPath.split('.'); + + _.each(keyPath.slice(0, -1), function (key, idx) { + if (!_.has(treePos, key)) + treePos[key] = {}; + else if (_.isBoolean(treePos[key])) { + // Check passed projection fields' keys: If you have two rules such as + // 'foo.bar' and 'foo.bar.baz', then the result becomes ambiguous. If + // that happens, there is a probability you are doing something wrong, + // framework should notify you about such mistake earlier on cursor + // compilation step than later during runtime. Note, that real mongo + // doesn't do anything about it and the later rule appears in projection + // project, more priority it takes. + // + // Example, assume following in mongo shell: + // > db.coll.insert({ a: { b: 23, c: 44 } }) + // > db.coll.find({}, { 'a': 1, 'a.b': 1 }) + // { "_id" : ObjectId("520bfe456024608e8ef24af3"), "a" : { "b" : 23 } } + // > db.coll.find({}, { 'a.b': 1, 'a': 1 }) + // { "_id" : ObjectId("520bfe456024608e8ef24af3"), "a" : { "b" : 23, "c" : 44 } } + // + // Note, how second time the return set of keys is different. 
+ + var currentPath = keyPath.join('.'); + var anotherPath = keyPath.slice(0, idx + 1).join('.'); + throw MinimongoError("both " + currentPath + " and " + anotherPath + + " found in fields option, using both of them may trigger " + + "unexpected behavior. Did you mean to use only one of them?"); + } + + treePos = treePos[key]; + }); + + treePos[_.last(keyPath)] = including; + }); + + // returns transformed doc according to ruleTree + var transform = function (doc, ruleTree) { + // Special case for "sets" + if (_.isArray(doc)) + return _.map(doc, function (subdoc) { return transform(subdoc, ruleTree); }); + + var res = including ? {} : EJSON.clone(doc); + _.each(ruleTree, function (rule, key) { + if (!_.has(doc, key)) + return; + if (_.isObject(rule)) { + // For sub-objects/subsets we branch + if (_.isObject(doc[key])) + res[key] = transform(doc[key], rule); + // Otherwise we don't even touch this subfield + } else if (including) + res[key] = doc[key]; + else + delete res[key]; + }); + + return res; + }; + + return function (obj) { + var res = transform(obj, projectionRulesTree); + + if (_idProjection && _.has(obj, '_id')) + res._id = obj._id; + if (!_idProjection && _.has(res, '_id')) + delete res._id; + return res; + }; +}; From 5f1b721823f960f0b40c902676c88216d278d861 Mon Sep 17 00:00:00 2001 From: Nick Martin Date: Mon, 28 Oct 2013 14:30:50 -0700 Subject: [PATCH 088/190] Fix meteor run to look at the OPLOG_URL environment variable. --- tools/run.js | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/tools/run.js b/tools/run.js index 39893ddd67..696ef6fa9b 100644 --- a/tools/run.js +++ b/tools/run.js @@ -414,8 +414,12 @@ exports.run = function (context, options) { // Allow override and use of external mongo. Matches code in launch_mongo. var mongoUrl = process.env.MONGO_URL || ("mongodb://127.0.0.1:" + mongoPort + "/meteor"); - var oplogUrl = process.env.MONGO_URL ? 
undefined - : "mongodb://127.0.01:" + mongoPort + "/local"; + // Allow people to specify an OPLOG_URL override. If someone specifies + // a MONGO_URL but not an OPLOG_URL, disable the oplog. If neither is + // specified, use the default internal mongo oplog. + var oplogUrl = process.env.OPLOG_URL || + (process.env.MONGO_URL ? undefined + : "mongodb://127.0.0.1:" + mongoPort + "/local"); var firstRun = true; var serverHandle; From 254d7695bc2d644027be0588fba54c1ddf13d287 Mon Sep 17 00:00:00 2001 From: David Glasser Date: Thu, 7 Nov 2013 16:32:57 -0800 Subject: [PATCH 089/190] Refactorings by slava. --- packages/minimongo/projection.js | 85 +++++++++++++++++++------------- 1 file changed, 51 insertions(+), 34 deletions(-) diff --git a/packages/minimongo/projection.js b/packages/minimongo/projection.js index 87e02a8520..14811b0ee2 100644 --- a/packages/minimongo/projection.js +++ b/packages/minimongo/projection.js @@ -1,4 +1,50 @@ +// Knows how to compile a fields projection to a predicate function. LocalCollection._compileProjection = function (fields) { + var _idProjection = _.isUndefined(fields._id) ? true : fields._id; + var details = projectionDetails(fields); + + // returns transformed doc according to ruleTree + var transform = function (doc, ruleTree) { + // Special case for "sets" + if (_.isArray(doc)) + return _.map(doc, function (subdoc) { return transform(subdoc, ruleTree); }); + + var res = details.including ? 
{} : EJSON.clone(doc); + _.each(ruleTree, function (rule, key) { + if (!_.has(doc, key)) + return; + if (_.isObject(rule)) { + // For sub-objects/subsets we branch + if (_.isObject(doc[key])) + res[key] = transform(doc[key], rule); + // Otherwise we don't even touch this subfield + } else if (details.including) + res[key] = doc[key]; + else + delete res[key]; + }); + + return res; + }; + + return function (obj) { + var res = transform(obj, details.tree); + + if (_idProjection && _.has(obj, '_id')) + res._id = obj._id; + if (!_idProjection && _.has(res, '_id')) + delete res._id; + return res; + }; +}; + +// Traverses the keys of passed projection and constructs a tree where all +// leaves are either all True or all False +// @returns Object: +// - tree - Object - tree representation of keys involved in projection +// (exception for '_id' as it is a special case handled separately) +// - including - Boolean - "take only certain fields" type of projection +var projectionDetails = function (fields) { if (!_.isObject(fields)) throw MinimongoError("fields option must be an object"); @@ -6,7 +52,6 @@ LocalCollection._compileProjection = function (fields) { return _.indexOf([1, 0, true, false], x) === -1; })) throw MinimongoError("Projection values should be one of 1, 0, true, or false"); - var _idProjection = _.isUndefined(fields._id) ? true : fields._id; // Find the non-_id keys (_id is handled specially because it is included unless // explicitly excluded). Sort the keys, so that our code to detect overlaps // like 'foo' and 'foo.bar' can assume that 'foo' comes first. @@ -57,38 +102,10 @@ LocalCollection._compileProjection = function (fields) { treePos[_.last(keyPath)] = including; }); - - // returns transformed doc according to ruleTree - var transform = function (doc, ruleTree) { - // Special case for "sets" - if (_.isArray(doc)) - return _.map(doc, function (subdoc) { return transform(subdoc, ruleTree); }); - - var res = including ? 
{} : EJSON.clone(doc); - _.each(ruleTree, function (rule, key) { - if (!_.has(doc, key)) - return; - if (_.isObject(rule)) { - // For sub-objects/subsets we branch - if (_.isObject(doc[key])) - res[key] = transform(doc[key], rule); - // Otherwise we don't even touch this subfield - } else if (including) - res[key] = doc[key]; - else - delete res[key]; - }); - - return res; - }; - - return function (obj) { - var res = transform(obj, projectionRulesTree); - - if (_idProjection && _.has(obj, '_id')) - res._id = obj._id; - if (!_idProjection && _.has(res, '_id')) - delete res._id; - return res; + + return { + tree: projectionRulesTree, + including: including }; }; + From 034d2e628b662864c09d04bb22b1b753aeaf62e7 Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Tue, 5 Nov 2013 15:30:06 -0800 Subject: [PATCH 090/190] Remove a couple of EJSON clones we don't need --- packages/mongo-livedata/oplog.js | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/mongo-livedata/oplog.js b/packages/mongo-livedata/oplog.js index aa0a597a1f..53cd01ae75 100644 --- a/packages/mongo-livedata/oplog.js +++ b/packages/mongo-livedata/oplog.js @@ -74,7 +74,7 @@ MongoConnection.prototype._observeChangesWithOplog = function ( published.set(id, newDoc); if (callbacks.changed) { var changed = LocalCollection._makeChangedFields( - EJSON.clone(newDoc), oldDoc); + newDoc, oldDoc); changed = projection(changed); if (!_.isEmpty(changed)) callbacks.changed(id, changed); @@ -169,7 +169,7 @@ MongoConnection.prototype._observeChangesWithOplog = function ( // Oh great, we actually know what the document is, so we can apply // this directly. 
// XXX this assumes no field filtering - var newDoc = EJSON.clone(published.get(id)); + var newDoc = _.clone(published.get(id)); newDoc._id = id; LocalCollection._modify(newDoc, op.o); handleDoc(id, newDoc); From 132221262c5a08605f806cd809c594f287b9880a Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Mon, 4 Nov 2013 12:38:05 -0800 Subject: [PATCH 091/190] Refactor out the tree construction. --- packages/minimongo/projection.js | 96 +++++++++++++++++++++----------- 1 file changed, 63 insertions(+), 33 deletions(-) diff --git a/packages/minimongo/projection.js b/packages/minimongo/projection.js index 14811b0ee2..f28cbd74ca 100644 --- a/packages/minimongo/projection.js +++ b/packages/minimongo/projection.js @@ -57,7 +57,6 @@ var projectionDetails = function (fields) { // like 'foo' and 'foo.bar' can assume that 'foo' comes first. var fieldsKeys = _.reject(_.keys(fields).sort(), function (key) { return key === '_id'; }); var including = null; // Unknown - var projectionRulesTree = {}; // Tree represented as nested objects _.each(fieldsKeys, function (keyPath) { var rule = !!fields[keyPath]; @@ -66,46 +65,77 @@ var projectionDetails = function (fields) { if (including !== rule) // This error message is copies from MongoDB shell throw MinimongoError("You cannot currently mix including and excluding fields."); - var treePos = projectionRulesTree; - keyPath = keyPath.split('.'); + }); - _.each(keyPath.slice(0, -1), function (key, idx) { - if (!_.has(treePos, key)) - treePos[key] = {}; - else if (_.isBoolean(treePos[key])) { - // Check passed projection fields' keys: If you have two rules such as - // 'foo.bar' and 'foo.bar.baz', then the result becomes ambiguous. If - // that happens, there is a probability you are doing something wrong, - // framework should notify you about such mistake earlier on cursor - // compilation step than later during runtime. 
Note, that real mongo - // doesn't do anything about it and the later rule appears in projection - // project, more priority it takes. - // - // Example, assume following in mongo shell: - // > db.coll.insert({ a: { b: 23, c: 44 } }) - // > db.coll.find({}, { 'a': 1, 'a.b': 1 }) - // { "_id" : ObjectId("520bfe456024608e8ef24af3"), "a" : { "b" : 23 } } - // > db.coll.find({}, { 'a.b': 1, 'a': 1 }) - // { "_id" : ObjectId("520bfe456024608e8ef24af3"), "a" : { "b" : 23, "c" : 44 } } - // - // Note, how second time the return set of keys is different. - var currentPath = keyPath.join('.'); - var anotherPath = keyPath.slice(0, idx + 1).join('.'); - throw MinimongoError("both " + currentPath + " and " + anotherPath + - " found in fields option, using both of them may trigger " + - "unexpected behavior. Did you mean to use only one of them?"); - } + var projectionRulesTree = pathsToTree( + fieldsKeys, + function (path) { return including; }, + function (node, path, fullPath) { + // Check passed projection fields' keys: If you have two rules such as + // 'foo.bar' and 'foo.bar.baz', then the result becomes ambiguous. If + // that happens, there is a probability you are doing something wrong, + // framework should notify you about such mistake earlier on cursor + // compilation step than later during runtime. Note, that real mongo + // doesn't do anything about it and the later rule appears in projection + // project, more priority it takes. + // + // Example, assume following in mongo shell: + // > db.coll.insert({ a: { b: 23, c: 44 } }) + // > db.coll.find({}, { 'a': 1, 'a.b': 1 }) + // { "_id" : ObjectId("520bfe456024608e8ef24af3"), "a" : { "b" : 23 } } + // > db.coll.find({}, { 'a.b': 1, 'a': 1 }) + // { "_id" : ObjectId("520bfe456024608e8ef24af3"), "a" : { "b" : 23, "c" : 44 } } + // + // Note, how second time the return set of keys is different. 
- treePos = treePos[key]; + var currentPath = keyPath.join('.'); + var anotherPath = keyPath.slice(0, idx + 1).join('.'); + throw MinimongoError("both " + currentPath + " and " + anotherPath + + " found in fields option, using both of them may trigger " + + "unexpected behavior. Did you mean to use only one of them?"); }); - treePos[_.last(keyPath)] = including; - }); - return { tree: projectionRulesTree, including: including }; }; +// paths - Array: list of mongo style paths +// newLeaveFn - Function: of form function(path) should return a scalar value to +// put into list created for that path +// conflictFn - Function: of form function(node, path, fullPath) is called +// when building a tree path for 'fullPath' node on +// 'path' was already a leave with a value. Conflicted +// path is ignored. +// @returns - Object: tree represented as a set of nested objects +var pathsToTree = function (paths, newLeaveFn, conflictFn) { + var tree = {}; + _.each(paths, function (keyPath) { + var treePos = tree; + var pathArr = keyPath.split('.'); + + // use _.all just for iteration with break + var sucess = _.all(pathArr.slice(0, -1), function (key, idx) { + if (!_.has(treePos, key)) + treePos[key] = {}; + else if (!_.isObject(treePos[key])) { + conflictFn(treePos[key], + pathArray.slice(0, idx + 1).join('.'), + keyPath); + // break out of loop as we are failing for this path + return false; + } + + treePos = treePos[key]; + return true; + }); + + if (sucess) + treePos[_.last(pathArr)] = newLeaveFn(keyPath); + }); + + return tree; +}; + From 8a4b58d987f3cc464e054e32598eabb6974fd341 Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Tue, 5 Nov 2013 16:58:58 -0800 Subject: [PATCH 092/190] More on deep/shallow cloning --- packages/mongo-livedata/oplog.js | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/packages/mongo-livedata/oplog.js b/packages/mongo-livedata/oplog.js index 53cd01ae75..6fcffe9010 100644 --- a/packages/mongo-livedata/oplog.js +++ 
b/packages/mongo-livedata/oplog.js @@ -48,6 +48,7 @@ MongoConnection.prototype._observeChangesWithOplog = function ( if (published.has(id)) throw Error("tried to add something already published " + id); published.set(id, fields); + // projection will deep copy object callbacks.added && callbacks.added(id, projection(fields)); }; @@ -75,6 +76,7 @@ MongoConnection.prototype._observeChangesWithOplog = function ( if (callbacks.changed) { var changed = LocalCollection._makeChangedFields( newDoc, oldDoc); + // projection will deep copy the changed object changed = projection(changed); if (!_.isEmpty(changed)) callbacks.changed(id, changed); @@ -169,7 +171,8 @@ MongoConnection.prototype._observeChangesWithOplog = function ( // Oh great, we actually know what the document is, so we can apply // this directly. // XXX this assumes no field filtering - var newDoc = _.clone(published.get(id)); + // XXX get rid of this deep clone once we run it though projection + var newDoc = EJSON.clone(published.get(id)); newDoc._id = id; LocalCollection._modify(newDoc, op.o); handleDoc(id, newDoc); From 0ee8f954522dd0728c947f3375d811b312676d5a Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Mon, 4 Nov 2013 12:44:13 -0800 Subject: [PATCH 093/190] Define interface for selector-fields merger --- packages/minimongo/projection.js | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/packages/minimongo/projection.js b/packages/minimongo/projection.js index f28cbd74ca..cc3c597586 100644 --- a/packages/minimongo/projection.js +++ b/packages/minimongo/projection.js @@ -38,6 +38,23 @@ LocalCollection._compileProjection = function (fields) { }; }; +// Knows how to combine a mongo selector and a fields projection to a new fields +// projection taking into account active fields from the passed selector. 
+// @returns Object - projection object (same as fields option of mongo cursor) +LocalCollection._combineSelectorAndProjection = function (selector, projection) +{ + var prjDetails = projectionDetails(projection); + var tree = prjDetails.tree; + var mergedProjection = {}; + + if (prjDetails.including) { + // both selector and projection are pointing on fields to include + } else { + // selector is pointing at fields to include + // projection is pointing at fields to exclude + } +}; + // Traverses the keys of passed projection and constructs a tree where all // leaves are either all True or all False // @returns Object: From 5851f19a7a4cb7f78f161c748714b8f652ee6bc6 Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Mon, 4 Nov 2013 13:42:01 -0800 Subject: [PATCH 094/190] Make conflictFn also a conflict resolution function --- packages/minimongo/projection.js | 30 +++++++++++++++++++----------- 1 file changed, 19 insertions(+), 11 deletions(-) diff --git a/packages/minimongo/projection.js b/packages/minimongo/projection.js index cc3c597586..30f21899d7 100644 --- a/packages/minimongo/projection.js +++ b/packages/minimongo/projection.js @@ -49,6 +49,7 @@ LocalCollection._combineSelectorAndProjection = function (selector, projection) if (prjDetails.including) { // both selector and projection are pointing on fields to include + ; } else { // selector is pointing at fields to include // projection is pointing at fields to exclude @@ -124,11 +125,12 @@ var projectionDetails = function (fields) { // put into list created for that path // conflictFn - Function: of form function(node, path, fullPath) is called // when building a tree path for 'fullPath' node on -// 'path' was already a leave with a value. Conflicted -// path is ignored. +// 'path' was already a leave with a value. Must return a +// conflict resolution. +// initial tree - Optional Object: starting tree. 
// @returns - Object: tree represented as a set of nested objects -var pathsToTree = function (paths, newLeaveFn, conflictFn) { - var tree = {}; +var pathsToTree = function (paths, newLeaveFn, conflictFn, tree) { + tree = tree || {}; _.each(paths, function (keyPath) { var treePos = tree; var pathArr = keyPath.split('.'); @@ -138,19 +140,25 @@ var pathsToTree = function (paths, newLeaveFn, conflictFn) { if (!_.has(treePos, key)) treePos[key] = {}; else if (!_.isObject(treePos[key])) { - conflictFn(treePos[key], - pathArray.slice(0, idx + 1).join('.'), - keyPath); - // break out of loop as we are failing for this path - return false; + treePos[key] = conflictFn(treePos[key], + pathArray.slice(0, idx + 1).join('.'), + keyPath); + // break out of loop if we are failing for this path + if (!_.isObject(treePos[key])) + return false; } treePos = treePos[key]; return true; }); - if (sucess) - treePos[_.last(pathArr)] = newLeaveFn(keyPath); + if (sucess) { + var lastKey = _.last(pathArr); + if (!_.has(treePos, lastKey)) + treePos[lastKey] = newLeaveFn(keyPath); + else + treePos[lastKey] = conflictFn(treePos[lastKey], keyPath, keyPath); + } }); return tree; From 562b9823a821f609edcca83c115d277c84c87354 Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Mon, 4 Nov 2013 15:16:05 -0800 Subject: [PATCH 095/190] Implement combinattion of selector and projection for inclusive projection. --- packages/minimongo/projection.js | 28 ++++++++++++++++++++++++++-- packages/minimongo/selector.js | 15 +++++++++++---- 2 files changed, 37 insertions(+), 6 deletions(-) diff --git a/packages/minimongo/projection.js b/packages/minimongo/projection.js index 30f21899d7..b3099be206 100644 --- a/packages/minimongo/projection.js +++ b/packages/minimongo/projection.js @@ -41,19 +41,27 @@ LocalCollection._compileProjection = function (fields) { // Knows how to combine a mongo selector and a fields projection to a new fields // projection taking into account active fields from the passed selector. 
// @returns Object - projection object (same as fields option of mongo cursor) +// XXX doesn't know how to deal with fields projections like {'foo.0': 1} LocalCollection._combineSelectorAndProjection = function (selector, projection) { var prjDetails = projectionDetails(projection); var tree = prjDetails.tree; var mergedProjection = {}; + var selectorPaths = LocalCollection._getPathsWithoutNumericKeys(selector); if (prjDetails.including) { // both selector and projection are pointing on fields to include - ; + tree = pathsToTree(selectorPaths, + function (path) { return true; }, + function (node, path, fullPath) { + return true; + }, tree); } else { // selector is pointing at fields to include // projection is pointing at fields to exclude } + + return treeToPaths(tree); }; // Traverses the keys of passed projection and constructs a tree where all @@ -141,7 +149,7 @@ var pathsToTree = function (paths, newLeaveFn, conflictFn, tree) { treePos[key] = {}; else if (!_.isObject(treePos[key])) { treePos[key] = conflictFn(treePos[key], - pathArray.slice(0, idx + 1).join('.'), + pathArr.slice(0, idx + 1).join('.'), keyPath); // break out of loop if we are failing for this path if (!_.isObject(treePos[key])) @@ -164,3 +172,19 @@ var pathsToTree = function (paths, newLeaveFn, conflictFn, tree) { return tree; }; +// Returns a set of key paths similar to +// { 'foo.bar': 1, 'a.b.c': 1 } +var treeToPaths = function (tree, prefix) { + prefix = prefix || ''; + var result = {}; + + _.each(tree, function (val, key) { + if (_.isObject(val)) + _.extend(result, treeToPaths(val, prefix + key + '.')); + else + result[prefix + key] = val; + }); + + return result; +}; + diff --git a/packages/minimongo/selector.js b/packages/minimongo/selector.js index 6a1f9766f4..db5caaaa74 100644 --- a/packages/minimongo/selector.js +++ b/packages/minimongo/selector.js @@ -829,11 +829,12 @@ LocalCollection._isSelectorAffectedByModifier = function (selector, modifier) { return true; }); }); +}; - // string 
can be converted to integer - function numericKey (s) { - return /^[0-9]+$/.test(s); - } +LocalCollection._getPathsWithoutNumericKeys = function (sel) { + return _.map(getPaths(sel), function (path) { + return _.reject(path.split('.'), numericKey).join('.'); + }); }; // Returns a list of key paths the given selector is looking for @@ -849,3 +850,9 @@ var getPaths = MinimongoTest.getSelectorPaths = function (sel) { return k; }).flatten().uniq().value(); }; + +// string can be converted to integer +function numericKey (s) { + return /^[0-9]+$/.test(s); +} + From 31345957225ad2c5489aa4f0273e37ed9f1bf892 Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Mon, 4 Nov 2013 15:16:34 -0800 Subject: [PATCH 096/190] Tests for combination of selector and inclusive projection --- packages/minimongo/minimongo_tests.js | 84 +++++++++++++++++++++++++++ 1 file changed, 84 insertions(+) diff --git a/packages/minimongo/minimongo_tests.js b/packages/minimongo/minimongo_tests.js index 2a5aca1854..6606fa25ef 100644 --- a/packages/minimongo/minimongo_tests.js +++ b/packages/minimongo/minimongo_tests.js @@ -2469,3 +2469,87 @@ Tinytest.add("minimongo - modifier affects selector", function (test) { affected({ 'foo.0.bar': 0 }, { $set: { 'foo.0.0.bar': 1 } }, "delicate work with nested arrays and selectors by indecies"); }); +Tinytest.add("minimongo - selector and projection combination", function (test) { + function testSelProjectionComb (sel, proj, expected, desc) { + test.equal(LocalCollection._combineSelectorAndProjection(sel, proj), expected, desc); + } + + testSelProjectionComb({ a: 1, b: 2 }, { b: 1, c: 1, d: 1 }, { a: true, b: true, c: true, d: true }, "simplest incl"); + testSelProjectionComb({ $or: [{ a: 1234, e: {$lt: 5} }], b: 2 }, { b: 1, c: 1, d: 1 }, { a: true, b: true, c: true, d: true, e: true }, "simplest incl, branching"); + testSelProjectionComb({ + 'a.b': { $lt: 3 }, + 'y.0': -1, + 'a.c': 15 + }, { + 'd': 1, + 'z': 1 + }, { + 'a.b': true, + 'y': true, + 'a.c': true, + 'd': 
true, + 'z': true + }, "multikey paths in selector"); + + testSelProjectionComb({ + foo: 1234, + $and: [{ k: -1 }, { $or: [{ b: 15 }] }] + }, { + 'foo.bar': 1, + 'foo.zzz': 1, + 'b.asdf': 1 + }, { + foo: true, + b: true, + k: true + }, "multikey paths in fields"); + + testSelProjectionComb({ + 'a.b.c': 123, + 'a.b.d': 321, + 'b.c.0': 111, + 'a.e': 12345 + }, { + 'a.b.z': 1, + 'a.b.d.g': 1, + 'c.c.c': 1 + }, { + 'a.b.c': true, + 'a.b.d': true, + 'a.b.z': true, + 'b.c': true, + 'a.e': true, + 'c.c.c': true + }, "multikey both paths"); + + testSelProjectionComb({ + 'a.b.c.d': 123, + 'a.b1.c.d': 421, + 'a.b.c.e': 111 + }, { + 'a.b': 1 + }, { + 'a.b': true, + 'a.b1.c.d': true + }, "shadowing one another"); + + testSelProjectionComb({ + 'a.b': 123, + 'foo.bar': false + }, { + 'a.b.c.d': 1, + 'foo': 1 + }, { + 'a.b': true, + 'foo': true + }, "shadowing one another"); + + testSelProjectionComb({ + 'a.b.c': 1 + }, { + 'a.b.c': 1 + }, { + 'a.b.c': true + }, "same paths"); +}); + From 3bf1e6a1a8a51085f4418b041da04ccf6e9bf8cc Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Mon, 4 Nov 2013 15:52:17 -0800 Subject: [PATCH 097/190] Fix error reporting --- packages/minimongo/minimongo.js | 2 +- packages/minimongo/projection.js | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/packages/minimongo/minimongo.js b/packages/minimongo/minimongo.js index 6e95109f12..9be6db7a40 100644 --- a/packages/minimongo/minimongo.js +++ b/packages/minimongo/minimongo.js @@ -45,7 +45,7 @@ LocalCollection._applyChanges = function (doc, changeFields) { }); }; -var MinimongoError = function (message) { +MinimongoError = function (message) { var e = new Error(message); e.name = "MinimongoError"; return e; diff --git a/packages/minimongo/projection.js b/packages/minimongo/projection.js index b3099be206..7d3e6c3bd4 100644 --- a/packages/minimongo/projection.js +++ b/packages/minimongo/projection.js @@ -115,8 +115,8 @@ var projectionDetails = function (fields) { // // Note, how second time 
the return set of keys is different. - var currentPath = keyPath.join('.'); - var anotherPath = keyPath.slice(0, idx + 1).join('.'); + var currentPath = fullPath; + var anotherPath = path; throw MinimongoError("both " + currentPath + " and " + anotherPath + " found in fields option, using both of them may trigger " + "unexpected behavior. Did you mean to use only one of them?"); From 24d1b31f52ad5c837d6e6b93298920f455b1de0e Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Mon, 4 Nov 2013 15:52:35 -0800 Subject: [PATCH 098/190] Mark tests for inclusive projections --- packages/minimongo/minimongo_tests.js | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/packages/minimongo/minimongo_tests.js b/packages/minimongo/minimongo_tests.js index 6606fa25ef..53bebb9867 100644 --- a/packages/minimongo/minimongo_tests.js +++ b/packages/minimongo/minimongo_tests.js @@ -2474,6 +2474,7 @@ Tinytest.add("minimongo - selector and projection combination", function (test) test.equal(LocalCollection._combineSelectorAndProjection(sel, proj), expected, desc); } + // Test with inclusive projection testSelProjectionComb({ a: 1, b: 2 }, { b: 1, c: 1, d: 1 }, { a: true, b: true, c: true, d: true }, "simplest incl"); testSelProjectionComb({ $or: [{ a: 1234, e: {$lt: 5} }], b: 2 }, { b: 1, c: 1, d: 1 }, { a: true, b: true, c: true, d: true, e: true }, "simplest incl, branching"); testSelProjectionComb({ @@ -2489,7 +2490,7 @@ Tinytest.add("minimongo - selector and projection combination", function (test) 'a.c': true, 'd': true, 'z': true - }, "multikey paths in selector"); + }, "multikey paths in selector - incl"); testSelProjectionComb({ foo: 1234, @@ -2502,7 +2503,7 @@ Tinytest.add("minimongo - selector and projection combination", function (test) foo: true, b: true, k: true - }, "multikey paths in fields"); + }, "multikey paths in fields - incl"); testSelProjectionComb({ 'a.b.c': 123, @@ -2520,7 +2521,7 @@ Tinytest.add("minimongo - selector and projection 
combination", function (test) 'b.c': true, 'a.e': true, 'c.c.c': true - }, "multikey both paths"); + }, "multikey both paths - incl"); testSelProjectionComb({ 'a.b.c.d': 123, @@ -2531,7 +2532,7 @@ Tinytest.add("minimongo - selector and projection combination", function (test) }, { 'a.b': true, 'a.b1.c.d': true - }, "shadowing one another"); + }, "shadowing one another - incl"); testSelProjectionComb({ 'a.b': 123, @@ -2542,7 +2543,7 @@ Tinytest.add("minimongo - selector and projection combination", function (test) }, { 'a.b': true, 'foo': true - }, "shadowing one another"); + }, "shadowing one another - incl"); testSelProjectionComb({ 'a.b.c': 1 From ba674e98e887d6c9abbd5203fa8f34b28f62ab76 Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Mon, 4 Nov 2013 15:52:49 -0800 Subject: [PATCH 099/190] Tests for exclusive projections. TDD FTW --- packages/minimongo/minimongo_tests.js | 96 ++++++++++++++++++++++++++- 1 file changed, 95 insertions(+), 1 deletion(-) diff --git a/packages/minimongo/minimongo_tests.js b/packages/minimongo/minimongo_tests.js index 53bebb9867..3f2ce855fa 100644 --- a/packages/minimongo/minimongo_tests.js +++ b/packages/minimongo/minimongo_tests.js @@ -2551,6 +2551,100 @@ Tinytest.add("minimongo - selector and projection combination", function (test) 'a.b.c': 1 }, { 'a.b.c': true - }, "same paths"); + }, "same paths - incl"); + + // Test with exclusive projection + testSelProjectionComb({ a: 1, b: 2 }, { b: 0, c: 0, d: 0 }, { c: false, d: false }, "simplest excl"); + testSelProjectionComb({ $or: [{ a: 1234, e: {$lt: 5} }], b: 2 }, { b: 0, c: 0, d: 0 }, { c: false, d: false }, "simplest excl, branching"); + testSelProjectionComb({ + 'a.b': { $lt: 3 }, + 'y.0': -1, + 'a.c': 15 + }, { + 'd': 0, + 'z': 0 + }, { + d: false, + z: false + }, "multikey paths in selector - excl"); + + testSelProjectionComb({ + foo: 1234, + $and: [{ k: -1 }, { $or: [{ b: 15 }] }] + }, { + 'foo.bar': 0, + 'foo.zzz': 0, + 'b.asdf': 0 + }, { + }, "multikey paths in fields - 
excl"); + + testSelProjectionComb({ + 'a.b.c': 123, + 'a.b.d': 321, + 'b.c.0': 111, + 'a.e': 12345 + }, { + 'a.b.z': 0, + 'a.b.d.g': 0, + 'c.c.c': 0 + }, { + 'a.b.z': false, + 'c.c.c': false + }, "multikey both paths - excl"); + + testSelProjectionComb({ + 'a.b.c.d': 123, + 'a.b1.c.d': 421, + 'a.b.c.e': 111 + }, { + 'a.b': 0 + }, { + }, "shadowing one another - excl"); + + testSelProjectionComb({ + 'a.b': 123, + 'foo.bar': false + }, { + 'a.b.c.d': 0, + 'foo': 0 + }, { + }, "shadowing one another - excl"); + + testSelProjectionComb({ + 'a.b.c': 1 + }, { + 'a.b.c': 0 + }, { + }, "same paths - excl"); + + testSelProjectionComb({ + 'a.b': 123, + 'a.c.d': 222, + 'ddd': 123 + }, { + 'a.b': 0, + 'a.c.e': 0, + 'asdf': 0 + }, { + 'a.c.e': false, + 'asdf': false + }, "intercept the selector path - excl"); + + testSelProjectionComb({ + 'a.b.c': 14 + }, { + 'a.b.d': 0 + }, { + 'a.b.d': false + }, "different branches - excl"); + + testSelProjectionComb({ + 'a.b.c.d': "124", + 'foo.bar.baz.que': "some value" + }, { + 'a.b.c.d.e': 0, + 'foo.bar': 0 + }, { + }, "excl on incl paths - excl"); }); From 13a98748a186d2534fd0bf742f9c620460b44912 Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Mon, 4 Nov 2013 17:46:52 -0800 Subject: [PATCH 100/190] Implement combine selector and projection for both including and excluding projections. 
--- packages/minimongo/projection.js | 23 ++++++++++++++++------- 1 file changed, 16 insertions(+), 7 deletions(-) diff --git a/packages/minimongo/projection.js b/packages/minimongo/projection.js index 7d3e6c3bd4..3230fc6b19 100644 --- a/packages/minimongo/projection.js +++ b/packages/minimongo/projection.js @@ -49,19 +49,28 @@ LocalCollection._combineSelectorAndProjection = function (selector, projection) var mergedProjection = {}; var selectorPaths = LocalCollection._getPathsWithoutNumericKeys(selector); + // merge the paths to include + tree = pathsToTree(selectorPaths, + function (path) { return true; }, + function (node, path, fullPath) { return true; }, + tree); + mergedProjection = treeToPaths(tree); if (prjDetails.including) { // both selector and projection are pointing on fields to include - tree = pathsToTree(selectorPaths, - function (path) { return true; }, - function (node, path, fullPath) { - return true; - }, tree); + // so we can just return the merged tree + return mergedProjection; } else { // selector is pointing at fields to include // projection is pointing at fields to exclude - } + // make sure we don't exclude important paths + var mergedExclProjection = {}; + _.each(mergedProjection, function (incl, path) { + if (!incl) + mergedExclProjection[path] = false; + }); - return treeToPaths(tree); + return mergedExclProjection; + } }; // Traverses the keys of passed projection and constructs a tree where all From c1268579685485666a67b46a2eb540f82224712f Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Mon, 4 Nov 2013 18:13:13 -0800 Subject: [PATCH 101/190] Apply shared projection to anything stored in the cache. 
--- packages/mongo-livedata/oplog.js | 28 +++++++++++++++------------- 1 file changed, 15 insertions(+), 13 deletions(-) diff --git a/packages/mongo-livedata/oplog.js b/packages/mongo-livedata/oplog.js index 6fcffe9010..49ffa94805 100644 --- a/packages/mongo-livedata/oplog.js +++ b/packages/mongo-livedata/oplog.js @@ -33,10 +33,14 @@ MongoConnection.prototype._observeChangesWithOplog = function ( var phase = PHASE.INITIALIZING; var published = new IdMap; - var selector = LocalCollection._compileSelector(cursorDescription.selector); - var projection = cursorDescription.options.fields ? - LocalCollection._compileProjection(cursorDescription.options.fields) : - EJSON.clone; + var selector = cursorDescription.selector; + var selectorFn = LocalCollection._compileSelector(selector); + var projection = cursorDescription.options.fields || {}; + var projectionFn = LocalCollection._compileProjection(projection); + // Projection function, result of combining important fields for selector and + // existing fields projection + var sharedProjection = LocalCollection._combineSelectorAndProjection(selector, projection); + var sharedProjectionFn = LocalCollection._compileProjection(sharedProjection); var needToFetch = new IdMap; var currentlyFetching = new IdMap; @@ -47,9 +51,8 @@ MongoConnection.prototype._observeChangesWithOplog = function ( delete fields._id; if (published.has(id)) throw Error("tried to add something already published " + id); - published.set(id, fields); - // projection will deep copy object - callbacks.added && callbacks.added(id, projection(fields)); + published.set(id, sharedProjectionFn(fields)); + callbacks.added && callbacks.added(id, projectionFn(fields)); }; var remove = function (id) { @@ -61,7 +64,7 @@ MongoConnection.prototype._observeChangesWithOplog = function ( var handleDoc = function (id, newDoc) { newDoc = _.clone(newDoc); - var matchesNow = newDoc && selector(newDoc); + var matchesNow = newDoc && selectorFn(newDoc); var matchedBefore = 
published.has(id); if (matchesNow && !matchedBefore) { add(newDoc); @@ -72,12 +75,11 @@ MongoConnection.prototype._observeChangesWithOplog = function ( if (!oldDoc) throw Error("thought that " + id + " was there!"); delete newDoc._id; - published.set(id, newDoc); + published.set(id, sharedProjectionFn(newDoc)); if (callbacks.changed) { var changed = LocalCollection._makeChangedFields( - newDoc, oldDoc); - // projection will deep copy the changed object - changed = projection(changed); + _.clone(newDoc), oldDoc); + changed = projectionFn(changed); if (!_.isEmpty(changed)) callbacks.changed(id, changed); } @@ -156,7 +158,7 @@ MongoConnection.prototype._observeChangesWithOplog = function ( // XXX what if selector yields? for now it can't but later it could have // $where - if (selector(op.o)) + if (selectorFn(op.o)) add(op.o); } else if (op.op === 'u') { // Is this a modifier ($set/$unset, which may require us to poll the From 3a8c36e1de4ef43836d4802425a8519360544361 Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Tue, 5 Nov 2013 18:25:58 -0800 Subject: [PATCH 102/190] Test to catch modifier affecting fields projected out --- .../mongo-livedata/observe_changes_tests.js | 29 ++++++++++++++++++- 1 file changed, 28 insertions(+), 1 deletion(-) diff --git a/packages/mongo-livedata/observe_changes_tests.js b/packages/mongo-livedata/observe_changes_tests.js index 3ea7ab33f4..1831718afc 100644 --- a/packages/mongo-livedata/observe_changes_tests.js +++ b/packages/mongo-livedata/observe_changes_tests.js @@ -173,7 +173,7 @@ if (Meteor.isServer) { var c = makeCollection(); withCallbackLogger(test, ["added", "changed", "removed"], Meteor.isServer, function (logger) { var handle = c.find({ mac: 1, cheese: 2 }, - {fields:{noodles: 1, bacon: 1}}).observeChanges(logger); + {fields:{noodles: 1, bacon: 1, eggs: 1}}).observeChanges(logger); var barid = c.insert({thing: "stuff", mac: 1, cheese: 2}); logger.expectResultOnly("added", [barid, {}]); @@ -184,7 +184,11 @@ if (Meteor.isServer) { 
c.update(fooid, {noodles: "alright", potatoes: "tasty", apples: "ok", mac: 1, cheese: 2}); logger.expectResultOnly("changed", [fooid, {noodles: "alright", bacon: undefined}]); + + // Doesn't get update event, since modifies only hidden fields c.update(fooid, {noodles: "alright", potatoes: "meh", apples: "ok", mac: 1, cheese: 2}); + logger.expectNoResult(); + c.remove(fooid); logger.expectResultOnly("removed", [fooid]); c.remove(barid); @@ -198,6 +202,29 @@ if (Meteor.isServer) { onComplete(); }); }); + + Tinytest.addAsync("observeChanges - unordered - specific fields + modify on excluded fields", function (test, onComplete) { + var c = makeCollection(); + withCallbackLogger(test, ["added", "changed", "removed"], Meteor.isServer, function (logger) { + var handle = c.find({ mac: 1, cheese: 2 }, + {fields:{noodles: 1, bacon: 1, eggs: 1}}).observeChanges(logger); + var fooid = c.insert({noodles: "good", bacon: "bad", apples: "ok", mac: 1, cheese: 2}); + + logger.expectResultOnly("added", [fooid, {noodles: "good", bacon: "bad"}]); + + + // Noodles go into shadow, mac appears as eggs + c.update(fooid, {$rename: { noodles: 'shadow', apples: 'eggs' }}); + logger.expectResultOnly("changed", + [fooid, {eggs:"ok", noodles: undefined}]); + + c.remove(fooid); + logger.expectResultOnly("removed", [fooid]); + logger.expectNoResult(); + handle.stop(); + onComplete(); + }); + }); } From afb2aaece7af64a24354e01b675acff583636732 Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Tue, 5 Nov 2013 18:29:03 -0800 Subject: [PATCH 103/190] Filter out modified object If modifier sets fields we don't like in cache, for example --- packages/mongo-livedata/oplog.js | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/packages/mongo-livedata/oplog.js b/packages/mongo-livedata/oplog.js index 49ffa94805..3326051d75 100644 --- a/packages/mongo-livedata/oplog.js +++ b/packages/mongo-livedata/oplog.js @@ -172,12 +172,10 @@ MongoConnection.prototype._observeChangesWithOplog = function ( } 
else if (published.has(id)) { // Oh great, we actually know what the document is, so we can apply // this directly. - // XXX this assumes no field filtering - // XXX get rid of this deep clone once we run it though projection var newDoc = EJSON.clone(published.get(id)); newDoc._id = id; LocalCollection._modify(newDoc, op.o); - handleDoc(id, newDoc); + handleDoc(id, sharedProjectionFn(newDoc)); } else if (LocalCollection._isSelectorAffectedByModifier( cursorDescription.selector, op.o)) { // XXX _isSelectorAffectedByModifier should actually be From 74c9373d3ab26975671b2560101d97ede5a006f5 Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Mon, 11 Nov 2013 11:43:06 -0800 Subject: [PATCH 104/190] Define projection function contract: returned doc shouldn't retain anything from the passed argument. --- packages/minimongo/minimongo_tests.js | 31 +++++++++++++++++++++++++++ packages/minimongo/projection.js | 6 ++++++ 2 files changed, 37 insertions(+) diff --git a/packages/minimongo/minimongo_tests.js b/packages/minimongo/minimongo_tests.js index 3f2ce855fa..43dbbf45e9 100644 --- a/packages/minimongo/minimongo_tests.js +++ b/packages/minimongo/minimongo_tests.js @@ -1151,6 +1151,37 @@ Tinytest.add("minimongo - fetch with projection, subarrays", function (test) { {a: [ [ { c: 2 }, { c: 4 } ], { c: 5 }, [ { c: 9 } ] ] }); }); +Tinytest.add("minimongo - fetch with projection, deep copy", function (test) { + // Compiled fields projection defines the contract: returned document doesn't + // retain anything from the passed argument. 
+ var doc = { + a: { x: 42 }, + b: { + y: { z: 33 } + }, + c: "asdf" + }; + + var fields = { + 'a': 1, + 'b.y': 1 + }; + + var projectionFn = LocalCollection._compileProjection(fields); + var filteredDoc = projectionFn(doc); + doc.a.x++; + doc.b.y.z--; + test.equal(filteredDoc.a.x, 42, "projection returning deep copy - including"); + test.equal(filteredDoc.b.y.z, 33, "projection returning deep copy - including"); + + fields = { c: 0 }; + projectionFn = LocalCollection._compileProjection(fields); + filteredDoc = projectionFn(doc); + + doc.a.x = 5; + test.equal(filteredDoc.a.x, 43, "projection returning deep copy - excluding"); +}); + Tinytest.add("minimongo - observe ordered with projection", function (test) { // These tests are copy-paste from "minimongo -observe ordered", // slightly modified to test projection diff --git a/packages/minimongo/projection.js b/packages/minimongo/projection.js index 3230fc6b19..73a10c59cb 100644 --- a/packages/minimongo/projection.js +++ b/packages/minimongo/projection.js @@ -1,4 +1,10 @@ // Knows how to compile a fields projection to a predicate function. +// @returns - Function: a closure that filters out an object according to the +// fields projection rules: +// @param obj - Object: MongoDB-styled document +// @returns - Object: a document with the fields filtered out +// according to projection rules. Doesn't retain subfields +// of passed argument. LocalCollection._compileProjection = function (fields) { var _idProjection = _.isUndefined(fields._id) ? true : fields._id; var details = projectionDetails(fields); From b1f8930630f12f0bed03032a0136670506158f65 Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Mon, 11 Nov 2013 11:43:41 -0800 Subject: [PATCH 105/190] Projection function never retains anything from passed doc. Always does deep copy. 
--- packages/minimongo/projection.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/minimongo/projection.js b/packages/minimongo/projection.js index 73a10c59cb..5c3dcad09b 100644 --- a/packages/minimongo/projection.js +++ b/packages/minimongo/projection.js @@ -25,7 +25,7 @@ LocalCollection._compileProjection = function (fields) { res[key] = transform(doc[key], rule); // Otherwise we don't even touch this subfield } else if (details.including) - res[key] = doc[key]; + res[key] = EJSON.clone(doc[key]); else delete res[key]; }); From a39ce59c355af0de1abfeab31db6e60f550f1405 Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Mon, 11 Nov 2013 11:54:43 -0800 Subject: [PATCH 106/190] More tests on combining a selector with a fields projection: A case when a selector has a path with numbered keys overlapping with fields. --- packages/minimongo/minimongo_tests.js | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/packages/minimongo/minimongo_tests.js b/packages/minimongo/minimongo_tests.js index 43dbbf45e9..a61269f230 100644 --- a/packages/minimongo/minimongo_tests.js +++ b/packages/minimongo/minimongo_tests.js @@ -2584,6 +2584,17 @@ Tinytest.add("minimongo - selector and projection combination", function (test) 'a.b.c': true }, "same paths - incl"); + testSelProjectionComb({ + 'x.4.y': 42, + 'z.0.1': 33 + }, { + 'x.x': 1 + }, { + 'x.x': true, + 'x.y': true, + 'z': true + }, "numbered keys in selector - incl"); + // Test with exclusive projection testSelProjectionComb({ a: 1, b: 2 }, { b: 0, c: 0, d: 0 }, { c: false, d: false }, "simplest excl"); testSelProjectionComb({ $or: [{ a: 1234, e: {$lt: 5} }], b: 2 }, { b: 0, c: 0, d: 0 }, { c: false, d: false }, "simplest excl, branching"); @@ -2677,5 +2688,16 @@ Tinytest.add("minimongo - selector and projection combination", function (test) 'foo.bar': 0 }, { }, "excl on incl paths - excl"); + + testSelProjectionComb({ + 'x.4.y': 42, + 'z.0.1': 33 + }, { + 'x.x': 0, + 'x.y': 0 + }, { 
+ 'x.x': false, + }, "numbered keys in selector - excl"); + }); From 891e3324eab57ab7abb4678141cf16bb759c2ef9 Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Tue, 19 Nov 2013 16:13:11 -0800 Subject: [PATCH 107/190] Remove an outdated comment. Fix typos --- packages/minimongo/projection.js | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/packages/minimongo/projection.js b/packages/minimongo/projection.js index 5c3dcad09b..e0c6aed8a6 100644 --- a/packages/minimongo/projection.js +++ b/packages/minimongo/projection.js @@ -47,7 +47,6 @@ LocalCollection._compileProjection = function (fields) { // Knows how to combine a mongo selector and a fields projection to a new fields // projection taking into account active fields from the passed selector. // @returns Object - projection object (same as fields option of mongo cursor) -// XXX doesn't know how to deal with fields projections like {'foo.0': 1} LocalCollection._combineSelectorAndProjection = function (selector, projection) { var prjDetails = projectionDetails(projection); @@ -148,7 +147,7 @@ var projectionDetails = function (fields) { // put into list created for that path // conflictFn - Function: of form function(node, path, fullPath) is called // when building a tree path for 'fullPath' node on -// 'path' was already a leave with a value. Must return a +// 'path' was already a leaf with a value. Must return a // conflict resolution. // initial tree - Optional Object: starting tree. 
// @returns - Object: tree represented as a set of nested objects @@ -159,7 +158,7 @@ var pathsToTree = function (paths, newLeaveFn, conflictFn, tree) { var pathArr = keyPath.split('.'); // use _.all just for iteration with break - var sucess = _.all(pathArr.slice(0, -1), function (key, idx) { + var success = _.all(pathArr.slice(0, -1), function (key, idx) { if (!_.has(treePos, key)) treePos[key] = {}; else if (!_.isObject(treePos[key])) { @@ -175,7 +174,7 @@ var pathsToTree = function (paths, newLeaveFn, conflictFn, tree) { return true; }); - if (sucess) { + if (success) { var lastKey = _.last(pathArr); if (!_.has(treePos, lastKey)) treePos[lastKey] = newLeaveFn(keyPath); From a8fd4eefb5319a3392ed89041f9f4c639315b15b Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Tue, 19 Nov 2013 16:14:15 -0800 Subject: [PATCH 108/190] Make "getPathsWithoutNumericKeys" a package scope variable rather than globally available underscore method. --- packages/minimongo/projection.js | 2 +- packages/minimongo/selector.js | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/minimongo/projection.js b/packages/minimongo/projection.js index e0c6aed8a6..8a92e8f54a 100644 --- a/packages/minimongo/projection.js +++ b/packages/minimongo/projection.js @@ -52,7 +52,7 @@ LocalCollection._combineSelectorAndProjection = function (selector, projection) var prjDetails = projectionDetails(projection); var tree = prjDetails.tree; var mergedProjection = {}; - var selectorPaths = LocalCollection._getPathsWithoutNumericKeys(selector); + var selectorPaths = getPathsWithoutNumericKeys(selector); // merge the paths to include tree = pathsToTree(selectorPaths, diff --git a/packages/minimongo/selector.js b/packages/minimongo/selector.js index db5caaaa74..4406e2dfe3 100644 --- a/packages/minimongo/selector.js +++ b/packages/minimongo/selector.js @@ -831,7 +831,7 @@ LocalCollection._isSelectorAffectedByModifier = function (selector, modifier) { }); }; 
-LocalCollection._getPathsWithoutNumericKeys = function (sel) { +getPathsWithoutNumericKeys = function (sel) { return _.map(getPaths(sel), function (path) { return _.reject(path.split('.'), numericKey).join('.'); }); From d5ea98799d381051d70e5810fbcdce8c0aeb05c0 Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Tue, 19 Nov 2013 16:19:17 -0800 Subject: [PATCH 109/190] Tests on $where --- packages/minimongo/minimongo_tests.js | 36 +++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/packages/minimongo/minimongo_tests.js b/packages/minimongo/minimongo_tests.js index a61269f230..19fb6a306f 100644 --- a/packages/minimongo/minimongo_tests.js +++ b/packages/minimongo/minimongo_tests.js @@ -2595,6 +2595,24 @@ Tinytest.add("minimongo - selector and projection combination", function (test) 'z': true }, "numbered keys in selector - incl"); + testSelProjectionComb({ + 'a.b.c': 42, + $where: function () { return true; } + }, { + 'a.b': 1, + 'z.z': 1 + }, {}, "$where in the selector - incl"); + + testSelProjectionComb({ + $or: [ + {'a.b.c': 42}, + {$where: function () { return true; } } + ] + }, { + 'a.b': 1, + 'z.z': 1 + }, {}, "$where in the selector - incl"); + // Test with exclusive projection testSelProjectionComb({ a: 1, b: 2 }, { b: 0, c: 0, d: 0 }, { c: false, d: false }, "simplest excl"); testSelProjectionComb({ $or: [{ a: 1234, e: {$lt: 5} }], b: 2 }, { b: 0, c: 0, d: 0 }, { c: false, d: false }, "simplest excl, branching"); @@ -2699,5 +2717,23 @@ Tinytest.add("minimongo - selector and projection combination", function (test) 'x.x': false, }, "numbered keys in selector - excl"); + testSelProjectionComb({ + 'a.b.c': 42, + $where: function () { return true; } + }, { + 'a.b': 0, + 'z.z': 0 + }, {}, "$where in the selector - excl"); + + testSelProjectionComb({ + $or: [ + {'a.b.c': 42}, + {$where: function () { return true; } } + ] + }, { + 'a.b': 0, + 'z.z': 0 + }, {}, "$where in the selector - excl"); + }); From cc260af22d927d201797faa16382c732564181a5 
Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Tue, 19 Nov 2013 16:46:08 -0800 Subject: [PATCH 110/190] Handle the $where operator in the selector and projection combination --- packages/minimongo/projection.js | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/packages/minimongo/projection.js b/packages/minimongo/projection.js index 8a92e8f54a..056035130f 100644 --- a/packages/minimongo/projection.js +++ b/packages/minimongo/projection.js @@ -49,10 +49,18 @@ LocalCollection._compileProjection = function (fields) { // @returns Object - projection object (same as fields option of mongo cursor) LocalCollection._combineSelectorAndProjection = function (selector, projection) { + var selectorPaths = getPathsWithoutNumericKeys(selector); + + // Special case for $where operator in the selector - projection should depend + // on all fields of the document. getSelectorPaths returns a list of paths + // selector depends on. If one of the paths is '' (empty string) representing + // the root or the whole document, complete projection should be returned. 
+ if (_.contains(selectorPaths, '')) + return {}; + var prjDetails = projectionDetails(projection); var tree = prjDetails.tree; var mergedProjection = {}; - var selectorPaths = getPathsWithoutNumericKeys(selector); // merge the paths to include tree = pathsToTree(selectorPaths, From b3cd37af59fc48b1b22e5c08d4c3ed05af782a4e Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Tue, 19 Nov 2013 18:12:33 -0800 Subject: [PATCH 111/190] Fix typos --- packages/minimongo/projection.js | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/minimongo/projection.js b/packages/minimongo/projection.js index 056035130f..d6c245f412 100644 --- a/packages/minimongo/projection.js +++ b/packages/minimongo/projection.js @@ -151,15 +151,15 @@ var projectionDetails = function (fields) { }; // paths - Array: list of mongo style paths -// newLeaveFn - Function: of form function(path) should return a scalar value to -// put into list created for that path +// newLeafFn - Function: of form function(path) should return a scalar value to +// put into list created for that path // conflictFn - Function: of form function(node, path, fullPath) is called // when building a tree path for 'fullPath' node on // 'path' was already a leaf with a value. Must return a // conflict resolution. // initial tree - Optional Object: starting tree. 
// @returns - Object: tree represented as a set of nested objects -var pathsToTree = function (paths, newLeaveFn, conflictFn, tree) { +var pathsToTree = function (paths, newLeafFn, conflictFn, tree) { tree = tree || {}; _.each(paths, function (keyPath) { var treePos = tree; @@ -185,7 +185,7 @@ var pathsToTree = function (paths, newLeaveFn, conflictFn, tree) { if (success) { var lastKey = _.last(pathArr); if (!_.has(treePos, lastKey)) - treePos[lastKey] = newLeaveFn(keyPath); + treePos[lastKey] = newLeafFn(keyPath); else treePos[lastKey] = conflictFn(treePos[lastKey], keyPath, keyPath); } From e9e39d2cb2f6b37d27c30bae822a0a511327e8e3 Mon Sep 17 00:00:00 2001 From: David Glasser Date: Tue, 19 Nov 2013 18:29:51 -0800 Subject: [PATCH 112/190] Rename $OPLOG_URL to $MONGO_OPLOG_URL --- packages/application-configuration/config.js | 2 +- tools/run.js | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/packages/application-configuration/config.js b/packages/application-configuration/config.js index 831b4cfe0e..edd1ac1651 100644 --- a/packages/application-configuration/config.js +++ b/packages/application-configuration/config.js @@ -58,7 +58,7 @@ try { packages: { 'mongo-livedata': { url: process.env.MONGO_URL, - oplog: process.env.OPLOG_URL + oplog: process.env.MONGO_OPLOG_URL }, 'email': { url: process.env.MAIL_URL diff --git a/tools/run.js b/tools/run.js index 696ef6fa9b..52a90ed192 100644 --- a/tools/run.js +++ b/tools/run.js @@ -243,7 +243,7 @@ var startServer = function (options) { env.PORT = options.innerPort; env.MONGO_URL = options.mongoUrl; - env.OPLOG_URL = options.oplogUrl; + env.MONGO_OPLOG_URL = options.oplogUrl; env.ROOT_URL = options.rootUrl; if (options.settings) env.METEOR_SETTINGS = options.settings; @@ -414,10 +414,10 @@ exports.run = function (context, options) { // Allow override and use of external mongo. Matches code in launch_mongo. 
var mongoUrl = process.env.MONGO_URL || ("mongodb://127.0.0.1:" + mongoPort + "/meteor"); - // Allow people to specify an OPLOG_URL override. If someone specifies - // a MONGO_URL but not an OPLOG_URL, disable the oplog. If neither is + // Allow people to specify an MONGO_OPLOG_URL override. If someone specifies a + // MONGO_URL but not an MONGO_OPLOG_URL, disable the oplog. If neither is // specified, use the default internal mongo oplog. - var oplogUrl = process.env.OPLOG_URL || + var oplogUrl = process.env.MONGO_OPLOG_URL || (process.env.MONGO_URL ? undefined : "mongodb://127.0.0.1:" + mongoPort + "/local"); var firstRun = true; From 2527766ad61828c0bad1703c937e70c88d9b94c5 Mon Sep 17 00:00:00 2001 From: David Glasser Date: Tue, 19 Nov 2013 21:41:47 -0800 Subject: [PATCH 113/190] Move observe-from-observe-changes into own file --- packages/minimongo/minimongo.js | 123 -------------------------------- packages/minimongo/observe.js | 122 +++++++++++++++++++++++++++++++ packages/minimongo/package.js | 1 + 3 files changed, 123 insertions(+), 123 deletions(-) create mode 100644 packages/minimongo/observe.js diff --git a/packages/minimongo/minimongo.js b/packages/minimongo/minimongo.js index 9be6db7a40..79c71d44b6 100644 --- a/packages/minimongo/minimongo.js +++ b/packages/minimongo/minimongo.js @@ -967,129 +967,6 @@ LocalCollection._makeChangedFields = function (newDoc, oldDoc) { return fields; }; -LocalCollection._observeFromObserveChanges = function (cursor, callbacks) { - var transform = cursor.getTransform(); - if (!transform) - transform = function (doc) {return doc;}; - if (callbacks.addedAt && callbacks.added) - throw new Error("Please specify only one of added() and addedAt()"); - if (callbacks.changedAt && callbacks.changed) - throw new Error("Please specify only one of changed() and changedAt()"); - if (callbacks.removed && callbacks.removedAt) - throw new Error("Please specify only one of removed() and removedAt()"); - if (callbacks.addedAt || 
callbacks.movedTo || - callbacks.changedAt || callbacks.removedAt) - return LocalCollection._observeOrderedFromObserveChanges(cursor, callbacks, transform); - else - return LocalCollection._observeUnorderedFromObserveChanges(cursor, callbacks, transform); -}; - -LocalCollection._observeUnorderedFromObserveChanges = - function (cursor, callbacks, transform) { - var docs = {}; - var suppressed = !!callbacks._suppress_initial; - var handle = cursor.observeChanges({ - added: function (id, fields) { - var strId = LocalCollection._idStringify(id); - var doc = EJSON.clone(fields); - doc._id = id; - docs[strId] = doc; - suppressed || callbacks.added && callbacks.added(transform(doc)); - }, - changed: function (id, fields) { - var strId = LocalCollection._idStringify(id); - var doc = docs[strId]; - var oldDoc = EJSON.clone(doc); - // writes through to the doc set - LocalCollection._applyChanges(doc, fields); - suppressed || callbacks.changed && callbacks.changed(transform(doc), transform(oldDoc)); - }, - removed: function (id) { - var strId = LocalCollection._idStringify(id); - var doc = docs[strId]; - delete docs[strId]; - suppressed || callbacks.removed && callbacks.removed(transform(doc)); - } - }); - suppressed = false; - return handle; -}; - -LocalCollection._observeOrderedFromObserveChanges = - function (cursor, callbacks, transform) { - var docs = new OrderedDict(LocalCollection._idStringify); - var suppressed = !!callbacks._suppress_initial; - // The "_no_indices" option sets all index arguments to -1 - // and skips the linear scans required to generate them. - // This lets observers that don't need absolute indices - // benefit from the other features of this API -- - // relative order, transforms, and applyChanges -- without - // the speed hit. - var indices = !callbacks._no_indices; - var handle = cursor.observeChanges({ - addedBefore: function (id, fields, before) { - var doc = EJSON.clone(fields); - doc._id = id; - // XXX could `before` be a falsy ID? 
Technically - // idStringify seems to allow for them -- though - // OrderedDict won't call stringify on a falsy arg. - docs.putBefore(id, doc, before || null); - if (!suppressed) { - if (callbacks.addedAt) { - var index = indices ? docs.indexOf(id) : -1; - callbacks.addedAt(transform(EJSON.clone(doc)), - index, before); - } else if (callbacks.added) { - callbacks.added(transform(EJSON.clone(doc))); - } - } - }, - changed: function (id, fields) { - var doc = docs.get(id); - if (!doc) - throw new Error("Unknown id for changed: " + id); - var oldDoc = EJSON.clone(doc); - // writes through to the doc set - LocalCollection._applyChanges(doc, fields); - if (callbacks.changedAt) { - var index = indices ? docs.indexOf(id) : -1; - callbacks.changedAt(transform(EJSON.clone(doc)), - transform(oldDoc), index); - } else if (callbacks.changed) { - callbacks.changed(transform(EJSON.clone(doc)), - transform(oldDoc)); - } - }, - movedBefore: function (id, before) { - var doc = docs.get(id); - var from; - // only capture indexes if we're going to call the callback that needs them. - if (callbacks.movedTo) - from = indices ? docs.indexOf(id) : -1; - docs.moveBefore(id, before || null); - if (callbacks.movedTo) { - var to = indices ? docs.indexOf(id) : -1; - callbacks.movedTo(transform(EJSON.clone(doc)), from, to, - before || null); - } else if (callbacks.moved) { - callbacks.moved(transform(EJSON.clone(doc))); - } - - }, - removed: function (id) { - var doc = docs.get(id); - var index; - if (callbacks.removedAt) - index = indices ? 
docs.indexOf(id) : -1; - docs.remove(id); - callbacks.removedAt && callbacks.removedAt(transform(doc), index); - callbacks.removed && callbacks.removed(transform(doc)); - } - }); - suppressed = false; - return handle; -}; - // Searches $near operator in the selector recursively // (including all $or/$and/$nor/$not branches) var isGeoQuery = function (selector) { diff --git a/packages/minimongo/observe.js b/packages/minimongo/observe.js new file mode 100644 index 0000000000..e3a2f7a8af --- /dev/null +++ b/packages/minimongo/observe.js @@ -0,0 +1,122 @@ +LocalCollection._observeFromObserveChanges = function (cursor, callbacks) { + var transform = cursor.getTransform(); + if (!transform) + transform = function (doc) {return doc;}; + if (callbacks.addedAt && callbacks.added) + throw new Error("Please specify only one of added() and addedAt()"); + if (callbacks.changedAt && callbacks.changed) + throw new Error("Please specify only one of changed() and changedAt()"); + if (callbacks.removed && callbacks.removedAt) + throw new Error("Please specify only one of removed() and removedAt()"); + if (callbacks.addedAt || callbacks.movedTo || + callbacks.changedAt || callbacks.removedAt) + return LocalCollection._observeOrderedFromObserveChanges(cursor, callbacks, transform); + else + return LocalCollection._observeUnorderedFromObserveChanges(cursor, callbacks, transform); +}; + +LocalCollection._observeUnorderedFromObserveChanges = + function (cursor, callbacks, transform) { + var docs = {}; + var suppressed = !!callbacks._suppress_initial; + var handle = cursor.observeChanges({ + added: function (id, fields) { + var strId = LocalCollection._idStringify(id); + var doc = EJSON.clone(fields); + doc._id = id; + docs[strId] = doc; + suppressed || callbacks.added && callbacks.added(transform(doc)); + }, + changed: function (id, fields) { + var strId = LocalCollection._idStringify(id); + var doc = docs[strId]; + var oldDoc = EJSON.clone(doc); + // writes through to the doc set + 
LocalCollection._applyChanges(doc, fields); + suppressed || callbacks.changed && callbacks.changed(transform(doc), transform(oldDoc)); + }, + removed: function (id) { + var strId = LocalCollection._idStringify(id); + var doc = docs[strId]; + delete docs[strId]; + suppressed || callbacks.removed && callbacks.removed(transform(doc)); + } + }); + suppressed = false; + return handle; +}; + +LocalCollection._observeOrderedFromObserveChanges = + function (cursor, callbacks, transform) { + var docs = new OrderedDict(LocalCollection._idStringify); + var suppressed = !!callbacks._suppress_initial; + // The "_no_indices" option sets all index arguments to -1 + // and skips the linear scans required to generate them. + // This lets observers that don't need absolute indices + // benefit from the other features of this API -- + // relative order, transforms, and applyChanges -- without + // the speed hit. + var indices = !callbacks._no_indices; + var handle = cursor.observeChanges({ + addedBefore: function (id, fields, before) { + var doc = EJSON.clone(fields); + doc._id = id; + // XXX could `before` be a falsy ID? Technically + // idStringify seems to allow for them -- though + // OrderedDict won't call stringify on a falsy arg. + docs.putBefore(id, doc, before || null); + if (!suppressed) { + if (callbacks.addedAt) { + var index = indices ? docs.indexOf(id) : -1; + callbacks.addedAt(transform(EJSON.clone(doc)), + index, before); + } else if (callbacks.added) { + callbacks.added(transform(EJSON.clone(doc))); + } + } + }, + changed: function (id, fields) { + var doc = docs.get(id); + if (!doc) + throw new Error("Unknown id for changed: " + id); + var oldDoc = EJSON.clone(doc); + // writes through to the doc set + LocalCollection._applyChanges(doc, fields); + if (callbacks.changedAt) { + var index = indices ? 
docs.indexOf(id) : -1; + callbacks.changedAt(transform(EJSON.clone(doc)), + transform(oldDoc), index); + } else if (callbacks.changed) { + callbacks.changed(transform(EJSON.clone(doc)), + transform(oldDoc)); + } + }, + movedBefore: function (id, before) { + var doc = docs.get(id); + var from; + // only capture indexes if we're going to call the callback that needs them. + if (callbacks.movedTo) + from = indices ? docs.indexOf(id) : -1; + docs.moveBefore(id, before || null); + if (callbacks.movedTo) { + var to = indices ? docs.indexOf(id) : -1; + callbacks.movedTo(transform(EJSON.clone(doc)), from, to, + before || null); + } else if (callbacks.moved) { + callbacks.moved(transform(EJSON.clone(doc))); + } + + }, + removed: function (id) { + var doc = docs.get(id); + var index; + if (callbacks.removedAt) + index = indices ? docs.indexOf(id) : -1; + docs.remove(id); + callbacks.removedAt && callbacks.removedAt(transform(doc), index); + callbacks.removed && callbacks.removed(transform(doc)); + } + }); + suppressed = false; + return handle; +}; diff --git a/packages/minimongo/package.js b/packages/minimongo/package.js index 58cc889784..28b3147705 100644 --- a/packages/minimongo/package.js +++ b/packages/minimongo/package.js @@ -16,6 +16,7 @@ Package.on_use(function (api) { 'projection.js', 'modify.js', 'diff.js', + 'observe.js', 'objectid.js' ]); }); From 1d3d38d6b1fa6ef5b9e0f5be8773b04500895458 Mon Sep 17 00:00:00 2001 From: David Glasser Date: Tue, 19 Nov 2013 23:10:52 -0800 Subject: [PATCH 114/190] Refactor observe-from-observe-changes Specifically, factor out the logic that keeps a cache of the current cursor contents from the part that calls observe callbacks. 
Also: - move IdMap from mongo-livedata to minimongo - get rid of references to the 'moved' callback which no longer exists --- packages/minimongo/diff.js | 6 +- .../{mongo-livedata => minimongo}/id_map.js | 4 +- packages/minimongo/minimongo.js | 26 +- packages/minimongo/observe.js | 261 ++++++++++-------- packages/minimongo/package.js | 1 + packages/mongo-livedata/mongo_driver.js | 4 +- packages/mongo-livedata/oplog.js | 10 +- packages/mongo-livedata/package.js | 2 +- 8 files changed, 178 insertions(+), 136 deletions(-) rename packages/{mongo-livedata => minimongo}/id_map.js (93%) diff --git a/packages/minimongo/diff.js b/packages/minimongo/diff.js index 53910c04dd..4b97628582 100644 --- a/packages/minimongo/diff.js +++ b/packages/minimongo/diff.js @@ -3,8 +3,6 @@ // old_results and new_results: collections of documents. // if ordered, they are arrays. // if unordered, they are maps {_id: doc}. -// observer: object with 'added', 'changed', 'removed', -// and (if ordered) 'moved' functions (each optional) LocalCollection._diffQueryChanges = function (ordered, oldResults, newResults, observer) { if (ordered) @@ -17,8 +15,8 @@ LocalCollection._diffQueryChanges = function (ordered, oldResults, newResults, LocalCollection._diffQueryUnorderedChanges = function (oldResults, newResults, observer) { - if (observer.moved) { - throw new Error("_diffQueryUnordered called with a moved observer!"); + if (observer.movedBefore) { + throw new Error("_diffQueryUnordered called with a movedBefore observer!"); } _.each(newResults, function (newDoc) { diff --git a/packages/mongo-livedata/id_map.js b/packages/minimongo/id_map.js similarity index 93% rename from packages/mongo-livedata/id_map.js rename to packages/minimongo/id_map.js index cf513fa49c..bf112d3572 100644 --- a/packages/mongo-livedata/id_map.js +++ b/packages/minimongo/id_map.js @@ -1,9 +1,9 @@ -IdMap = function () { +LocalCollection._IdMap = function () { var self = this; self._map = {}; }; -_.extend(IdMap.prototype, { 
+_.extend(LocalCollection._IdMap.prototype, { get: function (id) { var self = this; var key = LocalCollection._idStringify(id); diff --git a/packages/minimongo/minimongo.js b/packages/minimongo/minimongo.js index 79c71d44b6..1ddf883aab 100644 --- a/packages/minimongo/minimongo.js +++ b/packages/minimongo/minimongo.js @@ -16,8 +16,7 @@ LocalCollection = function (name) { this.next_qid = 1; // live query id generator // qid -> live query object. keys: - // ordered: bool. ordered queries have moved callbacks and callbacks - // take indices. + // ordered: bool. ordered queries have addedBefore/movedBefore callbacks. // results: array (ordered) or object (unordered) of current results // results_snapshot: snapshot of results. null if not paused. // cursor: Cursor object for the query. @@ -219,11 +218,22 @@ LocalCollection.Cursor.prototype._publishCursor = function (sub) { return Meteor.Collection._publishCursor(self, sub, collection); }; -LocalCollection._isOrderedChanges = function (callbacks) { +LocalCollection._observeChangesCallbacksAreOrdered = function (callbacks) { if (callbacks.added && callbacks.addedBefore) throw new Error("Please specify only one of added() and addedBefore()"); - return typeof callbacks.addedBefore == 'function' || - typeof callbacks.movedBefore === 'function'; + return !!(callbacks.addedBefore || callbacks.movedBefore); +}; + +LocalCollection._observeCallbacksAreOrdered = function (callbacks) { + if (callbacks.addedAt && callbacks.added) + throw new Error("Please specify only one of added() and addedAt()"); + if (callbacks.changedAt && callbacks.changed) + throw new Error("Please specify only one of changed() and changedAt()"); + if (callbacks.removed && callbacks.removedAt) + throw new Error("Please specify only one of removed() and removedAt()"); + + return !!(callbacks.addedAt || callbacks.movedTo || callbacks.changedAt + || callbacks.removedAt); }; // the handle that comes back from observe. 
@@ -258,7 +268,7 @@ _.extend(LocalCollection.Cursor.prototype, { observeChanges: function (options) { var self = this; - var ordered = LocalCollection._isOrderedChanges(options); + var ordered = LocalCollection._observeChangesCallbacksAreOrdered(options); if (!options._allow_unordered && !ordered && (self.skip || self.limit)) throw new Error("must use ordered observe with skip or limit"); @@ -287,8 +297,7 @@ _.extend(LocalCollection.Cursor.prototype, { query.results_snapshot = (ordered ? [] : {}); // wrap callbacks we were passed. callbacks only fire when not paused and - // are never undefined (except that query.moved is undefined for unordered - // callbacks). + // are never undefined // Filters out blacklisted fields according to cursor's projection. // XXX wrong place for this? @@ -318,7 +327,6 @@ _.extend(LocalCollection.Cursor.prototype, { query.changed = wrapCallback(options.changed, 1, true); query.removed = wrapCallback(options.removed); if (ordered) { - query.moved = wrapCallback(options.moved); query.addedBefore = wrapCallback(options.addedBefore, 1); query.movedBefore = wrapCallback(options.movedBefore); } diff --git a/packages/minimongo/observe.js b/packages/minimongo/observe.js index e3a2f7a8af..4a361f11ea 100644 --- a/packages/minimongo/observe.js +++ b/packages/minimongo/observe.js @@ -1,122 +1,159 @@ -LocalCollection._observeFromObserveChanges = function (cursor, callbacks) { - var transform = cursor.getTransform(); - if (!transform) - transform = function (doc) {return doc;}; - if (callbacks.addedAt && callbacks.added) - throw new Error("Please specify only one of added() and addedAt()"); - if (callbacks.changedAt && callbacks.changed) - throw new Error("Please specify only one of changed() and changedAt()"); - if (callbacks.removed && callbacks.removedAt) - throw new Error("Please specify only one of removed() and removedAt()"); - if (callbacks.addedAt || callbacks.movedTo || - callbacks.changedAt || callbacks.removedAt) - return 
LocalCollection._observeOrderedFromObserveChanges(cursor, callbacks, transform); - else - return LocalCollection._observeUnorderedFromObserveChanges(cursor, callbacks, transform); +// XXX maybe move these into another ObserveHelpers package or something + +// Wrapped callbacks should not mutate self.docs. +LocalCollection._CachingChangeObserver = function (callbacks) { + var self = this; + self.ordered = LocalCollection._observeChangesCallbacksAreOrdered(callbacks); + + if (self.ordered) { + self.docs = new OrderedDict(LocalCollection._idStringify); + self.callbacks = { + addedBefore: function (id, fields, before) { + var doc = EJSON.clone(fields); + doc._id = id; + callbacks.addedBefore && callbacks.addedBefore.call(self, id, fields, before); + // This line triggers if we provide added with movedBefore. + callbacks.added && callbacks.added.call(self, id, fields); + // XXX could `before` be a falsy ID? Technically + // idStringify seems to allow for them -- though + // OrderedDict won't call stringify on a falsy arg. + self.docs.putBefore(id, doc, before || null); + }, + movedBefore: function (id, before) { + var doc = self.docs.get(id); + callbacks.movedBefore && callbacks.movedBefore.call(self, id, before); + self.docs.moveBefore(id, before || null); + } + }; + } else { + self.docs = new LocalCollection._IdMap; + self.callbacks = { + added: function (id, fields) { + var doc = EJSON.clone(fields); + callbacks.added && callbacks.added.call(self, id, fields); + doc._id = id; + self.docs.set(id, doc); + } + }; + } + + // The methods in _IdMap and OrderedDict used by these callbacks are + // identical. 
+ self.callbacks.changed = function (id, fields) { + var doc = self.docs.get(id); + if (!doc) + throw new Error("Unknown id for changed: " + id); + callbacks.changed && callbacks.changed.call( + self, id, EJSON.clone(fields)); + LocalCollection._applyChanges(doc, fields); + }; + self.callbacks.removed = function (id) { + callbacks.removed && callbacks.removed.call(self, id); + self.docs.remove(id); + }; }; -LocalCollection._observeUnorderedFromObserveChanges = - function (cursor, callbacks, transform) { - var docs = {}; - var suppressed = !!callbacks._suppress_initial; - var handle = cursor.observeChanges({ - added: function (id, fields) { - var strId = LocalCollection._idStringify(id); - var doc = EJSON.clone(fields); - doc._id = id; - docs[strId] = doc; - suppressed || callbacks.added && callbacks.added(transform(doc)); - }, - changed: function (id, fields) { - var strId = LocalCollection._idStringify(id); - var doc = docs[strId]; - var oldDoc = EJSON.clone(doc); - // writes through to the doc set - LocalCollection._applyChanges(doc, fields); - suppressed || callbacks.changed && callbacks.changed(transform(doc), transform(oldDoc)); - }, - removed: function (id) { - var strId = LocalCollection._idStringify(id); - var doc = docs[strId]; - delete docs[strId]; - suppressed || callbacks.removed && callbacks.removed(transform(doc)); - } - }); - suppressed = false; - return handle; -}; +LocalCollection._observeFromObserveChanges = function (cursor, observeCallbacks) { + var transform = cursor.getTransform() || function (doc) {return doc;}; + var suppressed = !!observeCallbacks._suppress_initial; -LocalCollection._observeOrderedFromObserveChanges = - function (cursor, callbacks, transform) { - var docs = new OrderedDict(LocalCollection._idStringify); - var suppressed = !!callbacks._suppress_initial; - // The "_no_indices" option sets all index arguments to -1 - // and skips the linear scans required to generate them. 
- // This lets observers that don't need absolute indices - // benefit from the other features of this API -- - // relative order, transforms, and applyChanges -- without - // the speed hit. - var indices = !callbacks._no_indices; - var handle = cursor.observeChanges({ - addedBefore: function (id, fields, before) { - var doc = EJSON.clone(fields); - doc._id = id; - // XXX could `before` be a falsy ID? Technically - // idStringify seems to allow for them -- though - // OrderedDict won't call stringify on a falsy arg. - docs.putBefore(id, doc, before || null); - if (!suppressed) { - if (callbacks.addedAt) { - var index = indices ? docs.indexOf(id) : -1; - callbacks.addedAt(transform(EJSON.clone(doc)), - index, before); - } else if (callbacks.added) { - callbacks.added(transform(EJSON.clone(doc))); + var observeChangesCallbacks; + if (LocalCollection._observeCallbacksAreOrdered(observeCallbacks)) { + // The "_no_indices" option sets all index arguments to -1 and skips the + // linear scans required to generate them. This lets observers that don't + // need absolute indices benefit from the other features of this API -- + // relative order, transforms, and applyChanges -- without the speed hit. + var indices = !observeCallbacks._no_indices; + observeChangesCallbacks = { + addedBefore: function (id, fields, before) { + var self = this; + if (suppressed || !(observeCallbacks.addedAt || observeCallbacks.added)) + return; + var doc = transform(_.extend(fields, {_id: id})); + if (observeCallbacks.addedAt) { + var index = indices + ? (before ? 
self.docs.indexOf(before) : self.docs.size()) : -1; + observeCallbacks.addedAt(doc, index, before); + } else { + observeCallbacks.added(doc); + } + }, + changed: function (id, fields) { + var self = this; + if (!(observeCallbacks.changedAt || observeCallbacks.changed)) + return; + var doc = EJSON.clone(self.docs.get(id)); + if (!doc) + throw new Error("Unknown id for changed: " + id); + var oldDoc = transform(EJSON.clone(doc)); + LocalCollection._applyChanges(doc, fields); + doc = transform(doc); + if (observeCallbacks.changedAt) { + var index = indices ? self.docs.indexOf(id) : -1; + observeCallbacks.changedAt(doc, oldDoc, index); + } else { + observeCallbacks.changed(doc, oldDoc); + } + }, + movedBefore: function (id, before) { + var self = this; + if (!observeCallbacks.movedTo) + return; + var from = indices ? self.docs.indexOf(id) : -1; + + var to = indices + ? (before ? self.docs.indexOf(before) : self.docs.size()) : -1; + // When not moving backwards, adjust for the fact that removing the + // document slides everything back one slot. + if (to > from) + --to; + observeCallbacks.movedTo(transform(EJSON.clone(self.docs.get(id))), + from, to, before || null); + }, + removed: function (id) { + var self = this; + if (!(observeCallbacks.removedAt || observeCallbacks.removed)) + return; + // technically maybe there should be an EJSON.clone here, but it's about + // to be removed from self.docs! + var doc = transform(self.docs.get(id)); + if (observeCallbacks.removedAt) { + var index = indices ? self.docs.indexOf(id) : -1; + observeCallbacks.removedAt(doc, index); + } else { + observeCallbacks.removed(doc); } } - }, - changed: function (id, fields) { - var doc = docs.get(id); - if (!doc) - throw new Error("Unknown id for changed: " + id); - var oldDoc = EJSON.clone(doc); - // writes through to the doc set - LocalCollection._applyChanges(doc, fields); - if (callbacks.changedAt) { - var index = indices ? 
docs.indexOf(id) : -1; - callbacks.changedAt(transform(EJSON.clone(doc)), - transform(oldDoc), index); - } else if (callbacks.changed) { - callbacks.changed(transform(EJSON.clone(doc)), - transform(oldDoc)); - } - }, - movedBefore: function (id, before) { - var doc = docs.get(id); - var from; - // only capture indexes if we're going to call the callback that needs them. - if (callbacks.movedTo) - from = indices ? docs.indexOf(id) : -1; - docs.moveBefore(id, before || null); - if (callbacks.movedTo) { - var to = indices ? docs.indexOf(id) : -1; - callbacks.movedTo(transform(EJSON.clone(doc)), from, to, - before || null); - } else if (callbacks.moved) { - callbacks.moved(transform(EJSON.clone(doc))); + }; + } else { + observeChangesCallbacks = { + added: function (id, fields) { + if (!suppressed && observeCallbacks.added) { + var doc = _.extend(fields, {_id: id}); + observeCallbacks.added(transform(doc)); + } + }, + changed: function (id, fields) { + var self = this; + if (observeCallbacks.changed) { + var oldDoc = self.docs.get(id); + var doc = EJSON.clone(oldDoc); + LocalCollection._applyChanges(doc, fields); + observeCallbacks.changed(transform(doc), transform(oldDoc)); + } + }, + removed: function (id) { + var self = this; + if (observeCallbacks.removed) { + observeCallbacks.removed(transform(self.docs.get(id))); + } } + }; + } - }, - removed: function (id) { - var doc = docs.get(id); - var index; - if (callbacks.removedAt) - index = indices ? 
docs.indexOf(id) : -1; - docs.remove(id); - callbacks.removedAt && callbacks.removedAt(transform(doc), index); - callbacks.removed && callbacks.removed(transform(doc)); - } - }); + var changeObserver = new LocalCollection._CachingChangeObserver( + observeChangesCallbacks); + var handle = cursor.observeChanges(changeObserver.callbacks); suppressed = false; return handle; }; diff --git a/packages/minimongo/package.js b/packages/minimongo/package.js index 28b3147705..3469e2acc2 100644 --- a/packages/minimongo/package.js +++ b/packages/minimongo/package.js @@ -16,6 +16,7 @@ Package.on_use(function (api) { 'projection.js', 'modify.js', 'diff.js', + 'id_map.js', 'observe.js', 'objectid.js' ]); diff --git a/packages/mongo-livedata/mongo_driver.js b/packages/mongo-livedata/mongo_driver.js index a9f549f923..b33e0c117f 100644 --- a/packages/mongo-livedata/mongo_driver.js +++ b/packages/mongo-livedata/mongo_driver.js @@ -903,7 +903,7 @@ Cursor.prototype.observe = function (callbacks) { Cursor.prototype.observeChanges = function (callbacks) { var self = this; - var ordered = LocalCollection._isOrderedChanges(callbacks); + var ordered = LocalCollection._observeChangesCallbacksAreOrdered(callbacks); return self._mongo._observeChanges( self._cursorDescription, ordered, callbacks); }; @@ -1126,7 +1126,6 @@ var ObserveHandle = function (liveResultsSet, callbacks) { self._addedBefore = callbacks.addedBefore; self._changed = callbacks.changed; self._removed = callbacks.removed; - self._moved = callbacks.moved; self._movedBefore = callbacks.movedBefore; self._observeHandleId = nextObserveHandleId++; }; @@ -1291,7 +1290,6 @@ var LiveResultsSet = function (cursorDescription, mongoHandle, ordered, self._callbackMultiplexer = {}; var callbackNames = ['added', 'changed', 'removed']; if (self._ordered) { - callbackNames.push('moved'); callbackNames.push('addedBefore'); callbackNames.push('movedBefore'); } diff --git a/packages/mongo-livedata/oplog.js b/packages/mongo-livedata/oplog.js index 
3326051d75..078c9b338d 100644 --- a/packages/mongo-livedata/oplog.js +++ b/packages/mongo-livedata/oplog.js @@ -32,7 +32,7 @@ MongoConnection.prototype._observeChangesWithOplog = function ( var phase = PHASE.INITIALIZING; - var published = new IdMap; + var published = new LocalCollection._IdMap; var selector = cursorDescription.selector; var selectorFn = LocalCollection._compileSelector(selector); var projection = cursorDescription.options.fields || {}; @@ -42,8 +42,8 @@ MongoConnection.prototype._observeChangesWithOplog = function ( var sharedProjection = LocalCollection._combineSelectorAndProjection(selector, projection); var sharedProjectionFn = LocalCollection._compileProjection(sharedProjection); - var needToFetch = new IdMap; - var currentlyFetching = new IdMap; + var needToFetch = new LocalCollection._IdMap; + var currentlyFetching = new LocalCollection._IdMap; var add = function (doc) { var id = doc._id; @@ -93,7 +93,7 @@ MongoConnection.prototype._observeChangesWithOplog = function ( throw new Error("Surprising phase in fetchModifiedDocuments: " + phase); currentlyFetching = needToFetch; - needToFetch = new IdMap; + needToFetch = new LocalCollection._IdMap; var waiting = 0; var error = null; var fut = new Future; @@ -117,7 +117,7 @@ MongoConnection.prototype._observeChangesWithOplog = function ( fut.wait(); if (error) throw error; - currentlyFetching = new IdMap; + currentlyFetching = new LocalCollection._IdMap; } beSteady(); }; diff --git a/packages/mongo-livedata/package.js b/packages/mongo-livedata/package.js index f0ad107cdb..d93d7a70cb 100644 --- a/packages/mongo-livedata/package.js +++ b/packages/mongo-livedata/package.js @@ -42,7 +42,7 @@ Package.on_use(function (api) { // For tests only. 
api.export('MongoTest', 'server', {testOnly: true}); - api.add_files(['id_map.js', 'doc_fetcher.js', 'mongo_driver.js', + api.add_files(['doc_fetcher.js', 'mongo_driver.js', 'oplog.js'], 'server'); api.add_files('local_collection_driver.js', ['client', 'server']); api.add_files('remote_collection_driver.js', 'server'); From 9d5905896360d9531c5c893b66ab5294d22f6274 Mon Sep 17 00:00:00 2001 From: David Glasser Date: Thu, 21 Nov 2013 15:35:53 -0800 Subject: [PATCH 115/190] Refactor observe dedup; support it for oplog. --- packages/facts/facts.js | 2 + packages/livedata/crossbar.js | 2 +- packages/minimongo/id_map.js | 7 +- packages/minimongo/minimongo.js | 8 +- packages/minimongo/observe.js | 32 +- packages/mongo-livedata/mongo_driver.js | 273 +++++------------- .../mongo-livedata/mongo_livedata_tests.js | 49 ++-- packages/mongo-livedata/observe_multiplex.js | 199 +++++++++++++ packages/mongo-livedata/oplog.js | 97 ++++--- packages/mongo-livedata/package.js | 2 +- 10 files changed, 402 insertions(+), 269 deletions(-) create mode 100644 packages/mongo-livedata/observe_multiplex.js diff --git a/packages/facts/facts.js b/packages/facts/facts.js index ca1791bb74..65f4517530 100644 --- a/packages/facts/facts.js +++ b/packages/facts/facts.js @@ -63,6 +63,8 @@ if (Meteor.isServer) { }); } else { Facts.server = new Meteor.Collection(serverFactsCollection); + // XXX making all clients subscribe all the time is wasteful. + // add an interface here // Meteor.subscribe("facts"); Template.serverFacts.factsByPackage = function () { diff --git a/packages/livedata/crossbar.js b/packages/livedata/crossbar.js index 613ae13840..ef76651887 100644 --- a/packages/livedata/crossbar.js +++ b/packages/livedata/crossbar.js @@ -21,7 +21,7 @@ _.extend(DDPServer._InvalidationCrossbar.prototype, { // XXX It should be legal to call fire() from inside a listen() // callback? 
// - // Note: the LiveResultsSet constructor assumes that a call to listen() never + // Note: the MongoPollster constructor assumes that a call to listen() never // yields. listen: function (trigger, callback) { var self = this; diff --git a/packages/minimongo/id_map.js b/packages/minimongo/id_map.js index bf112d3572..445e465600 100644 --- a/packages/minimongo/id_map.js +++ b/packages/minimongo/id_map.js @@ -3,6 +3,11 @@ LocalCollection._IdMap = function () { self._map = {}; }; +// Some of these methods are designed to match methods on OrderedDict, since +// (eg) ObserveMultiplex and _CachingChangeObserver use them interchangeably. +// (Conceivably, this should be replaced with "UnorderedDict" with a specific +// set of methods that overlap between the two.) + _.extend(LocalCollection._IdMap.prototype, { get: function (id) { var self = this; @@ -32,7 +37,7 @@ _.extend(LocalCollection._IdMap.prototype, { var self = this; self._map = {}; }, - each: function (iterator) { + forEach: function (iterator) { var self = this; _.each(self._map, function (value, key, obj) { var context = this; diff --git a/packages/minimongo/minimongo.js b/packages/minimongo/minimongo.js index 1ddf883aab..54870d87b1 100644 --- a/packages/minimongo/minimongo.js +++ b/packages/minimongo/minimongo.js @@ -5,7 +5,7 @@ // Cursor: a specification for a particular subset of documents, w/ // a defined order, limit, and offset. creating a Cursor with LocalCollection.find(), -// LiveResultsSet: the return value of a live query. +// ObserveHandle: the return value of a live query. LocalCollection = function (name) { this.name = name; @@ -237,7 +237,7 @@ LocalCollection._observeCallbacksAreOrdered = function (callbacks) { }; // the handle that comes back from observe. 
-LocalCollection.LiveResultsSet = function () {}; +LocalCollection.ObserveHandle = function () {}; // options to contain: // * callbacks for observe(): @@ -254,7 +254,7 @@ LocalCollection.LiveResultsSet = function () {}; // * collection: the collection this query is querying // // iff x is a returned query handle, (x instanceof -// LocalCollection.LiveResultsSet) is true +// LocalCollection.ObserveHandle) is true // // initial results delivered through added callback // XXX maybe callbacks should take a list of objects, to expose transactions? @@ -342,7 +342,7 @@ _.extend(LocalCollection.Cursor.prototype, { }); } - var handle = new LocalCollection.LiveResultsSet; + var handle = new LocalCollection.ObserveHandle; _.extend(handle, { collection: self.collection, stop: function () { diff --git a/packages/minimongo/observe.js b/packages/minimongo/observe.js index 4a361f11ea..f6697bfa80 100644 --- a/packages/minimongo/observe.js +++ b/packages/minimongo/observe.js @@ -1,17 +1,31 @@ // XXX maybe move these into another ObserveHelpers package or something // Wrapped callbacks should not mutate self.docs. 
-LocalCollection._CachingChangeObserver = function (callbacks) { +LocalCollection._CachingChangeObserver = function (options) { var self = this; - self.ordered = LocalCollection._observeChangesCallbacksAreOrdered(callbacks); + options = options || {}; + + var orderedFromCallbacks = options.callbacks && + LocalCollection._observeChangesCallbacksAreOrdered(options.callbacks); + if (_.has(options, 'ordered')) { + self.ordered = options.ordered; + if (options.callbacks && options.ordered !== orderedFromCallbacks) + throw Error("ordered option doesn't match callbacks"); + } else if (options.callbacks) { + self.ordered = orderedFromCallbacks; + } else { + throw Error("must provide ordered or callbacks"); + } + var callbacks = options.callbacks || {}; if (self.ordered) { self.docs = new OrderedDict(LocalCollection._idStringify); - self.callbacks = { + self.applyChange = { addedBefore: function (id, fields, before) { var doc = EJSON.clone(fields); doc._id = id; - callbacks.addedBefore && callbacks.addedBefore.call(self, id, fields, before); + callbacks.addedBefore && callbacks.addedBefore.call( + self, id, fields, before); // This line triggers if we provide added with movedBefore. callbacks.added && callbacks.added.call(self, id, fields); // XXX could `before` be a falsy ID? Technically @@ -27,7 +41,7 @@ LocalCollection._CachingChangeObserver = function (callbacks) { }; } else { self.docs = new LocalCollection._IdMap; - self.callbacks = { + self.applyChange = { added: function (id, fields) { var doc = EJSON.clone(fields); callbacks.added && callbacks.added.call(self, id, fields); @@ -39,7 +53,7 @@ LocalCollection._CachingChangeObserver = function (callbacks) { // The methods in _IdMap and OrderedDict used by these callbacks are // identical. 
- self.callbacks.changed = function (id, fields) { + self.applyChange.changed = function (id, fields) { var doc = self.docs.get(id); if (!doc) throw new Error("Unknown id for changed: " + id); @@ -47,7 +61,7 @@ LocalCollection._CachingChangeObserver = function (callbacks) { self, id, EJSON.clone(fields)); LocalCollection._applyChanges(doc, fields); }; - self.callbacks.removed = function (id) { + self.applyChange.removed = function (id) { callbacks.removed && callbacks.removed.call(self, id); self.docs.remove(id); }; @@ -152,8 +166,8 @@ LocalCollection._observeFromObserveChanges = function (cursor, observeCallbacks) } var changeObserver = new LocalCollection._CachingChangeObserver( - observeChangesCallbacks); - var handle = cursor.observeChanges(changeObserver.callbacks); + {callbacks: observeChangesCallbacks}); + var handle = cursor.observeChanges(changeObserver.applyChange); suppressed = false; return handle; }; diff --git a/packages/mongo-livedata/mongo_driver.js b/packages/mongo-livedata/mongo_driver.js index b33e0c117f..ddb01a35de 100644 --- a/packages/mongo-livedata/mongo_driver.js +++ b/packages/mongo-livedata/mongo_driver.js @@ -111,7 +111,7 @@ MongoConnection = function (url, options) { var self = this; options = options || {}; self._connectCallbacks = []; - self._liveResultsSets = {}; + self._observeMultiplexers = {}; var mongoOptions = {db: {safe: true}, server: {}, replSet: {}}; @@ -827,13 +827,13 @@ MongoConnection.prototype._dropIndex = function (collectionName, index) { // like fetch or forEach on it). // // ObserveHandle is the "observe handle" returned from observeChanges. It has a -// reference to a LiveResultsSet. +// reference to an ObserveMultiplexer. // -// LiveResultsSet caches the results of a query and reruns it when necessary. -// It is hooked up to one or more ObserveHandles; a single LiveResultsSet -// can drive multiple sets of observation callbacks if they are for the -// same query. 
- +// ObserveMultiplexer allows multiple identical ObserveHandles to be driven by a +// single low-level observe process such as a MongoPollster. +// +// A MongoPollster caches the results of a query and reruns it when necessary. +// It is hooked up to an ObserveMultiplexer. var CursorDescription = function (collectionName, selector, options) { var self = this; @@ -1118,23 +1118,6 @@ MongoConnection.prototype.tail = function (cursorDescription, docCallback) { }; }; -var nextObserveHandleId = 1; -var ObserveHandle = function (liveResultsSet, callbacks) { - var self = this; - self._liveResultsSet = liveResultsSet; - self._added = callbacks.added; - self._addedBefore = callbacks.addedBefore; - self._changed = callbacks.changed; - self._removed = callbacks.removed; - self._movedBefore = callbacks.movedBefore; - self._observeHandleId = nextObserveHandleId++; -}; -ObserveHandle.prototype.stop = function () { - var self = this; - self._liveResultsSet._removeObserveHandle(self); - self._liveResultsSet = null; -}; - MongoConnection.prototype._observeChanges = function ( cursorDescription, ordered, callbacks) { var self = this; @@ -1143,55 +1126,52 @@ MongoConnection.prototype._observeChanges = function ( return self._observeChangesTailable(cursorDescription, ordered, callbacks); } - // XXX maybe this should actually use deduping too? - if (self._oplogHandle && !ordered && !callbacks._testOnlyPollCallback - // XXX remove this when oplog does de-duping - && !cursorDescription.options._dontUseOplog - && cursorSupportedByOplogTailing(cursorDescription)) { - return self._observeChangesWithOplog(cursorDescription, callbacks); - } - var observeKey = JSON.stringify( _.extend({ordered: ordered}, cursorDescription)); - var liveResultsSet; - var observeHandle; - var newlyCreated = false; + var multiplexer, observeHandle; - // Find a matching LiveResultsSet, or create a new one. This next block is + // Find a matching ObserveMultiplexer, or create a new one. 
This next block is // guaranteed to not yield (and it doesn't call anything that can observe a // new query), so no other calls to this function can interleave with it. Meteor._noYieldsAllowed(function () { - if (_.has(self._liveResultsSets, observeKey)) { - liveResultsSet = self._liveResultsSets[observeKey]; + var observeImplementation; + if (_.has(self._observeMultiplexers, observeKey)) { + multiplexer = self._observeMultiplexers[observeKey]; } else { - // Create a new LiveResultsSet. It is created "locked": no polling can - // take place. - liveResultsSet = new LiveResultsSet( - cursorDescription, - self, - ordered, - function () { - delete self._liveResultsSets[observeKey]; - }, - callbacks._testOnlyPollCallback); - self._liveResultsSets[observeKey] = liveResultsSet; - newlyCreated = true; + // Create a new ObserveMultiplexer. + multiplexer = new ObserveMultiplexer({ + ordered: ordered, + onStop: function () { + observeImplementation.stop(); + delete self._observeMultiplexers[observeKey]; + } + }); + self._observeMultiplexers[observeKey] = multiplexer; + + if (self._oplogHandle && !ordered && !callbacks._testOnlyPollCallback + && cursorSupportedByOplogTailing(cursorDescription)) { + observeImplementation = observeChangesWithOplog( + cursorDescription, self, multiplexer); + } else { + // Start polling. + observeImplementation = new MongoPollster( + cursorDescription, + self, + ordered, + multiplexer, + callbacks._testOnlyPollCallback); + } } - observeHandle = new ObserveHandle(liveResultsSet, callbacks); + observeHandle = new ObserveHandle(multiplexer, callbacks); + // This field is only set for the first ObserveHandle in an + // ObserveMultiplexer. It is only there for use by one test. + if (observeImplementation) + observeHandle._observeImplementation = observeImplementation; }); - if (newlyCreated) { - // This is the first ObserveHandle on this LiveResultsSet. Add it and run - // the initial synchronous poll (which may yield). 
- liveResultsSet._addFirstObserveHandle(observeHandle); - } else { - // Not the first ObserveHandle. Add it to the LiveResultsSet. This call - // yields until we're not in the middle of a poll, and its invocation of the - // initial 'added' callbacks may yield as well. It blocks until the 'added' - // callbacks have fired. - liveResultsSet._addObserveHandleAndSendInitialAdds(observeHandle); - } + // Blocks until the initial adds have been sent. + multiplexer.addHandleAndSendInitialAdds(observeHandle); return observeHandle; }; @@ -1228,14 +1208,16 @@ listenAll = function (cursorDescription, listenCallback) { }; }; -var LiveResultsSet = function (cursorDescription, mongoHandle, ordered, - stopCallback, testOnlyPollCallback) { +var MongoPollster = function (cursorDescription, mongoHandle, ordered, + multiplexer, testOnlyPollCallback) { var self = this; self._cursorDescription = cursorDescription; self._mongoHandle = mongoHandle; self._ordered = ordered; - self._stopCallbacks = [stopCallback]; + self._multiplexer = multiplexer; + self._stopCallbacks = []; + self._stopped = false; // This constructor cannot yield, so we don't create the synchronousCursor yet // (since that can yield). @@ -1243,7 +1225,7 @@ var LiveResultsSet = function (cursorDescription, mongoHandle, ordered, // previous results snapshot. on each poll cycle, diffs against // results drives the callbacks. - self._results = ordered ? [] : {}; + self._results = null; // The number of _pollMongo calls that have been added to self._taskQueue but // have not started running. Used to make sure we never schedule more than one @@ -1253,17 +1235,14 @@ var LiveResultsSet = function (cursorDescription, mongoHandle, ordered, // running") or 1 (for "a poll scheduled that isn't running yet"), but it can // also be 2 if incremented by _suspendPolling. self._pollsScheduledButNotStarted = 0; - // Number of _addObserveHandleAndSendInitialAdds tasks scheduled but not yet - // running. 
_removeObserveHandle uses this to know if it's safe to shut down - // this LiveResultsSet. - self._addHandleTasksScheduledButNotPerformed = 0; self._pendingWrites = []; // people to notify when polling completes - // Make sure to create a separately throttled function for each LiveResultsSet + // Make sure to create a separately throttled function for each MongoPollster // object. self._ensurePollIsScheduled = _.throttle( self._unthrottledEnsurePollIsScheduled, 50 /* ms */); + // XXX figure out if we still need a queue self._taskQueue = new Meteor._SynchronousQueue(); var listenersHandle = listenAll( @@ -1284,33 +1263,6 @@ var LiveResultsSet = function (cursorDescription, mongoHandle, ordered, ); self._stopCallbacks.push(function () { listenersHandle.stop(); }); - // Map from handle ID to ObserveHandle. - self._observeHandles = {}; - - self._callbackMultiplexer = {}; - var callbackNames = ['added', 'changed', 'removed']; - if (self._ordered) { - callbackNames.push('addedBefore'); - callbackNames.push('movedBefore'); - } - _.each(callbackNames, function (callback) { - var handleCallback = '_' + callback; - self._callbackMultiplexer[callback] = function () { - var args = _.toArray(arguments); - // Because callbacks can yield and _removeObserveHandle() (ie, - // handle.stop()) doesn't synchronize its actions with _taskQueue, - // ObserveHandles can disappear from self._observeHandles during this - // dispatch. Thus, we save a copy of the keys of self._observeHandles - // before we start to iterate, and we check to see if the handle is still - // there each time. - _.each(_.keys(self._observeHandles), function (handleId) { - var handle = self._observeHandles[handleId]; - if (handle && handle[handleCallback]) - handle[handleCallback].apply(null, EJSON.clone(args)); - }); - }; - }); - // every once and a while, poll even if we don't think we're dirty, for // eventual consistency with database writes from outside the Meteor // universe. 
@@ -1328,31 +1280,15 @@ var LiveResultsSet = function (cursorDescription, mongoHandle, ordered, }); } + // Make sure we actually poll soon! + self._unthrottledEnsurePollIsScheduled(); + Package.facts && Package.facts.Facts.incrementServerFact( - "mongo-livedata", "live-results-sets", 1); + "mongo-livedata", "mongo-pollsters", 1); }; -_.extend(LiveResultsSet.prototype, { - _addFirstObserveHandle: function (handle) { - var self = this; - if (! _.isEmpty(self._observeHandles)) - throw new Error("Not the first observe handle!"); - if (! _.isEmpty(self._results)) - throw new Error("Call _addFirstObserveHandle before polling!"); - - self._observeHandles[handle._observeHandleId] = handle; - Package.facts && Package.facts.Facts.incrementServerFact( - "mongo-livedata", "observe-handles", 1); - - // Run the first _poll() cycle synchronously (delivering results to the - // first ObserveHandle). - ++self._pollsScheduledButNotStarted; - self._taskQueue.runTask(function () { - self._pollMongo(); - }); - }, - - // This is always called through _.throttle. +_.extend(MongoPollster.prototype, { + // This is always called through _.throttle (except once at startup). _unthrottledEnsurePollIsScheduled: function () { var self = this; if (self._pollsScheduledButNotStarted > 0) @@ -1402,6 +1338,13 @@ _.extend(LiveResultsSet.prototype, { var self = this; --self._pollsScheduledButNotStarted; + var first = false; + if (!self._results) { + first = true; + // XXX maybe use _IdMap/OrderedDict instead? + self._results = self.ordered ? [] : {}; + } + self._testOnlyPollCallback && self._testOnlyPollCallback(); // Save the list of pending writes which this round will commit. @@ -1419,91 +1362,35 @@ _.extend(LiveResultsSet.prototype, { var oldResults = self._results; // Run diffs. (This can yield too.) 
- if (!_.isEmpty(self._observeHandles)) { + if (!self._stopped) { LocalCollection._diffQueryChanges( - self._ordered, oldResults, newResults, self._callbackMultiplexer); + self._ordered, oldResults, newResults, self._multiplexer); } // Replace self._results atomically. self._results = newResults; - // Mark all the writes which existed before this call as commmitted. (If new - // writes have shown up in the meantime, there'll already be another - // _pollMongo task scheduled.) - _.each(writesForCycle, function (w) {w.committed();}); - }, + // Signals the multiplexer to call all initial adds. + if (first) + self._multiplexer.ready(); - // Adds the observe handle to this set and sends its initial added - // callbacks. Meteor._SynchronousQueue guarantees that this won't interleave - // with a call to _pollMongo or another call to this function. - _addObserveHandleAndSendInitialAdds: function (handle) { - var self = this; - - // Check this before calling runTask (even though runTask does the same - // check) so that we don't leak a LiveResultsSet by incrementing - // _addHandleTasksScheduledButNotPerformed and never decrementing it. - if (!self._taskQueue.safeToRunTask()) - throw new Error( - "Can't call observe() from an observe callback on the same query"); - - // Keep track of how many of these tasks are on the queue, so that - // _removeObserveHandle knows if it's safe to GC. - ++self._addHandleTasksScheduledButNotPerformed; - - self._taskQueue.runTask(function () { - if (!self._observeHandles) - throw new Error("Can't add observe handle to stopped LiveResultsSet"); - - if (_.has(self._observeHandles, handle._observeHandleId)) - throw new Error("Duplicate observe handle ID"); - self._observeHandles[handle._observeHandleId] = handle; - --self._addHandleTasksScheduledButNotPerformed; - Package.facts && Package.facts.Facts.incrementServerFact( - "mongo-livedata", "observe-handles", 1); - - // Send initial adds. 
- if (handle._added || handle._addedBefore) { - _.each(self._results, function (doc, i) { - var fields = EJSON.clone(doc); - delete fields._id; - if (self._ordered) { - handle._added && handle._added(doc._id, fields); - handle._addedBefore && handle._addedBefore(doc._id, fields, null); - } else { - handle._added(doc._id, fields); - } - }); - } + // Once the ObserveMultiplexer has processed everything we've done in this + // round, mark all the writes which existed before this call as + // commmitted. (If new writes have shown up in the meantime, there'll + // already be another _pollMongo task scheduled.) + self._multiplexer.onFlush(function () { + _.each(writesForCycle, function (w) { + w.committed(); + }); }); }, - // Remove an observe handle. If it was the last observe handle, call all the - // stop callbacks; you cannot add any more observe handles after this. - // - // This is not synchronized with polls and handle additions: this means that - // you can safely call it from within an observe callback. 
- _removeObserveHandle: function (handle) { + stop: function () { var self = this; - - if (!_.has(self._observeHandles, handle._observeHandleId)) - throw new Error("Unknown observe handle ID " + handle._observeHandleId); - delete self._observeHandles[handle._observeHandleId]; + self._stopped = true; + _.each(self._stopCallbacks, function (c) { c(); }); Package.facts && Package.facts.Facts.incrementServerFact( - "mongo-livedata", "observe-handles", -1); - - if (_.isEmpty(self._observeHandles) && - self._addHandleTasksScheduledButNotPerformed === 0) { - // The last observe handle was stopped; call our stop callbacks, which: - // - removes us from the MongoConnection's _liveResultsSets map - // - stops the poll timer - // - removes us from the invalidation crossbar - _.each(self._stopCallbacks, function (c) { c(); }); - Package.facts && Package.facts.Facts.incrementServerFact( - "mongo-livedata", "live-results-sets", -1); - // This will cause future _addObserveHandleAndSendInitialAdds calls to - // throw. - self._observeHandles = null; - } + "mongo-livedata", "mongo-pollsters", -1); } }); diff --git a/packages/mongo-livedata/mongo_livedata_tests.js b/packages/mongo-livedata/mongo_livedata_tests.js index 35f4f1c127..ce32bb54e9 100644 --- a/packages/mongo-livedata/mongo_livedata_tests.js +++ b/packages/mongo-livedata/mongo_livedata_tests.js @@ -346,7 +346,7 @@ Tinytest.addAsync("mongo-livedata - basics, " + idGeneration, function (test, on Tinytest.addAsync("mongo-livedata - fuzz test, " + idGeneration, function(test, onComplete) { - var run = test.runId(); + var run = Random.id(); var coll; if (Meteor.isClient) { coll = new Meteor.Collection(null, collectionOptions); // local, unmanaged @@ -382,6 +382,15 @@ Tinytest.addAsync("mongo-livedata - fuzz test, " + idGeneration, function(test, } }); + // XXX What if there are multiple observe handles on the ObserveMultiplexer? + // There shouldn't be because the collection has a name unique to this + // run. 
+ if (Meteor.isServer) { + // For now, has to be polling (not oplog). + test.isTrue(obs._observeImplementation); + test.isTrue(obs._observeImplementation._suspendPolling); + } + var step = 0; // Use non-deterministic randomness so we can have a shorter fuzz @@ -413,11 +422,8 @@ Tinytest.addAsync("mongo-livedata - fuzz test, " + idGeneration, function(test, var max_counters = _.clone(counters); finishObserve(function () { - // XXX What if there are multiple observe handles on the LiveResultsSet? - // There shouldn't be because the collection has a name unique to this - // run. if (Meteor.isServer) - obs._liveResultsSet._suspendPolling(); + obs._observeImplementation._suspendPolling(); // Do a batch of 1-10 operations var batch_count = rnd(10) + 1; @@ -450,7 +456,7 @@ Tinytest.addAsync("mongo-livedata - fuzz test, " + idGeneration, function(test, } } if (Meteor.isServer) - obs._liveResultsSet._resumePolling(); + obs._observeImplementation._resumePolling(); }); @@ -513,7 +519,7 @@ Tinytest.addAsync("mongo-livedata - scribbling, " + idGeneration, function (test }); Tinytest.addAsync("mongo-livedata - stop handle in callback, " + idGeneration, function (test, onComplete) { - var run = test.runId(); + var run = Random.id(); var coll; if (Meteor.isClient) { coll = new Meteor.Collection(null, collectionOptions); // local, unmanaged @@ -572,12 +578,11 @@ if (Meteor.isServer) { var coll = new Meteor.Collection("observeInCallback-"+run, collectionOptions); var callbackCalled = false; - // oplog doesn't do de-duping yet, so it doesn't throw on recursive observe - var handle = coll.find({}, {_dontUseOplog: true}).observe({ + var handle = coll.find({}).observe({ added: function (newDoc) { callbackCalled = true; test.throws(function () { - coll.find({}, {_dontUseOplog: true}).observe(); + coll.find({}).observe(); }); } }); @@ -600,12 +605,12 @@ if (Meteor.isServer) { var observer = function (noAdded) { var output = []; var callbacks = { - changedAt: function (newDoc) { + changed: 
function (newDoc) { output.push({changed: newDoc._id}); } }; if (!noAdded) { - callbacks.addedAt = function (doc) { + callbacks.added = function (doc) { output.push({added: doc._id}); }; } @@ -640,11 +645,10 @@ if (Meteor.isServer) { // Original observe not affected. test.length(o1.output, 0); - // White-box test: both observes should have the same underlying - // LiveResultsSet. - var liveResultsSet = o1.handle._liveResultsSet; - test.isTrue(liveResultsSet); - test.isTrue(liveResultsSet === o2.handle._liveResultsSet); + // White-box test: both observes should share an ObserveMultiplexer. + var observeMultiplexer = o1.handle._multiplexer; + test.isTrue(observeMultiplexer); + test.isTrue(observeMultiplexer === o2.handle._multiplexer); // Update. Both observes fire. runInFence(function () { @@ -668,14 +672,15 @@ if (Meteor.isServer) { test.length(o2.output, 1); test.equal(o2.output.shift(), {changed: docId2}); - // Stop second handle. Nothing should happen, but the liveResultsSet should + // Stop second handle. Nothing should happen, but the multiplexer should // be stopped. + test.isTrue(observeMultiplexer._handles); // This will change. o2.handle.stop(); test.length(o1.output, 0); test.length(o2.output, 0); - // White-box: liveResultsSet has nulled its _observeHandles so you can't + // White-box: ObserveMultiplexer has nulled its _handles so you can't // accidentally join to it. - test.isNull(liveResultsSet._observeHandles); + test.isNull(observeMultiplexer._handles); // Start yet another handle on the same query. var o3 = observer(); @@ -687,8 +692,8 @@ if (Meteor.isServer) { // Old observers not called. test.length(o1.output, 0); test.length(o2.output, 0); - // White-box: Different LiveResultsSet. - test.isTrue(liveResultsSet !== o3.handle._liveResultsSet); + // White-box: Different ObserveMultiplexer. + test.isTrue(observeMultiplexer !== o3.handle._multiplexer); // Start another handle with no added callback. Regression test for #589. 
var o4 = observer(true); diff --git a/packages/mongo-livedata/observe_multiplex.js b/packages/mongo-livedata/observe_multiplex.js new file mode 100644 index 0000000000..ade3838d12 --- /dev/null +++ b/packages/mongo-livedata/observe_multiplex.js @@ -0,0 +1,199 @@ +var Future = Npm.require('fibers/future'); + +ObserveMultiplexer = function (options) { + var self = this; + + if (!options || !_.has(options, 'ordered')) + throw Error("must specified ordered"); + + Package.facts && Package.facts.Facts.incrementServerFact( + "mongo-livedata", "observe-multiplexers", 1); + + self._ordered = options.ordered; + self._onStop = options.onStop || function () {}; + self._queue = new Meteor._SynchronousQueue(); + self._handles = {}; + self._ready = false; + self._readyFuture = new Future; + self._cache = new LocalCollection._CachingChangeObserver({ + ordered: options.ordered}); + // Number of addHandleAndSendInitialAdds tasks scheduled but not yet + // running. removeHandle uses this to know if it's time to call the onStop + // callback. + self._addHandleTasksScheduledButNotPerformed = 0; + + _.each(self.callbackNames(), function (callbackName) { + self[callbackName] = function (/* ... */) { + self._applyCallback(callbackName, _.toArray(arguments)); + }; + }); +}; + +_.extend(ObserveMultiplexer.prototype, { + addHandleAndSendInitialAdds: function (handle) { + var self = this; + + // Check this before calling runTask (even though runTask does the same + // check) so that we don't leak an ObserveMultiplexer on error by + // incrementing _addHandleTasksScheduledButNotPerformed and never + // decrementing it. 
+ if (!self._queue.safeToRunTask()) + throw new Error( + "Can't call observeChanges from an observe callback on the same query"); + ++self._addHandleTasksScheduledButNotPerformed; + + Package.facts && Package.facts.Facts.incrementServerFact( + "mongo-livedata", "observe-handles", 1); + + self._queue.runTask(function () { + self._handles[handle._id] = handle; + if (self._ready) + self._sendAdds(handle); + --self._addHandleTasksScheduledButNotPerformed; + }); + // *outside* the task, since otherwise we'd deadlock + self._waitUntilReady(); + }, + + // Remove an observe handle. If it was the last observe handle, call the + // onStop callback; you cannot add any more observe handles after this. + // + // This is not synchronized with polls and handle additions: this means that + // you can safely call it from within an observe callback, but it also means + // that we have to be careful when we iterate over _handles. + removeHandle: function (id) { + var self = this; + delete self._handles[id]; + + Package.facts && Package.facts.Facts.incrementServerFact( + "mongo-livedata", "observe-handles", -1); + + if (_.isEmpty(self._handles) && + self._addHandleTasksScheduledButNotPerformed === 0) { + self._stop(); + } + }, + _stop: function () { + var self = this; + // Call stop callback (which kills the underlying process which sends us + // callbacks and removes us from the connection's dictionary). + self._onStop(); + // Cause future addHandleAndSendInitialAdds calls to throw (but the onStop + // callback should make our connection forget about us). + self._handles = null; + // It shouldn't be possible for us to stop when all our handles still + // haven't been returned from observeChanges! 
+ if (!self._readyFuture.isResolved()) + throw Error("surprising _stop"); + + Package.facts && Package.facts.Facts.incrementServerFact( + "mongo-livedata", "observe-multiplexers", -1); + }, + _waitUntilReady: function (handle) { + var self = this; + self._readyFuture.wait(); + }, + // Sends initial adds to all the handles we know about so far. Does not block. + ready: function () { + var self = this; + self._queue.queueTask(function () { + if (self._ready) + throw Error("can't make ObserveMultiplex ready twice!"); + self._ready = true; + // Use _.keys iteration in case removeHandle is called concurrently. + _.each(_.keys(self._handles), function (handleId) { + var handle = self._handles[handleId]; + handle && self._sendAdds(handle); + }); + self._readyFuture.return(); + }); + }, + onFlush: function (cb) { + var self = this; + self._queue.queueTask(cb); + }, + callbackNames: function () { + var self = this; + if (self._ordered) + return ["addedBefore", "changed", "movedBefore", "removed"]; + else + return ["added", "changed", "removed"]; + }, + _applyCallback: function (callbackName, args) { + var self = this; + self._queue.queueTask(function () { + // First, apply the change to the cache. + // XXX We could make applyChange callbacks promise not to hang on to any + // state from their arguments (assuming that their supplied callbacks + // don't) and skip this clone. Currently 'changed' hangs on to state + // though. + self._cache.applyChange[callbackName].apply(null, EJSON.clone(args)); + // If we haven't finished the initial adds, we have nothing more to do. + if (!self._ready) + return; + // Now multiplex the callbacks out to all observe handles. It's OK if + // these calls yield; since we're inside a task, no other use of our queue + // can continue until these are done. (But we do have to be careful to not + // use a handle that got removed, because removeHandle does not use the + // queue.) 
+ _.each(_.keys(self._handles), function (handleId) { + var handle = self._handles[handleId]; + if (!handle) + return; + var callback = handle['_' + callbackName]; + // clone arguments so that callbacks can mutate their arguments + callback && callback.apply(null, EJSON.clone(args)); + }); + }); + }, + _sendAdds: function (handle) { + var self = this; + if (self._queue.safeToRunTask()) + throw Error("_sendAdds may only be called from within a task!"); + var add = self._ordered ? handle._addedBefore : handle._added; + if (!add) + return; + // note: docs may be an _IdMap or an OrderedDict + self._cache.docs.forEach(function (doc, id) { + if (!_.has(self._handles, handle._id)) + throw Error("handle got removed before sending initial adds!"); + var fields = EJSON.clone(doc); + delete fields._id; + if (self._ordered) + add(id, fields, null); // we're going in order, so add at end + else + add(id, fields); + }); + } +}); + + +var nextObserveHandleId = 1; +ObserveHandle = function (multiplexer, callbacks) { + var self = this; + // The end user is only supposed to call stop(). The other fields are + // accessible to the multiplexer, though. + self._multiplexer = multiplexer; + _.each(multiplexer.callbackNames(), function (name) { + if (callbacks[name]) { + self['_' + name] = callbacks[name]; + } else if (name === "addedBefore" && callbacks.added) { + // Special case: if you specify "added" and "movedBefore", you get an + // ordered observe where for some reason you don't get ordering data on + // the adds. I dunno, we wrote tests for it, there must have been a + // reason. 
+ self._addedBefore = function (id, fields, before) { + callbacks.added(id, fields); + }; + } + }); + self._stopped = false; + self._id = nextObserveHandleId++; +}; +ObserveHandle.prototype.stop = function () { + var self = this; + if (self._stopped) + return; + self._stopped = true; + self._multiplexer.removeHandle(self._id); +}; diff --git a/packages/mongo-livedata/oplog.js b/packages/mongo-livedata/oplog.js index 078c9b338d..0be7d4bdea 100644 --- a/packages/mongo-livedata/oplog.js +++ b/packages/mongo-livedata/oplog.js @@ -21,10 +21,9 @@ var idForOp = function (op) { throw Error("Unknown op: " + EJSON.stringify(op)); }; -MongoConnection.prototype._observeChangesWithOplog = function ( - cursorDescription, callbacks) { - var self = this; - +observeChangesWithOplog = function (cursorDescription, + mongoHandle, + multiplexer) { var stopped = false; Package.facts && Package.facts.Facts.incrementServerFact( @@ -52,14 +51,14 @@ MongoConnection.prototype._observeChangesWithOplog = function ( if (published.has(id)) throw Error("tried to add something already published " + id); published.set(id, sharedProjectionFn(fields)); - callbacks.added && callbacks.added(id, projectionFn(fields)); + multiplexer.added(id, projectionFn(fields)); }; var remove = function (id) { if (!published.has(id)) throw Error("tried to remove something unpublished " + id); published.remove(id); - callbacks.removed && callbacks.removed(id); + multiplexer.removed(id); }; var handleDoc = function (id, newDoc) { @@ -76,13 +75,11 @@ MongoConnection.prototype._observeChangesWithOplog = function ( throw Error("thought that " + id + " was there!"); delete newDoc._id; published.set(id, sharedProjectionFn(newDoc)); - if (callbacks.changed) { - var changed = LocalCollection._makeChangedFields( - _.clone(newDoc), oldDoc); - changed = projectionFn(changed); - if (!_.isEmpty(changed)) - callbacks.changed(id, changed); - } + var changed = LocalCollection._makeChangedFields( + _.clone(newDoc), oldDoc); + changed = 
projectionFn(changed); + if (!_.isEmpty(changed)) + multiplexer.changed(id, changed); } }; @@ -98,10 +95,10 @@ MongoConnection.prototype._observeChangesWithOplog = function ( var error = null; var fut = new Future; Fiber(function () { - currentlyFetching.each(function (cacheKey, id) { + currentlyFetching.forEach(function (cacheKey, id) { // currentlyFetching will not be updated during this loop. waiting++; - self._docFetcher.fetch(cursorDescription.collectionName, id, cacheKey, function (err, doc) { + mongoHandle._docFetcher.fetch(cursorDescription.collectionName, id, cacheKey, function (err, doc) { if (err) { if (!error) error = err; @@ -195,10 +192,10 @@ MongoConnection.prototype._observeChangesWithOplog = function ( oplogEntryHandlers[PHASE.FETCHING] = oplogEntryHandlers[PHASE.STEADY]; - var oplogEntryHandle = self._oplogHandle.onOplogEntry( + var oplogEntryHandle = mongoHandle._oplogHandle.onOplogEntry( cursorDescription.collectionName, function (op) { if (op.op === 'c') { - published.each(function (fields, id) { + published.forEach(function (fields, id) { remove(id); }); } else { @@ -221,32 +218,57 @@ MongoConnection.prototype._observeChangesWithOplog = function ( // This write cannot complete until we've caught up to "this point" in the // oplog, and then made it back to the steady state. Meteor.defer(complete); - self._oplogHandle.waitUntilCaughtUp(); - if (stopped || phase === PHASE.STEADY) - write.committed(); - else - writesToCommitWhenWeReachSteady.push(write); + mongoHandle._oplogHandle.waitUntilCaughtUp(); + // Make sure that all of the callbacks have made it through the + // multiplexer and been delivered to ObserveHandles before committing + // writes. 
+ multiplexer.onFlush(function (){ + if (stopped || phase === PHASE.STEADY) { + write.committed(); + } else { + writesToCommitWhenWeReachSteady.push(write); + } + }); } ); - var initialCursor = new Cursor(self, cursorDescription); - initialCursor.forEach(function (initialDoc) { - add(initialDoc); + // observeChangesWithOplog cannot yield (because the manipulation of + // mongoHandle._observeMultiplexers needs to be yield-free); calling + // multiplexer.ready() is the equivalent of the observeChanges "synchronous" + // return. + Meteor.defer(function () { + if (stopped) + throw new Error("oplog stopped surprisingly early"); + + var initialCursor = new Cursor(mongoHandle, cursorDescription); + initialCursor.forEach(function (initialDoc) { + add(initialDoc); + }); + if (stopped) + throw new Error("oplog stopped quite early"); + // Actually send out the initial adds to the ObserveHandles. + multiplexer.ready(); + + if (stopped) + return; + mongoHandle._oplogHandle.waitUntilCaughtUp(); + + if (stopped) + return; + if (phase !== PHASE.INITIALIZING) + throw Error("Phase unexpectedly " + phase); + + if (needToFetch.isEmpty()) { + beSteady(); + } else { + fetchModifiedDocuments(); + } }); - self._oplogHandle.waitUntilCaughtUp(); - - if (phase !== PHASE.INITIALIZING) - throw Error("Phase unexpectedly " + phase); - - if (needToFetch.isEmpty()) { - beSteady(); - } else { - phase = PHASE.FETCHING; - Meteor.defer(fetchModifiedDocuments); - } - return { + // This stop function is invoked from the onStop of the ObserveMultiplexer, + // so it shouldn't actually be possible to call it until the multiplexer is + // ready. 
stop: function () { if (stopped) return; @@ -266,7 +288,6 @@ MongoConnection.prototype._observeChangesWithOplog = function ( oplogEntryHandle = null; listenersHandle = null; - initialCursor = null; Package.facts && Package.facts.Facts.incrementServerFact( "mongo-livedata", "oplog-observers", -1); diff --git a/packages/mongo-livedata/package.js b/packages/mongo-livedata/package.js index d93d7a70cb..6cb718001a 100644 --- a/packages/mongo-livedata/package.js +++ b/packages/mongo-livedata/package.js @@ -42,7 +42,7 @@ Package.on_use(function (api) { // For tests only. api.export('MongoTest', 'server', {testOnly: true}); - api.add_files(['doc_fetcher.js', 'mongo_driver.js', + api.add_files(['doc_fetcher.js', 'mongo_driver.js', 'observe_multiplex.js', 'oplog.js'], 'server'); api.add_files('local_collection_driver.js', ['client', 'server']); api.add_files('remote_collection_driver.js', 'server'); From de5a92d51f38eb1f54f4d248f8e385203d0ce1c9 Mon Sep 17 00:00:00 2001 From: David Glasser Date: Mon, 25 Nov 2013 11:18:52 -0800 Subject: [PATCH 116/190] Add a comment and assertion to _sendAdds --- packages/mongo-livedata/observe_multiplex.js | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/packages/mongo-livedata/observe_multiplex.js b/packages/mongo-livedata/observe_multiplex.js index ade3838d12..29591d495a 100644 --- a/packages/mongo-livedata/observe_multiplex.js +++ b/packages/mongo-livedata/observe_multiplex.js @@ -146,10 +146,19 @@ _.extend(ObserveMultiplexer.prototype, { }); }); }, + + // Sends initial adds to a handle. It should only be called once the handle is + // ready (ie, the ready callback has been called) and from within a task + // (either the task that is processing the ready() call or the task that is + // processing the addHandleAndSendInitialAdds call). It synchronously invokes + // the handle's added or addedBefore; there's no need to flush the queue + // afterwards to ensure that the callbacks get out. 
_sendAdds: function (handle) { var self = this; if (self._queue.safeToRunTask()) throw Error("_sendAdds may only be called from within a task!"); + if (!self._ready) + throw Error("_sendAdds may only be called once ready!"); var add = self._ordered ? handle._addedBefore : handle._added; if (!add) return; From b9511cad19d22eca965a3d26a3cb61bc28b56075 Mon Sep 17 00:00:00 2001 From: David Glasser Date: Mon, 25 Nov 2013 12:37:17 -0800 Subject: [PATCH 117/190] Stream initial adds to the first handle (and to any other handles added before any initial adds come in) --- packages/minimongo/id_map.js | 2 +- packages/mongo-livedata/observe_multiplex.js | 59 ++++++++++++++------ packages/mongo-livedata/oplog.js | 4 +- 3 files changed, 46 insertions(+), 19 deletions(-) diff --git a/packages/minimongo/id_map.js b/packages/minimongo/id_map.js index 445e465600..c759df528d 100644 --- a/packages/minimongo/id_map.js +++ b/packages/minimongo/id_map.js @@ -29,7 +29,7 @@ _.extend(LocalCollection._IdMap.prototype, { var key = LocalCollection._idStringify(id); return _.has(self._map, key); }, - isEmpty: function () { + empty: function () { var self = this; return _.isEmpty(self._map); }, diff --git a/packages/mongo-livedata/observe_multiplex.js b/packages/mongo-livedata/observe_multiplex.js index 29591d495a..53d7fa1380 100644 --- a/packages/mongo-livedata/observe_multiplex.js +++ b/packages/mongo-livedata/observe_multiplex.js @@ -15,6 +15,10 @@ ObserveMultiplexer = function (options) { self._handles = {}; self._ready = false; self._readyFuture = new Future; + // Any handles added between creation and the first doc being added (or the + // cursor being made ready while empty) get special handling: their adds get + // delivered immediately instead of waiting for ready. 
+ self._initialHandles = {}; self._cache = new LocalCollection._CachingChangeObserver({ ordered: options.ordered}); // Number of addHandleAndSendInitialAdds tasks scheduled but not yet @@ -47,8 +51,11 @@ _.extend(ObserveMultiplexer.prototype, { self._queue.runTask(function () { self._handles[handle._id] = handle; - if (self._ready) + if (self._ready) { self._sendAdds(handle); + } else if (self._cache.docs.empty()) { + self._initialHandles[handle._id] = handle; + } --self._addHandleTasksScheduledButNotPerformed; }); // *outside* the task, since otherwise we'd deadlock @@ -63,6 +70,13 @@ _.extend(ObserveMultiplexer.prototype, { // that we have to be careful when we iterate over _handles. removeHandle: function (id) { var self = this; + + // This should not be possible: you can only call removeHandle by having + // access to the ObserveHandle, which isn't returned to user code until the + // multiplex is ready. + if (!self._ready || self._initialHandles) + throw new Error("Can't remove handles until the multiplex is ready"); + delete self._handles[id]; Package.facts && Package.facts.Facts.incrementServerFact( @@ -83,8 +97,10 @@ _.extend(ObserveMultiplexer.prototype, { self._handles = null; // It shouldn't be possible for us to stop when all our handles still // haven't been returned from observeChanges! + if (!self._ready) + throw Error("surprising _stop: not ready"); if (!self._readyFuture.isResolved()) - throw Error("surprising _stop"); + throw Error("surprising _stop: unresolved"); Package.facts && Package.facts.Facts.incrementServerFact( "mongo-livedata", "observe-multiplexers", -1); @@ -99,12 +115,17 @@ _.extend(ObserveMultiplexer.prototype, { self._queue.queueTask(function () { if (self._ready) throw Error("can't make ObserveMultiplex ready twice!"); - self._ready = true; - // Use _.keys iteration in case removeHandle is called concurrently. 
- _.each(_.keys(self._handles), function (handleId) { - var handle = self._handles[handleId]; - handle && self._sendAdds(handle); + // We can assume that removeHandle isn't called during this loop because + // you can't stop a handle until the synchronous bit is done. (If it is, + // removeHandle will throw due to _ready being false.) + _.each(self._handles, function (handle, handleId) { + // If this was an "initial handle", we already sent its adds. + if (_.has(self._initialHandles, handleId)) + return; + self._sendAdds(handle); }); + self._initialHandles = null; + self._ready = true; self._readyFuture.return(); }); }, @@ -128,15 +149,24 @@ _.extend(ObserveMultiplexer.prototype, { // don't) and skip this clone. Currently 'changed' hangs on to state // though. self._cache.applyChange[callbackName].apply(null, EJSON.clone(args)); - // If we haven't finished the initial adds, we have nothing more to do. - if (!self._ready) - return; + + var handleIds = _.keys(self._handles); + // If we haven't finished the initial adds, then the only callbacks that + // we multiplex out are those to the "initial handles": handles that got + // added before any initial adds were received. (This allows us to stream + // the first handle's adds out rather than buffering them until ready().) + if (!self._ready) { + if (callbackName !== 'added' && callbackName !== 'addedBefore') + throw new Error("Got " + callbackName + " during initial adds"); + handleIds = _.keys(self._initialHandles); + } + // Now multiplex the callbacks out to all observe handles. It's OK if // these calls yield; since we're inside a task, no other use of our queue // can continue until these are done. (But we do have to be careful to not // use a handle that got removed, because removeHandle does not use the - // queue.) - _.each(_.keys(self._handles), function (handleId) { + // queue; thus, we iterate over an array of keys that we control.) 
+ _.each(handleIds, function (handleId) { var handle = self._handles[handleId]; if (!handle) return; @@ -147,8 +177,7 @@ _.extend(ObserveMultiplexer.prototype, { }); }, - // Sends initial adds to a handle. It should only be called once the handle is - // ready (ie, the ready callback has been called) and from within a task + // Sends initial adds to a handle. It should only be called from within a task // (either the task that is processing the ready() call or the task that is // processing the addHandleAndSendInitialAdds call). It synchronously invokes // the handle's added or addedBefore; there's no need to flush the queue @@ -157,8 +186,6 @@ _.extend(ObserveMultiplexer.prototype, { var self = this; if (self._queue.safeToRunTask()) throw Error("_sendAdds may only be called from within a task!"); - if (!self._ready) - throw Error("_sendAdds may only be called once ready!"); var add = self._ordered ? handle._addedBefore : handle._added; if (!add) return; diff --git a/packages/mongo-livedata/oplog.js b/packages/mongo-livedata/oplog.js index 0be7d4bdea..754bc43835 100644 --- a/packages/mongo-livedata/oplog.js +++ b/packages/mongo-livedata/oplog.js @@ -85,7 +85,7 @@ observeChangesWithOplog = function (cursorDescription, var fetchModifiedDocuments = function () { phase = PHASE.FETCHING; - while (!stopped && !needToFetch.isEmpty()) { + while (!stopped && !needToFetch.empty()) { if (phase !== PHASE.FETCHING) throw new Error("Surprising phase in fetchModifiedDocuments: " + phase); @@ -258,7 +258,7 @@ observeChangesWithOplog = function (cursorDescription, if (phase !== PHASE.INITIALIZING) throw Error("Phase unexpectedly " + phase); - if (needToFetch.isEmpty()) { + if (needToFetch.empty()) { beSteady(); } else { fetchModifiedDocuments(); From 604a48f64fea265aabe835c1fb9ec5d32841f8d1 Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Wed, 6 Nov 2013 17:19:26 -0800 Subject: [PATCH 118/190] Implement _canSelectorBecomeTrueByModifier Is used to prune unnecessary fetches in oplog 
tailing. --- packages/minimongo/minimongo_tests.js | 131 ++++++++++++++++++++++++++ packages/minimongo/modify.js | 71 ++++++++------ packages/minimongo/projection.js | 2 +- packages/minimongo/selector.js | 67 +++++++++++++ 4 files changed, 242 insertions(+), 29 deletions(-) diff --git a/packages/minimongo/minimongo_tests.js b/packages/minimongo/minimongo_tests.js index 19fb6a306f..d59ae5385b 100644 --- a/packages/minimongo/minimongo_tests.js +++ b/packages/minimongo/minimongo_tests.js @@ -2737,3 +2737,134 @@ Tinytest.add("minimongo - selector and projection combination", function (test) }); +(function () { + // TODO: Tests for "can selector become true by modifier" are incomplete, + // absent or test the functionality of "not ideal" implementation (test checks + // that certain case always returns true as implementation is incomplete) + // - tests with $and/$or/$nor/$not branches (are absent) + // - more tests with arrays fields and numeric keys (incomplete and test "not + // ideal" implementation) + // - tests when numeric keys actually mean numeric keys, not array indexes + // (are absent) + // - tests with $-operators in the selector (are incomplete and test "not + // ideal" implementation) + + var test = null; // set this global in the beginning of every test + // T - should return true + // F - should return false + function T (sel, mod, desc) { + test.isTrue(LocalCollection._canSelectorBecomeTrueByModifier(sel, mod), desc); + } + function F (sel, mod, desc) { + test.isFalse(LocalCollection._canSelectorBecomeTrueByModifier(sel, mod), desc); + } + + Tinytest.add("minimongo - can selector become true by modifier - literals (structured tests)", function (t) { + test = t; + + var selector = { + 'a.b.c': 2, + 'foo.bar': { + z: { y: 1 } + }, + 'foo.baz': [ {ans: 42}, "string", false, undefined ], + 'empty.field': null + }; + + T(selector, {$set:{ 'a.b.c': 2 }}); + F(selector, {$unset:{ 'a': 1 }}); + F(selector, {$unset:{ 'a.b': 1 }}); + F(selector, {$unset:{ 
'a.b.c': 1 }}); + T(selector, {$set:{ 'a.b': { c: 2 } }}); + F(selector, {$set:{ 'a.b': {} }}); + T(selector, {$set:{ 'a.b': { c: 2, x: 5 } }}); + F(selector, {$set:{ 'a.b.c.k': 3 }}); + F(selector, {$set:{ 'a.b.c.k': {} }}); + + F(selector, {$unset:{ 'foo': 1 }}); + F(selector, {$unset:{ 'foo.bar': 1 }}); + F(selector, {$unset:{ 'foo.bar.z': 1 }}); + F(selector, {$unset:{ 'foo.bar.z.y': 1 }}); + F(selector, {$set:{ 'foo.bar.x': 1 }}); + F(selector, {$set:{ 'foo.bar': {} }}); + F(selector, {$set:{ 'foo.bar': 3 }}); + T(selector, {$set:{ 'foo.bar': { z: { y: 1 } } }}); + T(selector, {$set:{ 'foo.bar.z': { y: 1 } }}); + T(selector, {$set:{ 'foo.bar.z.y': 1 }}); + + F(selector, {$set:{ 'empty.field': {} }}); + T(selector, {$set:{ 'empty': {} }}); + T(selector, {$set:{ 'empty.field': null }}); + T(selector, {$set:{ 'empty.field': undefined }}); + F(selector, {$set:{ 'empty.field.a': 3 }}); + }); + + Tinytest.add("minimongo - can selector become true by modifier - literals (adhoc tests)", function (t) { + test = t; + T({x:1}, {$set:{x:1}}, "simple set scalar"); + T({x:"a"}, {$set:{x:"a"}}, "simple set scalar"); + T({x:false}, {$set:{x:false}}, "simple set scalar"); + F({x:true}, {$set:{x:false}}, "simple set scalar"); + F({x:2}, {$set:{x:3}}, "simple set scalar"); + + F({'foo.bar.baz': 1, x:1}, {$unset:{'foo.bar.baz': 1}, $set:{x:1}}, "simple unset of the interesting path"); + F({'foo.bar.baz': 1, x:1}, {$unset:{'foo.bar': 1}, $set:{x:1}}, "simple unset of the interesting path prefix"); + F({'foo.bar.baz': 1, x:1}, {$unset:{'foo': 1}, $set:{x:1}}, "simple unset of the interesting path prefix"); + F({'foo.bar.baz': 1}, {$unset:{'foo.baz': 1}}, "simple unset of the interesting path prefix"); + F({'foo.bar.baz': 1}, {$unset:{'foo.bar.bar': 1}}, "simple unset of the interesting path prefix"); + }); + + Tinytest.add("minimongo - can selector become true by modifier - regexps", function (t) { + test = t; + + // Regexp + T({ 'foo.bar': /^[0-9]+$/i }, { $set: {'foo.bar': 
'01233'} }, "set of regexp"); + // XXX this test should be False, should be fixed within improved implementation + T({ 'foo.bar': /^[0-9]+$/i, x: 1 }, { $set: {'foo.bar': '0a1233', x: 1} }, "set of regexp"); + // XXX this test should be False, should be fixed within improved implementation + T({ 'foo.bar': /^[0-9]+$/i, x: 1 }, { $unset: {'foo.bar': 1}, $set: { x: 1 } }, "unset of regexp"); + T({ 'foo.bar': /^[0-9]+$/i, x: 1 }, { $set: { x: 1 } }, "don't touch regexp"); + }); + + Tinytest.add("minimongo - can selector become true by modifier - undefined/null", function (t) { + test = t; + // Nulls / Undefined + T({ 'foo.bar': null }, {$set:{'foo.bar': null}}, "set of null looking for null"); + T({ 'foo.bar': null }, {$set:{'foo.bar': undefined}}, "set of undefined looking for null"); + T({ 'foo.bar': undefined }, {$set:{'foo.bar': null}}, "set of null looking for undefined"); + T({ 'foo.bar': undefined }, {$set:{'foo.bar': undefined}}, "set of undefined looking for undefined"); + T({ 'foo.bar': null }, {$set:{'foo': null}}, "set of null of parent path looking for null"); + F({ 'foo.bar': null }, {$set:{'foo.bar.baz': null}}, "set of null of different path looking for null"); + T({ 'foo.bar': null }, { $unset: { 'foo': 1 } }, "unset the parent"); + T({ 'foo.bar': null }, { $unset: { 'foo.bar': 1 } }, "unset tracked path"); + T({ 'foo.bar': null }, { $set: { 'foo': 3 } }, "set the parent"); + T({ 'foo.bar': null }, { $set: { 'foo': {baz:1} } }, "set the parent"); + + }); + + Tinytest.add("minimongo - can selector become true by modifier - literals with arrays", function (t) { + test = t; + // These tests are incomplete and in theory they all should return true as we + // don't support any case with numeric fields yet. 
+ T({'a.1.b': 1, x:1}, {$unset:{'a.1.b': 1}, $set:{x:1}}, "unset of array element's field with exactly the same index as selector"); + F({'a.2.b': 1}, {$unset:{'a.1.b': 1}}, "unset of array element's field with different index as selector"); + // This is false, because if you are looking for array but in reality it is an + // object, it just can't get to true. + F({'a.2.b': 1}, {$unset:{'a.b': 1}}, "unset of field while selector is looking for index"); + T({ 'foo.bar': null }, {$set:{'foo.1.bar': null}}, "set array's element's field to null looking for null"); + T({ 'foo.bar': null }, {$set:{'foo.0.bar': 1, 'foo.1.bar': null}}, "set array's element's field to null looking for null"); + // This is false, because there may remain other array elements that match + // but we modified this test as we don't support this case yet + T({'a.b': 1}, {$unset:{'a.1.b': 1}}, "unset of array element's field"); + }); + + Tinytest.add("minimongo - can selector become true by modifier - set an object literal whose fields are selected", function (t) { + test = t; + T({ 'a.b.c': 1 }, { $set: { 'a.b': { c: 1 } } }, "a simple scalar selector and simple set"); + F({ 'a.b.c': 1 }, { $set: { 'a.b': { c: 2 } } }, "a simple scalar selector and simple set to false"); + F({ 'a.b.c': 1 }, { $set: { 'a.b': { d: 1 } } }, "a simple scalar selector and simple set a wrong literal"); + F({ 'a.b.c': 1 }, { $set: { 'a.b': 222 } }, "a simple scalar selector and simple set a wrong type"); + }); + +})(); + diff --git a/packages/minimongo/modify.js b/packages/minimongo/modify.js index e13c21e666..e52a03b5c3 100644 --- a/packages/minimongo/modify.js +++ b/packages/minimongo/modify.js @@ -23,14 +23,16 @@ LocalCollection._modify = function (doc, mod, isInsert) { if (!is_modifier) { if (mod._id && !EJSON.equals(doc._id, mod._id)) - throw Error("Cannot change the _id of a document"); + throw MinimongoError("Cannot change the _id of a document"); // replace the whole document for (var k in mod) { if (k.substr(0, 
1) === '$') - throw Error("When replacing document, field name may not start with '$'"); + throw MinimongoError( + "When replacing document, field name may not start with '$'"); if (/\./.test(k)) - throw Error("When replacing document, field name may not contain '.'"); + throw MinimongoError( + "When replacing document, field name may not contain '.'"); } new_doc = mod; } else { @@ -43,12 +45,13 @@ LocalCollection._modify = function (doc, mod, isInsert) { if (isInsert && op === '$setOnInsert') mod_func = LocalCollection._modifiers['$set']; if (!mod_func) - throw Error("Invalid modifier specified " + op); + throw MinimongoError("Invalid modifier specified " + op); for (var keypath in mod[op]) { // XXX mongo doesn't allow mod field names to end in a period, // but I don't see why.. it allows '' as a key, as does JS if (keypath.length && keypath[keypath.length-1] === '.') - throw Error("Invalid mod field name, may not end in a period"); + throw MinimongoError( + "Invalid mod field name, may not end in a period"); var arg = mod[op][keypath]; var keyparts = keypath.split('.'); @@ -101,7 +104,8 @@ LocalCollection._findModTarget = function (doc, keyparts, no_create, if (forbid_array) return null; if (!numeric) - throw Error("can't append to array using string field name [" + throw MinimongoError( + "can't append to array using string field name [" + keypart + "]"); keypart = parseInt(keypart); if (last) @@ -113,7 +117,7 @@ LocalCollection._findModTarget = function (doc, keyparts, no_create, if (doc.length === keypart) doc.push({}); else if (typeof doc[keypart] !== "object") - throw Error("can't modify field '" + keyparts[i + 1] + + throw MinimongoError("can't modify field '" + keyparts[i + 1] + "' of list value " + JSON.stringify(doc[keypart])); } } else { @@ -141,18 +145,28 @@ LocalCollection._noCreateModifiers = { LocalCollection._modifiers = { $inc: function (target, field, arg) { if (typeof arg !== "number") - throw Error("Modifier $inc allowed for numbers only"); + 
throw MinimongoError("Modifier $inc allowed for numbers only"); if (field in target) { if (typeof target[field] !== "number") - throw Error("Cannot apply $inc modifier to non-number"); + throw MinimongoError("Cannot apply $inc modifier to non-number"); target[field] += arg; } else { target[field] = arg; } }, $set: function (target, field, arg) { + if (!_.isObject(target)) { // not an array or an object + var e = MinimongoError("Cannot set property on non-object field"); + e.setPropertyError = true; + throw e; + } + if (target === null) { + var e = MinimongoError("Cannot set property on null"); + e.setPropertyError = true; + throw e; + } if (field === '_id' && !EJSON.equals(arg, target._id)) - throw Error("Cannot change the _id of a document"); + throw MinimongoError("Cannot change the _id of a document"); target[field] = EJSON.clone(arg); }, @@ -172,7 +186,7 @@ LocalCollection._modifiers = { if (target[field] === undefined) target[field] = []; if (!(target[field] instanceof Array)) - throw Error("Cannot apply $push modifier to non-array"); + throw MinimongoError("Cannot apply $push modifier to non-array"); if (!(arg && arg.$each)) { // Simple mode: not $each @@ -183,16 +197,16 @@ LocalCollection._modifiers = { // Fancy mode: $each (and maybe $slice and $sort) var toPush = arg.$each; if (!(toPush instanceof Array)) - throw Error("$each must be an array"); + throw MinimongoError("$each must be an array"); // Parse $slice. 
var slice = undefined; if ('$slice' in arg) { if (typeof arg.$slice !== "number") - throw Error("$slice must be a numeric value"); + throw MinimongoError("$slice must be a numeric value"); // XXX should check to make sure integer if (arg.$slice > 0) - throw Error("$slice in $push must be zero or negative"); + throw MinimongoError("$slice in $push must be zero or negative"); slice = arg.$slice; } @@ -200,14 +214,14 @@ LocalCollection._modifiers = { var sortFunction = undefined; if (arg.$sort) { if (slice === undefined) - throw Error("$sort requires $slice to be present"); + throw MinimongoError("$sort requires $slice to be present"); // XXX this allows us to use a $sort whose value is an array, but that's // actually an extension of the Node driver, so it won't work // server-side. Could be confusing! sortFunction = LocalCollection._compileSort(arg.$sort); for (var i = 0; i < toPush.length; i++) { if (LocalCollection._f._type(toPush[i]) !== 3) { - throw Error("$push like modifiers using $sort " + + throw MinimongoError("$push like modifiers using $sort " + "require all elements to be objects"); } } @@ -231,12 +245,12 @@ LocalCollection._modifiers = { }, $pushAll: function (target, field, arg) { if (!(typeof arg === "object" && arg instanceof Array)) - throw Error("Modifier $pushAll/pullAll allowed for arrays only"); + throw MinimongoError("Modifier $pushAll/pullAll allowed for arrays only"); var x = target[field]; if (x === undefined) target[field] = arg; else if (!(x instanceof Array)) - throw Error("Cannot apply $pushAll modifier to non-array"); + throw MinimongoError("Cannot apply $pushAll modifier to non-array"); else { for (var i = 0; i < arg.length; i++) x.push(arg[i]); @@ -247,7 +261,7 @@ LocalCollection._modifiers = { if (x === undefined) target[field] = [arg]; else if (!(x instanceof Array)) - throw Error("Cannot apply $addToSet modifier to non-array"); + throw MinimongoError("Cannot apply $addToSet modifier to non-array"); else { var isEach = false; if 
(typeof arg === "object") { @@ -273,7 +287,7 @@ LocalCollection._modifiers = { if (x === undefined) return; else if (!(x instanceof Array)) - throw Error("Cannot apply $pop modifier to non-array"); + throw MinimongoError("Cannot apply $pop modifier to non-array"); else { if (typeof arg === 'number' && arg < 0) x.splice(0, 1); @@ -288,7 +302,7 @@ LocalCollection._modifiers = { if (x === undefined) return; else if (!(x instanceof Array)) - throw Error("Cannot apply $pull/pullAll modifier to non-array"); + throw MinimongoError("Cannot apply $pull/pullAll modifier to non-array"); else { var out = [] if (typeof arg === "object" && !(arg instanceof Array)) { @@ -315,14 +329,14 @@ LocalCollection._modifiers = { }, $pullAll: function (target, field, arg) { if (!(typeof arg === "object" && arg instanceof Array)) - throw Error("Modifier $pushAll/pullAll allowed for arrays only"); + throw MinimongoError("Modifier $pushAll/pullAll allowed for arrays only"); if (target === undefined) return; var x = target[field]; if (x === undefined) return; else if (!(x instanceof Array)) - throw Error("Cannot apply $pull/pullAll modifier to non-array"); + throw MinimongoError("Cannot apply $pull/pullAll modifier to non-array"); else { var out = [] for (var i = 0; i < x.length; i++) { @@ -342,11 +356,11 @@ LocalCollection._modifiers = { $rename: function (target, field, arg, keypath, doc) { if (keypath === arg) // no idea why mongo has this restriction.. 
- throw Error("$rename source must differ from target"); + throw MinimongoError("$rename source must differ from target"); if (target === null) - throw Error("$rename source field invalid"); + throw MinimongoError("$rename source field invalid"); if (typeof arg !== "string") - throw Error("$rename target must be a string"); + throw MinimongoError("$rename target must be a string"); if (target === undefined) return; var v = target[field]; @@ -355,14 +369,14 @@ LocalCollection._modifiers = { var keyparts = arg.split('.'); var target2 = LocalCollection._findModTarget(doc, keyparts, false, true); if (target2 === null) - throw Error("$rename target field invalid"); + throw MinimongoError("$rename target field invalid"); var field2 = keyparts.pop(); target2[field2] = v; }, $bit: function (target, field, arg) { // XXX mongo only supports $bit on integers, and we only support // native javascript numbers (doubles) so far, so we can't support $bit - throw Error("$bit is not supported"); + throw MinimongoError("$bit is not supported"); } }; @@ -373,3 +387,4 @@ LocalCollection._removeDollarOperators = function (selector) { selectorDoc[k] = selector[k]; return selectorDoc; }; + diff --git a/packages/minimongo/projection.js b/packages/minimongo/projection.js index d6c245f412..f3374556fb 100644 --- a/packages/minimongo/projection.js +++ b/packages/minimongo/projection.js @@ -159,7 +159,7 @@ var projectionDetails = function (fields) { // conflict resolution. // initial tree - Optional Object: starting tree. 
// @returns - Object: tree represented as a set of nested objects -var pathsToTree = function (paths, newLeafFn, conflictFn, tree) { +pathsToTree = function (paths, newLeafFn, conflictFn, tree) { tree = tree || {}; _.each(paths, function (keyPath) { var treePos = tree; diff --git a/packages/minimongo/selector.js b/packages/minimongo/selector.js index 4406e2dfe3..1b804a358e 100644 --- a/packages/minimongo/selector.js +++ b/packages/minimongo/selector.js @@ -837,6 +837,57 @@ getPathsWithoutNumericKeys = function (sel) { }); }; +// @param selector - Object: MongoDB selector. Currently doesn't support +// $-operators and arrays well. +// @param modifier - Object: MongoDB-styled modifier with `$set`s and `$unsets` +// only. (assumed to come from oplog) +// @returns - Boolean: if after applying the modifier, selector can start +// accepting the modified value. +LocalCollection._canSelectorBecomeTrueByModifier = function (selector, modifier) +{ + if (!LocalCollection._isSelectorAffectedByModifier(selector, modifier)) + return false; + + modifier = _.extend({$set:{}, $unset:{}}, modifier); + + if (_.any(_.keys(selector), pathHasNumericKeys) || + _.any(_.keys(modifier.$unset), pathHasNumericKeys) || + _.any(_.keys(modifier.$set), pathHasNumericKeys)) + return true; + + if (!isLiteralSelector(selector)) + return true; + + // convert a selector into an object matching the selector + // { 'a.b': { ans: 42 }, 'foo.bar': null, 'foo.baz': "something" } + // => { a: { b: { ans: 42 } }, foo: { bar: null, baz: "something" } } + var doc = pathsToTree(_.keys(selector), + function (path) { return selector[path]; }, + _.identity /*conflict resolution is no resolution*/); + + var selectorFn = LocalCollection._compileSelector(selector); + + try { + LocalCollection._modify(doc, modifier); + } catch (e) { + // Couldn't set a property on a field which is a scalar or null in the + // selector. 
+ // Example: + // real document: { 'a.b': 3 } + // selector: { 'a': 12 } + // converted selector (ideal document): { 'a': 12 } + // modifier: { $set: { 'a.b': 4 } } + // We don't know what real document was like but from the error raised by + // $set on a scalar field we can reason that the structure of real document + // is completely different. + if (e.name === "MinimongoError" && e.setPropertyError) + return false; + throw e; + } + + return selectorFn(doc); +}; + // Returns a list of key paths the given selector is looking for var getPaths = MinimongoTest.getSelectorPaths = function (sel) { return _.chain(sel).map(function (v, k) { @@ -851,8 +902,24 @@ var getPaths = MinimongoTest.getSelectorPaths = function (sel) { }).flatten().uniq().value(); }; +function pathHasNumericKeys (path) { + return _.any(path.split('.'), numericKey); +} + // string can be converted to integer function numericKey (s) { return /^[0-9]+$/.test(s); } +function isLiteralSelector (selector) { + return _.all(selector, function (subSelector, keyPath) { + if (keyPath.substr(0, 1) === "$" || _.isRegExp(subSelector)) + return false; + if (!_.isObject(subSelector) || _.isArray(subSelector)) + return true; + return _.all(subSelector, function (value, key) { + return key.substr(0, 1) !== "$"; + }); + }); +} + From 9a24e33002ed2c8c2d093f9872ca6cccc9cc2ac7 Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Mon, 25 Nov 2013 13:25:24 -0800 Subject: [PATCH 119/190] Use LocalCollection._canSelectorBecomeTrueByModifier in oplog fetching pruning --- packages/mongo-livedata/oplog.js | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/packages/mongo-livedata/oplog.js b/packages/mongo-livedata/oplog.js index 754bc43835..5f03ec06fa 100644 --- a/packages/mongo-livedata/oplog.js +++ b/packages/mongo-livedata/oplog.js @@ -173,13 +173,8 @@ observeChangesWithOplog = function (cursorDescription, newDoc._id = id; LocalCollection._modify(newDoc, op.o); handleDoc(id, sharedProjectionFn(newDoc)); - } 
else if (LocalCollection._isSelectorAffectedByModifier( + } else if (LocalCollection._canSelectorBecomeTrueByModifier( cursorDescription.selector, op.o)) { - // XXX _isSelectorAffectedByModifier should actually be - // _canModifierChangeSelectorToTrue. because {x: 9} is affected by - // {$set: {x: 7}} but not in a way that is relevant here, because either - // x was already 9 (and this was handled by the previous clause), or x - // was not 9 and this isn't going to affect the selector needToFetch.set(id, op.ts.toString()); if (phase === PHASE.STEADY) fetchModifiedDocuments(); From 58e1749104b29c0bc53b129e4b1617efcf41a41b Mon Sep 17 00:00:00 2001 From: David Glasser Date: Tue, 26 Nov 2013 11:33:27 -0800 Subject: [PATCH 120/190] De-singletonize InvalidationCrossbar --- packages/livedata/crossbar.js | 13 ++++++++----- packages/livedata/crossbar_tests.js | 21 +++++++++++---------- 2 files changed, 19 insertions(+), 15 deletions(-) diff --git a/packages/livedata/crossbar.js b/packages/livedata/crossbar.js index ef76651887..e6db276cac 100644 --- a/packages/livedata/crossbar.js +++ b/packages/livedata/crossbar.js @@ -1,4 +1,8 @@ -DDPServer._InvalidationCrossbar = function () { +// A "crossbar" is a class that provides structured notification registration. +// The "invalidation crossbar" is a specific instance used by the DDP server to +// implement write fence notifications. + +DDPServer._Crossbar = function () { var self = this; self.next_id = 1; @@ -7,7 +11,7 @@ DDPServer._InvalidationCrossbar = function () { self.listeners = {}; }; -_.extend(DDPServer._InvalidationCrossbar.prototype, { +_.extend(DDPServer._Crossbar.prototype, { // Listen for notification that match 'trigger'. A notification // matches if it has the key-value pairs in trigger as a // subset. 
When a notification matches, call 'callback', passing two @@ -57,7 +61,7 @@ _.extend(DDPServer._InvalidationCrossbar.prototype, { if (onComplete) onComplete = Meteor.bindEnvironment(onComplete, function (e) { - Meteor._debug("Exception in InvalidationCrossbar fire complete " + + Meteor._debug("Exception in Crossbar fire complete " + "callback", e.stack); }); @@ -99,5 +103,4 @@ _.extend(DDPServer._InvalidationCrossbar.prototype, { } }); -// singleton -DDPServer._InvalidationCrossbar = new DDPServer._InvalidationCrossbar; +DDPServer._InvalidationCrossbar = new DDPServer._Crossbar; diff --git a/packages/livedata/crossbar_tests.js b/packages/livedata/crossbar_tests.js index d5eed6cedd..2eefa6bdf5 100644 --- a/packages/livedata/crossbar_tests.js +++ b/packages/livedata/crossbar_tests.js @@ -6,15 +6,16 @@ // deep meaning to the matching function, and it could be changed later // as long as it preserves that property. Tinytest.add('livedata - crossbar', function (test) { - test.isTrue(DDPServer._InvalidationCrossbar._matches( - {collection: "C"}, {collection: "C"})); - test.isTrue(DDPServer._InvalidationCrossbar._matches( - {collection: "C", id: "X"}, {collection: "C"})); - test.isTrue(DDPServer._InvalidationCrossbar._matches( - {collection: "C"}, {collection: "C", id: "X"})); - test.isTrue(DDPServer._InvalidationCrossbar._matches( - {collection: "C", id: "X"}, {collection: "C"})); + var crossbar = new DDPServer._Crossbar; + test.isTrue(crossbar._matches({collection: "C"}, + {collection: "C"})); + test.isTrue(crossbar._matches({collection: "C", id: "X"}, + {collection: "C"})); + test.isTrue(crossbar._matches({collection: "C"}, + {collection: "C", id: "X"})); + test.isTrue(crossbar._matches({collection: "C", id: "X"}, + {collection: "C"})); - test.isFalse(DDPServer._InvalidationCrossbar._matches( - {collection: "C", id: "X"}, {collection: "C", id: "Y"})); + test.isFalse(crossbar._matches({collection: "C", id: "X"}, + {collection: "C", id: "Y"})); }); From 
8f664a1236d103da1a5adbbc2f09feca00ee79a8 Mon Sep 17 00:00:00 2001 From: David Glasser Date: Tue, 26 Nov 2013 12:01:37 -0800 Subject: [PATCH 121/190] Use "selector matches specific IDs" code in oplog --- packages/livedata/crossbar.js | 26 +++++--- packages/mongo-livedata/mongo_driver.js | 83 +++++++++++++++---------- packages/mongo-livedata/oplog.js | 40 +++++++----- 3 files changed, 90 insertions(+), 59 deletions(-) diff --git a/packages/livedata/crossbar.js b/packages/livedata/crossbar.js index e6db276cac..342a5e0ae9 100644 --- a/packages/livedata/crossbar.js +++ b/packages/livedata/crossbar.js @@ -2,13 +2,16 @@ // The "invalidation crossbar" is a specific instance used by the DDP server to // implement write fence notifications. -DDPServer._Crossbar = function () { +DDPServer._Crossbar = function (options) { var self = this; + options = options || {}; - self.next_id = 1; + self.nextId = 1; // map from listener id to object. each object has keys 'trigger', // 'callback'. self.listeners = {}; + self.factPackage = options.factPackage || "livedata"; + self.factName = options.factName || null; }; _.extend(DDPServer._Crossbar.prototype, { @@ -29,14 +32,18 @@ _.extend(DDPServer._Crossbar.prototype, { // yields. 
listen: function (trigger, callback) { var self = this; - var id = self.next_id++; + var id = self.nextId++; self.listeners[id] = {trigger: EJSON.clone(trigger), callback: callback}; - Package.facts && Package.facts.Facts.incrementServerFact( - "livedata", "crossbar-listeners", 1); + if (self.factName && Package.facts) { + Package.facts.Facts.incrementServerFact( + self.factPackage, self.factName, 1); + } return { stop: function () { - Package.facts && Package.facts.Facts.incrementServerFact( - "livedata", "crossbar-listeners", -1); + if (self.factName && Package.facts) { + Package.facts.Facts.incrementServerFact( + self.factPackage, self.factName, -1); + } delete self.listeners[id]; } }; @@ -54,6 +61,7 @@ _.extend(DDPServer._Crossbar.prototype, { fire: function (notification, onComplete) { var self = this; var callbacks = []; + // XXX consider refactoring to "index" on "collection" _.each(self.listeners, function (l) { if (self._matches(notification, l.trigger)) callbacks.push(l.callback); @@ -103,4 +111,6 @@ _.extend(DDPServer._Crossbar.prototype, { } }); -DDPServer._InvalidationCrossbar = new DDPServer._Crossbar; +DDPServer._InvalidationCrossbar = new DDPServer._Crossbar({ + factName: "invalidation-crossbar-listeners" +}); diff --git a/packages/mongo-livedata/mongo_driver.js b/packages/mongo-livedata/mongo_driver.js index ddb01a35de..06cd8d5ea6 100644 --- a/packages/mongo-livedata/mongo_driver.js +++ b/packages/mongo-livedata/mongo_driver.js @@ -250,8 +250,9 @@ MongoConnection.prototype._startOplogTailing = function (oplogUrl, var stopped = false; var tailHandle = null; var readyFuture = new Future(); - var nextId = 0; - var callbacksByCollection = {}; + var crossbar = new DDPServer._Crossbar({ + factPackage: "mongo-livedata", factName: "oplog-watchers" + }); var lastProcessedTS = null; // Lazily calculate the basic selector. Don't call baseOplogSelector() at the // top level of this function, because we don't want this function to block. 
@@ -277,27 +278,28 @@ MongoConnection.prototype._startOplogTailing = function (oplogUrl, // XXX should close connections too }, - onOplogEntry: function (collectionName, callback) { + onOplogEntry: function (trigger, callback) { if (stopped) throw new Error("Called onOplogEntry on stopped handle!"); // Calling onOplogEntry requires us to wait for the tailing to be ready. readyFuture.wait(); - callback = Meteor.bindEnvironment(callback, function (err) { + var originalCallback = callback; + callback = Meteor.bindEnvironment(function (notification, onComplete) { + // XXX can we avoid this clone by making oplog.js careful? + try { + originalCallback(EJSON.clone(notification)); + } finally { + onComplete(); + } + }, function (err) { Meteor._debug("Error in oplog callback", err.stack); }); - if (!_.has(callbacksByCollection, collectionName)) - callbacksByCollection[collectionName] = {}; - var callbackId = nextId++; - Package.facts && Package.facts.Facts.incrementServerFact( - "mongo-livedata", "oplog-watchers", 1); - callbacksByCollection[collectionName][callbackId] = callback; + var listenHandle = crossbar.listen(trigger, callback); return { stop: function () { - delete callbacksByCollection[collectionName][callbackId]; - Package.facts && Package.facts.Facts.incrementServerFact( - "mongo-livedata", "oplog-watchers", -1); + listenHandle.stop(); } }; }, @@ -311,8 +313,8 @@ MongoConnection.prototype._startOplogTailing = function (oplogUrl, if (stopped) throw new Error("Called waitUntilCaughtUp on stopped handle!"); - // Calling onOplogEntry requries us to wait for the oplog connection to be - // ready. + // Calling waitUntilCaughtUp requries us to wait for the oplog connection + // to be ready. 
readyFuture.wait(); // We need to make the selector at least as restrictive as the actual @@ -403,16 +405,21 @@ MongoConnection.prototype._startOplogTailing = function (oplogUrl, doc.ns.substr(0, dbName.length + 1) === (dbName + '.'))) throw new Error("Unexpected ns"); - var collectionName = doc.ns.substr(dbName.length + 1); + var trigger = {collection: doc.ns.substr(dbName.length + 1), op: doc}; // Is it a special command and the collection name is hidden somewhere in // operator? - if (collectionName === "$cmd") - collectionName = doc.o.drop; + if (trigger.collection === "$cmd") { + trigger.collection = doc.o.drop; + trigger.dropCollection = true; + } else { + // All other ops have an id. + trigger.id = idForOp(doc); + } - _.each(callbacksByCollection[collectionName], function (callback) { - callback(EJSON.clone(doc)); - }); + var f = new Future; + crossbar.fire(trigger, f.resolver()); + f.wait(); // Now that we've processed this operation, process pending sequencers. if (!doc.ts) @@ -1181,23 +1188,17 @@ MongoConnection.prototype._observeChanges = function ( // here, so that updates to different specific IDs don't cause us to poll. // listenCallback is the same kind of (notification, complete) callback passed // to InvalidationCrossbar.listen. + listenAll = function (cursorDescription, listenCallback) { var listeners = []; - var listenOnTrigger = function (trigger) { + forEachTrigger(cursorDescription, function (trigger) { + // The "drop collection" event is used by the oplog crossbar, not the + // invalidation crossbar. 
+ if (trigger.dropCollection) + return; listeners.push(DDPServer._InvalidationCrossbar.listen( trigger, listenCallback)); - }; - - var key = {collection: cursorDescription.collectionName}; - var specificIds = LocalCollection._idsMatchedBySelector( - cursorDescription.selector); - if (specificIds) { - _.each(specificIds, function (id) { - listenOnTrigger(_.extend({id: id}, key)); - }); - } else { - listenOnTrigger(key); - } + }); return { stop: function () { @@ -1208,6 +1209,20 @@ listenAll = function (cursorDescription, listenCallback) { }; }; +forEachTrigger = function (cursorDescription, triggerCallback) { + var key = {collection: cursorDescription.collectionName}; + var specificIds = LocalCollection._idsMatchedBySelector( + cursorDescription.selector); + if (specificIds) { + _.each(specificIds, function (id) { + triggerCallback(_.extend({id: id}, key)); + }); + triggerCallback(_.extend({dropCollection: true}, key)); + } else { + triggerCallback(key); + } +}; + var MongoPollster = function (cursorDescription, mongoHandle, ordered, multiplexer, testOnlyPollCallback) { var self = this; diff --git a/packages/mongo-livedata/oplog.js b/packages/mongo-livedata/oplog.js index 5f03ec06fa..f9d988f5d5 100644 --- a/packages/mongo-livedata/oplog.js +++ b/packages/mongo-livedata/oplog.js @@ -7,7 +7,7 @@ var PHASE = { STEADY: 3 }; -var idForOp = function (op) { +idForOp = function (op) { if (op.op === 'd') return op.o._id; else if (op.op === 'i') @@ -25,6 +25,7 @@ observeChangesWithOplog = function (cursorDescription, mongoHandle, multiplexer) { var stopped = false; + var stopHandles = []; Package.facts && Package.facts.Facts.incrementServerFact( "mongo-livedata", "oplog-observers", 1); @@ -186,22 +187,26 @@ observeChangesWithOplog = function (cursorDescription, }; oplogEntryHandlers[PHASE.FETCHING] = oplogEntryHandlers[PHASE.STEADY]; - - var oplogEntryHandle = mongoHandle._oplogHandle.onOplogEntry( - cursorDescription.collectionName, function (op) { - if (op.op === 'c') { - 
published.forEach(function (fields, id) { - remove(id); - }); - } else { - // All other operators should be handled depending on phase - oplogEntryHandlers[phase](op); + forEachTrigger(cursorDescription, function (trigger) { + stopHandles.push(mongoHandle._oplogHandle.onOplogEntry( + trigger, function (notification) { + var op = notification.op; + if (op.op === 'c') { + // XXX actually, drop collection needs to be handled by doing a + // re-query + published.forEach(function (fields, id) { + remove(id); + }); + } else { + // All other operators should be handled depending on phase + oplogEntryHandlers[phase](op); + } } - } - ); + )); + }); // XXX ordering w.r.t. everything else? - var listenersHandle = listenAll( + stopHandles.push(listenAll( cursorDescription, function (notification, complete) { // If we're not in a write fence, we don't have to do anything. var fence = DDPServer._CurrentWriteFence.get(); @@ -225,7 +230,7 @@ observeChangesWithOplog = function (cursorDescription, } }); } - ); + )); // observeChangesWithOplog cannot yield (because the manipulation of // mongoHandle._observeMultiplexers needs to be yield-free); calling @@ -268,8 +273,9 @@ observeChangesWithOplog = function (cursorDescription, if (stopped) return; stopped = true; - listenersHandle.stop(); - oplogEntryHandle.stop(); + _.each(stopHandles, function (handle) { + handle.stop(); + }); published = null; selector = null; From 3a73ccdc38aad28c564842cba99008eda1786353 Mon Sep 17 00:00:00 2001 From: David Glasser Date: Tue, 26 Nov 2013 13:24:15 -0800 Subject: [PATCH 122/190] Fix use of crossbar --- packages/mongo-livedata/mongo_driver.js | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/packages/mongo-livedata/mongo_driver.js b/packages/mongo-livedata/mongo_driver.js index 06cd8d5ea6..88add32ffb 100644 --- a/packages/mongo-livedata/mongo_driver.js +++ b/packages/mongo-livedata/mongo_driver.js @@ -405,13 +405,16 @@ MongoConnection.prototype._startOplogTailing = function 
(oplogUrl, doc.ns.substr(0, dbName.length + 1) === (dbName + '.'))) throw new Error("Unexpected ns"); - var trigger = {collection: doc.ns.substr(dbName.length + 1), op: doc}; + var trigger = {collection: doc.ns.substr(dbName.length + 1), + dropCollection: false, + op: doc}; // Is it a special command and the collection name is hidden somewhere in // operator? if (trigger.collection === "$cmd") { trigger.collection = doc.o.drop; trigger.dropCollection = true; + trigger.id = null; } else { // All other ops have an id. trigger.id = idForOp(doc); From f50f88f83d8c46d582aba00bcf233cbacd0dfe96 Mon Sep 17 00:00:00 2001 From: David Glasser Date: Tue, 26 Nov 2013 13:28:45 -0800 Subject: [PATCH 123/190] Minimize _noYieldsAllowed block Previously, observeChangesWithOplog was in it, and it can yield (if we need to wait for the global oplog handle to be ready) --- packages/mongo-livedata/mongo_driver.js | 45 ++++++++++++++----------- 1 file changed, 25 insertions(+), 20 deletions(-) diff --git a/packages/mongo-livedata/mongo_driver.js b/packages/mongo-livedata/mongo_driver.js index 88add32ffb..f382ae9af9 100644 --- a/packages/mongo-livedata/mongo_driver.js +++ b/packages/mongo-livedata/mongo_driver.js @@ -1139,13 +1139,13 @@ MongoConnection.prototype._observeChanges = function ( var observeKey = JSON.stringify( _.extend({ordered: ordered}, cursorDescription)); - var multiplexer, observeHandle; + var multiplexer, observeImplementation; + var firstHandle = false; // Find a matching ObserveMultiplexer, or create a new one. This next block is // guaranteed to not yield (and it doesn't call anything that can observe a // new query), so no other calls to this function can interleave with it. 
Meteor._noYieldsAllowed(function () { - var observeImplementation; if (_.has(self._observeMultiplexers, observeKey)) { multiplexer = self._observeMultiplexers[observeKey]; } else { @@ -1158,27 +1158,32 @@ MongoConnection.prototype._observeChanges = function ( } }); self._observeMultiplexers[observeKey] = multiplexer; - - if (self._oplogHandle && !ordered && !callbacks._testOnlyPollCallback - && cursorSupportedByOplogTailing(cursorDescription)) { - observeImplementation = observeChangesWithOplog( - cursorDescription, self, multiplexer); - } else { - // Start polling. - observeImplementation = new MongoPollster( - cursorDescription, - self, - ordered, - multiplexer, - callbacks._testOnlyPollCallback); - } + firstHandle = true; } - observeHandle = new ObserveHandle(multiplexer, callbacks); + }); + + var observeHandle = new ObserveHandle(multiplexer, callbacks); + + if (firstHandle) { + if (self._oplogHandle && !ordered && !callbacks._testOnlyPollCallback + && cursorSupportedByOplogTailing(cursorDescription)) { + // Can yield! + observeImplementation = observeChangesWithOplog( + cursorDescription, self, multiplexer); + } else { + // Start polling. + observeImplementation = new MongoPollster( + cursorDescription, + self, + ordered, + multiplexer, + callbacks._testOnlyPollCallback); + } + // This field is only set for the first ObserveHandle in an // ObserveMultiplexer. It is only there for use by one test. - if (observeImplementation) - observeHandle._observeImplementation = observeImplementation; - }); + observeHandle._observeImplementation = observeImplementation; + } // Blocks until the initial adds have been sent. 
multiplexer.addHandleAndSendInitialAdds(observeHandle); From 5953b30897d32724ee001f3937418b8a383f78e9 Mon Sep 17 00:00:00 2001 From: David Glasser Date: Tue, 26 Nov 2013 14:35:04 -0800 Subject: [PATCH 124/190] Stuff from in-person code review --- packages/minimongo/observe.js | 8 ++++++- packages/mongo-livedata/observe_multiplex.js | 17 +++++++++++---- packages/mongo-livedata/oplog.js | 23 +++++++++++++------- 3 files changed, 35 insertions(+), 13 deletions(-) diff --git a/packages/minimongo/observe.js b/packages/minimongo/observe.js index f6697bfa80..e7c868f721 100644 --- a/packages/minimongo/observe.js +++ b/packages/minimongo/observe.js @@ -1,6 +1,12 @@ // XXX maybe move these into another ObserveHelpers package or something -// Wrapped callbacks should not mutate self.docs. +// _CachingChangeObserver is an object which receives observeChanges callbacks +// and keeps a cache of the current cursor state up to date in self.docs. Users +// of this class should read the docs field but not modify it. You should pass +// the "applyChange" field as the callbacks to the underlying observeChanges +// call. Optionally, you can specify your own observeChanges callbacks which are +// invoked immediately before the docs field is updated; this object is made +// available as `this` to those callbacks. 
LocalCollection._CachingChangeObserver = function (options) { var self = this; options = options || {}; diff --git a/packages/mongo-livedata/observe_multiplex.js b/packages/mongo-livedata/observe_multiplex.js index 53d7fa1380..ecdea7a379 100644 --- a/packages/mongo-livedata/observe_multiplex.js +++ b/packages/mongo-livedata/observe_multiplex.js @@ -14,10 +14,13 @@ ObserveMultiplexer = function (options) { self._queue = new Meteor._SynchronousQueue(); self._handles = {}; self._ready = false; + self._becomingReady = false; self._readyFuture = new Future; // Any handles added between creation and the first doc being added (or the // cursor being made ready while empty) get special handling: their adds get - // delivered immediately instead of waiting for ready. + // delivered immediately instead of waiting for ready. This is so that new + // queries get their results streamed to the user rather than waiting until + // the whole query is done. self._initialHandles = {}; self._cache = new LocalCollection._CachingChangeObserver({ ordered: options.ordered}); @@ -92,6 +95,8 @@ _.extend(ObserveMultiplexer.prototype, { // Call stop callback (which kills the underlying process which sends us // callbacks and removes us from the connection's dictionary). self._onStop(); + Package.facts && Package.facts.Facts.incrementServerFact( + "mongo-livedata", "observe-multiplexers", -1); // Cause future addHandleAndSendInitialAdds calls to throw (but the onStop // callback should make our connection forget about us). 
self._handles = null; @@ -101,9 +106,6 @@ _.extend(ObserveMultiplexer.prototype, { throw Error("surprising _stop: not ready"); if (!self._readyFuture.isResolved()) throw Error("surprising _stop: unresolved"); - - Package.facts && Package.facts.Facts.incrementServerFact( - "mongo-livedata", "observe-multiplexers", -1); }, _waitUntilReady: function (handle) { var self = this; @@ -112,6 +114,7 @@ _.extend(ObserveMultiplexer.prototype, { // Sends initial adds to all the handles we know about so far. Does not block. ready: function () { var self = this; + self._becomingReady = true; self._queue.queueTask(function () { if (self._ready) throw Error("can't make ObserveMultiplex ready twice!"); @@ -125,12 +128,18 @@ _.extend(ObserveMultiplexer.prototype, { self._sendAdds(handle); }); self._initialHandles = null; + self._becomingReady = false; self._ready = true; self._readyFuture.return(); }); }, + // Calls "cb" once the effects of all "ready", "addHandleAndSendInitialAdds" + // and observe callbacks which came before this call have been propagated to + // all handles. 
onFlush: function (cb) { var self = this; + if (!self._ready && !self._becomingReady) + throw Error("can only call onFlush on a multiplexer that will be ready"); self._queue.queueTask(cb); }, callbackNames: function () { diff --git a/packages/mongo-livedata/oplog.js b/packages/mongo-livedata/oplog.js index f9d988f5d5..a8835beefb 100644 --- a/packages/mongo-livedata/oplog.js +++ b/packages/mongo-livedata/oplog.js @@ -125,8 +125,10 @@ observeChangesWithOplog = function (cursorDescription, phase = PHASE.STEADY; var writes = writesToCommitWhenWeReachSteady; writesToCommitWhenWeReachSteady = []; - _.each(writes, function (w) { - w.committed(); + multiplexer.onFlush(function () { + _.each(writes, function (w) { + w.committed(); + }); }); }; @@ -222,13 +224,13 @@ observeChangesWithOplog = function (cursorDescription, // Make sure that all of the callbacks have made it through the // multiplexer and been delivered to ObserveHandles before committing // writes. - multiplexer.onFlush(function (){ - if (stopped || phase === PHASE.STEADY) { + if (stopped || phase === PHASE.STEADY) { + multiplexer.onFlush(function () { write.committed(); - } else { - writesToCommitWhenWeReachSteady.push(write); - } - }); + }); + } else { + writesToCommitWhenWeReachSteady.push(write); + } } )); @@ -282,6 +284,11 @@ observeChangesWithOplog = function (cursorDescription, needToFetch = null; currentlyFetching = null; + // Note: we *don't* use multiplexer.onFlush here because this stop + // callback is actually invoked by the multiplexer itself when it has + // determined that there are no handles left. So nothing is actually going + // to get flushed (and it's probably not valid to call methods on the + // dying multiplexer). 
_.each(writesToCommitWhenWeReachSteady, function (w) { w.committed(); }); From 2a4189a32a4012536cdbc00f45ea5e2e29af03c6 Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Tue, 26 Nov 2013 15:38:52 -0800 Subject: [PATCH 125/190] Move _combineSelectorAndProjection to a separate file Move _isSelectorAffectedByModifier and _canSelectorBecomeTrueByModifier Exctract server-only files and tests to server-only part of the package --- packages/minimongo/minimongo_server_tests.js | 457 +++++++++++++++++++ packages/minimongo/minimongo_tests.js | 457 ------------------- packages/minimongo/package.js | 10 +- packages/minimongo/projection.js | 60 +-- packages/minimongo/selector.js | 139 +----- packages/minimongo/selector_modifier.js | 137 ++++++ packages/minimongo/selector_projection.js | 58 +++ 7 files changed, 662 insertions(+), 656 deletions(-) create mode 100644 packages/minimongo/minimongo_server_tests.js create mode 100644 packages/minimongo/selector_modifier.js create mode 100644 packages/minimongo/selector_projection.js diff --git a/packages/minimongo/minimongo_server_tests.js b/packages/minimongo/minimongo_server_tests.js new file mode 100644 index 0000000000..afd0487c87 --- /dev/null +++ b/packages/minimongo/minimongo_server_tests.js @@ -0,0 +1,457 @@ +Tinytest.add("minimongo - modifier affects selector", function (test) { + function testSelectorPaths (sel, paths, desc) { + test.isTrue(_.isEqual(MinimongoTest.getSelectorPaths(sel), paths), desc); + } + + testSelectorPaths({ + foo: { + bar: 3, + baz: 42 + } + }, ['foo'], "literal"); + + testSelectorPaths({ + foo: 42, + bar: 33 + }, ['foo', 'bar'], "literal"); + + testSelectorPaths({ + foo: [ 'something' ], + bar: "asdf" + }, ['foo', 'bar'], "literal"); + + testSelectorPaths({ + a: { $lt: 3 }, + b: "you know, literal", + 'path.is.complicated': { $not: { $regex: 'acme.*corp' } } + }, ['a', 'b', 'path.is.complicated'], "literal + operators"); + + testSelectorPaths({ + $or: [{ 'a.b': 1 }, { 'a.b.c': { $lt: 22 } }, + {$and: [{ 
'x.d': { $ne: 5, $gte: 433 } }, { 'a.b': 234 }]}] + }, ['a.b', 'a.b.c', 'x.d'], 'group operators + duplicates'); + + // When top-level value is an object, it is treated as a literal, + // so when you query col.find({ a: { foo: 1, bar: 2 } }) + // it doesn't mean you are looking for anything that has 'a.foo' to be 1 and + // 'a.bar' to be 2, instead you are looking for 'a' to be exatly that object + // with exatly that order of keys. { a: { foo: 1, bar: 2, baz: 3 } } wouldn't + // match it. That's why in this selector 'a' would be important key, not a.foo + // and a.bar. + testSelectorPaths({ + a: { + foo: 1, + bar: 2 + }, + 'b.c': { + literal: "object", + but: "we still observe any changes in 'b.c'" + } + }, ['a', 'b.c'], "literal object"); + + function testSelectorAffectedByModifier (sel, mod, yes, desc) { + if (yes) + test.isTrue(LocalCollection._isSelectorAffectedByModifier(sel, mod, desc)); + else + test.isFalse(LocalCollection._isSelectorAffectedByModifier(sel, mod, desc)); + } + + function affected(sel, mod, desc) { + testSelectorAffectedByModifier(sel, mod, 1, desc); + } + function notAffected(sel, mod, desc) { + testSelectorAffectedByModifier(sel, mod, 0, desc); + } + + notAffected({ foo: 0 }, { $set: { bar: 1 } }, "simplest"); + affected({ foo: 0 }, { $set: { foo: 1 } }, "simplest"); + affected({ foo: 0 }, { $set: { 'foo.bar': 1 } }, "simplest"); + notAffected({ 'foo.bar': 0 }, { $set: { 'foo.baz': 1 } }, "simplest"); + affected({ 'foo.bar': 0 }, { $set: { 'foo.1': 1 } }, "simplest"); + affected({ 'foo.bar': 0 }, { $set: { 'foo.2.bar': 1 } }, "simplest"); + + notAffected({ 'foo': 0 }, { $set: { 'foobaz': 1 } }, "correct prefix check"); + notAffected({ 'foobar': 0 }, { $unset: { 'foo': 1 } }, "correct prefix check"); + notAffected({ 'foo.bar': 0 }, { $unset: { 'foob': 1 } }, "correct prefix check"); + + notAffected({ 'foo.Infinity.x': 0 }, { $unset: { 'foo.x': 1 } }, "we convert integer fields correctly"); + notAffected({ 'foo.1e3.x': 0 }, { $unset: { 
'foo.x': 1 } }, "we convert integer fields correctly"); + + affected({ 'foo.3.bar': 0 }, { $set: { 'foo.3.bar': 1 } }, "observe for an array element"); + + notAffected({ 'foo.4.bar.baz': 0 }, { $unset: { 'foo.3.bar': 1 } }, "delicate work with numeric fields in selector"); + notAffected({ 'foo.4.bar.baz': 0 }, { $unset: { 'foo.bar': 1 } }, "delicate work with numeric fields in selector"); + affected({ 'foo.4.bar.baz': 0 }, { $unset: { 'foo.4.bar': 1 } }, "delicate work with numeric fields in selector"); + affected({ 'foo.bar.baz': 0 }, { $unset: { 'foo.3.bar': 1 } }, "delicate work with numeric fields in selector"); + + affected({ 'foo.0.bar': 0 }, { $set: { 'foo.0.0.bar': 1 } }, "delicate work with nested arrays and selectors by indecies"); +}); + +Tinytest.add("minimongo - selector and projection combination", function (test) { + function testSelProjectionComb (sel, proj, expected, desc) { + test.equal(LocalCollection._combineSelectorAndProjection(sel, proj), expected, desc); + } + + // Test with inclusive projection + testSelProjectionComb({ a: 1, b: 2 }, { b: 1, c: 1, d: 1 }, { a: true, b: true, c: true, d: true }, "simplest incl"); + testSelProjectionComb({ $or: [{ a: 1234, e: {$lt: 5} }], b: 2 }, { b: 1, c: 1, d: 1 }, { a: true, b: true, c: true, d: true, e: true }, "simplest incl, branching"); + testSelProjectionComb({ + 'a.b': { $lt: 3 }, + 'y.0': -1, + 'a.c': 15 + }, { + 'd': 1, + 'z': 1 + }, { + 'a.b': true, + 'y': true, + 'a.c': true, + 'd': true, + 'z': true + }, "multikey paths in selector - incl"); + + testSelProjectionComb({ + foo: 1234, + $and: [{ k: -1 }, { $or: [{ b: 15 }] }] + }, { + 'foo.bar': 1, + 'foo.zzz': 1, + 'b.asdf': 1 + }, { + foo: true, + b: true, + k: true + }, "multikey paths in fields - incl"); + + testSelProjectionComb({ + 'a.b.c': 123, + 'a.b.d': 321, + 'b.c.0': 111, + 'a.e': 12345 + }, { + 'a.b.z': 1, + 'a.b.d.g': 1, + 'c.c.c': 1 + }, { + 'a.b.c': true, + 'a.b.d': true, + 'a.b.z': true, + 'b.c': true, + 'a.e': true, + 'c.c.c': 
true + }, "multikey both paths - incl"); + + testSelProjectionComb({ + 'a.b.c.d': 123, + 'a.b1.c.d': 421, + 'a.b.c.e': 111 + }, { + 'a.b': 1 + }, { + 'a.b': true, + 'a.b1.c.d': true + }, "shadowing one another - incl"); + + testSelProjectionComb({ + 'a.b': 123, + 'foo.bar': false + }, { + 'a.b.c.d': 1, + 'foo': 1 + }, { + 'a.b': true, + 'foo': true + }, "shadowing one another - incl"); + + testSelProjectionComb({ + 'a.b.c': 1 + }, { + 'a.b.c': 1 + }, { + 'a.b.c': true + }, "same paths - incl"); + + testSelProjectionComb({ + 'x.4.y': 42, + 'z.0.1': 33 + }, { + 'x.x': 1 + }, { + 'x.x': true, + 'x.y': true, + 'z': true + }, "numbered keys in selector - incl"); + + testSelProjectionComb({ + 'a.b.c': 42, + $where: function () { return true; } + }, { + 'a.b': 1, + 'z.z': 1 + }, {}, "$where in the selector - incl"); + + testSelProjectionComb({ + $or: [ + {'a.b.c': 42}, + {$where: function () { return true; } } + ] + }, { + 'a.b': 1, + 'z.z': 1 + }, {}, "$where in the selector - incl"); + + // Test with exclusive projection + testSelProjectionComb({ a: 1, b: 2 }, { b: 0, c: 0, d: 0 }, { c: false, d: false }, "simplest excl"); + testSelProjectionComb({ $or: [{ a: 1234, e: {$lt: 5} }], b: 2 }, { b: 0, c: 0, d: 0 }, { c: false, d: false }, "simplest excl, branching"); + testSelProjectionComb({ + 'a.b': { $lt: 3 }, + 'y.0': -1, + 'a.c': 15 + }, { + 'd': 0, + 'z': 0 + }, { + d: false, + z: false + }, "multikey paths in selector - excl"); + + testSelProjectionComb({ + foo: 1234, + $and: [{ k: -1 }, { $or: [{ b: 15 }] }] + }, { + 'foo.bar': 0, + 'foo.zzz': 0, + 'b.asdf': 0 + }, { + }, "multikey paths in fields - excl"); + + testSelProjectionComb({ + 'a.b.c': 123, + 'a.b.d': 321, + 'b.c.0': 111, + 'a.e': 12345 + }, { + 'a.b.z': 0, + 'a.b.d.g': 0, + 'c.c.c': 0 + }, { + 'a.b.z': false, + 'c.c.c': false + }, "multikey both paths - excl"); + + testSelProjectionComb({ + 'a.b.c.d': 123, + 'a.b1.c.d': 421, + 'a.b.c.e': 111 + }, { + 'a.b': 0 + }, { + }, "shadowing one another - excl"); + 
+ testSelProjectionComb({ + 'a.b': 123, + 'foo.bar': false + }, { + 'a.b.c.d': 0, + 'foo': 0 + }, { + }, "shadowing one another - excl"); + + testSelProjectionComb({ + 'a.b.c': 1 + }, { + 'a.b.c': 0 + }, { + }, "same paths - excl"); + + testSelProjectionComb({ + 'a.b': 123, + 'a.c.d': 222, + 'ddd': 123 + }, { + 'a.b': 0, + 'a.c.e': 0, + 'asdf': 0 + }, { + 'a.c.e': false, + 'asdf': false + }, "intercept the selector path - excl"); + + testSelProjectionComb({ + 'a.b.c': 14 + }, { + 'a.b.d': 0 + }, { + 'a.b.d': false + }, "different branches - excl"); + + testSelProjectionComb({ + 'a.b.c.d': "124", + 'foo.bar.baz.que': "some value" + }, { + 'a.b.c.d.e': 0, + 'foo.bar': 0 + }, { + }, "excl on incl paths - excl"); + + testSelProjectionComb({ + 'x.4.y': 42, + 'z.0.1': 33 + }, { + 'x.x': 0, + 'x.y': 0 + }, { + 'x.x': false, + }, "numbered keys in selector - excl"); + + testSelProjectionComb({ + 'a.b.c': 42, + $where: function () { return true; } + }, { + 'a.b': 0, + 'z.z': 0 + }, {}, "$where in the selector - excl"); + + testSelProjectionComb({ + $or: [ + {'a.b.c': 42}, + {$where: function () { return true; } } + ] + }, { + 'a.b': 0, + 'z.z': 0 + }, {}, "$where in the selector - excl"); + +}); + +(function () { + // TODO: Tests for "can selector become true by modifier" are incomplete, + // absent or test the functionality of "not ideal" implementation (test checks + // that certain case always returns true as implementation is incomplete) + // - tests with $and/$or/$nor/$not branches (are absent) + // - more tests with arrays fields and numeric keys (incomplete and test "not + // ideal" implementation) + // - tests when numeric keys actually mean numeric keys, not array indexes + // (are absent) + // - tests with $-operators in the selector (are incomplete and test "not + // ideal" implementation) + + var test = null; // set this global in the beginning of every test + // T - should return true + // F - should return false + function T (sel, mod, desc) { + 
test.isTrue(LocalCollection._canSelectorBecomeTrueByModifier(sel, mod), desc); + } + function F (sel, mod, desc) { + test.isFalse(LocalCollection._canSelectorBecomeTrueByModifier(sel, mod), desc); + } + + Tinytest.add("minimongo - can selector become true by modifier - literals (structured tests)", function (t) { + test = t; + + var selector = { + 'a.b.c': 2, + 'foo.bar': { + z: { y: 1 } + }, + 'foo.baz': [ {ans: 42}, "string", false, undefined ], + 'empty.field': null + }; + + T(selector, {$set:{ 'a.b.c': 2 }}); + F(selector, {$unset:{ 'a': 1 }}); + F(selector, {$unset:{ 'a.b': 1 }}); + F(selector, {$unset:{ 'a.b.c': 1 }}); + T(selector, {$set:{ 'a.b': { c: 2 } }}); + F(selector, {$set:{ 'a.b': {} }}); + T(selector, {$set:{ 'a.b': { c: 2, x: 5 } }}); + F(selector, {$set:{ 'a.b.c.k': 3 }}); + F(selector, {$set:{ 'a.b.c.k': {} }}); + + F(selector, {$unset:{ 'foo': 1 }}); + F(selector, {$unset:{ 'foo.bar': 1 }}); + F(selector, {$unset:{ 'foo.bar.z': 1 }}); + F(selector, {$unset:{ 'foo.bar.z.y': 1 }}); + F(selector, {$set:{ 'foo.bar.x': 1 }}); + F(selector, {$set:{ 'foo.bar': {} }}); + F(selector, {$set:{ 'foo.bar': 3 }}); + T(selector, {$set:{ 'foo.bar': { z: { y: 1 } } }}); + T(selector, {$set:{ 'foo.bar.z': { y: 1 } }}); + T(selector, {$set:{ 'foo.bar.z.y': 1 }}); + + F(selector, {$set:{ 'empty.field': {} }}); + T(selector, {$set:{ 'empty': {} }}); + T(selector, {$set:{ 'empty.field': null }}); + T(selector, {$set:{ 'empty.field': undefined }}); + F(selector, {$set:{ 'empty.field.a': 3 }}); + }); + + Tinytest.add("minimongo - can selector become true by modifier - literals (adhoc tests)", function (t) { + test = t; + T({x:1}, {$set:{x:1}}, "simple set scalar"); + T({x:"a"}, {$set:{x:"a"}}, "simple set scalar"); + T({x:false}, {$set:{x:false}}, "simple set scalar"); + F({x:true}, {$set:{x:false}}, "simple set scalar"); + F({x:2}, {$set:{x:3}}, "simple set scalar"); + + F({'foo.bar.baz': 1, x:1}, {$unset:{'foo.bar.baz': 1}, $set:{x:1}}, "simple unset of the 
interesting path"); + F({'foo.bar.baz': 1, x:1}, {$unset:{'foo.bar': 1}, $set:{x:1}}, "simple unset of the interesting path prefix"); + F({'foo.bar.baz': 1, x:1}, {$unset:{'foo': 1}, $set:{x:1}}, "simple unset of the interesting path prefix"); + F({'foo.bar.baz': 1}, {$unset:{'foo.baz': 1}}, "simple unset of the interesting path prefix"); + F({'foo.bar.baz': 1}, {$unset:{'foo.bar.bar': 1}}, "simple unset of the interesting path prefix"); + }); + + Tinytest.add("minimongo - can selector become true by modifier - regexps", function (t) { + test = t; + + // Regexp + T({ 'foo.bar': /^[0-9]+$/i }, { $set: {'foo.bar': '01233'} }, "set of regexp"); + // XXX this test should be False, should be fixed within improved implementation + T({ 'foo.bar': /^[0-9]+$/i, x: 1 }, { $set: {'foo.bar': '0a1233', x: 1} }, "set of regexp"); + // XXX this test should be False, should be fixed within improved implementation + T({ 'foo.bar': /^[0-9]+$/i, x: 1 }, { $unset: {'foo.bar': 1}, $set: { x: 1 } }, "unset of regexp"); + T({ 'foo.bar': /^[0-9]+$/i, x: 1 }, { $set: { x: 1 } }, "don't touch regexp"); + }); + + Tinytest.add("minimongo - can selector become true by modifier - undefined/null", function (t) { + test = t; + // Nulls / Undefined + T({ 'foo.bar': null }, {$set:{'foo.bar': null}}, "set of null looking for null"); + T({ 'foo.bar': null }, {$set:{'foo.bar': undefined}}, "set of undefined looking for null"); + T({ 'foo.bar': undefined }, {$set:{'foo.bar': null}}, "set of null looking for undefined"); + T({ 'foo.bar': undefined }, {$set:{'foo.bar': undefined}}, "set of undefined looking for undefined"); + T({ 'foo.bar': null }, {$set:{'foo': null}}, "set of null of parent path looking for null"); + F({ 'foo.bar': null }, {$set:{'foo.bar.baz': null}}, "set of null of different path looking for null"); + T({ 'foo.bar': null }, { $unset: { 'foo': 1 } }, "unset the parent"); + T({ 'foo.bar': null }, { $unset: { 'foo.bar': 1 } }, "unset tracked path"); + T({ 'foo.bar': null }, { $set: { 
'foo': 3 } }, "set the parent"); + T({ 'foo.bar': null }, { $set: { 'foo': {baz:1} } }, "set the parent"); + + }); + + Tinytest.add("minimongo - can selector become true by modifier - literals with arrays", function (t) { + test = t; + // These tests are incomplete and in theory they all should return true as we + // don't support any case with numeric fields yet. + T({'a.1.b': 1, x:1}, {$unset:{'a.1.b': 1}, $set:{x:1}}, "unset of array element's field with exactly the same index as selector"); + F({'a.2.b': 1}, {$unset:{'a.1.b': 1}}, "unset of array element's field with different index as selector"); + // This is false, because if you are looking for array but in reality it is an + // object, it just can't get to true. + F({'a.2.b': 1}, {$unset:{'a.b': 1}}, "unset of field while selector is looking for index"); + T({ 'foo.bar': null }, {$set:{'foo.1.bar': null}}, "set array's element's field to null looking for null"); + T({ 'foo.bar': null }, {$set:{'foo.0.bar': 1, 'foo.1.bar': null}}, "set array's element's field to null looking for null"); + // This is false, because there may remain other array elements that match + // but we modified this test as we don't support this case yet + T({'a.b': 1}, {$unset:{'a.1.b': 1}}, "unset of array element's field"); + }); + + Tinytest.add("minimongo - can selector become true by modifier - set an object literal whose fields are selected", function (t) { + test = t; + T({ 'a.b.c': 1 }, { $set: { 'a.b': { c: 1 } } }, "a simple scalar selector and simple set"); + F({ 'a.b.c': 1 }, { $set: { 'a.b': { c: 2 } } }, "a simple scalar selector and simple set to false"); + F({ 'a.b.c': 1 }, { $set: { 'a.b': { d: 1 } } }, "a simple scalar selector and simple set a wrong literal"); + F({ 'a.b.c': 1 }, { $set: { 'a.b': 222 } }, "a simple scalar selector and simple set a wrong type"); + }); + +})(); + diff --git a/packages/minimongo/minimongo_tests.js b/packages/minimongo/minimongo_tests.js index d59ae5385b..0c8dc6e7cd 100644 --- 
a/packages/minimongo/minimongo_tests.js +++ b/packages/minimongo/minimongo_tests.js @@ -2411,460 +2411,3 @@ Tinytest.add("minimongo - $near operator tests", function (test) { }); }); -Tinytest.add("minimongo - modifier affects selector", function (test) { - function testSelectorPaths (sel, paths, desc) { - test.isTrue(_.isEqual(MinimongoTest.getSelectorPaths(sel), paths), desc); - } - - testSelectorPaths({ - foo: { - bar: 3, - baz: 42 - } - }, ['foo'], "literal"); - - testSelectorPaths({ - foo: 42, - bar: 33 - }, ['foo', 'bar'], "literal"); - - testSelectorPaths({ - foo: [ 'something' ], - bar: "asdf" - }, ['foo', 'bar'], "literal"); - - testSelectorPaths({ - a: { $lt: 3 }, - b: "you know, literal", - 'path.is.complicated': { $not: { $regex: 'acme.*corp' } } - }, ['a', 'b', 'path.is.complicated'], "literal + operators"); - - testSelectorPaths({ - $or: [{ 'a.b': 1 }, { 'a.b.c': { $lt: 22 } }, - {$and: [{ 'x.d': { $ne: 5, $gte: 433 } }, { 'a.b': 234 }]}] - }, ['a.b', 'a.b.c', 'x.d'], 'group operators + duplicates'); - - // When top-level value is an object, it is treated as a literal, - // so when you query col.find({ a: { foo: 1, bar: 2 } }) - // it doesn't mean you are looking for anything that has 'a.foo' to be 1 and - // 'a.bar' to be 2, instead you are looking for 'a' to be exatly that object - // with exatly that order of keys. { a: { foo: 1, bar: 2, baz: 3 } } wouldn't - // match it. That's why in this selector 'a' would be important key, not a.foo - // and a.bar. 
- testSelectorPaths({ - a: { - foo: 1, - bar: 2 - }, - 'b.c': { - literal: "object", - but: "we still observe any changes in 'b.c'" - } - }, ['a', 'b.c'], "literal object"); - - function testSelectorAffectedByModifier (sel, mod, yes, desc) { - if (yes) - test.isTrue(LocalCollection._isSelectorAffectedByModifier(sel, mod, desc)); - else - test.isFalse(LocalCollection._isSelectorAffectedByModifier(sel, mod, desc)); - } - - function affected(sel, mod, desc) { - testSelectorAffectedByModifier(sel, mod, 1, desc); - } - function notAffected(sel, mod, desc) { - testSelectorAffectedByModifier(sel, mod, 0, desc); - } - - notAffected({ foo: 0 }, { $set: { bar: 1 } }, "simplest"); - affected({ foo: 0 }, { $set: { foo: 1 } }, "simplest"); - affected({ foo: 0 }, { $set: { 'foo.bar': 1 } }, "simplest"); - notAffected({ 'foo.bar': 0 }, { $set: { 'foo.baz': 1 } }, "simplest"); - affected({ 'foo.bar': 0 }, { $set: { 'foo.1': 1 } }, "simplest"); - affected({ 'foo.bar': 0 }, { $set: { 'foo.2.bar': 1 } }, "simplest"); - - notAffected({ 'foo': 0 }, { $set: { 'foobaz': 1 } }, "correct prefix check"); - notAffected({ 'foobar': 0 }, { $unset: { 'foo': 1 } }, "correct prefix check"); - notAffected({ 'foo.bar': 0 }, { $unset: { 'foob': 1 } }, "correct prefix check"); - - notAffected({ 'foo.Infinity.x': 0 }, { $unset: { 'foo.x': 1 } }, "we convert integer fields correctly"); - notAffected({ 'foo.1e3.x': 0 }, { $unset: { 'foo.x': 1 } }, "we convert integer fields correctly"); - - affected({ 'foo.3.bar': 0 }, { $set: { 'foo.3.bar': 1 } }, "observe for an array element"); - - notAffected({ 'foo.4.bar.baz': 0 }, { $unset: { 'foo.3.bar': 1 } }, "delicate work with numeric fields in selector"); - notAffected({ 'foo.4.bar.baz': 0 }, { $unset: { 'foo.bar': 1 } }, "delicate work with numeric fields in selector"); - affected({ 'foo.4.bar.baz': 0 }, { $unset: { 'foo.4.bar': 1 } }, "delicate work with numeric fields in selector"); - affected({ 'foo.bar.baz': 0 }, { $unset: { 'foo.3.bar': 1 } }, 
"delicate work with numeric fields in selector"); - - affected({ 'foo.0.bar': 0 }, { $set: { 'foo.0.0.bar': 1 } }, "delicate work with nested arrays and selectors by indecies"); -}); - -Tinytest.add("minimongo - selector and projection combination", function (test) { - function testSelProjectionComb (sel, proj, expected, desc) { - test.equal(LocalCollection._combineSelectorAndProjection(sel, proj), expected, desc); - } - - // Test with inclusive projection - testSelProjectionComb({ a: 1, b: 2 }, { b: 1, c: 1, d: 1 }, { a: true, b: true, c: true, d: true }, "simplest incl"); - testSelProjectionComb({ $or: [{ a: 1234, e: {$lt: 5} }], b: 2 }, { b: 1, c: 1, d: 1 }, { a: true, b: true, c: true, d: true, e: true }, "simplest incl, branching"); - testSelProjectionComb({ - 'a.b': { $lt: 3 }, - 'y.0': -1, - 'a.c': 15 - }, { - 'd': 1, - 'z': 1 - }, { - 'a.b': true, - 'y': true, - 'a.c': true, - 'd': true, - 'z': true - }, "multikey paths in selector - incl"); - - testSelProjectionComb({ - foo: 1234, - $and: [{ k: -1 }, { $or: [{ b: 15 }] }] - }, { - 'foo.bar': 1, - 'foo.zzz': 1, - 'b.asdf': 1 - }, { - foo: true, - b: true, - k: true - }, "multikey paths in fields - incl"); - - testSelProjectionComb({ - 'a.b.c': 123, - 'a.b.d': 321, - 'b.c.0': 111, - 'a.e': 12345 - }, { - 'a.b.z': 1, - 'a.b.d.g': 1, - 'c.c.c': 1 - }, { - 'a.b.c': true, - 'a.b.d': true, - 'a.b.z': true, - 'b.c': true, - 'a.e': true, - 'c.c.c': true - }, "multikey both paths - incl"); - - testSelProjectionComb({ - 'a.b.c.d': 123, - 'a.b1.c.d': 421, - 'a.b.c.e': 111 - }, { - 'a.b': 1 - }, { - 'a.b': true, - 'a.b1.c.d': true - }, "shadowing one another - incl"); - - testSelProjectionComb({ - 'a.b': 123, - 'foo.bar': false - }, { - 'a.b.c.d': 1, - 'foo': 1 - }, { - 'a.b': true, - 'foo': true - }, "shadowing one another - incl"); - - testSelProjectionComb({ - 'a.b.c': 1 - }, { - 'a.b.c': 1 - }, { - 'a.b.c': true - }, "same paths - incl"); - - testSelProjectionComb({ - 'x.4.y': 42, - 'z.0.1': 33 - }, { - 'x.x': 1 - 
}, { - 'x.x': true, - 'x.y': true, - 'z': true - }, "numbered keys in selector - incl"); - - testSelProjectionComb({ - 'a.b.c': 42, - $where: function () { return true; } - }, { - 'a.b': 1, - 'z.z': 1 - }, {}, "$where in the selector - incl"); - - testSelProjectionComb({ - $or: [ - {'a.b.c': 42}, - {$where: function () { return true; } } - ] - }, { - 'a.b': 1, - 'z.z': 1 - }, {}, "$where in the selector - incl"); - - // Test with exclusive projection - testSelProjectionComb({ a: 1, b: 2 }, { b: 0, c: 0, d: 0 }, { c: false, d: false }, "simplest excl"); - testSelProjectionComb({ $or: [{ a: 1234, e: {$lt: 5} }], b: 2 }, { b: 0, c: 0, d: 0 }, { c: false, d: false }, "simplest excl, branching"); - testSelProjectionComb({ - 'a.b': { $lt: 3 }, - 'y.0': -1, - 'a.c': 15 - }, { - 'd': 0, - 'z': 0 - }, { - d: false, - z: false - }, "multikey paths in selector - excl"); - - testSelProjectionComb({ - foo: 1234, - $and: [{ k: -1 }, { $or: [{ b: 15 }] }] - }, { - 'foo.bar': 0, - 'foo.zzz': 0, - 'b.asdf': 0 - }, { - }, "multikey paths in fields - excl"); - - testSelProjectionComb({ - 'a.b.c': 123, - 'a.b.d': 321, - 'b.c.0': 111, - 'a.e': 12345 - }, { - 'a.b.z': 0, - 'a.b.d.g': 0, - 'c.c.c': 0 - }, { - 'a.b.z': false, - 'c.c.c': false - }, "multikey both paths - excl"); - - testSelProjectionComb({ - 'a.b.c.d': 123, - 'a.b1.c.d': 421, - 'a.b.c.e': 111 - }, { - 'a.b': 0 - }, { - }, "shadowing one another - excl"); - - testSelProjectionComb({ - 'a.b': 123, - 'foo.bar': false - }, { - 'a.b.c.d': 0, - 'foo': 0 - }, { - }, "shadowing one another - excl"); - - testSelProjectionComb({ - 'a.b.c': 1 - }, { - 'a.b.c': 0 - }, { - }, "same paths - excl"); - - testSelProjectionComb({ - 'a.b': 123, - 'a.c.d': 222, - 'ddd': 123 - }, { - 'a.b': 0, - 'a.c.e': 0, - 'asdf': 0 - }, { - 'a.c.e': false, - 'asdf': false - }, "intercept the selector path - excl"); - - testSelProjectionComb({ - 'a.b.c': 14 - }, { - 'a.b.d': 0 - }, { - 'a.b.d': false - }, "different branches - excl"); - - 
testSelProjectionComb({ - 'a.b.c.d': "124", - 'foo.bar.baz.que': "some value" - }, { - 'a.b.c.d.e': 0, - 'foo.bar': 0 - }, { - }, "excl on incl paths - excl"); - - testSelProjectionComb({ - 'x.4.y': 42, - 'z.0.1': 33 - }, { - 'x.x': 0, - 'x.y': 0 - }, { - 'x.x': false, - }, "numbered keys in selector - excl"); - - testSelProjectionComb({ - 'a.b.c': 42, - $where: function () { return true; } - }, { - 'a.b': 0, - 'z.z': 0 - }, {}, "$where in the selector - excl"); - - testSelProjectionComb({ - $or: [ - {'a.b.c': 42}, - {$where: function () { return true; } } - ] - }, { - 'a.b': 0, - 'z.z': 0 - }, {}, "$where in the selector - excl"); - -}); - -(function () { - // TODO: Tests for "can selector become true by modifier" are incomplete, - // absent or test the functionality of "not ideal" implementation (test checks - // that certain case always returns true as implementation is incomplete) - // - tests with $and/$or/$nor/$not branches (are absent) - // - more tests with arrays fields and numeric keys (incomplete and test "not - // ideal" implementation) - // - tests when numeric keys actually mean numeric keys, not array indexes - // (are absent) - // - tests with $-operators in the selector (are incomplete and test "not - // ideal" implementation) - - var test = null; // set this global in the beginning of every test - // T - should return true - // F - should return false - function T (sel, mod, desc) { - test.isTrue(LocalCollection._canSelectorBecomeTrueByModifier(sel, mod), desc); - } - function F (sel, mod, desc) { - test.isFalse(LocalCollection._canSelectorBecomeTrueByModifier(sel, mod), desc); - } - - Tinytest.add("minimongo - can selector become true by modifier - literals (structured tests)", function (t) { - test = t; - - var selector = { - 'a.b.c': 2, - 'foo.bar': { - z: { y: 1 } - }, - 'foo.baz': [ {ans: 42}, "string", false, undefined ], - 'empty.field': null - }; - - T(selector, {$set:{ 'a.b.c': 2 }}); - F(selector, {$unset:{ 'a': 1 }}); - F(selector, 
{$unset:{ 'a.b': 1 }}); - F(selector, {$unset:{ 'a.b.c': 1 }}); - T(selector, {$set:{ 'a.b': { c: 2 } }}); - F(selector, {$set:{ 'a.b': {} }}); - T(selector, {$set:{ 'a.b': { c: 2, x: 5 } }}); - F(selector, {$set:{ 'a.b.c.k': 3 }}); - F(selector, {$set:{ 'a.b.c.k': {} }}); - - F(selector, {$unset:{ 'foo': 1 }}); - F(selector, {$unset:{ 'foo.bar': 1 }}); - F(selector, {$unset:{ 'foo.bar.z': 1 }}); - F(selector, {$unset:{ 'foo.bar.z.y': 1 }}); - F(selector, {$set:{ 'foo.bar.x': 1 }}); - F(selector, {$set:{ 'foo.bar': {} }}); - F(selector, {$set:{ 'foo.bar': 3 }}); - T(selector, {$set:{ 'foo.bar': { z: { y: 1 } } }}); - T(selector, {$set:{ 'foo.bar.z': { y: 1 } }}); - T(selector, {$set:{ 'foo.bar.z.y': 1 }}); - - F(selector, {$set:{ 'empty.field': {} }}); - T(selector, {$set:{ 'empty': {} }}); - T(selector, {$set:{ 'empty.field': null }}); - T(selector, {$set:{ 'empty.field': undefined }}); - F(selector, {$set:{ 'empty.field.a': 3 }}); - }); - - Tinytest.add("minimongo - can selector become true by modifier - literals (adhoc tests)", function (t) { - test = t; - T({x:1}, {$set:{x:1}}, "simple set scalar"); - T({x:"a"}, {$set:{x:"a"}}, "simple set scalar"); - T({x:false}, {$set:{x:false}}, "simple set scalar"); - F({x:true}, {$set:{x:false}}, "simple set scalar"); - F({x:2}, {$set:{x:3}}, "simple set scalar"); - - F({'foo.bar.baz': 1, x:1}, {$unset:{'foo.bar.baz': 1}, $set:{x:1}}, "simple unset of the interesting path"); - F({'foo.bar.baz': 1, x:1}, {$unset:{'foo.bar': 1}, $set:{x:1}}, "simple unset of the interesting path prefix"); - F({'foo.bar.baz': 1, x:1}, {$unset:{'foo': 1}, $set:{x:1}}, "simple unset of the interesting path prefix"); - F({'foo.bar.baz': 1}, {$unset:{'foo.baz': 1}}, "simple unset of the interesting path prefix"); - F({'foo.bar.baz': 1}, {$unset:{'foo.bar.bar': 1}}, "simple unset of the interesting path prefix"); - }); - - Tinytest.add("minimongo - can selector become true by modifier - regexps", function (t) { - test = t; - - // Regexp - T({ 
'foo.bar': /^[0-9]+$/i }, { $set: {'foo.bar': '01233'} }, "set of regexp"); - // XXX this test should be False, should be fixed within improved implementation - T({ 'foo.bar': /^[0-9]+$/i, x: 1 }, { $set: {'foo.bar': '0a1233', x: 1} }, "set of regexp"); - // XXX this test should be False, should be fixed within improved implementation - T({ 'foo.bar': /^[0-9]+$/i, x: 1 }, { $unset: {'foo.bar': 1}, $set: { x: 1 } }, "unset of regexp"); - T({ 'foo.bar': /^[0-9]+$/i, x: 1 }, { $set: { x: 1 } }, "don't touch regexp"); - }); - - Tinytest.add("minimongo - can selector become true by modifier - undefined/null", function (t) { - test = t; - // Nulls / Undefined - T({ 'foo.bar': null }, {$set:{'foo.bar': null}}, "set of null looking for null"); - T({ 'foo.bar': null }, {$set:{'foo.bar': undefined}}, "set of undefined looking for null"); - T({ 'foo.bar': undefined }, {$set:{'foo.bar': null}}, "set of null looking for undefined"); - T({ 'foo.bar': undefined }, {$set:{'foo.bar': undefined}}, "set of undefined looking for undefined"); - T({ 'foo.bar': null }, {$set:{'foo': null}}, "set of null of parent path looking for null"); - F({ 'foo.bar': null }, {$set:{'foo.bar.baz': null}}, "set of null of different path looking for null"); - T({ 'foo.bar': null }, { $unset: { 'foo': 1 } }, "unset the parent"); - T({ 'foo.bar': null }, { $unset: { 'foo.bar': 1 } }, "unset tracked path"); - T({ 'foo.bar': null }, { $set: { 'foo': 3 } }, "set the parent"); - T({ 'foo.bar': null }, { $set: { 'foo': {baz:1} } }, "set the parent"); - - }); - - Tinytest.add("minimongo - can selector become true by modifier - literals with arrays", function (t) { - test = t; - // These tests are incomplete and in theory they all should return true as we - // don't support any case with numeric fields yet. 
- T({'a.1.b': 1, x:1}, {$unset:{'a.1.b': 1}, $set:{x:1}}, "unset of array element's field with exactly the same index as selector"); - F({'a.2.b': 1}, {$unset:{'a.1.b': 1}}, "unset of array element's field with different index as selector"); - // This is false, because if you are looking for array but in reality it is an - // object, it just can't get to true. - F({'a.2.b': 1}, {$unset:{'a.b': 1}}, "unset of field while selector is looking for index"); - T({ 'foo.bar': null }, {$set:{'foo.1.bar': null}}, "set array's element's field to null looking for null"); - T({ 'foo.bar': null }, {$set:{'foo.0.bar': 1, 'foo.1.bar': null}}, "set array's element's field to null looking for null"); - // This is false, because there may remain other array elements that match - // but we modified this test as we don't support this case yet - T({'a.b': 1}, {$unset:{'a.1.b': 1}}, "unset of array element's field"); - }); - - Tinytest.add("minimongo - can selector become true by modifier - set an object literal whose fields are selected", function (t) { - test = t; - T({ 'a.b.c': 1 }, { $set: { 'a.b': { c: 1 } } }, "a simple scalar selector and simple set"); - F({ 'a.b.c': 1 }, { $set: { 'a.b': { c: 2 } } }, "a simple scalar selector and simple set to false"); - F({ 'a.b.c': 1 }, { $set: { 'a.b': { d: 1 } } }, "a simple scalar selector and simple set a wrong literal"); - F({ 'a.b.c': 1 }, { $set: { 'a.b': 222 } }, "a simple scalar selector and simple set a wrong type"); - }); - -})(); - diff --git a/packages/minimongo/package.js b/packages/minimongo/package.js index 3469e2acc2..e226b80ce0 100644 --- a/packages/minimongo/package.js +++ b/packages/minimongo/package.js @@ -20,13 +20,19 @@ Package.on_use(function (api) { 'observe.js', 'objectid.js' ]); + + // Functionality used only by oplog tailing on the server side + api.add_files([ + 'selector_projection.js', + 'selector_modifier.js' + ], 'server'); }); Package.on_test(function (api) { - api.use('geojson-utils', 'client'); - 
api.use('minimongo', 'client'); + api.use('minimongo', ['client', 'server']); api.use('test-helpers', 'client'); api.use(['tinytest', 'underscore', 'ejson', 'ordered-dict', 'random', 'deps']); api.add_files('minimongo_tests.js', 'client'); + api.add_files('minimongo_server_tests.js', 'server'); }); diff --git a/packages/minimongo/projection.js b/packages/minimongo/projection.js index f3374556fb..a26ebc85c4 100644 --- a/packages/minimongo/projection.js +++ b/packages/minimongo/projection.js @@ -44,55 +44,13 @@ LocalCollection._compileProjection = function (fields) { }; }; -// Knows how to combine a mongo selector and a fields projection to a new fields -// projection taking into account active fields from the passed selector. -// @returns Object - projection object (same as fields option of mongo cursor) -LocalCollection._combineSelectorAndProjection = function (selector, projection) -{ - var selectorPaths = getPathsWithoutNumericKeys(selector); - - // Special case for $where operator in the selector - projection should depend - // on all fields of the document. getSelectorPaths returns a list of paths - // selector depends on. If one of the paths is '' (empty string) representing - // the root or the whole document, complete projection should be returned. 
- if (_.contains(selectorPaths, '')) - return {}; - - var prjDetails = projectionDetails(projection); - var tree = prjDetails.tree; - var mergedProjection = {}; - - // merge the paths to include - tree = pathsToTree(selectorPaths, - function (path) { return true; }, - function (node, path, fullPath) { return true; }, - tree); - mergedProjection = treeToPaths(tree); - if (prjDetails.including) { - // both selector and projection are pointing on fields to include - // so we can just return the merged tree - return mergedProjection; - } else { - // selector is pointing at fields to include - // projection is pointing at fields to exclude - // make sure we don't exclude important paths - var mergedExclProjection = {}; - _.each(mergedProjection, function (incl, path) { - if (!incl) - mergedExclProjection[path] = false; - }); - - return mergedExclProjection; - } -}; - // Traverses the keys of passed projection and constructs a tree where all // leaves are either all True or all False // @returns Object: // - tree - Object - tree representation of keys involved in projection // (exception for '_id' as it is a special case handled separately) // - including - Boolean - "take only certain fields" type of projection -var projectionDetails = function (fields) { +projectionDetails = function (fields) { if (!_.isObject(fields)) throw MinimongoError("fields option must be an object"); @@ -194,19 +152,3 @@ pathsToTree = function (paths, newLeafFn, conflictFn, tree) { return tree; }; -// Returns a set of key paths similar to -// { 'foo.bar': 1, 'a.b.c': 1 } -var treeToPaths = function (tree, prefix) { - prefix = prefix || ''; - var result = {}; - - _.each(tree, function (val, key) { - if (_.isObject(val)) - _.extend(result, treeToPaths(val, prefix + key + '.')); - else - result[prefix + key] = val; - }); - - return result; -}; - diff --git a/packages/minimongo/selector.js b/packages/minimongo/selector.js index 1b804a358e..d6410ad2fb 100644 --- a/packages/minimongo/selector.js +++ 
b/packages/minimongo/selector.js @@ -95,7 +95,7 @@ var compileValueSelector = function (valueSelector, selector, cursor) { }; // XXX can factor out common logic below -var LOGICAL_OPERATORS = { +LOGICAL_OPERATORS = { "$and": function(subSelector, operators, cursor) { if (!isArray(subSelector) || _.isEmpty(subSelector)) throw Error("$and/$or/$nor must be nonempty array"); @@ -786,140 +786,3 @@ LocalCollection._compileSort = function (spec, cursor) { }; }; -// Returns true if the modifier applied to some document may change the result -// of matching the document by selector -// The modifier is always in a form of Object: -// - $set -// - 'a.b.22.z': value -// - 'foo.bar': 42 -// - $unset -// - 'abc.d': 1 -LocalCollection._isSelectorAffectedByModifier = function (selector, modifier) { - // safe check for $set/$unset being objects - modifier = _.extend({ $set: {}, $unset: {} }, modifier); - var modifiedPaths = _.keys(modifier.$set).concat(_.keys(modifier.$unset)); - var meaningfulPaths = getPaths(selector); - - return _.any(modifiedPaths, function (path) { - var mod = path.split('.'); - return _.any(meaningfulPaths, function (meaningfulPath) { - var sel = meaningfulPath.split('.'); - var i = 0, j = 0; - - while (i < sel.length && j < mod.length) { - if (numericKey(sel[i]) && numericKey(mod[j])) { - // foo.4.bar selector affected by foo.4 modifier - // foo.3.bar selector unaffected by foo.4 modifier - if (sel[i] === mod[j]) - i++, j++; - else - return false; - } else if (numericKey(sel[i])) { - // foo.4.bar selector unaffected by foo.bar modifier - return false; - } else if (numericKey(mod[j])) { - j++; - } else if (sel[i] === mod[j]) - i++, j++; - else - return false; - } - - // One is a prefix of another, taking numeric fields into account - return true; - }); - }); -}; - -getPathsWithoutNumericKeys = function (sel) { - return _.map(getPaths(sel), function (path) { - return _.reject(path.split('.'), numericKey).join('.'); - }); -}; - -// @param selector - Object: 
MongoDB selector. Currently doesn't support -// $-operators and arrays well. -// @param modifier - Object: MongoDB-styled modifier with `$set`s and `$unsets` -// only. (assumed to come from oplog) -// @returns - Boolean: if after applying the modifier, selector can start -// accepting the modified value. -LocalCollection._canSelectorBecomeTrueByModifier = function (selector, modifier) -{ - if (!LocalCollection._isSelectorAffectedByModifier(selector, modifier)) - return false; - - modifier = _.extend({$set:{}, $unset:{}}, modifier); - - if (_.any(_.keys(selector), pathHasNumericKeys) || - _.any(_.keys(modifier.$unset), pathHasNumericKeys) || - _.any(_.keys(modifier.$set), pathHasNumericKeys)) - return true; - - if (!isLiteralSelector(selector)) - return true; - - // convert a selector into an object matching the selector - // { 'a.b': { ans: 42 }, 'foo.bar': null, 'foo.baz': "something" } - // => { a: { b: { ans: 42 } }, foo: { bar: null, baz: "something" } } - var doc = pathsToTree(_.keys(selector), - function (path) { return selector[path]; }, - _.identity /*conflict resolution is no resolution*/); - - var selectorFn = LocalCollection._compileSelector(selector); - - try { - LocalCollection._modify(doc, modifier); - } catch (e) { - // Couldn't set a property on a field which is a scalar or null in the - // selector. - // Example: - // real document: { 'a.b': 3 } - // selector: { 'a': 12 } - // converted selector (ideal document): { 'a': 12 } - // modifier: { $set: { 'a.b': 4 } } - // We don't know what real document was like but from the error raised by - // $set on a scalar field we can reason that the structure of real document - // is completely different. 
- if (e.name === "MinimongoError" && e.setPropertyError) - return false; - throw e; - } - - return selectorFn(doc); -}; - -// Returns a list of key paths the given selector is looking for -var getPaths = MinimongoTest.getSelectorPaths = function (sel) { - return _.chain(sel).map(function (v, k) { - // we don't know how to handle $where because it can be anything - if (k === "$where") - return ''; // matches everything - // we branch from $or/$and/$nor operator - if (_.has(LOGICAL_OPERATORS, k)) - return _.map(v, getPaths); - // the value is a literal or some comparison operator - return k; - }).flatten().uniq().value(); -}; - -function pathHasNumericKeys (path) { - return _.any(path.split('.'), numericKey); -} - -// string can be converted to integer -function numericKey (s) { - return /^[0-9]+$/.test(s); -} - -function isLiteralSelector (selector) { - return _.all(selector, function (subSelector, keyPath) { - if (keyPath.substr(0, 1) === "$" || _.isRegExp(subSelector)) - return false; - if (!_.isObject(subSelector) || _.isArray(subSelector)) - return true; - return _.all(subSelector, function (value, key) { - return key.substr(0, 1) !== "$"; - }); - }); -} - diff --git a/packages/minimongo/selector_modifier.js b/packages/minimongo/selector_modifier.js new file mode 100644 index 0000000000..c8d49d65ba --- /dev/null +++ b/packages/minimongo/selector_modifier.js @@ -0,0 +1,137 @@ +// Returns true if the modifier applied to some document may change the result +// of matching the document by selector +// The modifier is always in a form of Object: +// - $set +// - 'a.b.22.z': value +// - 'foo.bar': 42 +// - $unset +// - 'abc.d': 1 +LocalCollection._isSelectorAffectedByModifier = function (selector, modifier) { + // safe check for $set/$unset being objects + modifier = _.extend({ $set: {}, $unset: {} }, modifier); + var modifiedPaths = _.keys(modifier.$set).concat(_.keys(modifier.$unset)); + var meaningfulPaths = getPaths(selector); + + return _.any(modifiedPaths, 
function (path) { + var mod = path.split('.'); + return _.any(meaningfulPaths, function (meaningfulPath) { + var sel = meaningfulPath.split('.'); + var i = 0, j = 0; + + while (i < sel.length && j < mod.length) { + if (numericKey(sel[i]) && numericKey(mod[j])) { + // foo.4.bar selector affected by foo.4 modifier + // foo.3.bar selector unaffected by foo.4 modifier + if (sel[i] === mod[j]) + i++, j++; + else + return false; + } else if (numericKey(sel[i])) { + // foo.4.bar selector unaffected by foo.bar modifier + return false; + } else if (numericKey(mod[j])) { + j++; + } else if (sel[i] === mod[j]) + i++, j++; + else + return false; + } + + // One is a prefix of another, taking numeric fields into account + return true; + }); + }); +}; + +getPathsWithoutNumericKeys = function (sel) { + return _.map(getPaths(sel), function (path) { + return _.reject(path.split('.'), numericKey).join('.'); + }); +}; + +// @param selector - Object: MongoDB selector. Currently doesn't support +// $-operators and arrays well. +// @param modifier - Object: MongoDB-styled modifier with `$set`s and `$unsets` +// only. (assumed to come from oplog) +// @returns - Boolean: if after applying the modifier, selector can start +// accepting the modified value. 
+LocalCollection._canSelectorBecomeTrueByModifier = function (selector, modifier) +{ + if (!LocalCollection._isSelectorAffectedByModifier(selector, modifier)) + return false; + + modifier = _.extend({$set:{}, $unset:{}}, modifier); + + if (_.any(_.keys(selector), pathHasNumericKeys) || + _.any(_.keys(modifier.$unset), pathHasNumericKeys) || + _.any(_.keys(modifier.$set), pathHasNumericKeys)) + return true; + + if (!isLiteralSelector(selector)) + return true; + + // convert a selector into an object matching the selector + // { 'a.b': { ans: 42 }, 'foo.bar': null, 'foo.baz': "something" } + // => { a: { b: { ans: 42 } }, foo: { bar: null, baz: "something" } } + var doc = pathsToTree(_.keys(selector), + function (path) { return selector[path]; }, + _.identity /*conflict resolution is no resolution*/); + + var selectorFn = LocalCollection._compileSelector(selector); + + try { + LocalCollection._modify(doc, modifier); + } catch (e) { + // Couldn't set a property on a field which is a scalar or null in the + // selector. + // Example: + // real document: { 'a.b': 3 } + // selector: { 'a': 12 } + // converted selector (ideal document): { 'a': 12 } + // modifier: { $set: { 'a.b': 4 } } + // We don't know what real document was like but from the error raised by + // $set on a scalar field we can reason that the structure of real document + // is completely different. 
+ if (e.name === "MinimongoError" && e.setPropertyError) + return false; + throw e; + } + + return selectorFn(doc); +}; + +// Returns a list of key paths the given selector is looking for +var getPaths = MinimongoTest.getSelectorPaths = function (sel) { + return _.chain(sel).map(function (v, k) { + // we don't know how to handle $where because it can be anything + if (k === "$where") + return ''; // matches everything + // we branch from $or/$and/$nor operator + if (_.has(LOGICAL_OPERATORS, k)) + return _.map(v, getPaths); + // the value is a literal or some comparison operator + return k; + }).flatten().uniq().value(); +}; + +function pathHasNumericKeys (path) { + return _.any(path.split('.'), numericKey); +} + +// string can be converted to integer +function numericKey (s) { + return /^[0-9]+$/.test(s); +} + +function isLiteralSelector (selector) { + return _.all(selector, function (subSelector, keyPath) { + if (keyPath.substr(0, 1) === "$" || _.isRegExp(subSelector)) + return false; + if (!_.isObject(subSelector) || _.isArray(subSelector)) + return true; + return _.all(subSelector, function (value, key) { + return key.substr(0, 1) !== "$"; + }); + }); +} + diff --git a/packages/minimongo/selector_projection.js b/packages/minimongo/selector_projection.js new file mode 100644 index 0000000000..ece29b8470 --- /dev/null +++ b/packages/minimongo/selector_projection.js @@ -0,0 +1,58 @@ +// Knows how to combine a mongo selector and a fields projection to a new fields +// projection taking into account active fields from the passed selector. +// @returns Object - projection object (same as fields option of mongo cursor) +LocalCollection._combineSelectorAndProjection = function (selector, projection) +{ + var selectorPaths = getPathsWithoutNumericKeys(selector); + + // Special case for $where operator in the selector - projection should depend + // on all fields of the document. getSelectorPaths returns a list of paths + // selector depends on. 
If one of the paths is '' (empty string) representing + // the root or the whole document, complete projection should be returned. + if (_.contains(selectorPaths, '')) + return {}; + + var prjDetails = projectionDetails(projection); + var tree = prjDetails.tree; + var mergedProjection = {}; + + // merge the paths to include + tree = pathsToTree(selectorPaths, + function (path) { return true; }, + function (node, path, fullPath) { return true; }, + tree); + mergedProjection = treeToPaths(tree); + if (prjDetails.including) { + // both selector and projection are pointing on fields to include + // so we can just return the merged tree + return mergedProjection; + } else { + // selector is pointing at fields to include + // projection is pointing at fields to exclude + // make sure we don't exclude important paths + var mergedExclProjection = {}; + _.each(mergedProjection, function (incl, path) { + if (!incl) + mergedExclProjection[path] = false; + }); + + return mergedExclProjection; + } +}; + +// Returns a set of key paths similar to +// { 'foo.bar': 1, 'a.b.c': 1 } +var treeToPaths = function (tree, prefix) { + prefix = prefix || ''; + var result = {}; + + _.each(tree, function (val, key) { + if (_.isObject(val)) + _.extend(result, treeToPaths(val, prefix + key + '.')); + else + result[prefix + key] = val; + }); + + return result; +}; + From 97043de7e408d4cd312ed2c201a91ceed1f3a13c Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Tue, 26 Nov 2013 16:19:09 -0800 Subject: [PATCH 126/190] Move LOGICAL_OPERATORS back to file scope --- packages/minimongo/selector.js | 2 +- packages/minimongo/selector_modifier.js | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/minimongo/selector.js b/packages/minimongo/selector.js index d6410ad2fb..e077b9363c 100644 --- a/packages/minimongo/selector.js +++ b/packages/minimongo/selector.js @@ -95,7 +95,7 @@ var compileValueSelector = function (valueSelector, selector, cursor) { }; // XXX can factor out common 
logic below -LOGICAL_OPERATORS = { +var LOGICAL_OPERATORS = { "$and": function(subSelector, operators, cursor) { if (!isArray(subSelector) || _.isEmpty(subSelector)) throw Error("$and/$or/$nor must be nonempty array"); diff --git a/packages/minimongo/selector_modifier.js b/packages/minimongo/selector_modifier.js index c8d49d65ba..6e6a65f3b8 100644 --- a/packages/minimongo/selector_modifier.js +++ b/packages/minimongo/selector_modifier.js @@ -107,7 +107,7 @@ var getPaths = MinimongoTest.getSelectorPaths = function (sel) { if (k === "$where") return ''; // matches everything // we branch from $or/$and/$nor operator - if (_.has(LOGICAL_OPERATORS, k)) + if (_.contains(['$or', '$and', '$nor'], k)) return _.map(v, getPaths); // the value is a literal or some comparison operator return k; From 67f9ce8b8435b83754600bc1c79e3fa97b153553 Mon Sep 17 00:00:00 2001 From: David Glasser Date: Tue, 26 Nov 2013 16:27:19 -0800 Subject: [PATCH 127/190] stream ALL initial adds simplify internal state of ObserveMultiplexer --- packages/mongo-livedata/observe_multiplex.js | 90 +++++++------------- 1 file changed, 32 insertions(+), 58 deletions(-) diff --git a/packages/mongo-livedata/observe_multiplex.js b/packages/mongo-livedata/observe_multiplex.js index ecdea7a379..8aa41ac6eb 100644 --- a/packages/mongo-livedata/observe_multiplex.js +++ b/packages/mongo-livedata/observe_multiplex.js @@ -13,15 +13,7 @@ ObserveMultiplexer = function (options) { self._onStop = options.onStop || function () {}; self._queue = new Meteor._SynchronousQueue(); self._handles = {}; - self._ready = false; - self._becomingReady = false; self._readyFuture = new Future; - // Any handles added between creation and the first doc being added (or the - // cursor being made ready while empty) get special handling: their adds get - // delivered immediately instead of waiting for ready. This is so that new - // queries get their results streamed to the user rather than waiting until - // the whole query is done. 
- self._initialHandles = {}; self._cache = new LocalCollection._CachingChangeObserver({ ordered: options.ordered}); // Number of addHandleAndSendInitialAdds tasks scheduled but not yet @@ -54,15 +46,13 @@ _.extend(ObserveMultiplexer.prototype, { self._queue.runTask(function () { self._handles[handle._id] = handle; - if (self._ready) { - self._sendAdds(handle); - } else if (self._cache.docs.empty()) { - self._initialHandles[handle._id] = handle; - } + // Send out whatever adds we have so far (whether or not we the + // multiplexer is ready). + self._sendAdds(handle); --self._addHandleTasksScheduledButNotPerformed; }); // *outside* the task, since otherwise we'd deadlock - self._waitUntilReady(); + self._readyFuture.wait(); }, // Remove an observe handle. If it was the last observe handle, call the @@ -77,7 +67,7 @@ _.extend(ObserveMultiplexer.prototype, { // This should not be possible: you can only call removeHandle by having // access to the ObserveHandle, which isn't returned to user code until the // multiplex is ready. - if (!self._ready || self._initialHandles) + if (!self._ready()) throw new Error("Can't remove handles until the multiplex is ready"); delete self._handles[id]; @@ -92,55 +82,41 @@ _.extend(ObserveMultiplexer.prototype, { }, _stop: function () { var self = this; + // It shouldn't be possible for us to stop when all our handles still + // haven't been returned from observeChanges! + if (!self._ready()) + throw Error("surprising _stop: not ready"); + // Call stop callback (which kills the underlying process which sends us // callbacks and removes us from the connection's dictionary). self._onStop(); Package.facts && Package.facts.Facts.incrementServerFact( "mongo-livedata", "observe-multiplexers", -1); + // Cause future addHandleAndSendInitialAdds calls to throw (but the onStop // callback should make our connection forget about us). 
self._handles = null; - // It shouldn't be possible for us to stop when all our handles still - // haven't been returned from observeChanges! - if (!self._ready) - throw Error("surprising _stop: not ready"); - if (!self._readyFuture.isResolved()) - throw Error("surprising _stop: unresolved"); }, - _waitUntilReady: function (handle) { - var self = this; - self._readyFuture.wait(); - }, - // Sends initial adds to all the handles we know about so far. Does not block. + // Allows all addHandleAndSendInitialAdds calls to return, once all preceding + // adds have been processed. Does not block. ready: function () { var self = this; - self._becomingReady = true; self._queue.queueTask(function () { - if (self._ready) + if (self._ready()) throw Error("can't make ObserveMultiplex ready twice!"); - // We can assume that removeHandle isn't called during this loop because - // you can't stop a handle until the synchronous bit is done. (If it is, - // removeHandle will throw due to _ready being false.) - _.each(self._handles, function (handle, handleId) { - // If this was an "initial handle", we already sent its adds. - if (_.has(self._initialHandles, handleId)) - return; - self._sendAdds(handle); - }); - self._initialHandles = null; - self._becomingReady = false; - self._ready = true; self._readyFuture.return(); }); }, // Calls "cb" once the effects of all "ready", "addHandleAndSendInitialAdds" // and observe callbacks which came before this call have been propagated to - // all handles. + // all handles. "ready" must have already been called on this multiplexer. 
onFlush: function (cb) { var self = this; - if (!self._ready && !self._becomingReady) - throw Error("can only call onFlush on a multiplexer that will be ready"); - self._queue.queueTask(cb); + self._queue.queueTask(function () { + if (!self._ready()) + throw Error("only call onFlush on a multiplexer that will be ready"); + cb(); + }); }, callbackNames: function () { var self = this; @@ -149,6 +125,9 @@ _.extend(ObserveMultiplexer.prototype, { else return ["added", "changed", "removed"]; }, + _ready: function () { + return this._readyFuture.isResolved(); + }, _applyCallback: function (callbackName, args) { var self = this; self._queue.queueTask(function () { @@ -159,15 +138,11 @@ _.extend(ObserveMultiplexer.prototype, { // though. self._cache.applyChange[callbackName].apply(null, EJSON.clone(args)); - var handleIds = _.keys(self._handles); - // If we haven't finished the initial adds, then the only callbacks that - // we multiplex out are those to the "initial handles": handles that got - // added before any initial adds were received. (This allows us to stream - // the first handle's adds out rather than buffering them until ready().) - if (!self._ready) { - if (callbackName !== 'added' && callbackName !== 'addedBefore') - throw new Error("Got " + callbackName + " during initial adds"); - handleIds = _.keys(self._initialHandles); + // If we haven't finished the initial adds, then we should only be getting + // adds. + if (!self._ready() && + (callbackName !== 'added' && callbackName !== 'addedBefore')) { + throw new Error("Got " + callbackName + " during initial adds"); } // Now multiplex the callbacks out to all observe handles. It's OK if @@ -175,7 +150,7 @@ _.extend(ObserveMultiplexer.prototype, { // can continue until these are done. (But we do have to be careful to not // use a handle that got removed, because removeHandle does not use the // queue; thus, we iterate over an array of keys that we control.) 
- _.each(handleIds, function (handleId) { + _.each(_.keys(self._handles), function (handleId) { var handle = self._handles[handleId]; if (!handle) return; @@ -187,10 +162,9 @@ _.extend(ObserveMultiplexer.prototype, { }, // Sends initial adds to a handle. It should only be called from within a task - // (either the task that is processing the ready() call or the task that is - // processing the addHandleAndSendInitialAdds call). It synchronously invokes - // the handle's added or addedBefore; there's no need to flush the queue - // afterwards to ensure that the callbacks get out. + // (the task that is processing the addHandleAndSendInitialAdds call). It + // synchronously invokes the handle's added or addedBefore; there's no need to + // flush the queue afterwards to ensure that the callbacks get out. _sendAdds: function (handle) { var self = this; if (self._queue.safeToRunTask()) From 4fa09c32cdc5a63ade9038f0e773f8ddfe35fc58 Mon Sep 17 00:00:00 2001 From: David Glasser Date: Tue, 26 Nov 2013 16:31:18 -0800 Subject: [PATCH 128/190] extract MongoPollster to its own file --- packages/mongo-livedata/mongo_driver.js | 186 ---------------------- packages/mongo-livedata/mongo_pollster.js | 185 +++++++++++++++++++++ packages/mongo-livedata/package.js | 2 +- 3 files changed, 186 insertions(+), 187 deletions(-) create mode 100644 packages/mongo-livedata/mongo_pollster.js diff --git a/packages/mongo-livedata/mongo_driver.js b/packages/mongo-livedata/mongo_driver.js index f382ae9af9..e4010e9740 100644 --- a/packages/mongo-livedata/mongo_driver.js +++ b/packages/mongo-livedata/mongo_driver.js @@ -1231,192 +1231,6 @@ forEachTrigger = function (cursorDescription, triggerCallback) { } }; -var MongoPollster = function (cursorDescription, mongoHandle, ordered, - multiplexer, testOnlyPollCallback) { - var self = this; - - self._cursorDescription = cursorDescription; - self._mongoHandle = mongoHandle; - self._ordered = ordered; - self._multiplexer = multiplexer; - self._stopCallbacks 
= []; - self._stopped = false; - - // This constructor cannot yield, so we don't create the synchronousCursor yet - // (since that can yield). - self._synchronousCursor = null; - - // previous results snapshot. on each poll cycle, diffs against - // results drives the callbacks. - self._results = null; - - // The number of _pollMongo calls that have been added to self._taskQueue but - // have not started running. Used to make sure we never schedule more than one - // _pollMongo (other than possibly the one that is currently running). It's - // also used by _suspendPolling to pretend there's a poll scheduled. Usually, - // it's either 0 (for "no polls scheduled other than maybe one currently - // running") or 1 (for "a poll scheduled that isn't running yet"), but it can - // also be 2 if incremented by _suspendPolling. - self._pollsScheduledButNotStarted = 0; - self._pendingWrites = []; // people to notify when polling completes - - // Make sure to create a separately throttled function for each MongoPollster - // object. - self._ensurePollIsScheduled = _.throttle( - self._unthrottledEnsurePollIsScheduled, 50 /* ms */); - - // XXX figure out if we still need a queue - self._taskQueue = new Meteor._SynchronousQueue(); - - var listenersHandle = listenAll( - cursorDescription, function (notification, complete) { - // When someone does a transaction that might affect us, schedule a poll - // of the database. If that transaction happens inside of a write fence, - // block the fence until we've polled and notified observers. - var fence = DDPServer._CurrentWriteFence.get(); - if (fence) - self._pendingWrites.push(fence.beginWrite()); - // Ensure a poll is scheduled... but if we already know that one is, - // don't hit the throttled _ensurePollIsScheduled function (which might - // lead to us calling it unnecessarily in 50ms). 
- if (self._pollsScheduledButNotStarted === 0) - self._ensurePollIsScheduled(); - complete(); - } - ); - self._stopCallbacks.push(function () { listenersHandle.stop(); }); - - // every once and a while, poll even if we don't think we're dirty, for - // eventual consistency with database writes from outside the Meteor - // universe. - // - // For testing, there's an undocumented callback argument to observeChanges - // which disables time-based polling and gets called at the beginning of each - // poll. - if (testOnlyPollCallback) { - self._testOnlyPollCallback = testOnlyPollCallback; - } else { - var intervalHandle = Meteor.setInterval( - _.bind(self._ensurePollIsScheduled, self), 10 * 1000); - self._stopCallbacks.push(function () { - Meteor.clearInterval(intervalHandle); - }); - } - - // Make sure we actually poll soon! - self._unthrottledEnsurePollIsScheduled(); - - Package.facts && Package.facts.Facts.incrementServerFact( - "mongo-livedata", "mongo-pollsters", 1); -}; - -_.extend(MongoPollster.prototype, { - // This is always called through _.throttle (except once at startup). - _unthrottledEnsurePollIsScheduled: function () { - var self = this; - if (self._pollsScheduledButNotStarted > 0) - return; - ++self._pollsScheduledButNotStarted; - self._taskQueue.queueTask(function () { - self._pollMongo(); - }); - }, - - // test-only interface for controlling polling. - // - // _suspendPolling blocks until any currently running and scheduled polls are - // done, and prevents any further polls from being scheduled. (new - // ObserveHandles can be added and receive their initial added callbacks, - // though.) - // - // _resumePolling immediately polls, and allows further polls to occur. - _suspendPolling: function() { - var self = this; - // Pretend that there's another poll scheduled (which will prevent - // _ensurePollIsScheduled from queueing any more polls). - ++self._pollsScheduledButNotStarted; - // Now block until all currently running or scheduled polls are done. 
- self._taskQueue.runTask(function() {}); - - // Confirm that there is only one "poll" (the fake one we're pretending to - // have) scheduled. - if (self._pollsScheduledButNotStarted !== 1) - throw new Error("_pollsScheduledButNotStarted is " + - self._pollsScheduledButNotStarted); - }, - _resumePolling: function() { - var self = this; - // We should be in the same state as in the end of _suspendPolling. - if (self._pollsScheduledButNotStarted !== 1) - throw new Error("_pollsScheduledButNotStarted is " + - self._pollsScheduledButNotStarted); - // Run a poll synchronously (which will counteract the - // ++_pollsScheduledButNotStarted from _suspendPolling). - self._taskQueue.runTask(function () { - self._pollMongo(); - }); - }, - - _pollMongo: function () { - var self = this; - --self._pollsScheduledButNotStarted; - - var first = false; - if (!self._results) { - first = true; - // XXX maybe use _IdMap/OrderedDict instead? - self._results = self.ordered ? [] : {}; - } - - self._testOnlyPollCallback && self._testOnlyPollCallback(); - - // Save the list of pending writes which this round will commit. - var writesForCycle = self._pendingWrites; - self._pendingWrites = []; - - // Get the new query results. (These calls can yield.) - if (self._synchronousCursor) { - self._synchronousCursor.rewind(); - } else { - self._synchronousCursor = self._mongoHandle._createSynchronousCursor( - self._cursorDescription); - } - var newResults = self._synchronousCursor.getRawObjects(self._ordered); - var oldResults = self._results; - - // Run diffs. (This can yield too.) - if (!self._stopped) { - LocalCollection._diffQueryChanges( - self._ordered, oldResults, newResults, self._multiplexer); - } - - // Replace self._results atomically. - self._results = newResults; - - // Signals the multiplexer to call all initial adds. 
- if (first) - self._multiplexer.ready(); - - // Once the ObserveMultiplexer has processed everything we've done in this - // round, mark all the writes which existed before this call as - // commmitted. (If new writes have shown up in the meantime, there'll - // already be another _pollMongo task scheduled.) - self._multiplexer.onFlush(function () { - _.each(writesForCycle, function (w) { - w.committed(); - }); - }); - }, - - stop: function () { - var self = this; - self._stopped = true; - _.each(self._stopCallbacks, function (c) { c(); }); - Package.facts && Package.facts.Facts.incrementServerFact( - "mongo-livedata", "mongo-pollsters", -1); - } -}); - // observeChanges for tailable cursors on capped collections. // // Some differences from normal cursors: diff --git a/packages/mongo-livedata/mongo_pollster.js b/packages/mongo-livedata/mongo_pollster.js new file mode 100644 index 0000000000..b956d20cb5 --- /dev/null +++ b/packages/mongo-livedata/mongo_pollster.js @@ -0,0 +1,185 @@ +MongoPollster = function (cursorDescription, mongoHandle, ordered, + multiplexer, testOnlyPollCallback) { + var self = this; + + self._cursorDescription = cursorDescription; + self._mongoHandle = mongoHandle; + self._ordered = ordered; + self._multiplexer = multiplexer; + self._stopCallbacks = []; + self._stopped = false; + + // This constructor cannot yield, so we don't create the synchronousCursor yet + // (since that can yield). + self._synchronousCursor = null; + + // previous results snapshot. on each poll cycle, diffs against + // results drives the callbacks. + self._results = null; + + // The number of _pollMongo calls that have been added to self._taskQueue but + // have not started running. Used to make sure we never schedule more than one + // _pollMongo (other than possibly the one that is currently running). It's + // also used by _suspendPolling to pretend there's a poll scheduled. 
Usually, + // it's either 0 (for "no polls scheduled other than maybe one currently + // running") or 1 (for "a poll scheduled that isn't running yet"), but it can + // also be 2 if incremented by _suspendPolling. + self._pollsScheduledButNotStarted = 0; + self._pendingWrites = []; // people to notify when polling completes + + // Make sure to create a separately throttled function for each MongoPollster + // object. + self._ensurePollIsScheduled = _.throttle( + self._unthrottledEnsurePollIsScheduled, 50 /* ms */); + + // XXX figure out if we still need a queue + self._taskQueue = new Meteor._SynchronousQueue(); + + var listenersHandle = listenAll( + cursorDescription, function (notification, complete) { + // When someone does a transaction that might affect us, schedule a poll + // of the database. If that transaction happens inside of a write fence, + // block the fence until we've polled and notified observers. + var fence = DDPServer._CurrentWriteFence.get(); + if (fence) + self._pendingWrites.push(fence.beginWrite()); + // Ensure a poll is scheduled... but if we already know that one is, + // don't hit the throttled _ensurePollIsScheduled function (which might + // lead to us calling it unnecessarily in 50ms). + if (self._pollsScheduledButNotStarted === 0) + self._ensurePollIsScheduled(); + complete(); + } + ); + self._stopCallbacks.push(function () { listenersHandle.stop(); }); + + // every once and a while, poll even if we don't think we're dirty, for + // eventual consistency with database writes from outside the Meteor + // universe. + // + // For testing, there's an undocumented callback argument to observeChanges + // which disables time-based polling and gets called at the beginning of each + // poll. 
+ if (testOnlyPollCallback) { + self._testOnlyPollCallback = testOnlyPollCallback; + } else { + var intervalHandle = Meteor.setInterval( + _.bind(self._ensurePollIsScheduled, self), 10 * 1000); + self._stopCallbacks.push(function () { + Meteor.clearInterval(intervalHandle); + }); + } + + // Make sure we actually poll soon! + self._unthrottledEnsurePollIsScheduled(); + + Package.facts && Package.facts.Facts.incrementServerFact( + "mongo-livedata", "mongo-pollsters", 1); +}; + +_.extend(MongoPollster.prototype, { + // This is always called through _.throttle (except once at startup). + _unthrottledEnsurePollIsScheduled: function () { + var self = this; + if (self._pollsScheduledButNotStarted > 0) + return; + ++self._pollsScheduledButNotStarted; + self._taskQueue.queueTask(function () { + self._pollMongo(); + }); + }, + + // test-only interface for controlling polling. + // + // _suspendPolling blocks until any currently running and scheduled polls are + // done, and prevents any further polls from being scheduled. (new + // ObserveHandles can be added and receive their initial added callbacks, + // though.) + // + // _resumePolling immediately polls, and allows further polls to occur. + _suspendPolling: function() { + var self = this; + // Pretend that there's another poll scheduled (which will prevent + // _ensurePollIsScheduled from queueing any more polls). + ++self._pollsScheduledButNotStarted; + // Now block until all currently running or scheduled polls are done. + self._taskQueue.runTask(function() {}); + + // Confirm that there is only one "poll" (the fake one we're pretending to + // have) scheduled. + if (self._pollsScheduledButNotStarted !== 1) + throw new Error("_pollsScheduledButNotStarted is " + + self._pollsScheduledButNotStarted); + }, + _resumePolling: function() { + var self = this; + // We should be in the same state as in the end of _suspendPolling. 
+ if (self._pollsScheduledButNotStarted !== 1) + throw new Error("_pollsScheduledButNotStarted is " + + self._pollsScheduledButNotStarted); + // Run a poll synchronously (which will counteract the + // ++_pollsScheduledButNotStarted from _suspendPolling). + self._taskQueue.runTask(function () { + self._pollMongo(); + }); + }, + + _pollMongo: function () { + var self = this; + --self._pollsScheduledButNotStarted; + + var first = false; + if (!self._results) { + first = true; + // XXX maybe use _IdMap/OrderedDict instead? + self._results = self.ordered ? [] : {}; + } + + self._testOnlyPollCallback && self._testOnlyPollCallback(); + + // Save the list of pending writes which this round will commit. + var writesForCycle = self._pendingWrites; + self._pendingWrites = []; + + // Get the new query results. (These calls can yield.) + if (self._synchronousCursor) { + self._synchronousCursor.rewind(); + } else { + self._synchronousCursor = self._mongoHandle._createSynchronousCursor( + self._cursorDescription); + } + var newResults = self._synchronousCursor.getRawObjects(self._ordered); + var oldResults = self._results; + + // Run diffs. (This can yield too.) + if (!self._stopped) { + LocalCollection._diffQueryChanges( + self._ordered, oldResults, newResults, self._multiplexer); + } + + // Replace self._results atomically. + self._results = newResults; + + // Signals the multiplexer to call all initial adds. + if (first) + self._multiplexer.ready(); + + // Once the ObserveMultiplexer has processed everything we've done in this + // round, mark all the writes which existed before this call as + // commmitted. (If new writes have shown up in the meantime, there'll + // already be another _pollMongo task scheduled.) 
+ self._multiplexer.onFlush(function () { + _.each(writesForCycle, function (w) { + w.committed(); + }); + }); + }, + + stop: function () { + var self = this; + self._stopped = true; + _.each(self._stopCallbacks, function (c) { c(); }); + Package.facts && Package.facts.Facts.incrementServerFact( + "mongo-livedata", "mongo-pollsters", -1); + } +}); diff --git a/packages/mongo-livedata/package.js b/packages/mongo-livedata/package.js index 6cb718001a..2b0061ef15 100644 --- a/packages/mongo-livedata/package.js +++ b/packages/mongo-livedata/package.js @@ -43,7 +43,7 @@ Package.on_use(function (api) { api.export('MongoTest', 'server', {testOnly: true}); api.add_files(['doc_fetcher.js', 'mongo_driver.js', 'observe_multiplex.js', - 'oplog.js'], 'server'); + 'mongo_pollster.js', 'oplog.js'], 'server'); api.add_files('local_collection_driver.js', ['client', 'server']); api.add_files('remote_collection_driver.js', 'server'); api.add_files('collection.js', ['client', 'server']); From a6fc84aed8b2f2223fb00c2e3dc06274a3b0999f Mon Sep 17 00:00:00 2001 From: David Glasser Date: Tue, 26 Nov 2013 16:36:52 -0800 Subject: [PATCH 129/190] It's now OK for MongoPollster constructor to yield --- packages/mongo-livedata/mongo_pollster.js | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/packages/mongo-livedata/mongo_pollster.js b/packages/mongo-livedata/mongo_pollster.js index b956d20cb5..8152678b20 100644 --- a/packages/mongo-livedata/mongo_pollster.js +++ b/packages/mongo-livedata/mongo_pollster.js @@ -9,9 +9,8 @@ MongoPollster = function (cursorDescription, mongoHandle, ordered, self._stopCallbacks = []; self._stopped = false; - // This constructor cannot yield, so we don't create the synchronousCursor yet - // (since that can yield). - self._synchronousCursor = null; + self._synchronousCursor = self._mongoHandle._createSynchronousCursor( + self._cursorDescription); // previous results snapshot. 
on each poll cycle, diffs against // results drives the callbacks. @@ -142,12 +141,8 @@ _.extend(MongoPollster.prototype, { self._pendingWrites = []; // Get the new query results. (These calls can yield.) - if (self._synchronousCursor) { + if (!first) self._synchronousCursor.rewind(); - } else { - self._synchronousCursor = self._mongoHandle._createSynchronousCursor( - self._cursorDescription); - } var newResults = self._synchronousCursor.getRawObjects(self._ordered); var oldResults = self._results; From cf665da9d4fc21eb1c861346255d26e4c7fbb1f8 Mon Sep 17 00:00:00 2001 From: David Glasser Date: Tue, 26 Nov 2013 16:41:58 -0800 Subject: [PATCH 130/190] Update comments in observeChangesWithOplog --- packages/mongo-livedata/oplog.js | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/packages/mongo-livedata/oplog.js b/packages/mongo-livedata/oplog.js index a8835beefb..9d967d104b 100644 --- a/packages/mongo-livedata/oplog.js +++ b/packages/mongo-livedata/oplog.js @@ -234,10 +234,8 @@ observeChangesWithOplog = function (cursorDescription, } )); - // observeChangesWithOplog cannot yield (because the manipulation of - // mongoHandle._observeMultiplexers needs to be yield-free); calling - // multiplexer.ready() is the equivalent of the observeChanges "synchronous" - // return. + // Give _observeChanges a chance to add the new ObserveHandle to our + // multiplexer, so that the added calls get streamed. Meteor.defer(function () { if (stopped) throw new Error("oplog stopped surprisingly early"); @@ -248,7 +246,7 @@ observeChangesWithOplog = function (cursorDescription, }); if (stopped) throw new Error("oplog stopped quite early"); - // Actually send out the initial adds to the ObserveHandles. + // Allow observeChanges calls to return. 
multiplexer.ready(); if (stopped) From 16cc4edc07cae007b4240500a90cc87c5255179e Mon Sep 17 00:00:00 2001 From: David Glasser Date: Tue, 26 Nov 2013 17:05:43 -0800 Subject: [PATCH 131/190] make OplogTailer a real class --- packages/mongo-livedata/mongo_driver.js | 14 +- packages/mongo-livedata/mongo_pollster.js | 4 +- packages/mongo-livedata/oplog.js | 580 +++++++++++----------- 3 files changed, 312 insertions(+), 286 deletions(-) diff --git a/packages/mongo-livedata/mongo_driver.js b/packages/mongo-livedata/mongo_driver.js index e4010e9740..e40d4cc3e2 100644 --- a/packages/mongo-livedata/mongo_driver.js +++ b/packages/mongo-livedata/mongo_driver.js @@ -842,8 +842,14 @@ MongoConnection.prototype._dropIndex = function (collectionName, index) { // ObserveMultiplexer allows multiple identical ObserveHandles to be driven by a // single low-level observe process such as a MongoPollster. // -// A MongoPollster caches the results of a query and reruns it when necessary. -// It is hooked up to an ObserveMultiplexer. +// There are two "observe implementations" which drive ObserveMultiplexers: +// - MongoPollster caches the results of a query and reruns it when +// necessary. +// - OplogTailer follows the Mongo operation log to directly observe +// database changes. +// Both implementations follow the same simple interface: when you create them, +// they start sending observeChanges callbacks (and a ready() invocation) to +// their ObserveMultiplexer, and you stop them by calling their stop() method. var CursorDescription = function (collectionName, selector, options) { var self = this; @@ -1168,15 +1174,15 @@ MongoConnection.prototype._observeChanges = function ( if (self._oplogHandle && !ordered && !callbacks._testOnlyPollCallback && cursorSupportedByOplogTailing(cursorDescription)) { // Can yield! - observeImplementation = observeChangesWithOplog( + observeImplementation = new OplogTailer( cursorDescription, self, multiplexer); } else { // Start polling. 
observeImplementation = new MongoPollster( cursorDescription, self, - ordered, multiplexer, + ordered, callbacks._testOnlyPollCallback); } diff --git a/packages/mongo-livedata/mongo_pollster.js b/packages/mongo-livedata/mongo_pollster.js index 8152678b20..6c81e1188d 100644 --- a/packages/mongo-livedata/mongo_pollster.js +++ b/packages/mongo-livedata/mongo_pollster.js @@ -1,5 +1,5 @@ -MongoPollster = function (cursorDescription, mongoHandle, ordered, - multiplexer, testOnlyPollCallback) { +MongoPollster = function (cursorDescription, mongoHandle, multiplexer, + ordered, testOnlyPollCallback) { var self = this; self._cursorDescription = cursorDescription; diff --git a/packages/mongo-livedata/oplog.js b/packages/mongo-livedata/oplog.js index 9d967d104b..a7016581dd 100644 --- a/packages/mongo-livedata/oplog.js +++ b/packages/mongo-livedata/oplog.js @@ -7,6 +7,306 @@ var PHASE = { STEADY: 3 }; +// OplogTailer is an alternative to MongoPollster which follows the Mongo +// operation log instead of just re-polling the query. It obeys the same simple +// interface: constructing it starts sending observeChanges callbacks (and a +// ready() invocation) to the ObserveMultiplexer, and you stop it by calling +// the stop() method. 
+OplogTailer = function (cursorDescription, mongoHandle, multiplexer) { + var self = this; + + self._cursorDescription = cursorDescription; + self._mongoHandle = mongoHandle; + self._multiplexer = multiplexer; + + self._stopped = false; + self._stopHandles = []; + + Package.facts && Package.facts.Facts.incrementServerFact( + "mongo-livedata", "oplog-observers", 1); + + self._phase = PHASE.INITIALIZING; + + self._published = new LocalCollection._IdMap; + var selector = cursorDescription.selector; + self._selectorFn = LocalCollection._compileSelector(selector); + var projection = cursorDescription.options.fields || {}; + self._projectionFn = LocalCollection._compileProjection(projection); + // Projection function, result of combining important fields for selector and + // existing fields projection + var sharedProjection = LocalCollection._combineSelectorAndProjection( + selector, projection); + self._sharedProjectionFn = LocalCollection._compileProjection( + sharedProjection); + + self._needToFetch = new LocalCollection._IdMap; + self._currentlyFetching = new LocalCollection._IdMap; + + self._writesToCommitWhenWeReachSteady = []; + + forEachTrigger(cursorDescription, function (trigger) { + self._stopHandles.push(self._mongoHandle._oplogHandle.onOplogEntry( + trigger, function (notification) { + var op = notification.op; + if (op.op === 'c') { + // XXX actually, drop collection needs to be handled by doing a + // re-query + self._published.forEach(function (fields, id) { + self._remove(id); + }); + } else { + // All other operators should be handled depending on phase + if (self._phase === PHASE.INITIALIZING) + self._handleOplogEntryInitializing(op); + else + self._handleOplogEntrySteadyOrFetching(op); + } + } + )); + }); + + // XXX ordering w.r.t. everything else? + self._stopHandles.push(listenAll( + cursorDescription, function (notification, complete) { + // If we're not in a write fence, we don't have to do anything. 
+ var fence = DDPServer._CurrentWriteFence.get(); + if (!fence) { + complete(); + return; + } + var write = fence.beginWrite(); + // This write cannot complete until we've caught up to "this point" in the + // oplog, and then made it back to the steady state. + Meteor.defer(complete); + self._mongoHandle._oplogHandle.waitUntilCaughtUp(); + if (self._stopped) { + // We're stopped, so just immediately commit. + write.committed(); + } else if (self._phase === PHASE.STEADY) { + // Make sure that all of the callbacks have made it through the + // multiplexer and been delivered to ObserveHandles before committing + // writes. + self._multiplexer.onFlush(function () { + write.committed(); + }); + } else { + self._writesToCommitWhenWeReachSteady.push(write); + } + } + )); + + // Give _observeChanges a chance to add the new ObserveHandle to our + // multiplexer, so that the added calls get streamed. + Meteor.defer(function () { + self._runInitialQuery(); + }); +}; + +_.extend(OplogTailer.prototype, { + _add: function (doc) { + var self = this; + var id = doc._id; + var fields = _.clone(doc); + delete fields._id; + if (self._published.has(id)) + throw Error("tried to add something already published " + id); + self._published.set(id, self._sharedProjectionFn(fields)); + self._multiplexer.added(id, self._projectionFn(fields)); + }, + _remove: function (id) { + var self = this; + if (!self._published.has(id)) + throw Error("tried to remove something unpublished " + id); + self._published.remove(id); + self._multiplexer.removed(id); + }, + _handleDoc: function (id, newDoc) { + var self = this; + newDoc = _.clone(newDoc); + var matchesNow = newDoc && self._selectorFn(newDoc); + var matchedBefore = self._published.has(id); + if (matchesNow && !matchedBefore) { + self._add(newDoc); + } else if (matchedBefore && !matchesNow) { + self._remove(id); + } else if (matchesNow) { + var oldDoc = self._published.get(id); + if (!oldDoc) + throw Error("thought that " + id + " was there!"); + 
delete newDoc._id; + self._published.set(id, self._sharedProjectionFn(newDoc)); + var changed = LocalCollection._makeChangedFields(_.clone(newDoc), oldDoc); + changed = self._projectionFn(changed); + if (!_.isEmpty(changed)) + self._multiplexer.changed(id, changed); + } + }, + _fetchModifiedDocuments: function () { + var self = this; + self._phase = PHASE.FETCHING; + while (!self._stopped && !self._needToFetch.empty()) { + if (self._phase !== PHASE.FETCHING) + throw new Error("phase in fetchModifiedDocuments: " + self._phase); + + self._currentlyFetching = self._needToFetch; + self._needToFetch = new LocalCollection._IdMap; + var waiting = 0; + var error = null; + var fut = new Future; + Fiber(function () { + self._currentlyFetching.forEach(function (cacheKey, id) { + // currentlyFetching will not be updated during this loop. + waiting++; + self._mongoHandle._docFetcher.fetch( + self._cursorDescription.collectionName, id, cacheKey, + function (err, doc) { + if (err) { + if (!error) + error = err; + } else if (!self._stopped) { + self._handleDoc(id, doc); + } + waiting--; + if (waiting == 0) + fut.return(); + }); + }); + }).run(); + fut.wait(); + if (error) + throw error; + self._currentlyFetching = new LocalCollection._IdMap; + } + self._beSteady(); + }, + _beSteady: function () { + var self = this; + self._phase = PHASE.STEADY; + var writes = self._writesToCommitWhenWeReachSteady; + self._writesToCommitWhenWeReachSteady = []; + self._multiplexer.onFlush(function () { + _.each(writes, function (w) { + w.committed(); + }); + }); + }, + _handleOplogEntryInitializing: function (op) { + var self = this; + self._needToFetch.set(idForOp(op), op.ts.toString()); + }, + _handleOplogEntrySteadyOrFetching: function (op) { + var self = this; + var id = idForOp(op); + // If we're already fetching this one, or about to, we can't optimize; make + // sure that we fetch it again if necessary. 
+ if (self._currentlyFetching.has(id) || self._needToFetch.has(id)) { + if (self._phase !== PHASE.FETCHING) + throw Error("map not empty during steady phase"); + self._needToFetch.set(id, op.ts.toString()); + return; + } + + if (op.op === 'd') { + if (self._published.has(id)) + self._remove(id); + } else if (op.op === 'i') { + if (self._published.has(id)) + throw new Error("insert found for already-existing ID"); + + // XXX what if selector yields? for now it can't but later it could have + // $where + if (self._selectorFn(op.o)) + self._add(op.o); + } else if (op.op === 'u') { + // Is this a modifier ($set/$unset, which may require us to poll the + // database to figure out if the whole document matches the selector) or a + // replacement (in which case we can just directly re-evaluate the + // selector)? + var isReplace = !_.has(op.o, '$set') && !_.has(op.o, '$unset'); + + if (isReplace) { + self._handleDoc(id, _.extend({_id: id}, op.o)); + } else if (self._published.has(id)) { + // Oh great, we actually know what the document is, so we can apply + // this directly. + var newDoc = EJSON.clone(self._published.get(id)); + newDoc._id = id; + LocalCollection._modify(newDoc, op.o); + self._handleDoc(id, self._sharedProjectionFn(newDoc)); + } else if (LocalCollection._canSelectorBecomeTrueByModifier( + self._cursorDescription.selector, op.o)) { + self._needToFetch.set(id, op.ts.toString()); + if (self._phase === PHASE.STEADY) + self._fetchModifiedDocuments(); + } + } else { + throw Error("XXX SURPRISING OPERATION: " + op); + } + }, + _runInitialQuery: function () { + var self = this; + if (self._stopped) + throw new Error("oplog stopped surprisingly early"); + + var initialCursor = new Cursor(self._mongoHandle, self._cursorDescription); + initialCursor.forEach(function (initialDoc) { + self._add(initialDoc); + }); + if (self._stopped) + throw new Error("oplog stopped quite early"); + // Allow observeChanges calls to return. 
(After this, it's possible for + // stop() to be called.) + self._multiplexer.ready(); + + if (self._stopped) + return; + self._mongoHandle._oplogHandle.waitUntilCaughtUp(); + + if (self._stopped) + return; + if (self._phase !== PHASE.INITIALIZING) + throw Error("Phase unexpectedly " + self._phase); + + if (self._needToFetch.empty()) { + self._beSteady(); + } else { + self._fetchModifiedDocuments(); + } + }, + // This stop function is invoked from the onStop of the ObserveMultiplexer, so + // it shouldn't actually be possible to call it until the multiplexer is + // ready. + stop: function () { + var self = this; + if (self._stopped) + return; + self._stopped = true; + _.each(self._stopHandles, function (handle) { + handle.stop(); + }); + + // Note: we *don't* use multiplexer.onFlush here because this stop + // callback is actually invoked by the multiplexer itself when it has + // determined that there are no handles left. So nothing is actually going + // to get flushed (and it's probably not valid to call methods on the + // dying multiplexer). + _.each(self._writesToCommitWhenWeReachSteady, function (w) { + w.committed(); + }); + self._writesToCommitWhenWeReachSteady = null; + + // Proactively drop references to potentially big things. 
+ self._published = null; + self._needToFetch = null; + self._currentlyFetching = null; + self._oplogEntryHandle = null; + self._listenersHandle = null; + + Package.facts && Package.facts.Facts.incrementServerFact( + "mongo-livedata", "oplog-observers", -1); + } +}); + idForOp = function (op) { if (op.op === 'd') return op.o._id; @@ -20,283 +320,3 @@ idForOp = function (op) { else throw Error("Unknown op: " + EJSON.stringify(op)); }; - -observeChangesWithOplog = function (cursorDescription, - mongoHandle, - multiplexer) { - var stopped = false; - var stopHandles = []; - - Package.facts && Package.facts.Facts.incrementServerFact( - "mongo-livedata", "oplog-observers", 1); - - var phase = PHASE.INITIALIZING; - - var published = new LocalCollection._IdMap; - var selector = cursorDescription.selector; - var selectorFn = LocalCollection._compileSelector(selector); - var projection = cursorDescription.options.fields || {}; - var projectionFn = LocalCollection._compileProjection(projection); - // Projection function, result of combining important fields for selector and - // existing fields projection - var sharedProjection = LocalCollection._combineSelectorAndProjection(selector, projection); - var sharedProjectionFn = LocalCollection._compileProjection(sharedProjection); - - var needToFetch = new LocalCollection._IdMap; - var currentlyFetching = new LocalCollection._IdMap; - - var add = function (doc) { - var id = doc._id; - var fields = _.clone(doc); - delete fields._id; - if (published.has(id)) - throw Error("tried to add something already published " + id); - published.set(id, sharedProjectionFn(fields)); - multiplexer.added(id, projectionFn(fields)); - }; - - var remove = function (id) { - if (!published.has(id)) - throw Error("tried to remove something unpublished " + id); - published.remove(id); - multiplexer.removed(id); - }; - - var handleDoc = function (id, newDoc) { - newDoc = _.clone(newDoc); - var matchesNow = newDoc && selectorFn(newDoc); - var 
matchedBefore = published.has(id); - if (matchesNow && !matchedBefore) { - add(newDoc); - } else if (matchedBefore && !matchesNow) { - remove(id); - } else if (matchesNow) { - var oldDoc = published.get(id); - if (!oldDoc) - throw Error("thought that " + id + " was there!"); - delete newDoc._id; - published.set(id, sharedProjectionFn(newDoc)); - var changed = LocalCollection._makeChangedFields( - _.clone(newDoc), oldDoc); - changed = projectionFn(changed); - if (!_.isEmpty(changed)) - multiplexer.changed(id, changed); - } - }; - - var fetchModifiedDocuments = function () { - phase = PHASE.FETCHING; - while (!stopped && !needToFetch.empty()) { - if (phase !== PHASE.FETCHING) - throw new Error("Surprising phase in fetchModifiedDocuments: " + phase); - - currentlyFetching = needToFetch; - needToFetch = new LocalCollection._IdMap; - var waiting = 0; - var error = null; - var fut = new Future; - Fiber(function () { - currentlyFetching.forEach(function (cacheKey, id) { - // currentlyFetching will not be updated during this loop. 
- waiting++; - mongoHandle._docFetcher.fetch(cursorDescription.collectionName, id, cacheKey, function (err, doc) { - if (err) { - if (!error) - error = err; - } else if (!stopped) { - handleDoc(id, doc); - } - waiting--; - if (waiting == 0) - fut.return(); - }); - }); - }).run(); - fut.wait(); - if (error) - throw error; - currentlyFetching = new LocalCollection._IdMap; - } - beSteady(); - }; - - var writesToCommitWhenWeReachSteady = []; - var beSteady = function () { - phase = PHASE.STEADY; - var writes = writesToCommitWhenWeReachSteady; - writesToCommitWhenWeReachSteady = []; - multiplexer.onFlush(function () { - _.each(writes, function (w) { - w.committed(); - }); - }); - }; - - var oplogEntryHandlers = {}; - oplogEntryHandlers[PHASE.INITIALIZING] = function (op) { - needToFetch.set(idForOp(op), op.ts.toString()); - }; - // We can use the same handler for STEADY and FETCHING; the main difference is - // that FETCHING has non-empty currentlyFetching and/or needToFetch. - oplogEntryHandlers[PHASE.STEADY] = function (op) { - var id = idForOp(op); - // If we're already fetching this one, or about to, we can't optimize; make - // sure that we fetch it again if necessary. - if (currentlyFetching.has(id) || needToFetch.has(id)) { - if (phase !== PHASE.FETCHING) - throw Error("map not empty during steady phase"); - needToFetch.set(id, op.ts.toString()); - return; - } - - if (op.op === 'd') { - if (published.has(id)) - remove(id); - } else if (op.op === 'i') { - if (published.has(id)) - throw new Error("insert found for already-existing ID"); - - // XXX what if selector yields? for now it can't but later it could have - // $where - if (selectorFn(op.o)) - add(op.o); - } else if (op.op === 'u') { - // Is this a modifier ($set/$unset, which may require us to poll the - // database to figure out if the whole document matches the selector) or a - // replacement (in which case we can just directly re-evaluate the - // selector)? 
- var isReplace = !_.has(op.o, '$set') && !_.has(op.o, '$unset'); - - if (isReplace) { - handleDoc(id, _.extend({_id: id}, op.o)); - } else if (published.has(id)) { - // Oh great, we actually know what the document is, so we can apply - // this directly. - var newDoc = EJSON.clone(published.get(id)); - newDoc._id = id; - LocalCollection._modify(newDoc, op.o); - handleDoc(id, sharedProjectionFn(newDoc)); - } else if (LocalCollection._canSelectorBecomeTrueByModifier( - cursorDescription.selector, op.o)) { - needToFetch.set(id, op.ts.toString()); - if (phase === PHASE.STEADY) - fetchModifiedDocuments(); - return; - } - } else { - throw Error("XXX SURPRISING OPERATION: " + op); - } - }; - oplogEntryHandlers[PHASE.FETCHING] = oplogEntryHandlers[PHASE.STEADY]; - - forEachTrigger(cursorDescription, function (trigger) { - stopHandles.push(mongoHandle._oplogHandle.onOplogEntry( - trigger, function (notification) { - var op = notification.op; - if (op.op === 'c') { - // XXX actually, drop collection needs to be handled by doing a - // re-query - published.forEach(function (fields, id) { - remove(id); - }); - } else { - // All other operators should be handled depending on phase - oplogEntryHandlers[phase](op); - } - } - )); - }); - - // XXX ordering w.r.t. everything else? - stopHandles.push(listenAll( - cursorDescription, function (notification, complete) { - // If we're not in a write fence, we don't have to do anything. - var fence = DDPServer._CurrentWriteFence.get(); - if (!fence) { - complete(); - return; - } - var write = fence.beginWrite(); - // This write cannot complete until we've caught up to "this point" in the - // oplog, and then made it back to the steady state. - Meteor.defer(complete); - mongoHandle._oplogHandle.waitUntilCaughtUp(); - // Make sure that all of the callbacks have made it through the - // multiplexer and been delivered to ObserveHandles before committing - // writes. 
- if (stopped || phase === PHASE.STEADY) { - multiplexer.onFlush(function () { - write.committed(); - }); - } else { - writesToCommitWhenWeReachSteady.push(write); - } - } - )); - - // Give _observeChanges a chance to add the new ObserveHandle to our - // multiplexer, so that the added calls get streamed. - Meteor.defer(function () { - if (stopped) - throw new Error("oplog stopped surprisingly early"); - - var initialCursor = new Cursor(mongoHandle, cursorDescription); - initialCursor.forEach(function (initialDoc) { - add(initialDoc); - }); - if (stopped) - throw new Error("oplog stopped quite early"); - // Allow observeChanges calls to return. - multiplexer.ready(); - - if (stopped) - return; - mongoHandle._oplogHandle.waitUntilCaughtUp(); - - if (stopped) - return; - if (phase !== PHASE.INITIALIZING) - throw Error("Phase unexpectedly " + phase); - - if (needToFetch.empty()) { - beSteady(); - } else { - fetchModifiedDocuments(); - } - }); - - return { - // This stop function is invoked from the onStop of the ObserveMultiplexer, - // so it shouldn't actually be possible to call it until the multiplexer is - // ready. - stop: function () { - if (stopped) - return; - stopped = true; - _.each(stopHandles, function (handle) { - handle.stop(); - }); - - published = null; - selector = null; - needToFetch = null; - currentlyFetching = null; - - // Note: we *don't* use multiplexer.onFlush here because this stop - // callback is actually invoked by the multiplexer itself when it has - // determined that there are no handles left. So nothing is actually going - // to get flushed (and it's probably not valid to call methods on the - // dying multiplexer). 
- _.each(writesToCommitWhenWeReachSteady, function (w) { - w.committed(); - }); - writesToCommitWhenWeReachSteady = null; - - oplogEntryHandle = null; - listenersHandle = null; - - Package.facts && Package.facts.Facts.incrementServerFact( - "mongo-livedata", "oplog-observers", -1); - } - }; -}; From aeac87285e95f08e1a9ca88db94e8841f022bdd8 Mon Sep 17 00:00:00 2001 From: David Glasser Date: Tue, 26 Nov 2013 17:16:24 -0800 Subject: [PATCH 132/190] rename to {Mongo,Oplog}ObserveDriver Give a consistent constructor API betweent the two. --- packages/livedata/crossbar.js | 3 -- packages/mongo-livedata/mongo_driver.js | 36 +++++++++---------- .../mongo-livedata/mongo_livedata_tests.js | 8 ++--- packages/mongo-livedata/oplog.js | 33 +++++++++-------- packages/mongo-livedata/package.js | 2 +- .../{mongo_pollster.js => polling.js} | 25 +++++++------ 6 files changed, 52 insertions(+), 55 deletions(-) rename packages/mongo-livedata/{mongo_pollster.js => polling.js} (92%) diff --git a/packages/livedata/crossbar.js b/packages/livedata/crossbar.js index 342a5e0ae9..358ddd5a22 100644 --- a/packages/livedata/crossbar.js +++ b/packages/livedata/crossbar.js @@ -27,9 +27,6 @@ _.extend(DDPServer._Crossbar.prototype, { // // XXX It should be legal to call fire() from inside a listen() // callback? - // - // Note: the MongoPollster constructor assumes that a call to listen() never - // yields. listen: function (trigger, callback) { var self = this; var id = self.nextId++; diff --git a/packages/mongo-livedata/mongo_driver.js b/packages/mongo-livedata/mongo_driver.js index e40d4cc3e2..8145371a33 100644 --- a/packages/mongo-livedata/mongo_driver.js +++ b/packages/mongo-livedata/mongo_driver.js @@ -840,12 +840,12 @@ MongoConnection.prototype._dropIndex = function (collectionName, index) { // reference to an ObserveMultiplexer. // // ObserveMultiplexer allows multiple identical ObserveHandles to be driven by a -// single low-level observe process such as a MongoPollster. 
+// single observe driver. // -// There are two "observe implementations" which drive ObserveMultiplexers: -// - MongoPollster caches the results of a query and reruns it when +// There are two "observe drivers" which drive ObserveMultiplexers: +// - PollingObserveDriver caches the results of a query and reruns it when // necessary. -// - OplogTailer follows the Mongo operation log to directly observe +// - OplogObserveDriver follows the Mongo operation log to directly observe // database changes. // Both implementations follow the same simple interface: when you create them, // they start sending observeChanges callbacks (and a ready() invocation) to @@ -1145,7 +1145,7 @@ MongoConnection.prototype._observeChanges = function ( var observeKey = JSON.stringify( _.extend({ordered: ordered}, cursorDescription)); - var multiplexer, observeImplementation; + var multiplexer, observeDriver; var firstHandle = false; // Find a matching ObserveMultiplexer, or create a new one. This next block is @@ -1155,40 +1155,38 @@ MongoConnection.prototype._observeChanges = function ( if (_.has(self._observeMultiplexers, observeKey)) { multiplexer = self._observeMultiplexers[observeKey]; } else { + firstHandle = true; // Create a new ObserveMultiplexer. multiplexer = new ObserveMultiplexer({ ordered: ordered, onStop: function () { - observeImplementation.stop(); + observeDriver.stop(); delete self._observeMultiplexers[observeKey]; } }); self._observeMultiplexers[observeKey] = multiplexer; - firstHandle = true; } }); var observeHandle = new ObserveHandle(multiplexer, callbacks); if (firstHandle) { + var driverClass = PollingObserveDriver; if (self._oplogHandle && !ordered && !callbacks._testOnlyPollCallback && cursorSupportedByOplogTailing(cursorDescription)) { - // Can yield! - observeImplementation = new OplogTailer( - cursorDescription, self, multiplexer); - } else { - // Start polling. 
- observeImplementation = new MongoPollster( - cursorDescription, - self, - multiplexer, - ordered, - callbacks._testOnlyPollCallback); + driverClass = OplogObserveDriver; } + observeDriver = new driverClass({ + cursorDescription: cursorDescription, + mongoHandle: self, + multiplexer: multiplexer, + ordered: ordered, + _testOnlyPollCallback: callbacks._testOnlyPollCallback + }); // This field is only set for the first ObserveHandle in an // ObserveMultiplexer. It is only there for use by one test. - observeHandle._observeImplementation = observeImplementation; + observeHandle._observeDriver = observeDriver; } // Blocks until the initial adds have been sent. diff --git a/packages/mongo-livedata/mongo_livedata_tests.js b/packages/mongo-livedata/mongo_livedata_tests.js index ce32bb54e9..5c12cbfb10 100644 --- a/packages/mongo-livedata/mongo_livedata_tests.js +++ b/packages/mongo-livedata/mongo_livedata_tests.js @@ -387,8 +387,8 @@ Tinytest.addAsync("mongo-livedata - fuzz test, " + idGeneration, function(test, // run. if (Meteor.isServer) { // For now, has to be polling (not oplog). 
- test.isTrue(obs._observeImplementation); - test.isTrue(obs._observeImplementation._suspendPolling); + test.isTrue(obs._observeDriver); + test.isTrue(obs._observeDriver._suspendPolling); } var step = 0; @@ -423,7 +423,7 @@ Tinytest.addAsync("mongo-livedata - fuzz test, " + idGeneration, function(test, finishObserve(function () { if (Meteor.isServer) - obs._observeImplementation._suspendPolling(); + obs._observeDriver._suspendPolling(); // Do a batch of 1-10 operations var batch_count = rnd(10) + 1; @@ -456,7 +456,7 @@ Tinytest.addAsync("mongo-livedata - fuzz test, " + idGeneration, function(test, } } if (Meteor.isServer) - obs._observeImplementation._resumePolling(); + obs._observeDriver._resumePolling(); }); diff --git a/packages/mongo-livedata/oplog.js b/packages/mongo-livedata/oplog.js index a7016581dd..f603152a85 100644 --- a/packages/mongo-livedata/oplog.js +++ b/packages/mongo-livedata/oplog.js @@ -7,17 +7,19 @@ var PHASE = { STEADY: 3 }; -// OplogTailer is an alternative to MongoPollster which follows the Mongo -// operation log instead of just re-polling the query. It obeys the same simple -// interface: constructing it starts sending observeChanges callbacks (and a -// ready() invocation) to the ObserveMultiplexer, and you stop it by calling -// the stop() method. -OplogTailer = function (cursorDescription, mongoHandle, multiplexer) { +// OplogObserveDriver is an alternative to PollingObserveDriver which follows +// the Mongo operation log instead of just re-polling the query. It obeys the +// same simple interface: constructing it starts sending observeChanges +// callbacks (and a ready() invocation) to the ObserveMultiplexer, and you stop +// it by calling the stop() method. 
+OplogObserveDriver = function (options) { var self = this; - self._cursorDescription = cursorDescription; - self._mongoHandle = mongoHandle; - self._multiplexer = multiplexer; + self._cursorDescription = options.cursorDescription; + self._mongoHandle = options.mongoHandle; + self._multiplexer = options.multiplexer; + if (options.ordered) + throw Error("OplogObserveDriver only supports unordered observeChanges"); self._stopped = false; self._stopHandles = []; @@ -28,9 +30,10 @@ OplogTailer = function (cursorDescription, mongoHandle, multiplexer) { self._phase = PHASE.INITIALIZING; self._published = new LocalCollection._IdMap; - var selector = cursorDescription.selector; - self._selectorFn = LocalCollection._compileSelector(selector); - var projection = cursorDescription.options.fields || {}; + var selector = self._cursorDescription.selector; + self._selectorFn = LocalCollection._compileSelector( + self._cursorDescription.selector); + var projection = self._cursorDescription.options.fields || {}; self._projectionFn = LocalCollection._compileProjection(projection); // Projection function, result of combining important fields for selector and // existing fields projection @@ -44,7 +47,7 @@ OplogTailer = function (cursorDescription, mongoHandle, multiplexer) { self._writesToCommitWhenWeReachSteady = []; - forEachTrigger(cursorDescription, function (trigger) { + forEachTrigger(self._cursorDescription, function (trigger) { self._stopHandles.push(self._mongoHandle._oplogHandle.onOplogEntry( trigger, function (notification) { var op = notification.op; @@ -67,7 +70,7 @@ OplogTailer = function (cursorDescription, mongoHandle, multiplexer) { // XXX ordering w.r.t. everything else? self._stopHandles.push(listenAll( - cursorDescription, function (notification, complete) { + self._cursorDescription, function (notification, complete) { // If we're not in a write fence, we don't have to do anything. 
var fence = DDPServer._CurrentWriteFence.get(); if (!fence) { @@ -102,7 +105,7 @@ OplogTailer = function (cursorDescription, mongoHandle, multiplexer) { }); }; -_.extend(OplogTailer.prototype, { +_.extend(OplogObserveDriver.prototype, { _add: function (doc) { var self = this; var id = doc._id; diff --git a/packages/mongo-livedata/package.js b/packages/mongo-livedata/package.js index 2b0061ef15..5553acc731 100644 --- a/packages/mongo-livedata/package.js +++ b/packages/mongo-livedata/package.js @@ -43,7 +43,7 @@ Package.on_use(function (api) { api.export('MongoTest', 'server', {testOnly: true}); api.add_files(['doc_fetcher.js', 'mongo_driver.js', 'observe_multiplex.js', - 'mongo_pollster.js', 'oplog.js'], 'server'); + 'polling.js', 'oplog.js'], 'server'); api.add_files('local_collection_driver.js', ['client', 'server']); api.add_files('remote_collection_driver.js', 'server'); api.add_files('collection.js', ['client', 'server']); diff --git a/packages/mongo-livedata/mongo_pollster.js b/packages/mongo-livedata/polling.js similarity index 92% rename from packages/mongo-livedata/mongo_pollster.js rename to packages/mongo-livedata/polling.js index 6c81e1188d..938798e519 100644 --- a/packages/mongo-livedata/mongo_pollster.js +++ b/packages/mongo-livedata/polling.js @@ -1,11 +1,10 @@ -MongoPollster = function (cursorDescription, mongoHandle, multiplexer, - ordered, testOnlyPollCallback) { +PollingObserveDriver = function (options) { var self = this; - self._cursorDescription = cursorDescription; - self._mongoHandle = mongoHandle; - self._ordered = ordered; - self._multiplexer = multiplexer; + self._cursorDescription = options.cursorDescription; + self._mongoHandle = options.mongoHandle; + self._ordered = options.ordered; + self._multiplexer = options.multiplexer; self._stopCallbacks = []; self._stopped = false; @@ -26,8 +25,8 @@ MongoPollster = function (cursorDescription, mongoHandle, multiplexer, self._pollsScheduledButNotStarted = 0; self._pendingWrites = []; // people 
to notify when polling completes - // Make sure to create a separately throttled function for each MongoPollster - // object. + // Make sure to create a separately throttled function for each + // PollingObserveDriver object. self._ensurePollIsScheduled = _.throttle( self._unthrottledEnsurePollIsScheduled, 50 /* ms */); @@ -35,7 +34,7 @@ MongoPollster = function (cursorDescription, mongoHandle, multiplexer, self._taskQueue = new Meteor._SynchronousQueue(); var listenersHandle = listenAll( - cursorDescription, function (notification, complete) { + self._cursorDescription, function (notification, complete) { // When someone does a transaction that might affect us, schedule a poll // of the database. If that transaction happens inside of a write fence, // block the fence until we've polled and notified observers. @@ -59,8 +58,8 @@ MongoPollster = function (cursorDescription, mongoHandle, multiplexer, // For testing, there's an undocumented callback argument to observeChanges // which disables time-based polling and gets called at the beginning of each // poll. - if (testOnlyPollCallback) { - self._testOnlyPollCallback = testOnlyPollCallback; + if (options._testOnlyPollCallback) { + self._testOnlyPollCallback = options._testOnlyPollCallback; } else { var intervalHandle = Meteor.setInterval( _.bind(self._ensurePollIsScheduled, self), 10 * 1000); @@ -76,7 +75,7 @@ MongoPollster = function (cursorDescription, mongoHandle, multiplexer, "mongo-livedata", "mongo-pollsters", 1); }; -_.extend(MongoPollster.prototype, { +_.extend(PollingObserveDriver.prototype, { // This is always called through _.throttle (except once at startup). _unthrottledEnsurePollIsScheduled: function () { var self = this; @@ -131,7 +130,7 @@ _.extend(MongoPollster.prototype, { if (!self._results) { first = true; // XXX maybe use _IdMap/OrderedDict instead? - self._results = self.ordered ? [] : {}; + self._results = self._ordered ? 
[] : {}; } self._testOnlyPollCallback && self._testOnlyPollCallback(); From 4ff3004e3caf25facbbb77b2e2910b05552054d4 Mon Sep 17 00:00:00 2001 From: David Glasser Date: Tue, 26 Nov 2013 17:32:38 -0800 Subject: [PATCH 133/190] move more oplog stuff out of mongo_driver.js --- packages/mongo-livedata/doc_fetcher.js | 2 + packages/mongo-livedata/mongo_driver.js | 252 +---------------------- packages/mongo-livedata/oplog.js | 34 +++ packages/mongo-livedata/oplog_tailing.js | 211 +++++++++++++++++++ packages/mongo-livedata/oplog_tests.js | 4 +- packages/mongo-livedata/package.js | 4 +- 6 files changed, 255 insertions(+), 252 deletions(-) create mode 100644 packages/mongo-livedata/oplog_tailing.js diff --git a/packages/mongo-livedata/doc_fetcher.js b/packages/mongo-livedata/doc_fetcher.js index cdcbfb9cd2..86f7e82cf7 100644 --- a/packages/mongo-livedata/doc_fetcher.js +++ b/packages/mongo-livedata/doc_fetcher.js @@ -57,3 +57,5 @@ _.extend(DocFetcher.prototype, { }).run(); } }); + +MongoTest.DocFetcher = DocFetcher; diff --git a/packages/mongo-livedata/mongo_driver.js b/packages/mongo-livedata/mongo_driver.js index 8145371a33..37adfe0704 100644 --- a/packages/mongo-livedata/mongo_driver.js +++ b/packages/mongo-livedata/mongo_driver.js @@ -13,6 +13,7 @@ var Fiber = Npm.require('fibers'); var Future = Npm.require(path.join('fibers', 'future')); MongoInternals = {}; +MongoTest = {}; var replaceNames = function (filter, thing) { if (typeof thing === "object") { @@ -228,216 +229,6 @@ MongoConnection.prototype._maybeBeginWrite = function () { return {committed: function () {}}; }; -var OPLOG_COLLECTION = 'oplog.rs'; - -// Like Perl's quotemeta: quotes all regexp metacharacters. 
See -// https://github.com/substack/quotemeta/blob/master/index.js -// XXX this is duplicated with accounts_server.js -var quotemeta = function (str) { - return String(str).replace(/(\W)/g, '\\$1'); -}; - -var showTS = function (ts) { - return "Timestamp(" + ts.getHighBits() + ", " + ts.getLowBits() + ")"; -}; - -MongoConnection.prototype._startOplogTailing = function (oplogUrl, - dbNameFuture) { - var self = this; - - var oplogLastEntryConnection = null; - var oplogTailConnection = null; - var stopped = false; - var tailHandle = null; - var readyFuture = new Future(); - var crossbar = new DDPServer._Crossbar({ - factPackage: "mongo-livedata", factName: "oplog-watchers" - }); - var lastProcessedTS = null; - // Lazily calculate the basic selector. Don't call baseOplogSelector() at the - // top level of this function, because we don't want this function to block. - var baseOplogSelector = _.once(function () { - return { - ns: new RegExp('^' + quotemeta(dbNameFuture.wait()) + '\\.'), - $or: [ - { op: {$in: ['i', 'u', 'd']} }, - // If it is not db.collection.drop(), ignore it - { op: 'c', 'o.drop': { $exists: true } }] - }; - }); - // XXX doc - var catchingUpFutures = []; - - self._oplogHandle = { - stop: function () { - if (stopped) - return; - stopped = true; - if (tailHandle) - tailHandle.stop(); - // XXX should close connections too - }, - - onOplogEntry: function (trigger, callback) { - if (stopped) - throw new Error("Called onOplogEntry on stopped handle!"); - - // Calling onOplogEntry requires us to wait for the tailing to be ready. - readyFuture.wait(); - - var originalCallback = callback; - callback = Meteor.bindEnvironment(function (notification, onComplete) { - // XXX can we avoid this clone by making oplog.js careful? 
- try { - originalCallback(EJSON.clone(notification)); - } finally { - onComplete(); - } - }, function (err) { - Meteor._debug("Error in oplog callback", err.stack); - }); - var listenHandle = crossbar.listen(trigger, callback); - return { - stop: function () { - listenHandle.stop(); - } - }; - }, - - // Calls `callback` once the oplog has been processed up to a point that is - // roughly "now": specifically, once we've processed all ops that are - // currently visible. - // XXX become convinced that this is actually safe even if oplogConnection - // is some kind of pool - waitUntilCaughtUp: function () { - if (stopped) - throw new Error("Called waitUntilCaughtUp on stopped handle!"); - - // Calling waitUntilCaughtUp requries us to wait for the oplog connection - // to be ready. - readyFuture.wait(); - - // We need to make the selector at least as restrictive as the actual - // tailing selector (ie, we need to specify the DB name) or else we - // might find a TS that won't show up in the actual tail stream. - var lastEntry = oplogLastEntryConnection.findOne( - OPLOG_COLLECTION, baseOplogSelector(), - {fields: {ts: 1}, sort: {$natural: -1}}); - - if (!lastEntry) { - // Really, nothing in the oplog? Well, we've processed everything. - return; - } - - var ts = lastEntry.ts; - if (!ts) - throw Error("oplog entry without ts: " + EJSON.stringify(lastEntry)); - - if (lastProcessedTS && ts.lessThanOrEqual(lastProcessedTS)) { - // We've already caught up to here. - return; - } - - var insertAfter = catchingUpFutures.length; - while (insertAfter - 1 > 0 - && catchingUpFutures[insertAfter - 1].ts.greaterThan(ts)) { - insertAfter--; - } - - // XXX this can occur if we fail over from one primary to another. so - // this check needs to be removed before we merge oplog. that said, it - // has been helpful so far at proving that we are properly using - // poolSize 1. 
Also, we could keep something like it if we could - // actually detect failover; see - // https://github.com/mongodb/node-mongodb-native/issues/1120 - if (insertAfter !== catchingUpFutures.length) { - throw Error("found misordered oplog: " - + showTS(_.last(catchingUpFutures).ts) + " vs " - + showTS(ts)); - } - var f = new Future; - catchingUpFutures.splice(insertAfter, 0, {ts: ts, future: f}); - f.wait(); - } - }; - - // Setting up the connections and tail handler is a blocking operation, so we - // do it "later". - Meteor.defer(function () { - // We make two separate connections to Mongo. The Node Mongo driver - // implements a naive round-robin connection pool: each "connection" is a - // pool of several (5 by default) TCP connections, and each request is - // rotated through the pools. Tailable cursor queries block on the server - // until there is some data to return (or until a few seconds have - // passed). So if the connection pool used for tailing cursors is the same - // pool used for other queries, the other queries will be delayed by seconds - // 1/5 of the time. - // - // The tail connection will only ever be running a single tail command, so - // it only needs to make one underlying TCP connection. - oplogTailConnection = new MongoConnection(oplogUrl, {poolSize: 1}); - // XXX better docs, but: it's to get monotonic results - // XXX is it safe to say "if there's an in flight query, just use its - // results"? I don't think so but should consider that - oplogLastEntryConnection = new MongoConnection(oplogUrl, {poolSize: 1}); - - // Find the last oplog entry. Blocks until the connection is ready. - var lastOplogEntry = oplogLastEntryConnection.findOne( - OPLOG_COLLECTION, {}, {sort: {$natural: -1}}); - - var dbName = dbNameFuture.wait(); - - var oplogSelector = _.clone(baseOplogSelector()); - if (lastOplogEntry) { - // Start after the last entry that currently exists. 
- oplogSelector.ts = {$gt: lastOplogEntry.ts}; - // If there are any calls to callWhenProcessedLatest before any other - // oplog entries show up, allow callWhenProcessedLatest to call its - // callback immediately. - lastProcessedTS = lastOplogEntry.ts; - } - - var cursorDescription = new CursorDescription( - OPLOG_COLLECTION, oplogSelector, {tailable: true}); - - tailHandle = oplogTailConnection.tail(cursorDescription, function (doc) { - if (!(doc.ns && doc.ns.length > dbName.length + 1 && - doc.ns.substr(0, dbName.length + 1) === (dbName + '.'))) - throw new Error("Unexpected ns"); - - var trigger = {collection: doc.ns.substr(dbName.length + 1), - dropCollection: false, - op: doc}; - - // Is it a special command and the collection name is hidden somewhere in - // operator? - if (trigger.collection === "$cmd") { - trigger.collection = doc.o.drop; - trigger.dropCollection = true; - trigger.id = null; - } else { - // All other ops have an id. - trigger.id = idForOp(doc); - } - - var f = new Future; - crossbar.fire(trigger, f.resolver()); - f.wait(); - - // Now that we've processed this operation, process pending sequencers. - if (!doc.ts) - throw Error("oplog entry without ts: " + EJSON.stringify(doc)); - lastProcessedTS = doc.ts; - while (!_.isEmpty(catchingUpFutures) - && catchingUpFutures[0].ts.lessThanOrEqual(lastProcessedTS)) { - var sequencer = catchingUpFutures.shift(); - sequencer.future.return(); - } - }); - readyFuture.return(); - }); -}; - //////////// Public API ////////// @@ -851,7 +642,7 @@ MongoConnection.prototype._dropIndex = function (collectionName, index) { // they start sending observeChanges callbacks (and a ready() invocation) to // their ObserveMultiplexer, and you stop them by calling their stop() method. 
-var CursorDescription = function (collectionName, selector, options) { +CursorDescription = function (collectionName, selector, options) { var self = this; self.collectionName = collectionName; self.selector = Meteor.Collection._rewriteSelector(selector); @@ -1173,7 +964,7 @@ MongoConnection.prototype._observeChanges = function ( if (firstHandle) { var driverClass = PollingObserveDriver; if (self._oplogHandle && !ordered && !callbacks._testOnlyPollCallback - && cursorSupportedByOplogTailing(cursorDescription)) { + && OplogObserveDriver.cursorSupported(cursorDescription)) { driverClass = OplogObserveDriver; } observeDriver = new driverClass({ @@ -1289,38 +1080,6 @@ MongoConnection.prototype._observeChangesTailable = function ( }); }; -// Does our oplog tailing code support this cursor? For now, we are being very -// conservative and allowing only simple queries with simple options. -var cursorSupportedByOplogTailing = function (cursorDescription) { - // First, check the options. - var options = cursorDescription.options; - - // This option (which are mostly used for sorted cursors) require us to figure - // out where a given document fits in an order to know if it's included or - // not, and we don't track that information when doing oplog tailing. - if (options.limit || options.skip) return false; - - // For now, we're just dealing with equality queries: no $operators, regexps, - // or $and/$or/$where/etc clauses. We can expand the scope of what we're - // comfortable processing later. ($where will get pretty scary since it will - // allow selector processing to yield!) - return _.all(cursorDescription.selector, function (value, field) { - // No logical operators like $and. - if (field.substr(0, 1) === '$') - return false; - // We only allow scalars, not sub-documents or $operators or RegExp. 
- // XXX Date would be easy too, though I doubt anyone is doing equality - // lookups on dates - return typeof value === "string" || - typeof value === "number" || - typeof value === "boolean" || - value === null || - value instanceof Meteor.Collection.ObjectID; - }); -}; - - - // XXX We probably need to find a better way to expose this. Right now // it's only used by tests, but in fact you need it in normal // operation to interact with capped collections (eg, Galaxy uses it). @@ -1328,8 +1087,3 @@ MongoInternals.MongoTimestamp = MongoDB.Timestamp; MongoInternals.Connection = MongoConnection; MongoInternals.NpmModule = MongoDB; - -MongoTest = { - cursorSupportedByOplogTailing: cursorSupportedByOplogTailing, - DocFetcher: DocFetcher -}; diff --git a/packages/mongo-livedata/oplog.js b/packages/mongo-livedata/oplog.js index f603152a85..ffad9d2b28 100644 --- a/packages/mongo-livedata/oplog.js +++ b/packages/mongo-livedata/oplog.js @@ -310,6 +310,38 @@ _.extend(OplogObserveDriver.prototype, { } }); +// Does our oplog tailing code support this cursor? For now, we are being very +// conservative and allowing only simple queries with simple options. +// (This is a "static method".) +OplogObserveDriver.cursorSupported = function (cursorDescription) { + // First, check the options. + var options = cursorDescription.options; + + // This option (which are mostly used for sorted cursors) require us to figure + // out where a given document fits in an order to know if it's included or + // not, and we don't track that information when doing oplog tailing. + if (options.limit || options.skip) return false; + + // For now, we're just dealing with equality queries: no $operators, regexps, + // or $and/$or/$where/etc clauses. We can expand the scope of what we're + // comfortable processing later. ($where will get pretty scary since it will + // allow selector processing to yield!) 
+ return _.all(cursorDescription.selector, function (value, field) { + // No logical operators like $and. + if (field.substr(0, 1) === '$') + return false; + // We only allow scalars, not sub-documents or $operators or RegExp. + // XXX Date would be easy too, though I doubt anyone is doing equality + // lookups on dates + return typeof value === "string" || + typeof value === "number" || + typeof value === "boolean" || + value === null || + value instanceof Meteor.Collection.ObjectID; + }); +}; + + idForOp = function (op) { if (op.op === 'd') return op.o._id; @@ -323,3 +355,5 @@ idForOp = function (op) { else throw Error("Unknown op: " + EJSON.stringify(op)); }; + +MongoTest.OplogObserveDriver = OplogObserveDriver; diff --git a/packages/mongo-livedata/oplog_tailing.js b/packages/mongo-livedata/oplog_tailing.js new file mode 100644 index 0000000000..3b0a9d5e7a --- /dev/null +++ b/packages/mongo-livedata/oplog_tailing.js @@ -0,0 +1,211 @@ +var Future = Npm.require('fibers/future'); + +var OPLOG_COLLECTION = 'oplog.rs'; + +// Like Perl's quotemeta: quotes all regexp metacharacters. See +// https://github.com/substack/quotemeta/blob/master/index.js +// XXX this is duplicated with accounts_server.js +var quotemeta = function (str) { + return String(str).replace(/(\W)/g, '\\$1'); +}; + +var showTS = function (ts) { + return "Timestamp(" + ts.getHighBits() + ", " + ts.getLowBits() + ")"; +}; + +MongoConnection.prototype._startOplogTailing = function (oplogUrl, + dbNameFuture) { + var self = this; + + var oplogLastEntryConnection = null; + var oplogTailConnection = null; + var stopped = false; + var tailHandle = null; + var readyFuture = new Future(); + var crossbar = new DDPServer._Crossbar({ + factPackage: "mongo-livedata", factName: "oplog-watchers" + }); + var lastProcessedTS = null; + // Lazily calculate the basic selector. Don't call baseOplogSelector() at the + // top level of this function, because we don't want this function to block. 
+ var baseOplogSelector = _.once(function () { + return { + ns: new RegExp('^' + quotemeta(dbNameFuture.wait()) + '\\.'), + $or: [ + { op: {$in: ['i', 'u', 'd']} }, + // If it is not db.collection.drop(), ignore it + { op: 'c', 'o.drop': { $exists: true } }] + }; + }); + // XXX doc + var catchingUpFutures = []; + + self._oplogHandle = { + stop: function () { + if (stopped) + return; + stopped = true; + if (tailHandle) + tailHandle.stop(); + // XXX should close connections too + }, + + onOplogEntry: function (trigger, callback) { + if (stopped) + throw new Error("Called onOplogEntry on stopped handle!"); + + // Calling onOplogEntry requires us to wait for the tailing to be ready. + readyFuture.wait(); + + var originalCallback = callback; + callback = Meteor.bindEnvironment(function (notification, onComplete) { + // XXX can we avoid this clone by making oplog.js careful? + try { + originalCallback(EJSON.clone(notification)); + } finally { + onComplete(); + } + }, function (err) { + Meteor._debug("Error in oplog callback", err.stack); + }); + var listenHandle = crossbar.listen(trigger, callback); + return { + stop: function () { + listenHandle.stop(); + } + }; + }, + + // Calls `callback` once the oplog has been processed up to a point that is + // roughly "now": specifically, once we've processed all ops that are + // currently visible. + // XXX become convinced that this is actually safe even if oplogConnection + // is some kind of pool + waitUntilCaughtUp: function () { + if (stopped) + throw new Error("Called waitUntilCaughtUp on stopped handle!"); + + // Calling waitUntilCaughtUp requries us to wait for the oplog connection + // to be ready. + readyFuture.wait(); + + // We need to make the selector at least as restrictive as the actual + // tailing selector (ie, we need to specify the DB name) or else we + // might find a TS that won't show up in the actual tail stream. 
+ var lastEntry = oplogLastEntryConnection.findOne( + OPLOG_COLLECTION, baseOplogSelector(), + {fields: {ts: 1}, sort: {$natural: -1}}); + + if (!lastEntry) { + // Really, nothing in the oplog? Well, we've processed everything. + return; + } + + var ts = lastEntry.ts; + if (!ts) + throw Error("oplog entry without ts: " + EJSON.stringify(lastEntry)); + + if (lastProcessedTS && ts.lessThanOrEqual(lastProcessedTS)) { + // We've already caught up to here. + return; + } + + var insertAfter = catchingUpFutures.length; + while (insertAfter - 1 > 0 + && catchingUpFutures[insertAfter - 1].ts.greaterThan(ts)) { + insertAfter--; + } + + // XXX this can occur if we fail over from one primary to another. so + // this check needs to be removed before we merge oplog. that said, it + // has been helpful so far at proving that we are properly using + // poolSize 1. Also, we could keep something like it if we could + // actually detect failover; see + // https://github.com/mongodb/node-mongodb-native/issues/1120 + if (insertAfter !== catchingUpFutures.length) { + throw Error("found misordered oplog: " + + showTS(_.last(catchingUpFutures).ts) + " vs " + + showTS(ts)); + } + var f = new Future; + catchingUpFutures.splice(insertAfter, 0, {ts: ts, future: f}); + f.wait(); + } + }; + + // Setting up the connections and tail handler is a blocking operation, so we + // do it "later". + Meteor.defer(function () { + // We make two separate connections to Mongo. The Node Mongo driver + // implements a naive round-robin connection pool: each "connection" is a + // pool of several (5 by default) TCP connections, and each request is + // rotated through the pools. Tailable cursor queries block on the server + // until there is some data to return (or until a few seconds have + // passed). So if the connection pool used for tailing cursors is the same + // pool used for other queries, the other queries will be delayed by seconds + // 1/5 of the time. 
+ // + // The tail connection will only ever be running a single tail command, so + // it only needs to make one underlying TCP connection. + oplogTailConnection = new MongoConnection(oplogUrl, {poolSize: 1}); + // XXX better docs, but: it's to get monotonic results + // XXX is it safe to say "if there's an in flight query, just use its + // results"? I don't think so but should consider that + oplogLastEntryConnection = new MongoConnection(oplogUrl, {poolSize: 1}); + + // Find the last oplog entry. Blocks until the connection is ready. + var lastOplogEntry = oplogLastEntryConnection.findOne( + OPLOG_COLLECTION, {}, {sort: {$natural: -1}}); + + var dbName = dbNameFuture.wait(); + + var oplogSelector = _.clone(baseOplogSelector()); + if (lastOplogEntry) { + // Start after the last entry that currently exists. + oplogSelector.ts = {$gt: lastOplogEntry.ts}; + // If there are any calls to callWhenProcessedLatest before any other + // oplog entries show up, allow callWhenProcessedLatest to call its + // callback immediately. + lastProcessedTS = lastOplogEntry.ts; + } + + var cursorDescription = new CursorDescription( + OPLOG_COLLECTION, oplogSelector, {tailable: true}); + + tailHandle = oplogTailConnection.tail(cursorDescription, function (doc) { + if (!(doc.ns && doc.ns.length > dbName.length + 1 && + doc.ns.substr(0, dbName.length + 1) === (dbName + '.'))) + throw new Error("Unexpected ns"); + + var trigger = {collection: doc.ns.substr(dbName.length + 1), + dropCollection: false, + op: doc}; + + // Is it a special command and the collection name is hidden somewhere in + // operator? + if (trigger.collection === "$cmd") { + trigger.collection = doc.o.drop; + trigger.dropCollection = true; + trigger.id = null; + } else { + // All other ops have an id. + trigger.id = idForOp(doc); + } + + var f = new Future; + crossbar.fire(trigger, f.resolver()); + f.wait(); + + // Now that we've processed this operation, process pending sequencers. 
+ if (!doc.ts) + throw Error("oplog entry without ts: " + EJSON.stringify(doc)); + lastProcessedTS = doc.ts; + while (!_.isEmpty(catchingUpFutures) + && catchingUpFutures[0].ts.lessThanOrEqual(lastProcessedTS)) { + var sequencer = catchingUpFutures.shift(); + sequencer.future.return(); + } + }); + readyFuture.return(); + }); +}; diff --git a/packages/mongo-livedata/oplog_tests.js b/packages/mongo-livedata/oplog_tests.js index d8f27c7727..dc403c3766 100644 --- a/packages/mongo-livedata/oplog_tests.js +++ b/packages/mongo-livedata/oplog_tests.js @@ -1,10 +1,10 @@ var OplogCollection = new Meteor.Collection("oplog-" + Random.id()); -Tinytest.add("mongo-livedata - oplog - cursorSupportedByOplogTailing", function (test) { +Tinytest.add("mongo-livedata - oplog - cursorSupported", function (test) { var supported = function (expected, selector) { var cursor = OplogCollection.find(selector); test.equal( - MongoTest.cursorSupportedByOplogTailing(cursor._cursorDescription), + MongoTest.OplogObserveDriver.cursorSupported(cursor._cursorDescription), expected); }; diff --git a/packages/mongo-livedata/package.js b/packages/mongo-livedata/package.js index 5553acc731..498b9ef64f 100644 --- a/packages/mongo-livedata/package.js +++ b/packages/mongo-livedata/package.js @@ -42,7 +42,9 @@ Package.on_use(function (api) { // For tests only. 
api.export('MongoTest', 'server', {testOnly: true}); - api.add_files(['doc_fetcher.js', 'mongo_driver.js', 'observe_multiplex.js', + api.add_files(['mongo_driver.js', 'oplog_tailing.js', + 'observe_multiplex.js', 'doc_fetcher.js', + // XXX rename to have _observe_driver 'polling.js', 'oplog.js'], 'server'); api.add_files('local_collection_driver.js', ['client', 'server']); api.add_files('remote_collection_driver.js', 'server'); From 9abfffbb66060a927a5f763c672f57ae1d29b4ea Mon Sep 17 00:00:00 2001 From: David Glasser Date: Tue, 26 Nov 2013 17:33:46 -0800 Subject: [PATCH 134/190] add "_observe_driver" to filenames --- packages/mongo-livedata/{oplog.js => oplog_observe_driver.js} | 0 packages/mongo-livedata/package.js | 4 ++-- .../mongo-livedata/{polling.js => polling_observe_driver.js} | 0 3 files changed, 2 insertions(+), 2 deletions(-) rename packages/mongo-livedata/{oplog.js => oplog_observe_driver.js} (100%) rename packages/mongo-livedata/{polling.js => polling_observe_driver.js} (100%) diff --git a/packages/mongo-livedata/oplog.js b/packages/mongo-livedata/oplog_observe_driver.js similarity index 100% rename from packages/mongo-livedata/oplog.js rename to packages/mongo-livedata/oplog_observe_driver.js diff --git a/packages/mongo-livedata/package.js b/packages/mongo-livedata/package.js index 498b9ef64f..1144aefaa1 100644 --- a/packages/mongo-livedata/package.js +++ b/packages/mongo-livedata/package.js @@ -44,8 +44,8 @@ Package.on_use(function (api) { api.add_files(['mongo_driver.js', 'oplog_tailing.js', 'observe_multiplex.js', 'doc_fetcher.js', - // XXX rename to have _observe_driver - 'polling.js', 'oplog.js'], 'server'); + 'polling_observe_driver.js','oplog_observe_driver.js'], + 'server'); api.add_files('local_collection_driver.js', ['client', 'server']); api.add_files('remote_collection_driver.js', 'server'); api.add_files('collection.js', ['client', 'server']); diff --git a/packages/mongo-livedata/polling.js 
b/packages/mongo-livedata/polling_observe_driver.js similarity index 100% rename from packages/mongo-livedata/polling.js rename to packages/mongo-livedata/polling_observe_driver.js From 1189a7ae42baf4c0a5ed3e34fac641ed5ee18d57 Mon Sep 17 00:00:00 2001 From: David Glasser Date: Tue, 26 Nov 2013 17:43:29 -0800 Subject: [PATCH 135/190] make connection._oplogHandle into a classy object --- packages/mongo-livedata/mongo_driver.js | 2 +- .../mongo-livedata/oplog_observe_driver.js | 14 - packages/mongo-livedata/oplog_tailing.js | 318 ++++++++++-------- 3 files changed, 172 insertions(+), 162 deletions(-) diff --git a/packages/mongo-livedata/mongo_driver.js b/packages/mongo-livedata/mongo_driver.js index 37adfe0704..ac5ba5c897 100644 --- a/packages/mongo-livedata/mongo_driver.js +++ b/packages/mongo-livedata/mongo_driver.js @@ -165,7 +165,7 @@ MongoConnection = function (url, options) { self._withDb(function (db) { dbNameFuture.return(db.databaseName); }); - self._startOplogTailing(options.oplogUrl, dbNameFuture); + self._oplogHandle = new OplogHandle(options.oplogUrl, dbNameFuture); } }; diff --git a/packages/mongo-livedata/oplog_observe_driver.js b/packages/mongo-livedata/oplog_observe_driver.js index ffad9d2b28..160182b0d0 100644 --- a/packages/mongo-livedata/oplog_observe_driver.js +++ b/packages/mongo-livedata/oplog_observe_driver.js @@ -342,18 +342,4 @@ OplogObserveDriver.cursorSupported = function (cursorDescription) { }; -idForOp = function (op) { - if (op.op === 'd') - return op.o._id; - else if (op.op === 'i') - return op.o._id; - else if (op.op === 'u') - return op.o2._id; - else if (op.op === 'c') - throw Error("Operator 'c' doesn't supply an object with id: " + - EJSON.stringify(op)); - else - throw Error("Unknown op: " + EJSON.stringify(op)); -}; - MongoTest.OplogObserveDriver = OplogObserveDriver; diff --git a/packages/mongo-livedata/oplog_tailing.js b/packages/mongo-livedata/oplog_tailing.js index 3b0a9d5e7a..66d6874e65 100644 --- 
a/packages/mongo-livedata/oplog_tailing.js +++ b/packages/mongo-livedata/oplog_tailing.js @@ -13,24 +13,40 @@ var showTS = function (ts) { return "Timestamp(" + ts.getHighBits() + ", " + ts.getLowBits() + ")"; }; -MongoConnection.prototype._startOplogTailing = function (oplogUrl, - dbNameFuture) { - var self = this; +idForOp = function (op) { + if (op.op === 'd') + return op.o._id; + else if (op.op === 'i') + return op.o._id; + else if (op.op === 'u') + return op.o2._id; + else if (op.op === 'c') + throw Error("Operator 'c' doesn't supply an object with id: " + + EJSON.stringify(op)); + else + throw Error("Unknown op: " + EJSON.stringify(op)); +}; - var oplogLastEntryConnection = null; - var oplogTailConnection = null; - var stopped = false; - var tailHandle = null; - var readyFuture = new Future(); - var crossbar = new DDPServer._Crossbar({ +OplogHandle = function (oplogUrl, dbNameFuture) { + var self = this; + self._oplogUrl = oplogUrl; + self._dbNameFuture = dbNameFuture; + + self._oplogLastEntryConnection = null; + self._oplogTailConnection = null; + self._stopped = false; + self._tailHandle = null; + self._readyFuture = new Future(); + self._crossbar = new DDPServer._Crossbar({ factPackage: "mongo-livedata", factName: "oplog-watchers" }); - var lastProcessedTS = null; - // Lazily calculate the basic selector. Don't call baseOplogSelector() at the - // top level of this function, because we don't want this function to block. - var baseOplogSelector = _.once(function () { + self._lastProcessedTS = null; + // Lazily calculate the basic selector. Don't call _baseOplogSelector() at the + // top level of the constructor, because we don't want the constructor to + // block. Note that the _.once is per-handle. 
+ self._baseOplogSelector = _.once(function () { return { - ns: new RegExp('^' + quotemeta(dbNameFuture.wait()) + '\\.'), + ns: new RegExp('^' + quotemeta(self._dbNameFuture.wait()) + '\\.'), $or: [ { op: {$in: ['i', 'u', 'd']} }, // If it is not db.collection.drop(), ignore it @@ -38,104 +54,108 @@ MongoConnection.prototype._startOplogTailing = function (oplogUrl, }; }); // XXX doc - var catchingUpFutures = []; - - self._oplogHandle = { - stop: function () { - if (stopped) - return; - stopped = true; - if (tailHandle) - tailHandle.stop(); - // XXX should close connections too - }, - - onOplogEntry: function (trigger, callback) { - if (stopped) - throw new Error("Called onOplogEntry on stopped handle!"); - - // Calling onOplogEntry requires us to wait for the tailing to be ready. - readyFuture.wait(); - - var originalCallback = callback; - callback = Meteor.bindEnvironment(function (notification, onComplete) { - // XXX can we avoid this clone by making oplog.js careful? - try { - originalCallback(EJSON.clone(notification)); - } finally { - onComplete(); - } - }, function (err) { - Meteor._debug("Error in oplog callback", err.stack); - }); - var listenHandle = crossbar.listen(trigger, callback); - return { - stop: function () { - listenHandle.stop(); - } - }; - }, - - // Calls `callback` once the oplog has been processed up to a point that is - // roughly "now": specifically, once we've processed all ops that are - // currently visible. - // XXX become convinced that this is actually safe even if oplogConnection - // is some kind of pool - waitUntilCaughtUp: function () { - if (stopped) - throw new Error("Called waitUntilCaughtUp on stopped handle!"); - - // Calling waitUntilCaughtUp requries us to wait for the oplog connection - // to be ready. 
- readyFuture.wait(); - - // We need to make the selector at least as restrictive as the actual - // tailing selector (ie, we need to specify the DB name) or else we - // might find a TS that won't show up in the actual tail stream. - var lastEntry = oplogLastEntryConnection.findOne( - OPLOG_COLLECTION, baseOplogSelector(), - {fields: {ts: 1}, sort: {$natural: -1}}); - - if (!lastEntry) { - // Really, nothing in the oplog? Well, we've processed everything. - return; - } - - var ts = lastEntry.ts; - if (!ts) - throw Error("oplog entry without ts: " + EJSON.stringify(lastEntry)); - - if (lastProcessedTS && ts.lessThanOrEqual(lastProcessedTS)) { - // We've already caught up to here. - return; - } - - var insertAfter = catchingUpFutures.length; - while (insertAfter - 1 > 0 - && catchingUpFutures[insertAfter - 1].ts.greaterThan(ts)) { - insertAfter--; - } - - // XXX this can occur if we fail over from one primary to another. so - // this check needs to be removed before we merge oplog. that said, it - // has been helpful so far at proving that we are properly using - // poolSize 1. Also, we could keep something like it if we could - // actually detect failover; see - // https://github.com/mongodb/node-mongodb-native/issues/1120 - if (insertAfter !== catchingUpFutures.length) { - throw Error("found misordered oplog: " - + showTS(_.last(catchingUpFutures).ts) + " vs " - + showTS(ts)); - } - var f = new Future; - catchingUpFutures.splice(insertAfter, 0, {ts: ts, future: f}); - f.wait(); - } - }; + self._catchingUpFutures = []; // Setting up the connections and tail handler is a blocking operation, so we // do it "later". 
Meteor.defer(function () { + self._startTailing(); + }); +}; + +_.extend(OplogHandle.prototype, { + stop: function () { + var self = this; + if (self._stopped) + return; + self._stopped = true; + if (self._tailHandle) + self._tailHandle.stop(); + // XXX should close connections too + }, + onOplogEntry: function (trigger, callback) { + var self = this; + if (self._stopped) + throw new Error("Called onOplogEntry on stopped handle!"); + + // Calling onOplogEntry requires us to wait for the tailing to be ready. + self._readyFuture.wait(); + + var originalCallback = callback; + callback = Meteor.bindEnvironment(function (notification, onComplete) { + // XXX can we avoid this clone by making oplog.js careful? + try { + originalCallback(EJSON.clone(notification)); + } finally { + onComplete(); + } + }, function (err) { + Meteor._debug("Error in oplog callback", err.stack); + }); + var listenHandle = self._crossbar.listen(trigger, callback); + return { + stop: function () { + listenHandle.stop(); + } + }; + }, + // Calls `callback` once the oplog has been processed up to a point that is + // roughly "now": specifically, once we've processed all ops that are + // currently visible. + // XXX become convinced that this is actually safe even if oplogConnection + // is some kind of pool + waitUntilCaughtUp: function () { + var self = this; + if (self._stopped) + throw new Error("Called waitUntilCaughtUp on stopped handle!"); + + // Calling waitUntilCaughtUp requries us to wait for the oplog connection to + // be ready. + self._readyFuture.wait(); + + // We need to make the selector at least as restrictive as the actual + // tailing selector (ie, we need to specify the DB name) or else we might + // find a TS that won't show up in the actual tail stream. + var lastEntry = self._oplogLastEntryConnection.findOne( + OPLOG_COLLECTION, self._baseOplogSelector(), + {fields: {ts: 1}, sort: {$natural: -1}}); + + if (!lastEntry) { + // Really, nothing in the oplog? 
Well, we've processed everything. + return; + } + + var ts = lastEntry.ts; + if (!ts) + throw Error("oplog entry without ts: " + EJSON.stringify(lastEntry)); + + if (self._lastProcessedTS && ts.lessThanOrEqual(self._lastProcessedTS)) { + // We've already caught up to here. + return; + } + + var insertAfter = self._catchingUpFutures.length; + while (insertAfter - 1 > 0 + && self._catchingUpFutures[insertAfter - 1].ts.greaterThan(ts)) { + insertAfter--; + } + + // XXX this can occur if we fail over from one primary to another. so this + // check needs to be removed before we merge oplog. that said, it has been + // helpful so far at proving that we are properly using poolSize 1. Also, we + // could keep something like it if we could actually detect failover; see + // https://github.com/mongodb/node-mongodb-native/issues/1120 + if (insertAfter !== self._catchingUpFutures.length) { + throw Error("found misordered oplog: " + + showTS(_.last(self._catchingUpFutures).ts) + " vs " + + showTS(ts)); + } + var f = new Future; + self._catchingUpFutures.splice(insertAfter, 0, {ts: ts, future: f}); + f.wait(); + }, + _startTailing: function () { + var self = this; // We make two separate connections to Mongo. The Node Mongo driver // implements a naive round-robin connection pool: each "connection" is a // pool of several (5 by default) TCP connections, and each request is @@ -147,65 +167,69 @@ MongoConnection.prototype._startOplogTailing = function (oplogUrl, // // The tail connection will only ever be running a single tail command, so // it only needs to make one underlying TCP connection. - oplogTailConnection = new MongoConnection(oplogUrl, {poolSize: 1}); + self._oplogTailConnection = new MongoConnection( + self._oplogUrl, {poolSize: 1}); // XXX better docs, but: it's to get monotonic results // XXX is it safe to say "if there's an in flight query, just use its // results"? 
I don't think so but should consider that - oplogLastEntryConnection = new MongoConnection(oplogUrl, {poolSize: 1}); + self._oplogLastEntryConnection = new MongoConnection( + self._oplogUrl, {poolSize: 1}); // Find the last oplog entry. Blocks until the connection is ready. - var lastOplogEntry = oplogLastEntryConnection.findOne( + var lastOplogEntry = self._oplogLastEntryConnection.findOne( OPLOG_COLLECTION, {}, {sort: {$natural: -1}}); - var dbName = dbNameFuture.wait(); + var dbName = self._dbNameFuture.wait(); - var oplogSelector = _.clone(baseOplogSelector()); + var oplogSelector = _.clone(self._baseOplogSelector()); if (lastOplogEntry) { // Start after the last entry that currently exists. oplogSelector.ts = {$gt: lastOplogEntry.ts}; // If there are any calls to callWhenProcessedLatest before any other // oplog entries show up, allow callWhenProcessedLatest to call its // callback immediately. - lastProcessedTS = lastOplogEntry.ts; + self._lastProcessedTS = lastOplogEntry.ts; } var cursorDescription = new CursorDescription( OPLOG_COLLECTION, oplogSelector, {tailable: true}); - tailHandle = oplogTailConnection.tail(cursorDescription, function (doc) { - if (!(doc.ns && doc.ns.length > dbName.length + 1 && - doc.ns.substr(0, dbName.length + 1) === (dbName + '.'))) - throw new Error("Unexpected ns"); + self._tailHandle = self._oplogTailConnection.tail( + cursorDescription, function (doc) { + if (!(doc.ns && doc.ns.length > dbName.length + 1 && + doc.ns.substr(0, dbName.length + 1) === (dbName + '.'))) + throw new Error("Unexpected ns"); - var trigger = {collection: doc.ns.substr(dbName.length + 1), - dropCollection: false, - op: doc}; + var trigger = {collection: doc.ns.substr(dbName.length + 1), + dropCollection: false, + op: doc}; - // Is it a special command and the collection name is hidden somewhere in - // operator? 
- if (trigger.collection === "$cmd") { - trigger.collection = doc.o.drop; - trigger.dropCollection = true; - trigger.id = null; - } else { - // All other ops have an id. - trigger.id = idForOp(doc); - } + // Is it a special command and the collection name is hidden somewhere + // in operator? + if (trigger.collection === "$cmd") { + trigger.collection = doc.o.drop; + trigger.dropCollection = true; + trigger.id = null; + } else { + // All other ops have an id. + trigger.id = idForOp(doc); + } - var f = new Future; - crossbar.fire(trigger, f.resolver()); - f.wait(); + var f = new Future; + self._crossbar.fire(trigger, f.resolver()); + f.wait(); - // Now that we've processed this operation, process pending sequencers. - if (!doc.ts) - throw Error("oplog entry without ts: " + EJSON.stringify(doc)); - lastProcessedTS = doc.ts; - while (!_.isEmpty(catchingUpFutures) - && catchingUpFutures[0].ts.lessThanOrEqual(lastProcessedTS)) { - var sequencer = catchingUpFutures.shift(); - sequencer.future.return(); - } - }); - readyFuture.return(); - }); -}; + // Now that we've processed this operation, process pending sequencers. + if (!doc.ts) + throw Error("oplog entry without ts: " + EJSON.stringify(doc)); + self._lastProcessedTS = doc.ts; + while (!_.isEmpty(self._catchingUpFutures) + && self._catchingUpFutures[0].ts.lessThanOrEqual( + self._lastProcessedTS)) { + var sequencer = self._catchingUpFutures.shift(); + sequencer.future.return(); + } + }); + self._readyFuture.return(); + } +}); From 5314ffedd3de5298e5150aab25e9081b5c6d233f Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Tue, 3 Dec 2013 13:39:50 -0800 Subject: [PATCH 136/190] Properly handle projections where '_id' is the only rule. + Tests. 
Fixes #1651 Conflicts: packages/minimongo/minimongo.js --- packages/minimongo/minimongo_tests.js | 24 ++++++++++++++++++++++++ packages/minimongo/projection.js | 9 ++++++++- 2 files changed, 32 insertions(+), 1 deletion(-) diff --git a/packages/minimongo/minimongo_tests.js b/packages/minimongo/minimongo_tests.js index 0c8dc6e7cd..9e276bbc8e 100644 --- a/packages/minimongo/minimongo_tests.js +++ b/packages/minimongo/minimongo_tests.js @@ -992,6 +992,30 @@ Tinytest.add("minimongo - projection_compiler", function (test) { "blacklist nested - path not found in doc"] ]); + testProjection({ _id: 1 }, [ + [{ _id: 42, x: 1, y: { z: "2" } }, + { _id: 42 }, + "_id whitelisted"], + [{ _id: 33 }, + { _id: 33 }, + "_id whitelisted, _id only"], + [{ x: 1 }, + {}, + "_id whitelisted, no _id"] + ]); + + testProjection({ _id: 0 }, [ + [{ _id: 42, x: 1, y: { z: "2" } }, + { x: 1, y: { z: "2" } }, + "_id blacklisted"], + [{ _id: 33 }, + {}, + "_id blacklisted, _id only"], + [{ x: 1 }, + { x: 1 }, + "_id blacklisted, no _id"] + ]); + test.throws(function () { testProjection({ 'inc': 1, 'excl': 0 }, [ [ { inc: 42, excl: 42 }, { inc: 42 }, "Can't combine incl/excl rules" ] diff --git a/packages/minimongo/projection.js b/packages/minimongo/projection.js index a26ebc85c4..1c7b1472fb 100644 --- a/packages/minimongo/projection.js +++ b/packages/minimongo/projection.js @@ -61,7 +61,14 @@ projectionDetails = function (fields) { // Find the non-_id keys (_id is handled specially because it is included unless // explicitly excluded). Sort the keys, so that our code to detect overlaps // like 'foo' and 'foo.bar' can assume that 'foo' comes first. - var fieldsKeys = _.reject(_.keys(fields).sort(), function (key) { return key === '_id'; }); + var fieldsKeys = _.keys(fields).sort(); + + // If there are other rules other than '_id', treat '_id' differently in a + // separate case. If '_id' is the only rule, use it to understand if it is + // including/excluding projection. 
+ if (fieldsKeys.length > 0 && !(fieldsKeys.length === 1 && fieldsKeys[0] === '_id')) + fieldsKeys = _.reject(fieldsKeys, function (key) { return key === '_id'; }); + var including = null; // Unknown _.each(fieldsKeys, function (keyPath) { From 001d2811d22c25adf5a43d24888d9f9500afb777 Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Tue, 3 Dec 2013 14:59:22 -0800 Subject: [PATCH 137/190] Throw on unsupported fields projection --- packages/minimongo/minimongo_tests.js | 12 ++++++++++++ packages/minimongo/projection.js | 14 ++++++++++++++ 2 files changed, 26 insertions(+) diff --git a/packages/minimongo/minimongo_tests.js b/packages/minimongo/minimongo_tests.js index 9e276bbc8e..ce2cc52e52 100644 --- a/packages/minimongo/minimongo_tests.js +++ b/packages/minimongo/minimongo_tests.js @@ -1116,6 +1116,18 @@ Tinytest.add("minimongo - fetch with fields", function (test) { if (!i) return; test.isTrue(x.i === arr[i-1].i + 1); }); + + // Temporary unsupported operators + // queries are taken from MongoDB docs examples + test.throws(function () { + c.find({}, { fields: { 'grades.$': 1 } }); + }); + test.throws(function () { + c.find({}, { fields: { grades: { $elemMatch: { mean: 70 } } } }); + }); + test.throws(function () { + c.find({}, { fields: { grades: { $slice: [20, 10] } } }); + }); }); Tinytest.add("minimongo - fetch with projection, subarrays", function (test) { diff --git a/packages/minimongo/projection.js b/packages/minimongo/projection.js index 1c7b1472fb..c3b8bca066 100644 --- a/packages/minimongo/projection.js +++ b/packages/minimongo/projection.js @@ -6,6 +6,11 @@ // according to projection rules. Doesn't retain subfields // of passed argument. LocalCollection._compileProjection = function (fields) { + // XXX: $-operators are not supported in fields projections yet + if (! 
LocalCollection._supportedProjection(fields)) + throw MinimongoError("Minimongo doesn't support fields projections " + + "with $-operators yet"); + var _idProjection = _.isUndefined(fields._id) ? true : fields._id; var details = projectionDetails(fields); @@ -159,3 +164,12 @@ pathsToTree = function (paths, newLeafFn, conflictFn, tree) { return tree; }; +LocalCollection._supportedProjection = function (fields) { + return _.all(fields, function (val, keyPath) { + if (_.contains(keyPath.split('.'), '$')) + return false; + return !_.isObject(val) || + (!_.has(val, '$slice') && !_.has(val, '$elemMatch')); + }); +}; + From fb2886d88ad2b9dbaf086b9abdca5e72de439bb4 Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Tue, 3 Dec 2013 15:27:47 -0800 Subject: [PATCH 138/190] Don't use oplog tailing for queries with unsupported fields projection --- packages/mongo-livedata/oplog_observe_driver.js | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/packages/mongo-livedata/oplog_observe_driver.js b/packages/mongo-livedata/oplog_observe_driver.js index 160182b0d0..a44a0e918d 100644 --- a/packages/mongo-livedata/oplog_observe_driver.js +++ b/packages/mongo-livedata/oplog_observe_driver.js @@ -322,6 +322,11 @@ OplogObserveDriver.cursorSupported = function (cursorDescription) { // not, and we don't track that information when doing oplog tailing. if (options.limit || options.skip) return false; + // If a fields projection option is given check if it is supported by + // minimongo (some operators are not supported). + if (options.fields && !LocalCollection._supportedProjection(options.fields)) + return false; + // For now, we're just dealing with equality queries: no $operators, regexps, // or $and/$or/$where/etc clauses. We can expand the scope of what we're // comfortable processing later. 
($where will get pretty scary since it will From eeea007645541514b5666d1d24dc32b92cb59800 Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Tue, 3 Dec 2013 17:28:07 -0800 Subject: [PATCH 139/190] Move checks around --- packages/minimongo/projection.js | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/packages/minimongo/projection.js b/packages/minimongo/projection.js index c3b8bca066..b95ea7c623 100644 --- a/packages/minimongo/projection.js +++ b/packages/minimongo/projection.js @@ -6,6 +6,9 @@ // according to projection rules. Doesn't retain subfields // of passed argument. LocalCollection._compileProjection = function (fields) { + if (!_.isObject(fields)) + throw MinimongoError("fields option must be an object"); + // XXX: $-operators are not supported in fields projections yet if (! LocalCollection._supportedProjection(fields)) throw MinimongoError("Minimongo doesn't support fields projections " @@ -56,13 +59,6 @@ LocalCollection._compileProjection = function (fields) { // (exception for '_id' as it is a special case handled separately) // - including - Boolean - "take only certain fields" type of projection projectionDetails = function (fields) { - if (!_.isObject(fields)) - throw MinimongoError("fields option must be an object"); - - if (_.any(_.values(fields), function (x) { - return _.indexOf([1, 0, true, false], x) === -1; })) - throw MinimongoError("Projection values should be one of 1, 0, true, or false"); - // Find the non-_id keys (_id is handled specially because it is included unless // explicitly excluded). Sort the keys, so that our code to detect overlaps // like 'foo' and 'foo.bar' can assume that 'foo' comes first. 
@@ -168,8 +164,7 @@ LocalCollection._supportedProjection = function (fields) { return _.all(fields, function (val, keyPath) { if (_.contains(keyPath.split('.'), '$')) return false; - return !_.isObject(val) || - (!_.has(val, '$slice') && !_.has(val, '$elemMatch')); + return _.indexOf([1, 0, true, false], val) !== -1; }); }; From 1b27b7cf14f58c1f90e2380ed5c5c3aa568a2196 Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Tue, 3 Dec 2013 17:30:16 -0800 Subject: [PATCH 140/190] Tests for an empty projection --- packages/minimongo/minimongo_tests.js | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/packages/minimongo/minimongo_tests.js b/packages/minimongo/minimongo_tests.js index ce2cc52e52..efb3d86770 100644 --- a/packages/minimongo/minimongo_tests.js +++ b/packages/minimongo/minimongo_tests.js @@ -1016,6 +1016,12 @@ Tinytest.add("minimongo - projection_compiler", function (test) { "_id blacklisted, no _id"] ]); + testProjection({}, [ + [{ a: 1, b: 2, c: "3" }, + { a: 1, b: 2, c: "3" }, + "empty projection"] + ]); + test.throws(function () { testProjection({ 'inc': 1, 'excl': 0 }, [ [ { inc: 42, excl: 42 }, { inc: 42 }, "Can't combine incl/excl rules" ] From 534efb51e412f3c89da65f76fdf48ea57e94c84e Mon Sep 17 00:00:00 2001 From: Slava Kim Date: Tue, 3 Dec 2013 18:16:24 -0800 Subject: [PATCH 141/190] _checkSupportedProjection instead of _supportedProjection --- packages/minimongo/projection.js | 14 ++++++-------- packages/mongo-livedata/oplog_observe_driver.js | 12 ++++++++++-- 2 files changed, 16 insertions(+), 10 deletions(-) diff --git a/packages/minimongo/projection.js b/packages/minimongo/projection.js index b95ea7c623..15bfeeb460 100644 --- a/packages/minimongo/projection.js +++ b/packages/minimongo/projection.js @@ -9,10 +9,7 @@ LocalCollection._compileProjection = function (fields) { if (!_.isObject(fields)) throw MinimongoError("fields option must be an object"); - // XXX: $-operators are not supported in fields projections yet - if (! 
LocalCollection._supportedProjection(fields)) - throw MinimongoError("Minimongo doesn't support fields projections " - + "with $-operators yet"); + LocalCollection._checkSupportedProjection(fields); var _idProjection = _.isUndefined(fields._id) ? true : fields._id; var details = projectionDetails(fields); @@ -160,11 +157,12 @@ pathsToTree = function (paths, newLeafFn, conflictFn, tree) { return tree; }; -LocalCollection._supportedProjection = function (fields) { - return _.all(fields, function (val, keyPath) { +LocalCollection._checkSupportedProjection = function (fields) { + _.each(fields, function (val, keyPath) { if (_.contains(keyPath.split('.'), '$')) - return false; - return _.indexOf([1, 0, true, false], val) !== -1; + throw MinimongoError("Minimongo doesn't support $ operator in projections yet."); + if (_.indexOf([1, 0, true, false], val) === -1) + throw MinimongoError("Projection values should be one of 1, 0, true, or false"); }); }; diff --git a/packages/mongo-livedata/oplog_observe_driver.js b/packages/mongo-livedata/oplog_observe_driver.js index a44a0e918d..9de3f1fa47 100644 --- a/packages/mongo-livedata/oplog_observe_driver.js +++ b/packages/mongo-livedata/oplog_observe_driver.js @@ -324,8 +324,16 @@ OplogObserveDriver.cursorSupported = function (cursorDescription) { // If a fields projection option is given check if it is supported by // minimongo (some operators are not supported). - if (options.fields && !LocalCollection._supportedProjection(options.fields)) - return false; + if (options.fields) { + try { + LocalCollection._checkSupportedProjection(options.fields); + } catch (e) { + if (e.name === "MinimongoError") + return false; + else + throw e; + } + } // For now, we're just dealing with equality queries: no $operators, regexps, // or $and/$or/$where/etc clauses. 
We can expand the scope of what we're From 643fbad4dc0cc94eb920d2dc20976e5a439abaa7 Mon Sep 17 00:00:00 2001 From: David Glasser Date: Wed, 4 Dec 2013 11:56:21 -0800 Subject: [PATCH 142/190] Move object check into _checkSupportedProjection also ensure 'fields' is not an array --- packages/minimongo/projection.js | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/packages/minimongo/projection.js b/packages/minimongo/projection.js index 15bfeeb460..8a8851d5f0 100644 --- a/packages/minimongo/projection.js +++ b/packages/minimongo/projection.js @@ -6,9 +6,6 @@ // according to projection rules. Doesn't retain subfields // of passed argument. LocalCollection._compileProjection = function (fields) { - if (!_.isObject(fields)) - throw MinimongoError("fields option must be an object"); - LocalCollection._checkSupportedProjection(fields); var _idProjection = _.isUndefined(fields._id) ? true : fields._id; @@ -158,6 +155,9 @@ pathsToTree = function (paths, newLeafFn, conflictFn, tree) { }; LocalCollection._checkSupportedProjection = function (fields) { + if (!_.isObject(fields) || _.isArray(fields)) + throw MinimongoError("fields option must be an object"); + _.each(fields, function (val, keyPath) { if (_.contains(keyPath.split('.'), '$')) throw MinimongoError("Minimongo doesn't support $ operator in projections yet."); From 38e83bb13957f2bd785ba91d86b4536b3935fe5a Mon Sep 17 00:00:00 2001 From: David Glasser Date: Wed, 4 Dec 2013 12:40:09 -0800 Subject: [PATCH 143/190] implement cursor.find(s, {_disableOplog: true}) --- packages/mongo-livedata/mongo_driver.js | 2 +- .../mongo-livedata/mongo_livedata_tests.js | 18 ++++++++++++++++++ .../mongo-livedata/oplog_observe_driver.js | 6 ++++++ 3 files changed, 25 insertions(+), 1 deletion(-) diff --git a/packages/mongo-livedata/mongo_driver.js b/packages/mongo-livedata/mongo_driver.js index ac5ba5c897..9cd06ea03d 100644 --- a/packages/mongo-livedata/mongo_driver.js +++ b/packages/mongo-livedata/mongo_driver.js @@ 
-976,7 +976,7 @@ MongoConnection.prototype._observeChanges = function ( }); // This field is only set for the first ObserveHandle in an - // ObserveMultiplexer. It is only there for use by one test. + // ObserveMultiplexer. It is only there for use tests. observeHandle._observeDriver = observeDriver; } diff --git a/packages/mongo-livedata/mongo_livedata_tests.js b/packages/mongo-livedata/mongo_livedata_tests.js index 5c12cbfb10..ba7e819bbb 100644 --- a/packages/mongo-livedata/mongo_livedata_tests.js +++ b/packages/mongo-livedata/mongo_livedata_tests.js @@ -1879,3 +1879,21 @@ if (Meteor.isServer) { elements: ['Y', 'A', 'B', 'C']}); }); } + +// This is a VERY white-box test. +Meteor.isServer && Tinytest.add("mongo-livedata - oplog - _disableOplog", function (test) { + var collName = Random.id(); + var coll = new Meteor.Collection(collName); + if (MongoInternals.defaultRemoteCollectionDriver().mongo._oplogHandle) { + var observeWithOplog = coll.find({x: 5}) + .observeChanges({added: function () {}}); + test.isTrue(observeWithOplog._observeDriver); + test.isTrue(observeWithOplog._observeDriver._usesOplog); + observeWithOplog.stop(); + } + var observeWithoutOplog = coll.find({x: 6}, {_disableOplog: true}) + .observeChanges({added: function () {}}); + test.isTrue(observeWithoutOplog._observeDriver); + test.isFalse(observeWithoutOplog._observeDriver._usesOplog); + observeWithoutOplog.stop(); +}); diff --git a/packages/mongo-livedata/oplog_observe_driver.js b/packages/mongo-livedata/oplog_observe_driver.js index 9de3f1fa47..6f5411a272 100644 --- a/packages/mongo-livedata/oplog_observe_driver.js +++ b/packages/mongo-livedata/oplog_observe_driver.js @@ -15,6 +15,8 @@ var PHASE = { OplogObserveDriver = function (options) { var self = this; + self._usesOplog = true; // tests look at this + self._cursorDescription = options.cursorDescription; self._mongoHandle = options.mongoHandle; self._multiplexer = options.multiplexer; @@ -317,6 +319,10 @@ OplogObserveDriver.cursorSupported 
= function (cursorDescription) { // First, check the options. var options = cursorDescription.options; + // Did the user say no explicitly? + if (options._disableOplog) + return false; + // This option (which are mostly used for sorted cursors) require us to figure // out where a given document fits in an order to know if it's included or // not, and we don't track that information when doing oplog tailing. From 880cf37a3f101c6aa36e8e3b77b5b8901cf3893a Mon Sep 17 00:00:00 2001 From: David Glasser Date: Wed, 4 Dec 2013 12:46:47 -0800 Subject: [PATCH 144/190] New disable-oplog package --- packages/disable-oplog/.gitignore | 1 + packages/disable-oplog/package.js | 6 ++++++ packages/mongo-livedata/mongo_driver.js | 2 +- packages/mongo-livedata/package.js | 5 +++++ 4 files changed, 13 insertions(+), 1 deletion(-) create mode 100644 packages/disable-oplog/.gitignore create mode 100644 packages/disable-oplog/package.js diff --git a/packages/disable-oplog/.gitignore b/packages/disable-oplog/.gitignore new file mode 100644 index 0000000000..677a6fc263 --- /dev/null +++ b/packages/disable-oplog/.gitignore @@ -0,0 +1 @@ +.build* diff --git a/packages/disable-oplog/package.js b/packages/disable-oplog/package.js new file mode 100644 index 0000000000..c05a3cba27 --- /dev/null +++ b/packages/disable-oplog/package.js @@ -0,0 +1,6 @@ +Package.describe({ + summary: "Disables oplog tailing", + internal: true +}); + +// This package is empty; its presence is detected by mongo-livedata. 
diff --git a/packages/mongo-livedata/mongo_driver.js b/packages/mongo-livedata/mongo_driver.js index 9cd06ea03d..5fd2f7d2b6 100644 --- a/packages/mongo-livedata/mongo_driver.js +++ b/packages/mongo-livedata/mongo_driver.js @@ -160,7 +160,7 @@ MongoConnection = function (url, options) { self._docFetcher = new DocFetcher(self); self._oplogHandle = null; - if (options.oplogUrl) { + if (options.oplogUrl && !Package['disable-oplog']) { var dbNameFuture = new Future; self._withDb(function (db) { dbNameFuture.return(db.databaseName); diff --git a/packages/mongo-livedata/package.js b/packages/mongo-livedata/package.js index 1144aefaa1..f0b1f9e3d2 100644 --- a/packages/mongo-livedata/package.js +++ b/packages/mongo-livedata/package.js @@ -30,6 +30,11 @@ Package.on_use(function (api) { // Allow us to detect 'autopublish', and publish collections if it's loaded. api.use('autopublish', 'server', {weak: true}); + // Allow us to detect 'disable-oplog', which turns off oplog tailing for your + // app even if it's configured in the environment. (This package will be + // probably be removed before 1.0.) + api.use('disable-oplog', 'server', {weak: true}); + // defaultRemoteCollectionDriver gets its deployConfig from something that is // (for questionable reasons) initialized by the webapp package. api.use('webapp', 'server', {weak: true}); From 593d980ba69a37e02d470cbbe152f205913ea4d2 Mon Sep 17 00:00:00 2001 From: David Glasser Date: Wed, 4 Dec 2013 12:54:27 -0800 Subject: [PATCH 145/190] implement 'test-packages --disable-oplog' --- tools/meteor.js | 5 +++++ tools/run.js | 12 ++++++++---- 2 files changed, 13 insertions(+), 4 deletions(-) diff --git a/tools/meteor.js b/tools/meteor.js index 328a25a02b..929be58176 100644 --- a/tools/meteor.js +++ b/tools/meteor.js @@ -1120,6 +1120,10 @@ Fiber(function () { .boolean('production') .describe('production', 'Run in production mode. 
Minify and bundle CSS and JS files.') .boolean('once') // See #Once + // To ensure that QA covers both PollingObserveDriver and + // OplogObserveDriver, this option disables oplog for tests. + // (It still creates a replset, it just doesn't do oplog tailing.) + .boolean('disable-oplog') .describe('settings', 'Set optional data for Meteor.settings on the server') .usage( "Usage: meteor test-packages [--release ] [options] [package...]\n" + @@ -1201,6 +1205,7 @@ Fiber(function () { port: argv.port, minify: argv.production, once: argv.once, + disableOplog: argv['disable-oplog'], testPackages: testPackages, settingsFile: argv.settings, banner: "Tests" diff --git a/tools/run.js b/tools/run.js index 52a90ed192..cd4da15890 100644 --- a/tools/run.js +++ b/tools/run.js @@ -243,7 +243,8 @@ var startServer = function (options) { env.PORT = options.innerPort; env.MONGO_URL = options.mongoUrl; - env.MONGO_OPLOG_URL = options.oplogUrl; + if (options.oplogUrl) + env.MONGO_OPLOG_URL = options.oplogUrl; env.ROOT_URL = options.rootUrl; if (options.settings) env.METEOR_SETTINGS = options.settings; @@ -417,9 +418,12 @@ exports.run = function (context, options) { // Allow people to specify an MONGO_OPLOG_URL override. If someone specifies a // MONGO_URL but not an MONGO_OPLOG_URL, disable the oplog. If neither is // specified, use the default internal mongo oplog. - var oplogUrl = process.env.MONGO_OPLOG_URL || - (process.env.MONGO_URL ? undefined - : "mongodb://127.0.0.1:" + mongoPort + "/local"); + var oplogUrl = undefined; + if (!options.disableOplog) { + oplogUrl = process.env.MONGO_OPLOG_URL || + (process.env.MONGO_URL ? undefined + : "mongodb://127.0.0.1:" + mongoPort + "/local"); + } var firstRun = true; var serverHandle; From e63177346ac957321edeacfa8e59d802c317c390 Mon Sep 17 00:00:00 2001 From: Andrew Wilcox Date: Thu, 14 Nov 2013 12:48:05 -0500 Subject: [PATCH 146/190] Implement Meteor.server.onConnection and SessionHandle. 
--- packages/livedata/livedata_common.js | 4 + packages/livedata/livedata_server.js | 62 ++++++++++++ packages/livedata/livedata_server_tests.js | 109 +++++++++++++++++++++ packages/livedata/package.js | 1 + 4 files changed, 176 insertions(+) create mode 100644 packages/livedata/livedata_server_tests.js diff --git a/packages/livedata/livedata_common.js b/packages/livedata/livedata_common.js index 5ce89ed443..5f24c573f0 100644 --- a/packages/livedata/livedata_common.js +++ b/packages/livedata/livedata_common.js @@ -33,6 +33,10 @@ MethodInvocation = function (options) { // connection can be closed if the token is no longer valid this._setLoginToken = options._setLoginToken || function () {}; + // On the server, the session id of the connection this method call + // came in on. + this.sessionId = options.sessionId; + // Scratch data scoped to this connection (livedata_connection on the // client, livedata_session on the server). This is only used // internally, but we should have real and documented API for this diff --git a/packages/livedata/livedata_server.js b/packages/livedata/livedata_server.js index f8dfd4e6fc..4a1c74f28c 100644 --- a/packages/livedata/livedata_server.js +++ b/packages/livedata/livedata_server.js @@ -251,6 +251,30 @@ var Session = function (server, version, socket) { // we want to buffer up for when we are done rerunning subscriptions self._pendingReady = []; + // List of callbacks to call when this session is closed. + self.closeCallbacks = []; + + // The `SessionHandle` for this session, passed to + // `Meteor.server.onConnection` callbacks. 
+ self.sessionHandle = { + id: self.id, + close: function () { + self.server._destroySession(self); + }, + onClose: function (fn) { + fn = Meteor.bindEnvironment( + fn, + function (err) { + Meteor._debug( + "Exception in connection session onClose callback", + err && err.stack + ); + } + ); + self.closeCallbacks.push(fn); + } + }; + socket.send(stringifyDDP({msg: 'connected', session: self.id})); // On initial connect, spin up all the universal publishers. @@ -373,6 +397,10 @@ _.extend(Session.prototype, { // Drop the merge box data immediately. self.collectionViews = {}; self.inQueue = null; + // XXX do we need to use Meteor.defer here as well? + _.each(self.closeCallbacks, function (callback) { + callback(); + }); Package.facts && Package.facts.Facts.incrementServerFact( "livedata", "sessions", -1); }, @@ -537,6 +565,7 @@ _.extend(Session.prototype, { setUserId: setUserId, _setLoginToken: setLoginToken, unblock: unblock, + sessionId: self.id, sessionData: self.sessionData }); try { @@ -983,6 +1012,10 @@ _.extend(Subscription.prototype, { Server = function () { var self = this; + // List of callbacks to call when a new connection comes in to the + // server and completes DDP version negotiation. 
+ self.connectionCallbacks = []; + self.publish_handlers = {}; self.universal_publish_handlers = []; @@ -1064,6 +1097,28 @@ Server = function () { _.extend(Server.prototype, { + onConnection: function (fn) { + var self = this; + + fn = Meteor.bindEnvironment( + fn, + function (err) { + Meteor._debug( + "Exception in Meteor.server.onConnection callback", + err && err.stack + ); + } + ); + + self.connectionCallbacks.push(fn); + + return { + stop: function () { + self.connectionCallbacks = _.without(self.connectionCallbacks, fn); + } + }; + }, + _handleConnect: function (socket, msg) { var self = this; // In the future, handle session resumption: something like: @@ -1074,6 +1129,9 @@ _.extend(Server.prototype, { // Creating a new session socket._meteorSession = new Session(self, version, socket); self.sessions[socket._meteorSession.id] = socket._meteorSession; + _.each(self.connectionCallbacks, function (callback) { + callback(socket._meteorSession.sessionHandle); + }); } else if (!msg.version) { // connect message without a version. This means an old (pre-pre1) // client is trying to connect. If we just disconnect the @@ -1241,6 +1299,7 @@ _.extend(Server.prototype, { throw new Error("Can't call _setLoginToken on a server " + "initiated method call"); }; + var sessionId = null; var currentInvocation = DDP._CurrentInvocation.get(); if (currentInvocation) { userId = currentInvocation.userId; @@ -1250,6 +1309,7 @@ _.extend(Server.prototype, { setLoginToken = function (newToken) { currentInvocation._setLoginToken(newToken); }; + sessionId = currentInvocation.sessionId; } var invocation = new MethodInvocation({ @@ -1257,6 +1317,8 @@ _.extend(Server.prototype, { userId: userId, setUserId: setUserId, _setLoginToken: setLoginToken, + sessionId: sessionId, + // XXX the Server object doesn't have a `sessionData` field. 
sessionData: self.sessionData }); try { diff --git a/packages/livedata/livedata_server_tests.js b/packages/livedata/livedata_server_tests.js new file mode 100644 index 0000000000..2d00e4f4d6 --- /dev/null +++ b/packages/livedata/livedata_server_tests.js @@ -0,0 +1,109 @@ +Tinytest.addAsync( + "livedata server - sessionHandle.onClose()", + function (test, onComplete) { + var connection; + var callbackHandle = Meteor.server.onConnection(function (sessionHandle) { + callbackHandle.stop(); + test.isTrue(_.isString(sessionHandle.id), "sessionHandle.id exists and is a string"); + // On the server side, wait for the connection to be closed. + sessionHandle.onClose(function () { + onComplete(); + }); + // Close the connection from the client. + connection.disconnect(); + }); + connection = DDP.connect(Meteor.absoluteUrl()); + } +); + +Tinytest.addAsync( + "livedata server - sessionHandle.close()", + function (test, onComplete) { + + // XXX I don't understand why using `bindEnvironment` here is + // necessary, but I get "Meteor code must always run within a + // Fiber. Try wrapping callbacks that you pass to non-Meteor + // libraries with Meteor.bindEnvironment" if I don't. + done = Meteor.bindEnvironment( + function () { + Meteor.defer(onComplete); + }, + function (err) { + Meteor._debug("Exception thrown from Meteor.defer", err && err.stack); + } + ); + + var connection; + var callbackHandle = Meteor.server.onConnection(function (sessionHandle) { + callbackHandle.stop(); + // Wait for connection to be closed on the client side. + Deps.autorun(function (computation) { + if (computation.firstRun) + test.isTrue(connection.status().connected); + if (! connection.status().connected) { + computation.stop(); + // Avoid reconnecting from the client. + connection.disconnect(); + done(); + } + }); + // Close the connection from the server. 
+ sessionHandle.close(); + }); + connection = DDP.connect(Meteor.absoluteUrl()); + } +); + + +var innerCalled = null; + +Meteor.methods({ + livedata_server_test_inner: function () { + var sessionId = this.sessionId; + Meteor.defer(function () { + innerCalled(sessionId); + }); + }, + + livedata_server_test_outer: function () { + Meteor.call('livedata_server_test_inner'); + } +}); + + +Tinytest.addAsync( + "livedata server - sessionId in method invocation", + function (test, onComplete) { + var sessionId; + var callbackHandle = Meteor.server.onConnection(function (sessionHandle) { + callbackHandle.stop(); + sessionId = sessionHandle.id; + }); + innerCalled = function (methodSessionId) { + test.equal(methodSessionId, sessionId); + onComplete(); + }; + var connection = DDP.connect(Meteor.absoluteUrl()); + connection.call('livedata_server_test_inner'); + connection.disconnect(); + } +); + + +Tinytest.addAsync( + "livedata server - sessionId in nested method invocation", + function (test, onComplete) { + var sessionId; + var callbackHandle = Meteor.server.onConnection(function (sessionHandle) { + callbackHandle.stop(); + sessionId = sessionHandle.id; + }); + innerCalled = function (methodSessionId) { + test.equal(methodSessionId, sessionId); + onComplete(); + }; + var connection = DDP.connect(Meteor.absoluteUrl()); + connection.call('livedata_server_test_outer'); + connection.disconnect(); + } +); diff --git a/packages/livedata/package.js b/packages/livedata/package.js index 1de163f06d..2b0011c9a8 100644 --- a/packages/livedata/package.js +++ b/packages/livedata/package.js @@ -64,6 +64,7 @@ Package.on_test(function (api) { api.use('test-helpers', ['client', 'server']); api.use(['underscore', 'tinytest', 'random', 'deps', 'minimongo']); + api.add_files('livedata_server_tests.js', 'server'); api.add_files('livedata_connection_tests.js', ['client', 'server']); api.add_files('livedata_tests.js', ['client', 'server']); api.add_files('livedata_test_service.js', ['client', 
'server']); From e7f28645cdfb6d639bf5f179f9d2b40c2d89e39f Mon Sep 17 00:00:00 2001 From: Andrew Wilcox Date: Fri, 15 Nov 2013 09:36:27 -0500 Subject: [PATCH 147/190] Remove remaining references to login token from livedata. --- packages/accounts-base/accounts_server.js | 77 +++++++++++++++- packages/accounts-password/password_server.js | 12 +-- packages/livedata/livedata_common.js | 11 --- packages/livedata/livedata_server.js | 90 +------------------ 4 files changed, 82 insertions(+), 108 deletions(-) diff --git a/packages/accounts-base/accounts_server.js b/packages/accounts-base/accounts_server.js index 1df24b10f4..3a36d5b696 100644 --- a/packages/accounts-base/accounts_server.js +++ b/packages/accounts-base/accounts_server.js @@ -1,3 +1,72 @@ +// token -> list of session ids +var sessionsByLoginToken = {}; + +// Remove the session from the list of open sessions for the token. +var removeSessionFromToken = function (token, sessionId) { + sessionsByLoginToken[token] = _.without( + sessionsByLoginToken[token], + sessionId + ); + if (_.isEmpty(sessionsByLoginToken[token])) + delete sessionsByLoginToken[token]; +}; + +var loginTokenChanged = function (sessionId, newToken, oldToken) { + var self = this; + if (oldToken) { + removeSessionFromToken(oldToken, sessionId); + } + if (newToken) { + if (! 
_.has(sessionsByLoginToken, newToken)) + sessionsByLoginToken[newToken] = []; + sessionsByLoginToken[newToken].push(sessionId); + } +}; + + +Accounts.getLoginToken = function (methodInvocation) { + return methodInvocation._sessionData.loginToken; +}; + +Accounts.setLoginToken = function (methodInvocation, newToken) { + var oldToken = methodInvocation._sessionData.loginToken; + methodInvocation._sessionData.loginToken = newToken; + loginTokenChanged(methodInvocation.sessionId, newToken, oldToken); +}; + + +// sessionId -> SessionHandle +var sessionHandles = {}; + +Meteor.server.onConnection(function (sessionHandle) { + var sessionId = sessionHandle.id; + sessionHandles[sessionId] = sessionHandle; + sessionHandle.onClose(function () { + var token = sessionHandle._sessionData.loginToken; + if (token) + removeSessionFromToken(token, sessionId); + delete sessionHandles[sessionId]; + }); +}); + + + +// Close all open sessions associated with any of the tokens in +// `tokens`. +var closeSessionsForTokens = function (tokens) { + _.each(tokens, function (token) { + if (_.has(sessionsByLoginToken, token)) { + // closing a session triggers the onClose callback which + // modifies sessionsByLoginToken, so we clone it. 
+ _.each(EJSON.clone(sessionsByLoginToken[token]), function (sessionId) { + sessionHandles[sessionId] && sessionHandles[sessionId].close(); + }); + } + }); +}; + + + /// /// CURRENT USER /// @@ -78,14 +147,14 @@ Meteor.methods({ var result = tryAllLoginHandlers(options); if (result !== null) { this.setUserId(result.id); - this._setLoginToken(result.token); + Accounts.setLoginToken(this, result.token); } return result; }, logout: function() { - var token = this._getLoginToken(); - this._setLoginToken(null); + var token = Accounts.getLoginToken(this); + Accounts.setLoginToken(this, null); if (token && this.userId) removeLoginToken(this.userId, token); this.setUserId(null); @@ -646,7 +715,7 @@ Meteor.startup(function () { /// var closeTokensForUser = function (userTokens) { - Meteor.server._closeAllForTokens(_.map(userTokens, function (token) { + closeSessionsForTokens(_.map(userTokens, function (token) { return token.token; })); }; diff --git a/packages/accounts-password/password_server.js b/packages/accounts-password/password_server.js index f85676c813..6b88e46b7c 100644 --- a/packages/accounts-password/password_server.js +++ b/packages/accounts-password/password_server.js @@ -320,8 +320,8 @@ Meteor.methods({resetPassword: function (token, newVerifier) { // logged in as. Make sure to avoid logging ourselves out if this // happens. But also make sure not to leave the connection in a state // of having a bad token set if things fail. - var oldToken = this._getLoginToken(); - this._setLoginToken(null); + var oldToken = Accounts.getLoginToken(this); + Accounts.setLoginToken(this, null); try { // Update the user record by: @@ -338,11 +338,11 @@ Meteor.methods({resetPassword: function (token, newVerifier) { }); } catch (err) { // update failed somehow. reset to old token. 
- this._setLoginToken(oldToken); + Accounts.setLoginToken(this, oldToken); throw err; } - this._setLoginToken(stampedLoginToken.token); + Accounts.setLoginToken(this, stampedLoginToken.token); this.setUserId(user._id); return { @@ -436,7 +436,7 @@ Meteor.methods({verifyEmail: function (token) { $push: {'services.resume.loginTokens': stampedLoginToken}}); this.setUserId(user._id); - this._setLoginToken(stampedLoginToken.token); + Accounts.setLoginToken(this, stampedLoginToken.token); return { token: stampedLoginToken.token, tokenExpires: Accounts._tokenExpiration(stampedLoginToken.when), @@ -515,7 +515,7 @@ Meteor.methods({createUser: function (options) { // client gets logged in as the new user afterwards. this.setUserId(result.id); - this._setLoginToken(result.token); + Accounts.setLoginToken(this, result.token); return result; }}); diff --git a/packages/livedata/livedata_common.js b/packages/livedata/livedata_common.js index 5f24c573f0..147e85f8ba 100644 --- a/packages/livedata/livedata_common.js +++ b/packages/livedata/livedata_common.js @@ -29,10 +29,6 @@ MethodInvocation = function (options) { // reruns subscriptions this._setUserId = options.setUserId || function () {}; - // used for associating the connection with a login token so that the - // connection can be closed if the token is no longer valid - this._setLoginToken = options._setLoginToken || function () {}; - // On the server, the session id of the connection this method call // came in on. 
this.sessionId = options.sessionId; @@ -57,13 +53,6 @@ _.extend(MethodInvocation.prototype, { self.userId = userId; self._setUserId(userId); }, - _setLoginToken: function (token) { - this._setLoginToken(token); - this._sessionData.loginToken = token; - }, - _getLoginToken: function (token) { - return this._sessionData.loginToken; - } }); parseDDP = function (stringMessage) { diff --git a/packages/livedata/livedata_server.js b/packages/livedata/livedata_server.js index 4a1c74f28c..f9e93a5f23 100644 --- a/packages/livedata/livedata_server.js +++ b/packages/livedata/livedata_server.js @@ -272,7 +272,8 @@ var Session = function (server, version, socket) { } ); self.closeCallbacks.push(fn); - } + }, + _sessionData: self.sessionData }; socket.send(stringifyDDP({msg: 'connected', @@ -389,8 +390,7 @@ _.extend(Session.prototype, { } Meteor.defer(function () { // stop callbacks can yield, so we defer this on destroy. - // see also _closeAllForTokens and its desire to destroy things in a loop. - // that said, sub._isDeactivated() detects that we set inQueue to null and + // sub._isDeactivated() detects that we set inQueue to null and // treats it as semi-deactivated (it will ignore incoming callbacks, etc). self._deactivateAllSubscriptions(); }); @@ -555,15 +555,10 @@ _.extend(Session.prototype, { self._setUserId(userId); }; - var setLoginToken = function (newToken) { - self._setLoginToken(newToken); - }; - var invocation = new MethodInvocation({ isSimulation: false, userId: self.userId, setUserId: setUserId, - _setLoginToken: setLoginToken, unblock: unblock, sessionId: self.id, sessionData: self.sessionData @@ -618,19 +613,6 @@ _.extend(Session.prototype, { }); }, - // XXX This mixes accounts concerns (login tokens) into livedata, which is not - // ideal. 
Eventually we'll have an API that allows accounts to keep track of - // which connections are associated with tokens and close them when necessary, - // rather than the current state of things where accounts tells livedata which - // connections are associated with which tokens, and when to close connections - // associated with a given token. - _setLoginToken: function (newToken) { - var self = this; - var oldToken = self.sessionData.loginToken; - self.sessionData.loginToken = newToken; - self.server._loginTokenChanged(self, newToken, oldToken); - }, - // Sets the current user id in all appropriate contexts and reruns // all subscriptions _setUserId: function(userId) { @@ -1023,18 +1005,6 @@ Server = function () { self.sessions = {}; // map from id to session - // Keeps track of the open connections associated with particular login - // tokens. Used for logging out all a user's open connections, expiring login - // tokens, etc. - // XXX This mixes accounts concerns (login tokens) into livedata, which is not - // ideal. Eventually we'll have an API that allows accounts to keep track of - // which connections are associated with tokens and close them when necessary, - // rather than the current state of things where accounts tells livedata which - // connections are associated with which tokens, and when to close connections - // associated with a given token. 
- self.sessionsByLoginToken = {}; - - self.stream_server = new StreamServer; self.stream_server.register(function (socket) { @@ -1228,15 +1198,6 @@ _.extend(Server.prototype, { _destroySession: function (session) { var self = this; delete self.sessions[session.id]; - if (session.sessionData.loginToken) { - self.sessionsByLoginToken[session.sessionData.loginToken] = _.without( - self.sessionsByLoginToken[session.sessionData.loginToken], - session.id - ); - if (_.isEmpty(self.sessionsByLoginToken[session.sessionData.loginToken])) { - delete self.sessionsByLoginToken[session.sessionData.loginToken]; - } - } session.destroy(); }, @@ -1294,11 +1255,6 @@ _.extend(Server.prototype, { var setUserId = function() { throw new Error("Can't call setUserId on a server initiated method call"); }; - var setLoginToken = function () { - // XXX is this correct? - throw new Error("Can't call _setLoginToken on a server " + - "initiated method call"); - }; var sessionId = null; var currentInvocation = DDP._CurrentInvocation.get(); if (currentInvocation) { @@ -1306,9 +1262,6 @@ _.extend(Server.prototype, { setUserId = function(userId) { currentInvocation.setUserId(userId); }; - setLoginToken = function (newToken) { - currentInvocation._setLoginToken(newToken); - }; sessionId = currentInvocation.sessionId; } @@ -1316,7 +1269,6 @@ _.extend(Server.prototype, { isSimulation: false, userId: userId, setUserId: setUserId, - _setLoginToken: setLoginToken, sessionId: sessionId, // XXX the Server object doesn't have a `sessionData` field. sessionData: self.sessionData @@ -1343,42 +1295,6 @@ _.extend(Server.prototype, { if (exception) throw exception; return result; - }, - - _loginTokenChanged: function (session, newToken, oldToken) { - var self = this; - if (oldToken) { - // Remove the session from the list of open sessions for the old token. 
- self.sessionsByLoginToken[oldToken] = _.without( - self.sessionsByLoginToken[oldToken], - session.id - ); - if (_.isEmpty(self.sessionsByLoginToken[oldToken])) - delete self.sessionsByLoginToken[oldToken]; - } - if (newToken) { - if (! _.has(self.sessionsByLoginToken, newToken)) - self.sessionsByLoginToken[newToken] = []; - self.sessionsByLoginToken[newToken].push(session.id); - } - }, - - // Close all open sessions associated with any of the tokens in - // `tokens`. - _closeAllForTokens: function (tokens) { - var self = this; - _.each(tokens, function (token) { - if (_.has(self.sessionsByLoginToken, token)) { - // _destroySession modifies sessionsByLoginToken, so we clone it. - _.each(EJSON.clone(self.sessionsByLoginToken[token]), function (sessionId) { - // Destroy session and remove from self.sessions. - var session = self.sessions[sessionId]; - if (session) { - self._destroySession(session); - } - }); - } - }); } }); From 2ae1ea495f8cc649afa2454b2f5fbd41a42b5fcc Mon Sep 17 00:00:00 2001 From: Andrew Wilcox Date: Fri, 15 Nov 2013 09:52:18 -0500 Subject: [PATCH 148/190] Wrap calling session close callback in Meteor.defer, so that a bunch of connections can be closed without waiting for the close callbacks on one connection to return before closing the other connections. Underscore internal Session field `_closeCallbacks`. Update comment to explain the cause of the problem requiring the use of `Meteor.bindEnvironment` with Meteor's public API. 
--- packages/livedata/livedata_server.js | 11 ++++++----- packages/livedata/livedata_server_tests.js | 10 ++++++---- 2 files changed, 12 insertions(+), 9 deletions(-) diff --git a/packages/livedata/livedata_server.js b/packages/livedata/livedata_server.js index f9e93a5f23..0f8b9ee47d 100644 --- a/packages/livedata/livedata_server.js +++ b/packages/livedata/livedata_server.js @@ -252,7 +252,7 @@ var Session = function (server, version, socket) { self._pendingReady = []; // List of callbacks to call when this session is closed. - self.closeCallbacks = []; + self._closeCallbacks = []; // The `SessionHandle` for this session, passed to // `Meteor.server.onConnection` callbacks. @@ -271,7 +271,7 @@ var Session = function (server, version, socket) { ); } ); - self.closeCallbacks.push(fn); + self._closeCallbacks.push(fn); }, _sessionData: self.sessionData }; @@ -397,9 +397,10 @@ _.extend(Session.prototype, { // Drop the merge box data immediately. self.collectionViews = {}; self.inQueue = null; - // XXX do we need to use Meteor.defer here as well? - _.each(self.closeCallbacks, function (callback) { - callback(); + Meteor.defer(function () { + _.each(self._closeCallbacks, function (callback) { + callback(); + }); }); Package.facts && Package.facts.Facts.incrementServerFact( "livedata", "sessions", -1); diff --git a/packages/livedata/livedata_server_tests.js b/packages/livedata/livedata_server_tests.js index 2d00e4f4d6..dd868521ef 100644 --- a/packages/livedata/livedata_server_tests.js +++ b/packages/livedata/livedata_server_tests.js @@ -20,10 +20,12 @@ Tinytest.addAsync( "livedata server - sessionHandle.close()", function (test, onComplete) { - // XXX I don't understand why using `bindEnvironment` here is - // necessary, but I get "Meteor code must always run within a - // Fiber. Try wrapping callbacks that you pass to non-Meteor - // libraries with Meteor.bindEnvironment" if I don't. 
+ // XXX stream_client_nodejs.js should not be requiring a developer + // to use Meteor.bindEnvironment themselves when using Meteor's + // public API. The problem is that the computation rerunning is + // triggered by the close event firing on the stream's connection + // object, and that callback in stream_client_nodejs.js is not + // wrapped in a Meteor.bindEnvironment for us. done = Meteor.bindEnvironment( function () { Meteor.defer(onComplete); From 0e4ba1580de091e11c6c82db5c8d1de1c85f8d27 Mon Sep 17 00:00:00 2001 From: Andrew Wilcox Date: Fri, 15 Nov 2013 15:56:33 -0500 Subject: [PATCH 149/190] Underscore internal `_getLoginToken` and `_setLoginToken`. `closeSessionsForTokens` doesn't need to clone `sessionsByLoginToken` because `onClose` callbacks are deferred. Simplify `closeTokensForUser` by using `_.pluck`. --- packages/accounts-base/accounts_server.js | 18 +++++++----------- packages/accounts-password/password_server.js | 12 ++++++------ 2 files changed, 13 insertions(+), 17 deletions(-) diff --git a/packages/accounts-base/accounts_server.js b/packages/accounts-base/accounts_server.js index 3a36d5b696..50ecfac581 100644 --- a/packages/accounts-base/accounts_server.js +++ b/packages/accounts-base/accounts_server.js @@ -24,11 +24,11 @@ var loginTokenChanged = function (sessionId, newToken, oldToken) { }; -Accounts.getLoginToken = function (methodInvocation) { +Accounts._getLoginToken = function (methodInvocation) { return methodInvocation._sessionData.loginToken; }; -Accounts.setLoginToken = function (methodInvocation, newToken) { +Accounts._setLoginToken = function (methodInvocation, newToken) { var oldToken = methodInvocation._sessionData.loginToken; methodInvocation._sessionData.loginToken = newToken; loginTokenChanged(methodInvocation.sessionId, newToken, oldToken); @@ -56,9 +56,7 @@ Meteor.server.onConnection(function (sessionHandle) { var closeSessionsForTokens = function (tokens) { _.each(tokens, function (token) { if (_.has(sessionsByLoginToken, 
token)) { - // closing a session triggers the onClose callback which - // modifies sessionsByLoginToken, so we clone it. - _.each(EJSON.clone(sessionsByLoginToken[token]), function (sessionId) { + _.each(sessionsByLoginToken[token], function (sessionId) { sessionHandles[sessionId] && sessionHandles[sessionId].close(); }); } @@ -147,14 +145,14 @@ Meteor.methods({ var result = tryAllLoginHandlers(options); if (result !== null) { this.setUserId(result.id); - Accounts.setLoginToken(this, result.token); + Accounts._setLoginToken(this, result.token); } return result; }, logout: function() { - var token = Accounts.getLoginToken(this); - Accounts.setLoginToken(this, null); + var token = Accounts._getLoginToken(this); + Accounts._setLoginToken(this, null); if (token && this.userId) removeLoginToken(this.userId, token); this.setUserId(null); @@ -715,9 +713,7 @@ Meteor.startup(function () { /// var closeTokensForUser = function (userTokens) { - closeSessionsForTokens(_.map(userTokens, function (token) { - return token.token; - })); + closeSessionsForTokens(_.pluck(userTokens, "token")); }; // Like _.difference, but uses EJSON.equals to compute which values to return. diff --git a/packages/accounts-password/password_server.js b/packages/accounts-password/password_server.js index 6b88e46b7c..5488ef2914 100644 --- a/packages/accounts-password/password_server.js +++ b/packages/accounts-password/password_server.js @@ -320,8 +320,8 @@ Meteor.methods({resetPassword: function (token, newVerifier) { // logged in as. Make sure to avoid logging ourselves out if this // happens. But also make sure not to leave the connection in a state // of having a bad token set if things fail. 
- var oldToken = Accounts.getLoginToken(this); - Accounts.setLoginToken(this, null); + var oldToken = Accounts._getLoginToken(this); + Accounts._setLoginToken(this, null); try { // Update the user record by: @@ -338,11 +338,11 @@ Meteor.methods({resetPassword: function (token, newVerifier) { }); } catch (err) { // update failed somehow. reset to old token. - Accounts.setLoginToken(this, oldToken); + Accounts._setLoginToken(this, oldToken); throw err; } - Accounts.setLoginToken(this, stampedLoginToken.token); + Accounts._setLoginToken(this, stampedLoginToken.token); this.setUserId(user._id); return { @@ -436,7 +436,7 @@ Meteor.methods({verifyEmail: function (token) { $push: {'services.resume.loginTokens': stampedLoginToken}}); this.setUserId(user._id); - Accounts.setLoginToken(this, stampedLoginToken.token); + Accounts._setLoginToken(this, stampedLoginToken.token); return { token: stampedLoginToken.token, tokenExpires: Accounts._tokenExpiration(stampedLoginToken.when), @@ -515,7 +515,7 @@ Meteor.methods({createUser: function (options) { // client gets logged in as the new user afterwards. this.setUserId(result.id); - Accounts.setLoginToken(this, result.token); + Accounts._setLoginToken(this, result.token); return result; }}); From 92df70cb4887cbe58a29b3137dfd20d88ea85f99 Mon Sep 17 00:00:00 2001 From: Andrew Wilcox Date: Tue, 19 Nov 2013 11:35:46 -0500 Subject: [PATCH 150/190] Move login token code into the "RECONNECT TOKENS" section. --- packages/accounts-base/accounts_server.js | 135 +++++++++++----------- 1 file changed, 68 insertions(+), 67 deletions(-) diff --git a/packages/accounts-base/accounts_server.js b/packages/accounts-base/accounts_server.js index 50ecfac581..a828a87c4b 100644 --- a/packages/accounts-base/accounts_server.js +++ b/packages/accounts-base/accounts_server.js @@ -1,70 +1,3 @@ -// token -> list of session ids -var sessionsByLoginToken = {}; - -// Remove the session from the list of open sessions for the token. 
-var removeSessionFromToken = function (token, sessionId) { - sessionsByLoginToken[token] = _.without( - sessionsByLoginToken[token], - sessionId - ); - if (_.isEmpty(sessionsByLoginToken[token])) - delete sessionsByLoginToken[token]; -}; - -var loginTokenChanged = function (sessionId, newToken, oldToken) { - var self = this; - if (oldToken) { - removeSessionFromToken(oldToken, sessionId); - } - if (newToken) { - if (! _.has(sessionsByLoginToken, newToken)) - sessionsByLoginToken[newToken] = []; - sessionsByLoginToken[newToken].push(sessionId); - } -}; - - -Accounts._getLoginToken = function (methodInvocation) { - return methodInvocation._sessionData.loginToken; -}; - -Accounts._setLoginToken = function (methodInvocation, newToken) { - var oldToken = methodInvocation._sessionData.loginToken; - methodInvocation._sessionData.loginToken = newToken; - loginTokenChanged(methodInvocation.sessionId, newToken, oldToken); -}; - - -// sessionId -> SessionHandle -var sessionHandles = {}; - -Meteor.server.onConnection(function (sessionHandle) { - var sessionId = sessionHandle.id; - sessionHandles[sessionId] = sessionHandle; - sessionHandle.onClose(function () { - var token = sessionHandle._sessionData.loginToken; - if (token) - removeSessionFromToken(token, sessionId); - delete sessionHandles[sessionId]; - }); -}); - - - -// Close all open sessions associated with any of the tokens in -// `tokens`. -var closeSessionsForTokens = function (tokens) { - _.each(tokens, function (token) { - if (_.has(sessionsByLoginToken, token)) { - _.each(sessionsByLoginToken[token], function (sessionId) { - sessionHandles[sessionId] && sessionHandles[sessionId].close(); - }); - } - }); -}; - - - /// /// CURRENT USER /// @@ -211,6 +144,74 @@ Meteor.methods({ /// /// support reconnecting using a meteor login token +// token -> list of session ids +var sessionsByLoginToken = {}; + +// Remove the session from the list of open sessions for the token. 
+var removeSessionFromToken = function (token, sessionId) { + sessionsByLoginToken[token] = _.without( + sessionsByLoginToken[token], + sessionId + ); + if (_.isEmpty(sessionsByLoginToken[token])) + delete sessionsByLoginToken[token]; +}; + +var loginTokenChanged = function (sessionId, newToken, oldToken) { + var self = this; + if (oldToken) { + removeSessionFromToken(oldToken, sessionId); + } + if (newToken) { + if (! _.has(sessionsByLoginToken, newToken)) + sessionsByLoginToken[newToken] = []; + sessionsByLoginToken[newToken].push(sessionId); + } +}; + + +Accounts._getLoginToken = function (methodInvocation) { + return methodInvocation._sessionData.loginToken; +}; + +Accounts._setLoginToken = function (methodInvocation, newToken) { + var oldToken = methodInvocation._sessionData.loginToken; + methodInvocation._sessionData.loginToken = newToken; + loginTokenChanged(methodInvocation.sessionId, newToken, oldToken); +}; + + +// sessionId -> SessionHandle +// XXX Wouldn't be necessary if there was an API to get the session or +// session handle from a session id. +var sessionHandles = {}; + +Meteor.server.onConnection(function (sessionHandle) { + var sessionId = sessionHandle.id; + sessionHandles[sessionId] = sessionHandle; + sessionHandle.onClose(function () { + var token = sessionHandle._sessionData.loginToken; + if (token) + removeSessionFromToken(token, sessionId); + delete sessionHandles[sessionId]; + }); +}); + + + +// Close all open sessions associated with any of the tokens in +// `tokens`. +var closeSessionsForTokens = function (tokens) { + _.each(tokens, function (token) { + if (_.has(sessionsByLoginToken, token)) { + _.each(sessionsByLoginToken[token], function (sessionId) { + sessionHandles[sessionId] && sessionHandles[sessionId].close(); + }); + } + }); +}; + + // Login handler for resume tokens. 
Accounts.registerLoginHandler(function(options) { if (!options.resume) From 2f1ddc0e816e3082485125235f17420ca4f572f9 Mon Sep 17 00:00:00 2001 From: Andrew Wilcox Date: Tue, 19 Nov 2013 11:49:53 -0500 Subject: [PATCH 151/190] Closing a session should be idempotent. --- packages/livedata/livedata_server.js | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/packages/livedata/livedata_server.js b/packages/livedata/livedata_server.js index 0f8b9ee47d..234910513f 100644 --- a/packages/livedata/livedata_server.js +++ b/packages/livedata/livedata_server.js @@ -259,7 +259,7 @@ var Session = function (server, version, socket) { self.sessionHandle = { id: self.id, close: function () { - self.server._destroySession(self); + self.server._closeSession(self); }, onClose: function (fn) { fn = Meteor.bindEnvironment( @@ -1059,7 +1059,7 @@ Server = function () { socket.on('close', function () { if (socket._meteorSession) { Fiber(function () { - self._destroySession(socket._meteorSession); + self._closeSession(socket._meteorSession); }).run(); } }); @@ -1196,10 +1196,12 @@ _.extend(Server.prototype, { } }, - _destroySession: function (session) { + _closeSession: function (session) { var self = this; - delete self.sessions[session.id]; - session.destroy(); + if (self.sessions[session.id]) { + delete self.sessions[session.id]; + session.destroy(); + } }, methods: function (methods) { From e7ef2a2406a5c0c8f747dfe62199d93760497134 Mon Sep 17 00:00:00 2001 From: Andrew Wilcox Date: Tue, 19 Nov 2013 12:11:53 -0500 Subject: [PATCH 152/190] Space out code for readability. --- packages/livedata/livedata_server.js | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/packages/livedata/livedata_server.js b/packages/livedata/livedata_server.js index 234910513f..92574d89df 100644 --- a/packages/livedata/livedata_server.js +++ b/packages/livedata/livedata_server.js @@ -384,26 +384,31 @@ _.extend(Session.prototype, { // down. 
If a socket was attached, close it. destroy: function () { var self = this; + if (self.socket) { self.socket.close(); self.socket._meteorSession = null; } + + // Drop the merge box data immediately. + self.collectionViews = {}; + self.inQueue = null; + + Package.facts && Package.facts.Facts.incrementServerFact( + "livedata", "sessions", -1); + Meteor.defer(function () { // stop callbacks can yield, so we defer this on destroy. // sub._isDeactivated() detects that we set inQueue to null and // treats it as semi-deactivated (it will ignore incoming callbacks, etc). self._deactivateAllSubscriptions(); - }); - // Drop the merge box data immediately. - self.collectionViews = {}; - self.inQueue = null; - Meteor.defer(function () { + + // Defer calling the close callbacks, so that the caller closing + // the session isn't waiting for all the callbacks to complete. _.each(self._closeCallbacks, function (callback) { callback(); }); }); - Package.facts && Package.facts.Facts.incrementServerFact( - "livedata", "sessions", -1); }, // Send a message (doing nothing if no socket is connected right now.) From ccaeef516fcd7aa4a97b5446f1d06450d995fe37 Mon Sep 17 00:00:00 2001 From: Andrew Wilcox Date: Tue, 19 Nov 2013 15:40:58 -0500 Subject: [PATCH 153/190] Run nodejs stream client callbacks in a fiber. Poll instead of using Deps.autorun in server test. When polling the client connection, tests don't have a chance to disconnect before the stream client automatically reconnects, so add an option to disable retries for testing. Callers of `Meteor.bindEnvironment` often have the `onException` argument print the exception stack trace. To allow for less code duplication, let the argument be a string providing the context (e.g. "connection closed callback"), and then on an exception print the context and the exception stack trace. 
--- packages/autoupdate/package.js | 1 + packages/livedata/livedata_connection.js | 7 +- packages/livedata/livedata_server_tests.js | 74 +++++++++++----------- packages/livedata/package.js | 1 + packages/livedata/stream_client_common.js | 11 ++-- packages/livedata/stream_client_nodejs.js | 55 ++++++++++++---- packages/livedata/stream_client_sockjs.js | 5 +- packages/livedata/stream_client_tests.js | 17 +++++ packages/meteor/dynamics_browser.js | 11 +++- packages/meteor/dynamics_nodejs.js | 18 +++++- 10 files changed, 137 insertions(+), 63 deletions(-) create mode 100644 packages/livedata/stream_client_tests.js diff --git a/packages/autoupdate/package.js b/packages/autoupdate/package.js index ce152a2c96..e350789346 100644 --- a/packages/autoupdate/package.js +++ b/packages/autoupdate/package.js @@ -6,6 +6,7 @@ Package.on_use(function (api) { api.use('webapp', 'server'); api.use('deps', 'client'); api.use(['livedata', 'mongo-livedata'], ['client', 'server']); + api.use('deps', 'client'); api.use('reload', 'client', {weak: true}); api.export('Autoupdate'); diff --git a/packages/livedata/livedata_connection.js b/packages/livedata/livedata_connection.js index 81f05de24c..8667a18022 100644 --- a/packages/livedata/livedata_connection.js +++ b/packages/livedata/livedata_connection.js @@ -18,7 +18,8 @@ var Connection = function (url, options) { }, // These options are only for testing. 
reloadWithOutstanding: false, - supportedDDPVersions: SUPPORTED_DDP_VERSIONS + supportedDDPVersions: SUPPORTED_DDP_VERSIONS, + retry: true }, options); // If set, called when we reconnect, queuing method calls _before_ the @@ -30,7 +31,9 @@ var Connection = function (url, options) { if (typeof url === "object") { self._stream = url; } else { - self._stream = new LivedataTest.ClientStream(url); + self._stream = new LivedataTest.ClientStream(url, { + retry: options.retry + }); } self._lastSessionId = null; diff --git a/packages/livedata/livedata_server_tests.js b/packages/livedata/livedata_server_tests.js index dd868521ef..b8629f4b73 100644 --- a/packages/livedata/livedata_server_tests.js +++ b/packages/livedata/livedata_server_tests.js @@ -1,3 +1,5 @@ +var Fiber = Npm.require('fibers'); + Tinytest.addAsync( "livedata server - sessionHandle.onClose()", function (test, onComplete) { @@ -16,45 +18,43 @@ Tinytest.addAsync( } ); -Tinytest.addAsync( - "livedata server - sessionHandle.close()", - function (test, onComplete) { +// like pollUntil but doesn't have to be called from testAsyncMulti. +var poll = function (test, onComplete, fn) { + var timeout = 10000; + var step = 200; + var start = (new Date()).valueOf(); + var helper = function () { + if (fn()) { + test.ok(); + onComplete(); + return; + } + if (start + timeout < (new Date()).valueOf()) { + test.fail(); + onComplete(); + return; + } + Meteor.setTimeout(helper, step); + }; + helper(); +}; + +Tinytest.addAsync("livedata server - sessionHandle.close()", function (test, onComplete) { + var connection; + var callbackHandle = Meteor.server.onConnection(function (sessionHandle) { + callbackHandle.stop(); - // XXX stream_client_nodejs.js should not be requiring a developer - // to use Meteor.bindEnvironment themselves when using Meteor's - // public API. 
The problem is that the computation rerunning is - // triggered by the close event firing on the stream's connection - // object, and that callback in stream_client_nodejs.js is not - // wrapped in a Meteor.bindEnvironment for us. - done = Meteor.bindEnvironment( - function () { - Meteor.defer(onComplete); - }, - function (err) { - Meteor._debug("Exception thrown from Meteor.defer", err && err.stack); - } - ); - - var connection; - var callbackHandle = Meteor.server.onConnection(function (sessionHandle) { - callbackHandle.stop(); - // Wait for connection to be closed on the client side. - Deps.autorun(function (computation) { - if (computation.firstRun) - test.isTrue(connection.status().connected); - if (! connection.status().connected) { - computation.stop(); - // Avoid reconnecting from the client. - connection.disconnect(); - done(); - } - }); - // Close the connection from the server. - sessionHandle.close(); + poll(test, onComplete, function () { + return ! connection.status().connected; }); - connection = DDP.connect(Meteor.absoluteUrl()); - } -); + + // Close the connection from the server. 
+ sessionHandle.close(); + }); + + connection = DDP.connect(Meteor.absoluteUrl(), {retry: false}); +}); + var innerCalled = null; diff --git a/packages/livedata/package.js b/packages/livedata/package.js index 2b0011c9a8..e45ea5e07e 100644 --- a/packages/livedata/package.js +++ b/packages/livedata/package.js @@ -73,5 +73,6 @@ Package.on_test(function (api) { api.use('http', 'client'); api.add_files(['stream_tests.js'], 'client'); + api.add_files('stream_client_tests.js', 'server'); api.use('check', ['client', 'server']); }); diff --git a/packages/livedata/stream_client_common.js b/packages/livedata/stream_client_common.js index 47f003cc62..bb16098119 100644 --- a/packages/livedata/stream_client_common.js +++ b/packages/livedata/stream_client_common.js @@ -195,10 +195,13 @@ _.extend(LivedataTest.ClientStream.prototype, { _retryLater: function () { var self = this; - var timeout = self._retry.retryLater( - self.currentStatus.retryCount, - _.bind(self._retryNow, self) - ); + var timeout = 0; + if (self.options.retry) { + timeout = self._retry.retryLater( + self.currentStatus.retryCount, + _.bind(self._retryNow, self) + ); + } self.currentStatus.status = "waiting"; self.currentStatus.connected = false; diff --git a/packages/livedata/stream_client_nodejs.js b/packages/livedata/stream_client_nodejs.js index 1a833c0e34..d5d6405c97 100644 --- a/packages/livedata/stream_client_nodejs.js +++ b/packages/livedata/stream_client_nodejs.js @@ -9,8 +9,11 @@ // We don't do any heartbeating. (The logic that did this in sockjs was removed, // because it used a built-in sockjs mechanism. We could do it with WebSocket // ping frames or with DDP-level messages.) -LivedataTest.ClientStream = function (endpoint) { +LivedataTest.ClientStream = function (endpoint, options) { var self = this; + self.options = _.extend({ + retry: true + }, options); // WebSocket-Node https://github.com/Worlize/WebSocket-Node // Chosen because it can run without native components. 
It has a @@ -31,9 +34,12 @@ LivedataTest.ClientStream = function (endpoint) { self.endpoint = endpoint; self.currentConnection = null; - self.client.on('connect', function (connection) { - return self._onConnect(connection); - }); + self.client.on('connect', Meteor.bindEnvironment( + function (connection) { + return self._onConnect(connection); + }, + "stream connect callback" + )); self.client.on('connectFailed', function (error) { // XXX: Make this do something better than make the tests hang if it does not work. @@ -89,20 +95,41 @@ _.extend(LivedataTest.ClientStream.prototype, { self.connectionTimer = null; } - connection.on('error', function (error) { - if (self.currentConnection !== this) - return; + var onError = Meteor.bindEnvironment( + function (_this) { + if (self.currentConnection !== _this) + return; - Meteor._debug("stream error", error.toString(), - (new Date()).toDateString()); - self._lostConnection(); + Meteor._debug("stream error", error.toString(), + (new Date()).toDateString()); + self._lostConnection(); + }, + "stream error callback" + ); + + connection.on('error', function (error) { + // We have to pass in `this` explicitly because bindEnvironment + // doesn't propagate it for us. + onError(this); }); - connection.on('close', function () { - if (self.currentConnection !== this) - return; + var onClose = Meteor.bindEnvironment( + function (_this) { + if (self.options._testOnClose) + self.options._testOnClose(); - self._lostConnection(); + if (self.currentConnection !== _this) + return; + + self._lostConnection(); + }, + "stream close callback" + ); + + connection.on('close', function () { + // We have to pass in `this` explicitly because bindEnvironment + // doesn't propagate it for us. 
+ onClose(this); }); connection.on('message', function (message) { diff --git a/packages/livedata/stream_client_sockjs.js b/packages/livedata/stream_client_sockjs.js index 1edf9ea7d3..d99889f339 100644 --- a/packages/livedata/stream_client_sockjs.js +++ b/packages/livedata/stream_client_sockjs.js @@ -1,8 +1,11 @@ // @param url {String} URL to Meteor app // "http://subdomain.meteor.com/" or "/" or // "ddp+sockjs://foo-**.meteor.com/sockjs" -LivedataTest.ClientStream = function (url) { +LivedataTest.ClientStream = function (url, options) { var self = this; + self.options = _.extend({ + retry: true + }, options); self._initCommon(); //// Constants diff --git a/packages/livedata/stream_client_tests.js b/packages/livedata/stream_client_tests.js new file mode 100644 index 0000000000..dbb675852d --- /dev/null +++ b/packages/livedata/stream_client_tests.js @@ -0,0 +1,17 @@ +var Fiber = Npm.require('fibers'); + +Tinytest.addAsync("stream client - callbacks run in a fiber", function (test, onComplete) { + stream = new LivedataTest.ClientStream( + Meteor.absoluteUrl(), + { + _testOnClose: function () { + test.isTrue(Fiber.current); + onComplete(); + } + } + ); + stream.on('reset', function () { + test.isTrue(Fiber.current); + stream.disconnect(); + }); +}); diff --git a/packages/meteor/dynamics_browser.js b/packages/meteor/dynamics_browser.js index b2b7c34a88..01ebcaf941 100644 --- a/packages/meteor/dynamics_browser.js +++ b/packages/meteor/dynamics_browser.js @@ -30,8 +30,15 @@ Meteor.bindEnvironment = function (func, onException, _this) { // values var boundValues = _.clone(currentValues); - if (!onException) - throw new Error("onException must be supplied"); + if (!onException || typeof(onException) === 'string') { + var description = onException || "callback of async function"; + onException = function (error) { + Meteor._debug( + "Exception in " + description + ":", + error && error.stack || error + ); + }; + } return function (/* arguments */) { var savedValues = 
currentValues; diff --git a/packages/meteor/dynamics_nodejs.js b/packages/meteor/dynamics_nodejs.js index 7df36a2cd3..b0d68b5e27 100644 --- a/packages/meteor/dynamics_nodejs.js +++ b/packages/meteor/dynamics_nodejs.js @@ -55,14 +55,26 @@ _.extend(Meteor.EnvironmentVariable.prototype, { // return value of the function will be passed through, and no new // fiber will be created.) // -Meteor.bindEnvironment = function (func, onException, _this) { +// `onException` should be a function or a string. When it is a +// function, it is called as a callback when the bound function raises +// an exception. If it is a string, it should be a description of the +// callback, and when an exception is raised a debug message will be +// printed with the description. +Meteor.bindEnvironment = function (func, onException, _this, context) { if (!Fiber.current) throw new Error(noFiberMessage); var boundValues = _.clone(Fiber.current._meteor_dynamics || []); - if (!onException) - throw new Error("onException must be supplied"); + if (!onException || typeof(onException) === 'string') { + var description = onException || "callback of async function"; + onException = function (error) { + Meteor._debug( + "Exception in " + description + ":", + error && error.stack || error + ); + }; + } return function (/* arguments */) { var args = _.toArray(arguments); From 8644363f1a1b1ae7ad4dc278f111bb1ffa20efd8 Mon Sep 17 00:00:00 2001 From: Andrew Wilcox Date: Tue, 19 Nov 2013 16:51:35 -0500 Subject: [PATCH 154/190] Meteor.server.onConnection documentation. 
--- docs/client/api.html | 32 ++++++++++++++++++++++++++++++++ docs/client/api.js | 12 ++++++++++++ docs/client/docs.js | 3 ++- 3 files changed, 46 insertions(+), 1 deletion(-) diff --git a/docs/client/api.html b/docs/client/api.html index 0589b8d9af..2b88410d17 100644 --- a/docs/client/api.html +++ b/docs/client/api.html @@ -496,6 +496,38 @@ When you call `Meteor.subscribe`, `Meteor.status`, `Meteor.call`, and `Meteor.apply`, you are using a connection back to that default server. +{{> api_box serverOnConnection}} + +The `onConnection` callback is called only once for a session. When +the session reconnection feature is implemented, a client reconnecting +to the same session won't cause the callback to be called again. + +The callback is called with a single argument, a `SessionHandle`. +The `SessionHandle` is an object containing the following fields: + +
+{{#dtdd name="id" type="String"}} + The session id, unique to this session. +{{/dtdd}} + +{{#dtdd name="close" type="Function"}} + Close this session and the associated DDP connection. The + client is free to reconnect, but will receive a different session if + it does. +{{/dtdd}} + +{{#dtdd name="onClose" type="Function"}} + Register a callback to be called when the session is closed. + + When session reconnections are implemented, the client closing the + DDP connection won't cause the session is close right away because + the client might reconnect; instead the session will close after a + timeout. Once the session has been closed, a reconnect from the + client attempting to reuse the session will receive a new session. +{{/dtdd}} +
+ +

Collections

Meteor stores data in *collections*. To get started, declare a diff --git a/docs/client/api.js b/docs/client/api.js index 9294952289..e06c204b38 100644 --- a/docs/client/api.js +++ b/docs/client/api.js @@ -479,6 +479,18 @@ Template.api.connect = { ] }; +Template.api.serverOnConnection = { + id: "meteor_server_onconnection", + name: "Meteor.server.onConnection(callback)", + locus: "server", + descr: ["Register a callback to be called when a new DDP connection is made to the server."], + args: [ + {name: "callback", + type: "function", + descr: "The function to call when a new DDP connection is received."} + ] +}; + // onAutopublish Template.api.meteor_collection = { diff --git a/docs/client/docs.js b/docs/client/docs.js index f32972da78..6eb015e9a2 100644 --- a/docs/client/docs.js +++ b/docs/client/docs.js @@ -143,7 +143,8 @@ var toc = [ "Meteor.status", "Meteor.reconnect", "Meteor.disconnect", - "DDP.connect" + "DDP.connect", + "Meteor.server.onConnection" ], {name: "Collections", id: "collections"}, [ From 41f44b1ad6fc5818656768d61329427d3bc44ed0 Mon Sep 17 00:00:00 2001 From: Andrew Wilcox Date: Wed, 20 Nov 2013 16:13:03 -0500 Subject: [PATCH 155/190] Include the session handle in a method invocation instead of just the session id. Make _sessionData available in a nested method invocation on the server. 
--- packages/accounts-base/accounts_server.js | 22 +++++++-------- packages/livedata/livedata_common.js | 5 ++-- packages/livedata/livedata_server.js | 11 ++++---- packages/livedata/livedata_server_tests.js | 32 +++++++++++++++++----- 4 files changed, 43 insertions(+), 27 deletions(-) diff --git a/packages/accounts-base/accounts_server.js b/packages/accounts-base/accounts_server.js index a828a87c4b..058cb6d66a 100644 --- a/packages/accounts-base/accounts_server.js +++ b/packages/accounts-base/accounts_server.js @@ -177,23 +177,23 @@ Accounts._getLoginToken = function (methodInvocation) { Accounts._setLoginToken = function (methodInvocation, newToken) { var oldToken = methodInvocation._sessionData.loginToken; methodInvocation._sessionData.loginToken = newToken; - loginTokenChanged(methodInvocation.sessionId, newToken, oldToken); + loginTokenChanged(methodInvocation.session.id, newToken, oldToken); }; // sessionId -> SessionHandle -// XXX Wouldn't be necessary if there was an API to get the session or -// session handle from a session id. 
-var sessionHandles = {}; +// XXX Wouldn't be necessary if there was an API to get the session +// from a session id via Meteor.server.sessions[sessionId].sessionHandle +var sessions = {}; -Meteor.server.onConnection(function (sessionHandle) { - var sessionId = sessionHandle.id; - sessionHandles[sessionId] = sessionHandle; - sessionHandle.onClose(function () { - var token = sessionHandle._sessionData.loginToken; +Meteor.server.onConnection(function (session) { + var sessionId = session.id; + sessions[sessionId] = session; + session.onClose(function () { + var token = session._sessionData.loginToken; if (token) removeSessionFromToken(token, sessionId); - delete sessionHandles[sessionId]; + delete sessions[sessionId]; }); }); @@ -205,7 +205,7 @@ var closeSessionsForTokens = function (tokens) { _.each(tokens, function (token) { if (_.has(sessionsByLoginToken, token)) { _.each(sessionsByLoginToken[token], function (sessionId) { - sessionHandles[sessionId] && sessionHandles[sessionId].close(); + sessions[sessionId] && sessions[sessionId].close(); }); } }); diff --git a/packages/livedata/livedata_common.js b/packages/livedata/livedata_common.js index 147e85f8ba..fda4a32437 100644 --- a/packages/livedata/livedata_common.js +++ b/packages/livedata/livedata_common.js @@ -29,9 +29,8 @@ MethodInvocation = function (options) { // reruns subscriptions this._setUserId = options.setUserId || function () {}; - // On the server, the session id of the connection this method call - // came in on. - this.sessionId = options.sessionId; + // On the server, the session this method call came in on. + this.session = options.session; // Scratch data scoped to this connection (livedata_connection on the // client, livedata_session on the server). 
This is only used diff --git a/packages/livedata/livedata_server.js b/packages/livedata/livedata_server.js index 92574d89df..45e7098fc3 100644 --- a/packages/livedata/livedata_server.js +++ b/packages/livedata/livedata_server.js @@ -566,7 +566,7 @@ _.extend(Session.prototype, { userId: self.userId, setUserId: setUserId, unblock: unblock, - sessionId: self.id, + session: self.sessionHandle, sessionData: self.sessionData }); try { @@ -1263,23 +1263,22 @@ _.extend(Server.prototype, { var setUserId = function() { throw new Error("Can't call setUserId on a server initiated method call"); }; - var sessionId = null; + var session = null; var currentInvocation = DDP._CurrentInvocation.get(); if (currentInvocation) { userId = currentInvocation.userId; setUserId = function(userId) { currentInvocation.setUserId(userId); }; - sessionId = currentInvocation.sessionId; + session = currentInvocation.session; } var invocation = new MethodInvocation({ isSimulation: false, userId: userId, setUserId: setUserId, - sessionId: sessionId, - // XXX the Server object doesn't have a `sessionData` field. 
- sessionData: self.sessionData + session: session, + sessionData: session && session._sessionData }); try { var result = DDP._CurrentInvocation.withValue(invocation, function () { diff --git a/packages/livedata/livedata_server_tests.js b/packages/livedata/livedata_server_tests.js index b8629f4b73..69f5dc9636 100644 --- a/packages/livedata/livedata_server_tests.js +++ b/packages/livedata/livedata_server_tests.js @@ -61,9 +61,9 @@ var innerCalled = null; Meteor.methods({ livedata_server_test_inner: function () { - var sessionId = this.sessionId; + var self = this; Meteor.defer(function () { - innerCalled(sessionId); + innerCalled(self); }); }, @@ -81,8 +81,8 @@ Tinytest.addAsync( callbackHandle.stop(); sessionId = sessionHandle.id; }); - innerCalled = function (methodSessionId) { - test.equal(methodSessionId, sessionId); + innerCalled = function (methodInvocation) { + test.equal(methodInvocation.session.id, sessionId); onComplete(); }; var connection = DDP.connect(Meteor.absoluteUrl()); @@ -93,15 +93,33 @@ Tinytest.addAsync( Tinytest.addAsync( - "livedata server - sessionId in nested method invocation", + "livedata server - session in nested method invocation", function (test, onComplete) { var sessionId; var callbackHandle = Meteor.server.onConnection(function (sessionHandle) { callbackHandle.stop(); sessionId = sessionHandle.id; }); - innerCalled = function (methodSessionId) { - test.equal(methodSessionId, sessionId); + innerCalled = function (methodInvocation) { + test.equal(methodInvocation.session.id, sessionId); + onComplete(); + }; + var connection = DDP.connect(Meteor.absoluteUrl()); + connection.call('livedata_server_test_outer'); + connection.disconnect(); + } +); + + +Tinytest.addAsync( + "livedata server - session data in nested method invocation", + function (test, onComplete) { + var callbackHandle = Meteor.server.onConnection(function (session) { + callbackHandle.stop(); + session._sessionData.foo = 123; + }); + innerCalled = function 
(methodInvocation) { + test.equal(methodInvocation._sessionData.foo, 123); onComplete(); }; var connection = DDP.connect(Meteor.absoluteUrl()); From 6462e2dae976588648f364faa3d239fee2da61f2 Mon Sep 17 00:00:00 2001 From: Andrew Wilcox Date: Wed, 20 Nov 2013 16:21:50 -0500 Subject: [PATCH 156/190] Alias Meteor.server.onConnection to Meteor.onConnection --- packages/livedata/livedata_server_tests.js | 10 +++++----- packages/livedata/server_convenience.js | 4 ++-- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/packages/livedata/livedata_server_tests.js b/packages/livedata/livedata_server_tests.js index 69f5dc9636..74561e9ef8 100644 --- a/packages/livedata/livedata_server_tests.js +++ b/packages/livedata/livedata_server_tests.js @@ -4,7 +4,7 @@ Tinytest.addAsync( "livedata server - sessionHandle.onClose()", function (test, onComplete) { var connection; - var callbackHandle = Meteor.server.onConnection(function (sessionHandle) { + var callbackHandle = Meteor.onConnection(function (sessionHandle) { callbackHandle.stop(); test.isTrue(_.isString(sessionHandle.id), "sessionHandle.id exists and is a string"); // On the server side, wait for the connection to be closed. 
@@ -41,7 +41,7 @@ var poll = function (test, onComplete, fn) { Tinytest.addAsync("livedata server - sessionHandle.close()", function (test, onComplete) { var connection; - var callbackHandle = Meteor.server.onConnection(function (sessionHandle) { + var callbackHandle = Meteor.onConnection(function (sessionHandle) { callbackHandle.stop(); poll(test, onComplete, function () { @@ -77,7 +77,7 @@ Tinytest.addAsync( "livedata server - sessionId in method invocation", function (test, onComplete) { var sessionId; - var callbackHandle = Meteor.server.onConnection(function (sessionHandle) { + var callbackHandle = Meteor.onConnection(function (sessionHandle) { callbackHandle.stop(); sessionId = sessionHandle.id; }); @@ -96,7 +96,7 @@ Tinytest.addAsync( "livedata server - session in nested method invocation", function (test, onComplete) { var sessionId; - var callbackHandle = Meteor.server.onConnection(function (sessionHandle) { + var callbackHandle = Meteor.onConnection(function (sessionHandle) { callbackHandle.stop(); sessionId = sessionHandle.id; }); @@ -114,7 +114,7 @@ Tinytest.addAsync( Tinytest.addAsync( "livedata server - session data in nested method invocation", function (test, onComplete) { - var callbackHandle = Meteor.server.onConnection(function (session) { + var callbackHandle = Meteor.onConnection(function (session) { callbackHandle.stop(); session._sessionData.foo = 123; }); diff --git a/packages/livedata/server_convenience.js b/packages/livedata/server_convenience.js index ed1dc6d05a..ac20e68fae 100644 --- a/packages/livedata/server_convenience.js +++ b/packages/livedata/server_convenience.js @@ -24,14 +24,14 @@ if (Package.webapp) { // Proxy the public methods of Meteor.server so they can // be called directly on Meteor. - _.each(['publish', 'methods', 'call', 'apply'], + _.each(['publish', 'methods', 'call', 'apply', 'onConnection'], function (name) { Meteor[name] = _.bind(Meteor.server[name], Meteor.server); }); } else { // No server? 
Make these empty/no-ops. Meteor.server = null; - Meteor.refresh = function (notificatio) { + Meteor.refresh = function (notification) { }; } From badb6e9d30bae0eb3b536addc926d6b8884a5f0a Mon Sep 17 00:00:00 2001 From: Andrew Wilcox Date: Wed, 20 Nov 2013 17:28:45 -0500 Subject: [PATCH 157/190] Documentation update. Document `Meteor.onConnection` instead of `Meteor.server.onConnection`. Condense sections about when there is a session reconnect into a single {{note}}. Document the `stop` handle returned by `onConnect`. Document `this.session` in Meteor.methods section. --- docs/client/api.html | 34 ++++++++++++++++++++++++---------- docs/client/api.js | 6 +++--- docs/client/docs.js | 2 +- 3 files changed, 28 insertions(+), 14 deletions(-) diff --git a/docs/client/api.html b/docs/client/api.html index 2b88410d17..d28a750517 100644 --- a/docs/client/api.html +++ b/docs/client/api.html @@ -276,6 +276,7 @@ invocation object, which provides the following: begin running. * `userId`: the id of the current user. * `setUserId`: a function that associates the current client with a user. +* `session`: on the server, a reference to the [session handle](#meteor_onconnection) of the session this method call came in on. Calling `methods` on the client defines *stub* functions associated with server methods of the same name. You don't have to define a stub for @@ -496,11 +497,11 @@ When you call `Meteor.subscribe`, `Meteor.status`, `Meteor.call`, and `Meteor.apply`, you are using a connection back to that default server. -{{> api_box serverOnConnection}} +{{> api_box onConnection}} -The `onConnection` callback is called only once for a session. When -the session reconnection feature is implemented, a client reconnecting -to the same session won't cause the callback to be called again. +`onConnection` returns an object with a single method `stop`. Calling +`stop` unregisters the callback, so that the callback will no longer +be called on new connections. 
The callback is called with a single argument, a `SessionHandle`. The `SessionHandle` is an object containing the following fields: @@ -518,15 +519,28 @@ The `SessionHandle` is an object containing the following fields: {{#dtdd name="onClose" type="Function"}} Register a callback to be called when the session is closed. - - When session reconnections are implemented, the client closing the - DDP connection won't cause the session is close right away because - the client might reconnect; instead the session will close after a - timeout. Once the session has been closed, a reconnect from the - client attempting to reuse the session will receive a new session. {{/dtdd}} +{{#note}} +Currently when a client reconnects to the server (such as after +temporarily losing its Internet connection), it will get a new session +each time. + +When session reconnects are implemented, clients will be able to +reconnect to the same session. + +The `onConnection` callback will be called only once for a session. A +client reconnecting to the same session won't cause the callback to be +called again. + +The client closing the connection won't cause the session to close +right away because the client might reconnect; instead the session +will close after a timeout. Once the session has been closed +(including when the session handle `close` method is called), a +reconnect from the client attempting to reuse the session will receive +a new session. +{{/note}}

Collections

diff --git a/docs/client/api.js b/docs/client/api.js index e06c204b38..ca1d73e44c 100644 --- a/docs/client/api.js +++ b/docs/client/api.js @@ -479,9 +479,9 @@ Template.api.connect = { ] }; -Template.api.serverOnConnection = { - id: "meteor_server_onconnection", - name: "Meteor.server.onConnection(callback)", +Template.api.onConnection = { + id: "meteor_onconnection", + name: "Meteor.onConnection(callback)", locus: "server", descr: ["Register a callback to be called when a new DDP connection is made to the server."], args: [ diff --git a/docs/client/docs.js b/docs/client/docs.js index 6eb015e9a2..5d144d4440 100644 --- a/docs/client/docs.js +++ b/docs/client/docs.js @@ -144,7 +144,7 @@ var toc = [ "Meteor.reconnect", "Meteor.disconnect", "DDP.connect", - "Meteor.server.onConnection" + "Meteor.onConnection" ], {name: "Collections", id: "collections"}, [ From 72eded7e2f1086900c3541d4f9969e5ae6a0620d Mon Sep 17 00:00:00 2001 From: Andrew Wilcox Date: Wed, 20 Nov 2013 18:32:41 -0500 Subject: [PATCH 158/190] Check that the connection hasn't been closed already before calling the onConnection callback. --- packages/livedata/livedata_server.js | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/packages/livedata/livedata_server.js b/packages/livedata/livedata_server.js index 45e7098fc3..f876b359eb 100644 --- a/packages/livedata/livedata_server.js +++ b/packages/livedata/livedata_server.js @@ -1106,7 +1106,8 @@ _.extend(Server.prototype, { socket._meteorSession = new Session(self, version, socket); self.sessions[socket._meteorSession.id] = socket._meteorSession; _.each(self.connectionCallbacks, function (callback) { - callback(socket._meteorSession.sessionHandle); + if (socket._meteorSession) + callback(socket._meteorSession.sessionHandle); }); } else if (!msg.version) { // connect message without a version. 
This means an old (pre-pre1) From 819019f08f9b599e1f7e5b38e3f203298b2dbf57 Mon Sep 17 00:00:00 2001 From: Andrew Wilcox Date: Wed, 20 Nov 2013 19:10:53 -0500 Subject: [PATCH 159/190] Mention that session id's are globally unique. --- docs/client/api.html | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/client/api.html b/docs/client/api.html index d28a750517..14304c28ef 100644 --- a/docs/client/api.html +++ b/docs/client/api.html @@ -508,7 +508,7 @@ The `SessionHandle` is an object containing the following fields:
{{#dtdd name="id" type="String"}} - The session id, unique to this session. + The globally unique session id. {{/dtdd}} {{#dtdd name="close" type="Function"}} From 67a589be8b103d55104f74e3dbccf9ab883552e5 Mon Sep 17 00:00:00 2001 From: Andrew Wilcox Date: Thu, 21 Nov 2013 13:03:04 -0500 Subject: [PATCH 160/190] Fix session handle tests so that multiple copies of the test can run at the same time. --- packages/livedata/livedata_server_tests.js | 219 ++++++++++++++------- 1 file changed, 148 insertions(+), 71 deletions(-) diff --git a/packages/livedata/livedata_server_tests.js b/packages/livedata/livedata_server_tests.js index 74561e9ef8..a94560e902 100644 --- a/packages/livedata/livedata_server_tests.js +++ b/packages/livedata/livedata_server_tests.js @@ -1,60 +1,135 @@ var Fiber = Npm.require('fibers'); -Tinytest.addAsync( - "livedata server - sessionHandle.onClose()", - function (test, onComplete) { - var connection; - var callbackHandle = Meteor.onConnection(function (sessionHandle) { - callbackHandle.stop(); - test.isTrue(_.isString(sessionHandle.id), "sessionHandle.id exists and is a string"); - // On the server side, wait for the connection to be closed. - sessionHandle.onClose(function () { - onComplete(); - }); - // Close the connection from the client. - connection.disconnect(); - }); - connection = DDP.connect(Meteor.absoluteUrl()); - } -); - // like pollUntil but doesn't have to be called from testAsyncMulti. -var poll = function (test, onComplete, fn) { +// +// Call `fn` periodically until it returns true. If it does, call +// `success`. If it doesn't before the timeout, call `failed`. +// +// An implementation that used fibers would be easier to use, but +// don't want to rule out the possibility of eventually also running +// these tests from the client (which would need an additional +// signaling mechanism to tell the server when to do particular steps +// such as closing the connection on the server side). 
+var poll = function (fn, success, failed) { var timeout = 10000; var step = 200; var start = (new Date()).valueOf(); var helper = function () { if (fn()) { - test.ok(); - onComplete(); + success(); return; } if (start + timeout < (new Date()).valueOf()) { - test.fail(); - onComplete(); + failed(); return; } Meteor.setTimeout(helper, step); }; helper(); }; - -Tinytest.addAsync("livedata server - sessionHandle.close()", function (test, onComplete) { + + +// Establish a connection from the server to the server, and wait +// until the client side of the connection has received the session +// id. On success call `succeeded` with two arguments, the client +// side `connection` and the server side `session`. Call `failed` on +// failure. +var establishConnection = function (test, succeeded, failed) { + // The connection from the client side. var connection; - var callbackHandle = Meteor.onConnection(function (sessionHandle) { - callbackHandle.stop(); - poll(test, onComplete, function () { - return ! connection.status().connected; - }); + // Track incoming sessions server side until we know which one is + // ours. + var sessions = {}; - // Close the connection from the server. - sessionHandle.close(); + // Add incoming sessions to `sessions`. + var onConnectionHandle = Meteor.onConnection(function (session) { + test.isTrue(_.isString(session.id), "session handle id exists and is a string"); + if (sessions[session.id]) { + test.fail("onConnection callback called multiple times for same session id"); + failed(); + } + else { + sessions[session.id] = session; + } }); - connection = DDP.connect(Meteor.absoluteUrl(), {retry: false}); -}); + // We've succeeded when we get the session id on the client side. + var onClientSessionId = function (sessionId) { + test.isTrue(connection.status().connected); + var session = sessions[sessionId]; + if (! 
session) { + test.fail("No onConnection received server side for connected client"); + failed(); + } + else { + onConnectionHandle.stop(); + succeeded(connection, session); + } + }; + // Connect and wait until the connection receives its session id. + // Disable retries so that when the connection is closed we don't + // automatically keep reconnecting on the client side. + connection = DDP.connect(Meteor.absoluteUrl(), {retry: false}); + poll( + function () { + return connection._lastSessionId; + }, + function () { + onClientSessionId(connection._lastSessionId); + }, + function () { + test.fail("client side of connection did not receive a session id"); + failed(); + } + ); +}; + +Tinytest.addAsync( + "livedata server - sessionHandle.onClose()", + function (test, onComplete) { + establishConnection( + test, + function (connection, session) { + // On the server side, wait for the connection to be closed. + session.onClose(function () { + onComplete(); + }); + // Close the connection from the client. + connection.disconnect(); + }, + onComplete + ); + } +); + + +Tinytest.addAsync( + "livedata server - sessionHandle.close()", + function (test, onComplete) { + establishConnection( + test, + function (connection, session) { + // Wait for the connection to be closed from the server side. + poll( + function () { + return ! connection.status().connected; + }, + onComplete, + function () { + test.fail("timeout waiting for the connection to be closed on the server side"); + onComplete(); + } + ); + + // Close the connection from the server. 
+ session.close(); + }, + onComplete + ); + } +); var innerCalled = null; @@ -74,20 +149,20 @@ Meteor.methods({ Tinytest.addAsync( - "livedata server - sessionId in method invocation", + "livedata server - session in method invocation", function (test, onComplete) { - var sessionId; - var callbackHandle = Meteor.onConnection(function (sessionHandle) { - callbackHandle.stop(); - sessionId = sessionHandle.id; - }); - innerCalled = function (methodInvocation) { - test.equal(methodInvocation.session.id, sessionId); - onComplete(); - }; - var connection = DDP.connect(Meteor.absoluteUrl()); - connection.call('livedata_server_test_inner'); - connection.disconnect(); + establishConnection( + test, + function (connection, session) { + innerCalled = function (methodInvocation) { + test.equal(methodInvocation.session.id, session.id); + onComplete(); + }; + connection.call('livedata_server_test_inner'); + connection.disconnect(); + }, + onComplete + ); } ); @@ -95,35 +170,37 @@ Tinytest.addAsync( Tinytest.addAsync( "livedata server - session in nested method invocation", function (test, onComplete) { - var sessionId; - var callbackHandle = Meteor.onConnection(function (sessionHandle) { - callbackHandle.stop(); - sessionId = sessionHandle.id; - }); - innerCalled = function (methodInvocation) { - test.equal(methodInvocation.session.id, sessionId); - onComplete(); - }; - var connection = DDP.connect(Meteor.absoluteUrl()); - connection.call('livedata_server_test_outer'); - connection.disconnect(); + establishConnection( + test, + function (connection, session) { + innerCalled = function (methodInvocation) { + test.equal(methodInvocation.session.id, session.id); + onComplete(); + }; + connection.call('livedata_server_test_outer'); + connection.disconnect(); + }, + onComplete + ); } ); - + Tinytest.addAsync( "livedata server - session data in nested method invocation", function (test, onComplete) { - var callbackHandle = Meteor.onConnection(function (session) { - 
callbackHandle.stop(); - session._sessionData.foo = 123; - }); - innerCalled = function (methodInvocation) { - test.equal(methodInvocation._sessionData.foo, 123); - onComplete(); - }; - var connection = DDP.connect(Meteor.absoluteUrl()); - connection.call('livedata_server_test_outer'); - connection.disconnect(); + establishConnection( + test, + function (connection, session) { + session._sessionData.foo = 123; + innerCalled = function (methodInvocation) { + test.equal(methodInvocation._sessionData.foo, 123); + onComplete(); + }; + connection.call('livedata_server_test_outer'); + connection.disconnect(); + }, + onComplete + ); } ); From 26e9fad0965a2f2d3597fda98c93f5943f826c4e Mon Sep 17 00:00:00 2001 From: Andrew Wilcox Date: Sat, 23 Nov 2013 13:04:58 -0500 Subject: [PATCH 161/190] In the docs, refer to the public object as a "session" instead of a "session handle". --- docs/client/api.html | 14 +++++++------- docs/client/api.js | 2 +- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/docs/client/api.html b/docs/client/api.html index 14304c28ef..c3fb3b9749 100644 --- a/docs/client/api.html +++ b/docs/client/api.html @@ -276,7 +276,7 @@ invocation object, which provides the following: begin running. * `userId`: the id of the current user. * `setUserId`: a function that associates the current client with a user. -* `session`: on the server, a reference to the [session handle](#meteor_onconnection) of the session this method call came in on. +* `session`: on the server, the [session](#meteor_onconnection) this method call came in on. Calling `methods` on the client defines *stub* functions associated with server methods of the same name. You don't have to define a stub for @@ -500,11 +500,11 @@ server. {{> api_box onConnection}} `onConnection` returns an object with a single method `stop`. 
Calling -`stop` unregisters the callback, so that the callback will no longer +`stop` unregisters the callback, so that this callback will no longer be called on new connections. -The callback is called with a single argument, a `SessionHandle`. -The `SessionHandle` is an object containing the following fields: +The callback is called with a single argument, the `session`. +The session is an object containing the following fields:
{{#dtdd name="id" type="String"}} @@ -537,9 +537,9 @@ called again. The client closing the connection won't cause the session to close right away because the client might reconnect; instead the session will close after a timeout. Once the session has been closed -(including when the session handle `close` method is called), a -reconnect from the client attempting to reuse the session will receive -a new session. +(including when the session `close` method is called), a reconnect +from the client attempting to reuse the session will receive a new +session instead. {{/note}}

Collections

diff --git a/docs/client/api.js b/docs/client/api.js index ca1d73e44c..7dad578ce2 100644 --- a/docs/client/api.js +++ b/docs/client/api.js @@ -487,7 +487,7 @@ Template.api.onConnection = { args: [ {name: "callback", type: "function", - descr: "The function to call when a new DDP connection is received."} + descr: "The function to call with the session when a new DDP connection is established."} ] }; From c1d97fde7cbaa0921c3bcfbaea294f0c2673fccb Mon Sep 17 00:00:00 2001 From: Andrew Wilcox Date: Sat, 23 Nov 2013 13:34:31 -0500 Subject: [PATCH 162/190] Also make the session available in publish functions. --- docs/client/api.html | 2 ++ docs/client/api.js | 8 ++++++ docs/client/docs.js | 3 ++- packages/livedata/livedata_server.js | 1 + packages/livedata/livedata_server_tests.js | 30 ++++++++++++++++++++++ 5 files changed, 43 insertions(+), 1 deletion(-) diff --git a/docs/client/api.html b/docs/client/api.html index c3fb3b9749..d90238bfb9 100644 --- a/docs/client/api.html +++ b/docs/client/api.html @@ -175,6 +175,8 @@ publish handler, this is the place to stop the observes. {{> api_box subscription_error}} {{> api_box subscription_stop}} +{{> api_box subscription_session}} + {{> api_box subscribe}} When you subscribe to a record set, it tells the server to send records to the diff --git a/docs/client/api.js b/docs/client/api.js index 7dad578ce2..d4c71779d4 100644 --- a/docs/client/api.js +++ b/docs/client/api.js @@ -318,6 +318,14 @@ Template.api.subscription_userId = { }; +Template.api.subscription_session = { + id: "publish_session", + name: "this.session", + locus: "Server", + descr: ["Access inside the publish function. The [session](#meteor_onconnection) this subscription came in on."] +}; + + Template.api.subscribe = { id: "meteor_subscribe", name: "Meteor.subscribe(name [, arg1, arg2, ... 
] [, callbacks])", diff --git a/docs/client/docs.js b/docs/client/docs.js index 5d144d4440..388eaef3ed 100644 --- a/docs/client/docs.js +++ b/docs/client/docs.js @@ -122,7 +122,8 @@ var toc = [ {instance: "this", name: "ready", id: "publish_ready"}, {instance: "this", name: "onStop", id: "publish_onstop"}, {instance: "this", name: "error", id: "publish_error"}, - {instance: "this", name: "stop", id: "publish_stop"} + {instance: "this", name: "stop", id: "publish_stop"}, + {instance: "this", name: "session", id: "publish_session"} ], "Meteor.subscribe" ], diff --git a/packages/livedata/livedata_server.js b/packages/livedata/livedata_server.js index f876b359eb..a3e3a97a68 100644 --- a/packages/livedata/livedata_server.js +++ b/packages/livedata/livedata_server.js @@ -741,6 +741,7 @@ var Subscription = function ( session, handler, subscriptionId, params, name) { var self = this; self._session = session; // type is Session + self.session = session.sessionHandle; // public API object self._handler = handler; diff --git a/packages/livedata/livedata_server_tests.js b/packages/livedata/livedata_server_tests.js index a94560e902..997c766a9b 100644 --- a/packages/livedata/livedata_server_tests.js +++ b/packages/livedata/livedata_server_tests.js @@ -204,3 +204,33 @@ Tinytest.addAsync( ); } ); + + +// sessionId -> callback +var onSubscription = {}; + +Meteor.publish("livedata_server_test_sub", function (sessionId) { + var callback = onSubscription[sessionId]; + if (callback) + callback(this); + this.stop(); +}); + + +Tinytest.addAsync( + "livedata server - session in publish function", + function (test, onComplete) { + establishConnection( + test, + function (connection, session) { + onSubscription[session.id] = function (subscription) { + delete onSubscription[session.id]; + test.equal(subscription.session.id, session.id); + connection.disconnect(); + onComplete(); + }; + connection.subscribe("livedata_server_test_sub", session.id); + } + ); + } +); From 
62fe9c62e5bd44e9c4f70caff8717d02382763e1 Mon Sep 17 00:00:00 2001 From: Andrew Wilcox Date: Mon, 25 Nov 2013 11:42:07 -0500 Subject: [PATCH 163/190] Add a "this.session" entry for methods in the docs. Pull out the session object into its own documentation section. --- docs/client/api.html | 88 ++++++++++++++++++++++---------------------- docs/client/api.js | 31 +++++++++++++++- docs/client/docs.js | 8 ++-- 3 files changed, 78 insertions(+), 49 deletions(-) diff --git a/docs/client/api.html b/docs/client/api.html index d90238bfb9..67c2333c91 100644 --- a/docs/client/api.html +++ b/docs/client/api.html @@ -278,7 +278,8 @@ invocation object, which provides the following: begin running. * `userId`: the id of the current user. * `setUserId`: a function that associates the current client with a user. -* `session`: on the server, the [session](#meteor_onconnection) this method call came in on. +* `session`: on the server, the [session](#ddp_session) this method +call came in on. Calling `methods` on the client defines *stub* functions associated with server methods of the same name. You don't have to define a stub for @@ -331,6 +332,8 @@ invocation from a client won't start until the Nth invocation returns. However, you can change this by calling `this.unblock`. This will allow the N+1th invocation to start running in a new fiber. +{{> api_box method_invocation_session}} + {{> api_box error}} If you want to return an error from a method, throw an exception. Methods can @@ -465,6 +468,40 @@ and resume data transfer. This can be used to save battery on mobile devices when real time updates are not required. + +{{> api_box onConnection}} + +`onConnection` returns an object with a single method `stop`. Calling +`stop` unregisters the callback, so that this callback will no longer +be called on new connections. + +The callback is called with a single argument, the server-side +[session](#ddp_session) representing the connection from the client. 
+ + +{{> api_box ddp_session}} + +{{#note}} +Currently when a client reconnects to the server (such as after +temporarily losing its Internet connection), it will get a new session +each time. + +When session reconnects are implemented, clients will be able to +reconnect to the same session. + +The `Meteor.onConnection` callback will be called only once for a +session. A client reconnecting to the same session won't cause the +callback to be called again. + +The client closing the connection won't cause the session to close +right away because the client might reconnect; instead the session +will close after a timeout. Once the session has been closed +(including when the session `close` method is called), a reconnect +from the client attempting to reuse the session will receive a new +session instead. +{{/note}} + + {{> api_box connect}} To call methods on another Meteor application or subscribe to its data @@ -499,50 +536,6 @@ When you call `Meteor.subscribe`, `Meteor.status`, `Meteor.call`, and `Meteor.apply`, you are using a connection back to that default server. -{{> api_box onConnection}} - -`onConnection` returns an object with a single method `stop`. Calling -`stop` unregisters the callback, so that this callback will no longer -be called on new connections. - -The callback is called with a single argument, the `session`. -The session is an object containing the following fields: - -
-{{#dtdd name="id" type="String"}} - The globally unique session id. -{{/dtdd}} - -{{#dtdd name="close" type="Function"}} - Close this session and the associated DDP connection. The - client is free to reconnect, but will receive a different session if - it does. -{{/dtdd}} - -{{#dtdd name="onClose" type="Function"}} - Register a callback to be called when the session is closed. -{{/dtdd}} -
- -{{#note}} -Currently when a client reconnects to the server (such as after -temporarily losing its Internet connection), it will get a new session -each time. - -When session reconnects are implemented, clients will be able to -reconnect to the same session. - -The `onConnection` callback will be called only once for a session. A -client reconnecting to the same session won't cause the callback to be -called again. - -The client closing the connection won't cause the session to close -right away because the client might reconnect; instead the session -will close after a timeout. Once the session has been closed -(including when the session `close` method is called), a reconnect -from the client attempting to reuse the session will receive a new -session instead. -{{/note}}

Collections

@@ -3118,6 +3111,11 @@ code can read `data.txt` by running: {{> api_box_args options}} {{/if}} +{{#if fields}} +

Fields

+{{> api_box_args fields}} +{{/if}} + {{#if body}} {{#better_markdown}}{{{body}}}{{/better_markdown}} {{/if}} diff --git a/docs/client/api.js b/docs/client/api.js index d4c71779d4..89a4cf0c75 100644 --- a/docs/client/api.js +++ b/docs/client/api.js @@ -322,7 +322,7 @@ Template.api.subscription_session = { id: "publish_session", name: "this.session", locus: "Server", - descr: ["Access inside the publish function. The [session](#meteor_onconnection) this subscription came in on."] + descr: ["Access inside the publish function. The [session](#ddp_session) this subscription came in on."] }; @@ -389,6 +389,13 @@ Template.api.method_invocation_isSimulation = { descr: ["Access inside a method invocation. Boolean value, true if this invocation is a stub."] }; +Template.api.method_invocation_session = { + id: "method_session", + name: "this.session", + locus: "Server", + descr: ["Access inside a method invocation. The [session](#meteor_onconnection) this method call came in on."] +}; + Template.api.error = { id: "meteor_error", name: "new Meteor.Error(error, reason, details)", @@ -499,6 +506,28 @@ Template.api.onConnection = { ] }; +Template.api.ddp_session = { + id: "ddp_session", + name: "DDP.Session", + locus: "Server", + descr: ["On the server, this object represents a connection from a client."], + fields: [ + {name: "id", + type: "String", + descr: "The globally unique session id." + }, + {name: "close", + type: "Function", + descr: "Close this session and the associated DDP connection. The client is free to reconnect, but will receive a different session if it does." + }, + {name: "onClose", + type: "Function", + descr: "Register a callback to be called when the session is closed." 
+ } + ] +}; + + // onAutopublish Template.api.meteor_collection = { diff --git a/docs/client/docs.js b/docs/client/docs.js index 388eaef3ed..6fafc02dbd 100644 --- a/docs/client/docs.js +++ b/docs/client/docs.js @@ -133,7 +133,8 @@ var toc = [ {instance: "this", name: "userId", id: "method_userId"}, {instance: "this", name: "setUserId", id: "method_setUserId"}, {instance: "this", name: "isSimulation", id: "method_issimulation"}, - {instance: "this", name: "unblock", id: "method_unblock"} + {instance: "this", name: "unblock", id: "method_unblock"}, + {instance: "this", name: "session", id: "method_session"} ], "Meteor.Error", "Meteor.call", @@ -144,8 +145,9 @@ var toc = [ "Meteor.status", "Meteor.reconnect", "Meteor.disconnect", - "DDP.connect", - "Meteor.onConnection" + "Meteor.onConnection", + "DDP.Session", + "DDP.connect" ], {name: "Collections", id: "collections"}, [ From 614d91b6b02bf34a334c76928a8fdd1d10ea3f88 Mon Sep 17 00:00:00 2001 From: Andrew Wilcox Date: Mon, 25 Nov 2013 13:19:56 -0500 Subject: [PATCH 164/190] Simplify callers of bindEnvironment by using the new string argument, when the error callback only needs to print the exception. Replace unsafe references to `err.stack` with `err && err.stack || err`. This avoids throwing a secondary exception if the original exception in `err` isn't an object (`throw(null)` and `throw(undefined)` are legal in JavaScript), and also displays the error object if the stack trace wasn't included. 
--- packages/livedata/crossbar.js | 7 +++--- packages/livedata/livedata_connection.js | 10 ++++---- packages/livedata/livedata_server.js | 31 ++++++------------------ packages/meteor/dynamics_nodejs.js | 2 +- packages/meteor/fiber_helpers.js | 2 +- packages/meteor/helpers.js | 2 +- packages/meteor/timers.js | 5 +--- packages/mongo-livedata/mongo_driver.js | 4 +-- packages/webapp/webapp_server.js | 2 +- 9 files changed, 22 insertions(+), 43 deletions(-) diff --git a/packages/livedata/crossbar.js b/packages/livedata/crossbar.js index 358ddd5a22..1600b4379b 100644 --- a/packages/livedata/crossbar.js +++ b/packages/livedata/crossbar.js @@ -65,10 +65,9 @@ _.extend(DDPServer._Crossbar.prototype, { }); if (onComplete) - onComplete = Meteor.bindEnvironment(onComplete, function (e) { - Meteor._debug("Exception in Crossbar fire complete " + - "callback", e.stack); - }); + onComplete = Meteor.bindEnvironment( + onComplete, + "Crossbar fire complete callback"); var outstanding = callbacks.length; if (!outstanding) diff --git a/packages/livedata/livedata_connection.js b/packages/livedata/livedata_connection.js index 8667a18022..c65de81cf6 100644 --- a/packages/livedata/livedata_connection.js +++ b/packages/livedata/livedata_connection.js @@ -593,11 +593,11 @@ _.extend(Connection.prototype, { if (callback) { // XXX would it be better form to do the binding in stream.on, // or caller, instead of here? - callback = Meteor.bindEnvironment(callback, function (e) { - // XXX improve error message (and how we report it) - Meteor._debug("Exception while delivering result of invoking '" + - name + "'", e, e.stack); - }); + // XXX improve error message (and how we report it) + callback = Meteor.bindEnvironment( + callback, + "delivering result of invoking '" + name + "'" + ); } // Lazily allocate method ID once we know that it'll be needed. 
diff --git a/packages/livedata/livedata_server.js b/packages/livedata/livedata_server.js index a3e3a97a68..d9f879ce57 100644 --- a/packages/livedata/livedata_server.js +++ b/packages/livedata/livedata_server.js @@ -262,16 +262,9 @@ var Session = function (server, version, socket) { self.server._closeSession(self); }, onClose: function (fn) { - fn = Meteor.bindEnvironment( - fn, - function (err) { - Meteor._debug( - "Exception in connection session onClose callback", - err && err.stack - ); - } + self._closeCallbacks.push( + Meteor.bindEnvironment(fn, "connection session onClose callback") ); - self._closeCallbacks.push(fn); }, _sessionData: self.sessionData }; @@ -1077,15 +1070,7 @@ _.extend(Server.prototype, { onConnection: function (fn) { var self = this; - fn = Meteor.bindEnvironment( - fn, - function (err) { - Meteor._debug( - "Exception in Meteor.server.onConnection callback", - err && err.stack - ); - } - ); + fn = Meteor.bindEnvironment(fn, "onConnection callback"); self.connectionCallbacks.push(fn); @@ -1246,11 +1231,11 @@ _.extend(Server.prototype, { // It's not really necessary to do this, since we immediately // run the callback in this fiber before returning, but we do it // anyway for regularity. - callback = Meteor.bindEnvironment(callback, function (e) { - // XXX improve error message (and how we report it) - Meteor._debug("Exception while delivering result of invoking '" + - name + "'", e.stack); - }); + // XXX improve error message (and how we report it) + callback = Meteor.bindEnvironment( + callback, + "delivering result of invoking '" + name + "'" + ); // Run the handler var handler = self.method_handlers[name]; diff --git a/packages/meteor/dynamics_nodejs.js b/packages/meteor/dynamics_nodejs.js index b0d68b5e27..4d19eb9422 100644 --- a/packages/meteor/dynamics_nodejs.js +++ b/packages/meteor/dynamics_nodejs.js @@ -60,7 +60,7 @@ _.extend(Meteor.EnvironmentVariable.prototype, { // an exception. 
If it is a string, it should be a description of the // callback, and when an exception is raised a debug message will be // printed with the description. -Meteor.bindEnvironment = function (func, onException, _this, context) { +Meteor.bindEnvironment = function (func, onException, _this) { if (!Fiber.current) throw new Error(noFiberMessage); diff --git a/packages/meteor/fiber_helpers.js b/packages/meteor/fiber_helpers.js index c071674e91..3b9cf53c10 100644 --- a/packages/meteor/fiber_helpers.js +++ b/packages/meteor/fiber_helpers.js @@ -71,7 +71,7 @@ _.extend(Meteor._SynchronousQueue.prototype, { var fut = new Future; var handle = { task: Meteor.bindEnvironment(task, function (e) { - Meteor._debug("Exception from task:", e ? e.stack : e); + Meteor._debug("Exception from task:", e && e.stack || e); throw e; }), future: fut, diff --git a/packages/meteor/helpers.js b/packages/meteor/helpers.js index 7edd152bcc..9b05246669 100644 --- a/packages/meteor/helpers.js +++ b/packages/meteor/helpers.js @@ -105,7 +105,7 @@ _.extend(Meteor, { callback = fut.resolver(); } } - newArgs.push(Meteor.bindEnvironment(callback, logErr)); + newArgs.push(Meteor.bindEnvironment(callback)); var result = fn.apply(self, newArgs); if (fut) return fut.wait(); diff --git a/packages/meteor/timers.js b/packages/meteor/timers.js index 68411cbaf5..f522701e05 100644 --- a/packages/meteor/timers.js +++ b/packages/meteor/timers.js @@ -10,10 +10,7 @@ var withoutInvocation = function (f) { }; var bindAndCatch = function (context, f) { - return Meteor.bindEnvironment(withoutInvocation(f), function (e) { - // XXX report nicely (or, should we catch it at all?) 
- Meteor._debug("Exception from " + context + ":", e, e.stack); - }); + return Meteor.bindEnvironment(withoutInvocation(f), context); }; _.extend(Meteor, { diff --git a/packages/mongo-livedata/mongo_driver.js b/packages/mongo-livedata/mongo_driver.js index 5fd2f7d2b6..bb258e55dd 100644 --- a/packages/mongo-livedata/mongo_driver.js +++ b/packages/mongo-livedata/mongo_driver.js @@ -263,9 +263,7 @@ var writeCallback = function (write, refresh, callback) { }; var bindEnvironmentForWrite = function (callback) { - return Meteor.bindEnvironment(callback, function (err) { - Meteor._debug("Error in Mongo write:", err.stack); - }); + return Meteor.bindEnvironment(callback, "Mongo write"); }; MongoConnection.prototype._insert = function (collection_name, document, diff --git a/packages/webapp/webapp_server.js b/packages/webapp/webapp_server.js index 22f7a2e269..51a07ab9aa 100644 --- a/packages/webapp/webapp_server.js +++ b/packages/webapp/webapp_server.js @@ -575,7 +575,7 @@ var runWebAppServer = function () { }, function (e) { console.error("Error listening:", e); - console.error(e.stack); + console.error(e && e.stack); })); if (argv.keepalive) From 46afaec2151f7995a9754a2b7b604d4a738b2db9 Mon Sep 17 00:00:00 2001 From: Andrew Wilcox Date: Tue, 26 Nov 2013 11:32:26 -0500 Subject: [PATCH 165/190] Remove sessionData. Add tests to check that account data is cleaned up after a session closes. Make `establishConnection` available to account tests. Remove code duplication between `poll` (now called `simplePoll`) and the async_multi `pollUntil`. 
--- packages/accounts-base/accounts_server.js | 105 +++++++++-------- packages/accounts-base/accounts_tests.js | 22 ++++ packages/accounts-base/package.js | 1 + packages/accounts-password/password_server.js | 29 +++-- packages/accounts-password/password_tests.js | 32 ++++++ packages/livedata/livedata_common.js | 6 - packages/livedata/livedata_connection.js | 7 +- packages/livedata/livedata_server.js | 13 +-- packages/livedata/livedata_server_tests.js | 107 +----------------- packages/test-helpers/async_multi.js | 26 ++++- packages/test-helpers/connection.js | 56 +++++++++ packages/test-helpers/package.js | 4 +- 12 files changed, 212 insertions(+), 196 deletions(-) create mode 100644 packages/test-helpers/connection.js diff --git a/packages/accounts-base/accounts_server.js b/packages/accounts-base/accounts_server.js index 058cb6d66a..af8d0708dc 100644 --- a/packages/accounts-base/accounts_server.js +++ b/packages/accounts-base/accounts_server.js @@ -78,14 +78,14 @@ Meteor.methods({ var result = tryAllLoginHandlers(options); if (result !== null) { this.setUserId(result.id); - Accounts._setLoginToken(this, result.token); + Accounts._setLoginToken(this.session.id, result.token); } return result; }, logout: function() { - var token = Accounts._getLoginToken(this); - Accounts._setLoginToken(this, null); + var token = Accounts._getLoginToken(this.session.id); + Accounts._setLoginToken(this.session.id, null); if (token && this.userId) removeLoginToken(this.userId, token); this.setUserId(null); @@ -139,6 +139,35 @@ Meteor.methods({ } }); +/// +/// ACCOUNT DATA +/// + +// sessionId -> {session, loginToken, srpChallenge} +var accountData = {}; + +Accounts._getAccountData = function (sessionId, field) { + var data = accountData[sessionId]; + return data && data[field]; +}; + +Accounts._setAccountData = function (sessionId, field, value) { + var data = accountData[sessionId]; + if (data === undefined) + delete data[field]; + else + data[field] = value; +}; + 
+Meteor.server.onConnection(function (session) { + accountData[session.id] = {session: session}; + session.onClose(function () { + removeSessionFromToken(session.id); + delete accountData[session.id]; + }); +}); + + /// /// RECONNECT TOKENS /// @@ -147,21 +176,33 @@ Meteor.methods({ // token -> list of session ids var sessionsByLoginToken = {}; -// Remove the session from the list of open sessions for the token. -var removeSessionFromToken = function (token, sessionId) { - sessionsByLoginToken[token] = _.without( - sessionsByLoginToken[token], - sessionId - ); - if (_.isEmpty(sessionsByLoginToken[token])) - delete sessionsByLoginToken[token]; +// test hook +Accounts._getTokenSessions = function (token) { + return sessionsByLoginToken[token]; }; -var loginTokenChanged = function (sessionId, newToken, oldToken) { - var self = this; - if (oldToken) { - removeSessionFromToken(oldToken, sessionId); +// Remove the session from the list of open sessions for the token. +var removeSessionFromToken = function (sessionId) { + var token = Accounts._getLoginToken(sessionId); + if (token) { + sessionsByLoginToken[token] = _.without( + sessionsByLoginToken[token], + sessionId + ); + if (_.isEmpty(sessionsByLoginToken[token])) + delete sessionsByLoginToken[token]; } +}; + +Accounts._getLoginToken = function (sessionId) { + return Accounts._getAccountData(sessionId, 'loginToken'); +}; + +Accounts._setLoginToken = function (sessionId, newToken) { + removeSessionFromToken(sessionId); + + Accounts._setAccountData(sessionId, 'loginToken', newToken); + if (newToken) { if (! 
_.has(sessionsByLoginToken, newToken)) sessionsByLoginToken[newToken] = []; @@ -169,43 +210,15 @@ var loginTokenChanged = function (sessionId, newToken, oldToken) { } }; - -Accounts._getLoginToken = function (methodInvocation) { - return methodInvocation._sessionData.loginToken; -}; - -Accounts._setLoginToken = function (methodInvocation, newToken) { - var oldToken = methodInvocation._sessionData.loginToken; - methodInvocation._sessionData.loginToken = newToken; - loginTokenChanged(methodInvocation.session.id, newToken, oldToken); -}; - - -// sessionId -> SessionHandle -// XXX Wouldn't be necessary if there was an API to get the session -// from a session id via Meteor.server.sessions[sessionId].sessionHandle -var sessions = {}; - -Meteor.server.onConnection(function (session) { - var sessionId = session.id; - sessions[sessionId] = session; - session.onClose(function () { - var token = session._sessionData.loginToken; - if (token) - removeSessionFromToken(token, sessionId); - delete sessions[sessionId]; - }); -}); - - - // Close all open sessions associated with any of the tokens in // `tokens`. 
var closeSessionsForTokens = function (tokens) { _.each(tokens, function (token) { if (_.has(sessionsByLoginToken, token)) { _.each(sessionsByLoginToken[token], function (sessionId) { - sessions[sessionId] && sessions[sessionId].close(); + var session = Accounts._getAccountData(sessionId, 'session'); + if (session) + session.close(); }); } }); diff --git a/packages/accounts-base/accounts_tests.js b/packages/accounts-base/accounts_tests.js index 5326704e43..492dafd873 100644 --- a/packages/accounts-base/accounts_tests.js +++ b/packages/accounts-base/accounts_tests.js @@ -208,3 +208,25 @@ Tinytest.addAsync('accounts - expire numeric token', function (test, onComplete) }); Accounts._expireTokens(new Date(), result.id); }); + + +Tinytest.addAsync( + 'accounts - session data cleaned up', + function (test, onComplete) { + establishConnection( + test, + function (connection, session) { + // onClose callbacks are called in order, so we run after the + // close callback in accounts. + session.onClose(function () { + test.isFalse(Accounts._getAccountData(session.id, 'session')); + onComplete(); + }); + + test.isTrue(Accounts._getAccountData(session.id, 'session')); + session.close(); + }, + onComplete + ); + } +); diff --git a/packages/accounts-base/package.js b/packages/accounts-base/package.js index 871fb0c444..0afdb5a07e 100644 --- a/packages/accounts-base/package.js +++ b/packages/accounts-base/package.js @@ -45,5 +45,6 @@ Package.on_test(function (api) { api.use('accounts-base'); api.use('tinytest'); api.use('random'); + api.use('test-helpers'); api.add_files('accounts_tests.js', 'server'); }); diff --git a/packages/accounts-password/password_server.js b/packages/accounts-password/password_server.js index 5488ef2914..9e8e9070e7 100644 --- a/packages/accounts-password/password_server.js +++ b/packages/accounts-password/password_server.js @@ -65,11 +65,10 @@ Meteor.methods({beginPasswordExchange: function (request) { var srp = new SRP.Server(verifier); var challenge = 
srp.issueChallenge({A: request.A}); - // save off results in the current session so we can verify them - // later. - this._sessionData.srpChallenge = - { userId: user._id, M: srp.M, HAMK: srp.HAMK }; - + // Save results so we can verify them later. + Accounts._setAccountData(this.session.id, 'srpChallenge', + { userId: user._id, M: srp.M, HAMK: srp.HAMK } + ); return challenge; }}); @@ -83,11 +82,11 @@ Accounts.registerLoginHandler(function (options) { // we're always called from within a 'login' method, so this should // be safe. var currentInvocation = DDP._CurrentInvocation.get(); - var serialized = currentInvocation._sessionData.srpChallenge; + var serialized = Accounts._getAccountData(currentInvocation.session.id, 'srpChallenge'); if (!serialized || serialized.M !== options.srp.M) throw new Meteor.Error(403, "Incorrect password"); // Only can use challenges once. - delete currentInvocation._sessionData.srpChallenge; + Accounts._setAccountData(currentInvocation.session.id, 'srpChallenge', undefined); var userId = serialized.userId; var user = Meteor.users.findOne(userId); @@ -168,14 +167,14 @@ Meteor.methods({changePassword: function (options) { password: Match.Optional(String) }); - var serialized = this._sessionData.srpChallenge; + var serialized = Accounts._getAccountData(this.session.id, 'srpChallenge'); if (!serialized || serialized.M !== options.M) throw new Meteor.Error(403, "Incorrect password"); if (serialized.userId !== this.userId) // No monkey business! throw new Meteor.Error(403, "Incorrect password"); // Only can use challenges once. - delete this._sessionData.srpChallenge; + Accounts._setAccountData(this.session.id, 'srpChallenge', undefined); var verifier = options.srp; if (!verifier && options.password) { @@ -320,8 +319,8 @@ Meteor.methods({resetPassword: function (token, newVerifier) { // logged in as. Make sure to avoid logging ourselves out if this // happens. 
But also make sure not to leave the connection in a state // of having a bad token set if things fail. - var oldToken = Accounts._getLoginToken(this); - Accounts._setLoginToken(this, null); + var oldToken = Accounts._getLoginToken(this.session.id); + Accounts._setLoginToken(this.session.id, null); try { // Update the user record by: @@ -338,11 +337,11 @@ Meteor.methods({resetPassword: function (token, newVerifier) { }); } catch (err) { // update failed somehow. reset to old token. - Accounts._setLoginToken(this, oldToken); + Accounts._setLoginToken(this.session.id, oldToken); throw err; } - Accounts._setLoginToken(this, stampedLoginToken.token); + Accounts._setLoginToken(this.session.id, stampedLoginToken.token); this.setUserId(user._id); return { @@ -436,7 +435,7 @@ Meteor.methods({verifyEmail: function (token) { $push: {'services.resume.loginTokens': stampedLoginToken}}); this.setUserId(user._id); - Accounts._setLoginToken(this, stampedLoginToken.token); + Accounts._setLoginToken(this.session.id, stampedLoginToken.token); return { token: stampedLoginToken.token, tokenExpires: Accounts._tokenExpiration(stampedLoginToken.when), @@ -515,7 +514,7 @@ Meteor.methods({createUser: function (options) { // client gets logged in as the new user afterwards. 
this.setUserId(result.id); - Accounts._setLoginToken(this, result.token); + Accounts._setLoginToken(this.session.id, result.token); return result; }}); diff --git a/packages/accounts-password/password_tests.js b/packages/accounts-password/password_tests.js index b8d0417221..d4e1c70e61 100644 --- a/packages/accounts-password/password_tests.js +++ b/packages/accounts-password/password_tests.js @@ -547,4 +547,36 @@ if (Meteor.isServer) (function () { }); // XXX would be nice to test Accounts.config({forbidClientAccountCreation: true}) + + Tinytest.addAsync( + 'passwords - login tokens cleaned up', + function (test, onComplete) { + var username = Random.id(); + Accounts.createUser({ + username: username, + password: 'password' + }); + + establishConnection( + test, + function (connection, session) { + session.onClose(function () { + test.isFalse(_.contains(Accounts._getTokenSessions(token), session.id)); + onComplete(); + }); + var result = connection.call('login', { + user: {username: username}, + password: 'password' + }); + test.isTrue(result); + var token = Accounts._getAccountData(session.id, 'loginToken'); + test.isTrue(token); + test.equal(result.token, token); + test.isTrue(_.contains(Accounts._getTokenSessions(token), session.id)); + connection.disconnect(); + }, + onComplete + ); + } + ); }) (); diff --git a/packages/livedata/livedata_common.js b/packages/livedata/livedata_common.js index fda4a32437..cd63594040 100644 --- a/packages/livedata/livedata_common.js +++ b/packages/livedata/livedata_common.js @@ -31,12 +31,6 @@ MethodInvocation = function (options) { // On the server, the session this method call came in on. this.session = options.session; - - // Scratch data scoped to this connection (livedata_connection on the - // client, livedata_session on the server). This is only used - // internally, but we should have real and documented API for this - // sort of thing someday. 
- this._sessionData = options.sessionData; }; _.extend(MethodInvocation.prototype, { diff --git a/packages/livedata/livedata_connection.js b/packages/livedata/livedata_connection.js index c65de81cf6..7622b9361a 100644 --- a/packages/livedata/livedata_connection.js +++ b/packages/livedata/livedata_connection.js @@ -153,10 +153,6 @@ var Connection = function (url, options) { // an error) self._subscriptions = {}; - // Per-connection scratch area. This is only used internally, but we - // should have real and documented API for this sort of thing someday. - self._sessionData = {}; - // Reactive userId. self._userId = null; self._userIdDeps = (typeof Deps !== "undefined") && new Deps.Dependency; @@ -632,8 +628,7 @@ _.extend(Connection.prototype, { }; var invocation = new MethodInvocation({ isSimulation: true, - userId: self.userId(), setUserId: setUserId, - sessionData: self._sessionData + userId: self.userId(), setUserId: setUserId }); if (!alreadyInSimulation) diff --git a/packages/livedata/livedata_server.js b/packages/livedata/livedata_server.js index d9f879ce57..3e2c604dec 100644 --- a/packages/livedata/livedata_server.js +++ b/packages/livedata/livedata_server.js @@ -232,10 +232,6 @@ var Session = function (server, version, socket) { self.userId = null; - // Per-connection scratch area. This is only used internally, but we - // should have real and documented API for this sort of thing someday. 
- self.sessionData = {}; - self.collectionViews = {}; // Set this to false to not send messages when collectionViews are @@ -265,8 +261,7 @@ var Session = function (server, version, socket) { self._closeCallbacks.push( Meteor.bindEnvironment(fn, "connection session onClose callback") ); - }, - _sessionData: self.sessionData + } }; socket.send(stringifyDDP({msg: 'connected', @@ -559,8 +554,7 @@ _.extend(Session.prototype, { userId: self.userId, setUserId: setUserId, unblock: unblock, - session: self.sessionHandle, - sessionData: self.sessionData + session: self.sessionHandle }); try { var result = DDPServer._CurrentWriteFence.withValue(fence, function () { @@ -1264,8 +1258,7 @@ _.extend(Server.prototype, { isSimulation: false, userId: userId, setUserId: setUserId, - session: session, - sessionData: session && session._sessionData + session: session }); try { var result = DDP._CurrentInvocation.withValue(invocation, function () { diff --git a/packages/livedata/livedata_server_tests.js b/packages/livedata/livedata_server_tests.js index 997c766a9b..b9c2847739 100644 --- a/packages/livedata/livedata_server_tests.js +++ b/packages/livedata/livedata_server_tests.js @@ -1,90 +1,5 @@ var Fiber = Npm.require('fibers'); -// like pollUntil but doesn't have to be called from testAsyncMulti. -// -// Call `fn` periodically until it returns true. If it does, call -// `success`. If it doesn't before the timeout, call `failed`. -// -// An implementation that used fibers would be easier to use, but -// don't want to rule out the possibility of eventually also running -// these tests from the client (which would need an additional -// signaling mechanism to tell the server when to do particular steps -// such as closing the connection on the server side). 
-var poll = function (fn, success, failed) { - var timeout = 10000; - var step = 200; - var start = (new Date()).valueOf(); - var helper = function () { - if (fn()) { - success(); - return; - } - if (start + timeout < (new Date()).valueOf()) { - failed(); - return; - } - Meteor.setTimeout(helper, step); - }; - helper(); -}; - - -// Establish a connection from the server to the server, and wait -// until the client side of the connection has received the session -// id. On success call `succeeded` with two arguments, the client -// side `connection` and the server side `session`. Call `failed` on -// failure. -var establishConnection = function (test, succeeded, failed) { - // The connection from the client side. - var connection; - - // Track incoming sessions server side until we know which one is - // ours. - var sessions = {}; - - // Add incoming sessions to `sessions`. - var onConnectionHandle = Meteor.onConnection(function (session) { - test.isTrue(_.isString(session.id), "session handle id exists and is a string"); - if (sessions[session.id]) { - test.fail("onConnection callback called multiple times for same session id"); - failed(); - } - else { - sessions[session.id] = session; - } - }); - - // We've succeeded when we get the session id on the client side. - var onClientSessionId = function (sessionId) { - test.isTrue(connection.status().connected); - var session = sessions[sessionId]; - if (! session) { - test.fail("No onConnection received server side for connected client"); - failed(); - } - else { - onConnectionHandle.stop(); - succeeded(connection, session); - } - }; - - // Connect and wait until the connection receives its session id. - // Disable retries so that when the connection is closed we don't - // automatically keep reconnecting on the client side. 
- connection = DDP.connect(Meteor.absoluteUrl(), {retry: false}); - poll( - function () { - return connection._lastSessionId; - }, - function () { - onClientSessionId(connection._lastSessionId); - }, - function () { - test.fail("client side of connection did not receive a session id"); - failed(); - } - ); -}; Tinytest.addAsync( "livedata server - sessionHandle.onClose()", @@ -112,7 +27,7 @@ Tinytest.addAsync( test, function (connection, session) { // Wait for the connection to be closed from the server side. - poll( + simplePoll( function () { return ! connection.status().connected; }, @@ -186,26 +101,6 @@ Tinytest.addAsync( ); -Tinytest.addAsync( - "livedata server - session data in nested method invocation", - function (test, onComplete) { - establishConnection( - test, - function (connection, session) { - session._sessionData.foo = 123; - innerCalled = function (methodInvocation) { - test.equal(methodInvocation._sessionData.foo, 123); - onComplete(); - }; - connection.call('livedata_server_test_outer'); - connection.disconnect(); - }, - onComplete - ); - } -); - - // sessionId -> callback var onSubscription = {}; diff --git a/packages/test-helpers/async_multi.js b/packages/test-helpers/async_multi.js index ad831def4b..fbbb34db35 100644 --- a/packages/test-helpers/async_multi.js +++ b/packages/test-helpers/async_multi.js @@ -152,21 +152,35 @@ testAsyncMulti = function (name, funcs) { }); }; -pollUntil = function (expect, f, timeout, step, noFail) { - noFail = noFail || false; +// Call `fn` periodically until it returns true. If it does, call +// `success`. If it doesn't before the timeout, call `failed`. 
+simplePoll = function (fn, success, failed, timeout, step) { + timeout = timeout || 10000; step = step || 100; - var expectation = expect(true); var start = (new Date()).valueOf(); var helper = function () { - if (f()) { - expectation(true); + if (fn()) { + success(); return; } if (start + timeout < (new Date()).valueOf()) { - expectation(noFail); + failed(); return; } Meteor.setTimeout(helper, step); }; helper(); }; + +pollUntil = function (expect, f, timeout, step, noFail) { + noFail = noFail || false; + step = step || 100; + var expectation = expect(true); + simplePoll( + f, + function () { expectation(true) }, + function () { expectation(noFail) }, + timeout, + step + ); +}; diff --git a/packages/test-helpers/connection.js b/packages/test-helpers/connection.js new file mode 100644 index 0000000000..34f6821e83 --- /dev/null +++ b/packages/test-helpers/connection.js @@ -0,0 +1,56 @@ +// Establish a connection from the server to the server, and wait +// until the client side of the connection has received the session +// id. On success call `succeeded` with two arguments, the client +// side `connection` and the server side `session`. Call `failed` on +// failure. +establishConnection = function (test, succeeded, failed) { + // The connection from the client side. + var connection; + + // Track incoming sessions server side until we know which one is + // ours. + var sessions = {}; + + // Add incoming sessions to `sessions`. + var onConnectionHandle = Meteor.onConnection(function (session) { + test.isTrue(_.isString(session.id), "session handle id exists and is a string"); + if (sessions[session.id]) { + test.fail("onConnection callback called multiple times for same session id"); + failed(); + } + else { + sessions[session.id] = session; + } + }); + + // We've succeeded when we get the session id on the client side. + var onClientSessionId = function (sessionId) { + test.isTrue(connection.status().connected); + var session = sessions[sessionId]; + if (! 
session) { + test.fail("No onConnection received server side for connected client"); + failed(); + } + else { + onConnectionHandle.stop(); + succeeded(connection, session); + } + }; + + // Connect and wait until the connection receives its session id. + // Disable retries so that when the connection is closed we don't + // automatically keep reconnecting on the client side. + connection = DDP.connect(Meteor.absoluteUrl(), {retry: false}); + simplePoll( + function () { + return connection._lastSessionId; + }, + function () { + onClientSessionId(connection._lastSessionId); + }, + function () { + test.fail("client side of connection did not receive a session id"); + failed(); + } + ); +}; diff --git a/packages/test-helpers/package.js b/packages/test-helpers/package.js index 0235cb5fe5..1f02d27fc5 100644 --- a/packages/test-helpers/package.js +++ b/packages/test-helpers/package.js @@ -12,7 +12,8 @@ Package.on_use(function (api) { 'pollUntil', 'WrappedFrag', 'try_all_permutations', 'StubStream', 'SeededRandom', 'ReactiveVar', 'OnscreenDiv', 'clickElement', 'blurElement', 'focusElement', 'simulateEvent', 'getStyleProperty', 'canonicalizeHtml', - 'withCallbackLogger', 'testAsyncMulti'], {testOnly: true}); + 'withCallbackLogger', 'testAsyncMulti', 'simplePoll', + 'establishConnection'], {testOnly: true}); api.add_files('try_all_permutations.js'); api.add_files('async_multi.js'); @@ -25,6 +26,7 @@ Package.on_use(function (api) { api.add_files('current_style.js'); api.add_files('reactivevar.js'); api.add_files('callback_logger.js'); + api.add_files('connection.js', 'server'); }); Package.on_test(function (api) { From 1bd29762062be2e088384a7cea8b073ec219cf13 Mon Sep 17 00:00:00 2001 From: Nick Martin Date: Mon, 2 Dec 2013 23:23:08 -0800 Subject: [PATCH 166/190] =?UTF-8?q?Rework=20tests=20that=20didn=E2=80=99t?= =?UTF-8?q?=20work=20when=20multiple=20clients=20run=20at=20once.?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- 
packages/livedata/livedata_server_tests.js | 29 ++++++++-------------- 1 file changed, 10 insertions(+), 19 deletions(-) diff --git a/packages/livedata/livedata_server_tests.js b/packages/livedata/livedata_server_tests.js index b9c2847739..cbc5519301 100644 --- a/packages/livedata/livedata_server_tests.js +++ b/packages/livedata/livedata_server_tests.js @@ -19,7 +19,7 @@ Tinytest.addAsync( } ); - + Tinytest.addAsync( "livedata server - sessionHandle.close()", function (test, onComplete) { @@ -47,18 +47,13 @@ Tinytest.addAsync( ); -var innerCalled = null; - Meteor.methods({ livedata_server_test_inner: function () { - var self = this; - Meteor.defer(function () { - innerCalled(self); - }); + return this.session.id; }, livedata_server_test_outer: function () { - Meteor.call('livedata_server_test_inner'); + return Meteor.call('livedata_server_test_inner'); } }); @@ -69,12 +64,10 @@ Tinytest.addAsync( establishConnection( test, function (connection, session) { - innerCalled = function (methodInvocation) { - test.equal(methodInvocation.session.id, session.id); - onComplete(); - }; - connection.call('livedata_server_test_inner'); + var res = connection.call('livedata_server_test_inner'); + test.equal(res, session.id); connection.disconnect(); + onComplete(); }, onComplete ); @@ -88,18 +81,16 @@ Tinytest.addAsync( establishConnection( test, function (connection, session) { - innerCalled = function (methodInvocation) { - test.equal(methodInvocation.session.id, session.id); - onComplete(); - }; - connection.call('livedata_server_test_outer'); + var res = connection.call('livedata_server_test_outer'); + test.equal(res, session.id); connection.disconnect(); + onComplete(); }, onComplete ); } ); - + // sessionId -> callback var onSubscription = {}; From d1cb3742f38f81f34034e74f7d6d3c4034c651fd Mon Sep 17 00:00:00 2001 From: Nick Martin Date: Tue, 3 Dec 2013 02:40:17 -0800 Subject: [PATCH 167/190] =?UTF-8?q?Remove=20email=20stubbing=20code=20that?= 
=?UTF-8?q?=20wasn=E2=80=99t=20safe=20for=20running=20multiple=20times=20o?= =?UTF-8?q?n=20the=20server.?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- packages/accounts-password/password_tests.js | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/packages/accounts-password/password_tests.js b/packages/accounts-password/password_tests.js index d4e1c70e61..16f99855fc 100644 --- a/packages/accounts-password/password_tests.js +++ b/packages/accounts-password/password_tests.js @@ -488,18 +488,14 @@ if (Meteor.isServer) (function () { Tinytest.add( 'passwords - createUser hooks', function (test) { - var email = Random.id() + '@example.com'; + var username = Random.id(); test.throws(function () { // should fail the new user validators - Accounts.createUser({email: email, profile: {invalid: true}}); - }); + Accounts.createUser({username: username, profile: {invalid: true}}); + }); - // disable sending emails - var oldEmailSend = Email.send; - Email.send = function() {}; - var userId = Accounts.createUser({email: email, + var userId = Accounts.createUser({username: username, testOnCreateUserHook: true}); - Email.send = oldEmailSend; test.isTrue(userId); var user = Meteor.users.findOne(userId); From 3229b3657845413beaeaf238d0e959b5b5b2444a Mon Sep 17 00:00:00 2001 From: Nick Martin Date: Tue, 3 Dec 2013 03:13:13 -0800 Subject: [PATCH 168/190] Make test more robust to other tests leaving things in a bad state. 
--- packages/livedata/livedata_test_service.js | 2 +- packages/livedata/livedata_tests.js | 6 ++++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/packages/livedata/livedata_test_service.js b/packages/livedata/livedata_test_service.js index 59efe39972..52d0ff53de 100644 --- a/packages/livedata/livedata_test_service.js +++ b/packages/livedata/livedata_test_service.js @@ -42,7 +42,7 @@ Meteor.methods({ } }, setUserId: function(userId) { - check(userId, String); + check(userId, Match.OneOf(String, null)); this.setUserId(userId); } }); diff --git a/packages/livedata/livedata_tests.js b/packages/livedata/livedata_tests.js index 3bbd58de7a..1fe7ce9702 100644 --- a/packages/livedata/livedata_tests.js +++ b/packages/livedata/livedata_tests.js @@ -365,6 +365,10 @@ if (Meteor.isClient) { messages.length = 0; // clear messages without creating a new object }; + // make sure we're not already logged in. can happen if accounts + // tests fail oddly. + Meteor.apply("setUserId", [null], {wait: true}, expect(function () {})); + Meteor.subscribe("objectsWithUsers", expect(function() { expectMessages(1, 0, ["owned by none"]); Meteor.apply("setUserId", ["1"], {wait: true}, afterFirstSetUserId); @@ -406,6 +410,8 @@ if (Meteor.isClient) { test.isFalse(err); test.equal(result, "100"); })); + // clean up + Meteor.apply("setUserId", [null], {wait: true}, expect(function () {})); } ]); } From 9181de2d1eb8f284dfa5396b407100f90c3be9a2 Mon Sep 17 00:00:00 2001 From: Nick Martin Date: Tue, 3 Dec 2013 03:29:29 -0800 Subject: [PATCH 169/190] Move StubStream from test-helpers to livedata. It is only used in livedata tests. 
--- packages/livedata/package.js | 1 + packages/{test-helpers => livedata}/stub_stream.js | 0 packages/test-helpers/package.js | 3 +-- 3 files changed, 2 insertions(+), 2 deletions(-) rename packages/{test-helpers => livedata}/stub_stream.js (100%) diff --git a/packages/livedata/package.js b/packages/livedata/package.js index e45ea5e07e..6c7778f808 100644 --- a/packages/livedata/package.js +++ b/packages/livedata/package.js @@ -64,6 +64,7 @@ Package.on_test(function (api) { api.use('test-helpers', ['client', 'server']); api.use(['underscore', 'tinytest', 'random', 'deps', 'minimongo']); + api.add_files('stub_stream.js'); api.add_files('livedata_server_tests.js', 'server'); api.add_files('livedata_connection_tests.js', ['client', 'server']); api.add_files('livedata_tests.js', ['client', 'server']); diff --git a/packages/test-helpers/stub_stream.js b/packages/livedata/stub_stream.js similarity index 100% rename from packages/test-helpers/stub_stream.js rename to packages/livedata/stub_stream.js diff --git a/packages/test-helpers/package.js b/packages/test-helpers/package.js index 1f02d27fc5..6b40ead553 100644 --- a/packages/test-helpers/package.js +++ b/packages/test-helpers/package.js @@ -9,7 +9,7 @@ Package.on_use(function (api) { api.use(['spark', 'jquery'], 'client'); api.export([ - 'pollUntil', 'WrappedFrag', 'try_all_permutations', 'StubStream', + 'pollUntil', 'WrappedFrag', 'try_all_permutations', 'SeededRandom', 'ReactiveVar', 'OnscreenDiv', 'clickElement', 'blurElement', 'focusElement', 'simulateEvent', 'getStyleProperty', 'canonicalizeHtml', 'withCallbackLogger', 'testAsyncMulti', 'simplePoll', @@ -20,7 +20,6 @@ Package.on_use(function (api) { api.add_files('event_simulation.js'); api.add_files('seeded_random.js'); api.add_files('canonicalize_html.js'); - api.add_files('stub_stream.js'); api.add_files('onscreendiv.js'); api.add_files('wrappedfrag.js'); api.add_files('current_style.js'); From f328079780ff4c9dd2c77e23f07938d0a66970bc Mon Sep 17 00:00:00 2001 
From: Nick Martin Date: Tue, 3 Dec 2013 03:49:34 -0800 Subject: [PATCH 170/190] Rename establishConnection to be more test-y. Also comment on inappropriate placement. --- packages/accounts-base/accounts_tests.js | 2 +- packages/accounts-password/password_tests.js | 2 +- packages/livedata/livedata_server_tests.js | 10 +++++----- packages/test-helpers/connection.js | 2 +- packages/test-helpers/package.js | 9 ++++++++- 5 files changed, 16 insertions(+), 9 deletions(-) diff --git a/packages/accounts-base/accounts_tests.js b/packages/accounts-base/accounts_tests.js index 492dafd873..8f99c40da6 100644 --- a/packages/accounts-base/accounts_tests.js +++ b/packages/accounts-base/accounts_tests.js @@ -213,7 +213,7 @@ Tinytest.addAsync('accounts - expire numeric token', function (test, onComplete) Tinytest.addAsync( 'accounts - session data cleaned up', function (test, onComplete) { - establishConnection( + makeTestConnection( test, function (connection, session) { // onClose callbacks are called in order, so we run after the diff --git a/packages/accounts-password/password_tests.js b/packages/accounts-password/password_tests.js index 16f99855fc..f42880797f 100644 --- a/packages/accounts-password/password_tests.js +++ b/packages/accounts-password/password_tests.js @@ -553,7 +553,7 @@ if (Meteor.isServer) (function () { password: 'password' }); - establishConnection( + makeTestConnection( test, function (connection, session) { session.onClose(function () { diff --git a/packages/livedata/livedata_server_tests.js b/packages/livedata/livedata_server_tests.js index cbc5519301..b22f8456ca 100644 --- a/packages/livedata/livedata_server_tests.js +++ b/packages/livedata/livedata_server_tests.js @@ -4,7 +4,7 @@ var Fiber = Npm.require('fibers'); Tinytest.addAsync( "livedata server - sessionHandle.onClose()", function (test, onComplete) { - establishConnection( + makeTestConnection( test, function (connection, session) { // On the server side, wait for the connection to be closed. 
@@ -23,7 +23,7 @@ Tinytest.addAsync( Tinytest.addAsync( "livedata server - sessionHandle.close()", function (test, onComplete) { - establishConnection( + makeTestConnection( test, function (connection, session) { // Wait for the connection to be closed from the server side. @@ -61,7 +61,7 @@ Meteor.methods({ Tinytest.addAsync( "livedata server - session in method invocation", function (test, onComplete) { - establishConnection( + makeTestConnection( test, function (connection, session) { var res = connection.call('livedata_server_test_inner'); @@ -78,7 +78,7 @@ Tinytest.addAsync( Tinytest.addAsync( "livedata server - session in nested method invocation", function (test, onComplete) { - establishConnection( + makeTestConnection( test, function (connection, session) { var res = connection.call('livedata_server_test_outer'); @@ -106,7 +106,7 @@ Meteor.publish("livedata_server_test_sub", function (sessionId) { Tinytest.addAsync( "livedata server - session in publish function", function (test, onComplete) { - establishConnection( + makeTestConnection( test, function (connection, session) { onSubscription[session.id] = function (subscription) { diff --git a/packages/test-helpers/connection.js b/packages/test-helpers/connection.js index 34f6821e83..ef11bd14e4 100644 --- a/packages/test-helpers/connection.js +++ b/packages/test-helpers/connection.js @@ -3,7 +3,7 @@ // id. On success call `succeeded` with two arguments, the client // side `connection` and the server side `session`. Call `failed` on // failure. -establishConnection = function (test, succeeded, failed) { +makeTestConnection = function (test, succeeded, failed) { // The connection from the client side. 
var connection; diff --git a/packages/test-helpers/package.js b/packages/test-helpers/package.js index 6b40ead553..d99cf175bd 100644 --- a/packages/test-helpers/package.js +++ b/packages/test-helpers/package.js @@ -8,12 +8,19 @@ Package.on_use(function (api) { 'domutils']); api.use(['spark', 'jquery'], 'client'); + // XXX for connection.js. Not sure this really belongs in + // test-helpers. It probably would be better off in livedata. But it's + // unclear how to put it in livedata so that it can both be used by + // other package tests and not included in the non-test bundle. + api.use('livedata'); + + api.export([ 'pollUntil', 'WrappedFrag', 'try_all_permutations', 'SeededRandom', 'ReactiveVar', 'OnscreenDiv', 'clickElement', 'blurElement', 'focusElement', 'simulateEvent', 'getStyleProperty', 'canonicalizeHtml', 'withCallbackLogger', 'testAsyncMulti', 'simplePoll', - 'establishConnection'], {testOnly: true}); + 'makeTestConnection'], {testOnly: true}); api.add_files('try_all_permutations.js'); api.add_files('async_multi.js'); From 91ad6c0189f39e58f09cda830308162f36ef384f Mon Sep 17 00:00:00 2001 From: Nick Martin Date: Tue, 3 Dec 2013 03:57:28 -0800 Subject: [PATCH 171/190] Remove trailing whitespace. 
--- docs/client/api.js | 2 +- packages/accounts-password/password_tests.js | 2 +- packages/appcache/appcache-server.js | 4 ++-- packages/livedata/retry.js | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/docs/client/api.js b/docs/client/api.js index 89a4cf0c75..1dd0fb0d48 100644 --- a/docs/client/api.js +++ b/docs/client/api.js @@ -505,7 +505,7 @@ Template.api.onConnection = { descr: "The function to call with the session when a new DDP connection is established."} ] }; - + Template.api.ddp_session = { id: "ddp_session", name: "DDP.Session", diff --git a/packages/accounts-password/password_tests.js b/packages/accounts-password/password_tests.js index f42880797f..e1a7c47445 100644 --- a/packages/accounts-password/password_tests.js +++ b/packages/accounts-password/password_tests.js @@ -552,7 +552,7 @@ if (Meteor.isServer) (function () { username: username, password: 'password' }); - + makeTestConnection( test, function (connection, session) { diff --git a/packages/appcache/appcache-server.js b/packages/appcache/appcache-server.js index d503440381..89f27ea321 100644 --- a/packages/appcache/appcache-server.js +++ b/packages/appcache/appcache-server.js @@ -104,12 +104,12 @@ WebApp.connectHandlers.use(function(req, res, next) { if (Package.autoupdate) { var version = Package.autoupdate.Autoupdate.autoupdateVersion; - if (version !== WebApp.clientHash) + if (version !== WebApp.clientHash) manifest += "# " + version + "\n"; } manifest += "\n"; - + manifest += "CACHE:" + "\n"; manifest += "/" + "\n"; _.each(WebApp.clientProgram.manifest, function (resource) { diff --git a/packages/livedata/retry.js b/packages/livedata/retry.js index ac155c19e0..d5fdda4d66 100644 --- a/packages/livedata/retry.js +++ b/packages/livedata/retry.js @@ -54,7 +54,7 @@ _.extend(Retry.prototype, { // Call `fn` after a delay, based on the `count` of which retry this is. 
retryLater: function (count, fn) { - var self = this; + var self = this; var timeout = self._timeout(count); if (self.retryTimer) clearTimeout(self.retryTimer); From a3bbb6dfa129b52a909356f2d65f35128a3e6cf5 Mon Sep 17 00:00:00 2001 From: Nick Martin Date: Wed, 4 Dec 2013 01:47:50 -0800 Subject: [PATCH 172/190] Update and tweak docs for new name. --- docs/client/api.html | 41 +++++++++++++++++++---------------------- docs/client/api.js | 40 +++++++++------------------------------- docs/client/docs.js | 5 ++--- 3 files changed, 30 insertions(+), 56 deletions(-) diff --git a/docs/client/api.html b/docs/client/api.html index 67c2333c91..ff54f3d4b9 100644 --- a/docs/client/api.html +++ b/docs/client/api.html @@ -175,7 +175,7 @@ publish handler, this is the place to stop the observes. {{> api_box subscription_error}} {{> api_box subscription_stop}} -{{> api_box subscription_session}} +{{> api_box subscription_connection}} {{> api_box subscribe}} @@ -278,8 +278,7 @@ invocation object, which provides the following: begin running. * `userId`: the id of the current user. * `setUserId`: a function that associates the current client with a user. -* `session`: on the server, the [session](#ddp_session) this method -call came in on. +* `connection`: on the server, the [connection](#meteor_onconnection) this method call was received on. Calling `methods` on the client defines *stub* functions associated with server methods of the same name. You don't have to define a stub for @@ -332,7 +331,7 @@ invocation from a client won't start until the Nth invocation returns. However, you can change this by calling `this.unblock`. This will allow the N+1th invocation to start running in a new fiber. -{{> api_box method_invocation_session}} +{{> api_box method_invocation_connection}} {{> api_box error}} @@ -479,26 +478,29 @@ The callback is called with a single argument, the server-side [session](#ddp_session) representing the connection from the client. -{{> api_box ddp_session}} +
+{{#dtdd name="id" type="String"}} +A globally unique id for this connection. +{{/dtdd}} + +{{#dtdd name="close" type="Function"}} +Close this DDP connection. The client is free to reconnect, but will +receive a different connection with a new `id` if it does. +{{/dtdd}} + +{{#dtdd name="onClose" type="Function"}} +Register a callback to be called when the connection is closed. +{{/dtdd}} +
{{#note}} Currently when a client reconnects to the server (such as after temporarily losing its Internet connection), it will get a new session each time. -When session reconnects are implemented, clients will be able to -reconnect to the same session. +In the future, when session reconnection is implemented, clients will be +able to reconnect and resume the same session. -The `Meteor.onConnection` callback will be called only once for a -session. A client reconnecting to the same session won't cause the -callback to be called again. - -The client closing the connection won't cause the session to close -right away because the client might reconnect; instead the session -will close after a timeout. Once the session has been closed -(including when the session `close` method is called), a reconnect -from the client attempting to reuse the session will receive a new -session instead. {{/note}} @@ -3111,11 +3113,6 @@ code can read `data.txt` by running: {{> api_box_args options}} {{/if}} -{{#if fields}} -

Fields

-{{> api_box_args fields}} -{{/if}} - {{#if body}} {{#better_markdown}}{{{body}}}{{/better_markdown}} {{/if}} diff --git a/docs/client/api.js b/docs/client/api.js index 1dd0fb0d48..e6292200ec 100644 --- a/docs/client/api.js +++ b/docs/client/api.js @@ -318,11 +318,11 @@ Template.api.subscription_userId = { }; -Template.api.subscription_session = { - id: "publish_session", - name: "this.session", +Template.api.subscription_connection = { + id: "publish_connection", + name: "this.connection", locus: "Server", - descr: ["Access inside the publish function. The [session](#ddp_session) this subscription came in on."] + descr: ["Access inside the publish function. The incoming [connection](#meteor_onconnection) for this subscription."] }; @@ -389,11 +389,11 @@ Template.api.method_invocation_isSimulation = { descr: ["Access inside a method invocation. Boolean value, true if this invocation is a stub."] }; -Template.api.method_invocation_session = { - id: "method_session", - name: "this.session", +Template.api.method_invocation_connection = { + id: "method_connection", + name: "this.connection", locus: "Server", - descr: ["Access inside a method invocation. The [session](#meteor_onconnection) this method call came in on."] + descr: ["Access inside a method invocation. The [connection](#meteor_onconnection) this method was received on."] }; Template.api.error = { @@ -502,32 +502,10 @@ Template.api.onConnection = { args: [ {name: "callback", type: "function", - descr: "The function to call with the session when a new DDP connection is established."} + descr: "The function to call when a new DDP connection is established."} ] }; -Template.api.ddp_session = { - id: "ddp_session", - name: "DDP.Session", - locus: "Server", - descr: ["On the server, this object represents a connection from a client."], - fields: [ - {name: "id", - type: "String", - descr: "The globally unique session id." 
- }, - {name: "close", - type: "Function", - descr: "Close this session and the associated DDP connection. The client is free to reconnect, but will receive a different session if it does." - }, - {name: "onClose", - type: "Function", - descr: "Register a callback to be called when the session is closed." - } - ] -}; - - // onAutopublish Template.api.meteor_collection = { diff --git a/docs/client/docs.js b/docs/client/docs.js index 6fafc02dbd..4ecb004ab5 100644 --- a/docs/client/docs.js +++ b/docs/client/docs.js @@ -123,7 +123,7 @@ var toc = [ {instance: "this", name: "onStop", id: "publish_onstop"}, {instance: "this", name: "error", id: "publish_error"}, {instance: "this", name: "stop", id: "publish_stop"}, - {instance: "this", name: "session", id: "publish_session"} + {instance: "this", name: "connection", id: "publish_connection"} ], "Meteor.subscribe" ], @@ -134,7 +134,7 @@ var toc = [ {instance: "this", name: "setUserId", id: "method_setUserId"}, {instance: "this", name: "isSimulation", id: "method_issimulation"}, {instance: "this", name: "unblock", id: "method_unblock"}, - {instance: "this", name: "session", id: "method_session"} + {instance: "this", name: "connection", id: "method_connection"} ], "Meteor.Error", "Meteor.call", @@ -146,7 +146,6 @@ var toc = [ "Meteor.reconnect", "Meteor.disconnect", "Meteor.onConnection", - "DDP.Session", "DDP.connect" ], From 76c78645ffef31ca8730c889b410ecb8dc9250af Mon Sep 17 00:00:00 2001 From: Nick Martin Date: Wed, 4 Dec 2013 02:10:47 -0800 Subject: [PATCH 173/190] Rename `this.session` to `this.connection`. 
--- packages/accounts-base/accounts_server.js | 82 +++++++++---------- packages/accounts-base/accounts_tests.js | 12 +-- packages/accounts-password/password_server.js | 22 ++--- packages/accounts-password/password_tests.js | 16 ++-- packages/livedata/livedata_common.js | 4 +- packages/livedata/livedata_server.js | 20 ++--- packages/livedata/livedata_server_tests.js | 58 ++++++------- packages/test-helpers/connection.js | 32 ++++---- 8 files changed, 124 insertions(+), 122 deletions(-) diff --git a/packages/accounts-base/accounts_server.js b/packages/accounts-base/accounts_server.js index af8d0708dc..de84a23160 100644 --- a/packages/accounts-base/accounts_server.js +++ b/packages/accounts-base/accounts_server.js @@ -78,14 +78,14 @@ Meteor.methods({ var result = tryAllLoginHandlers(options); if (result !== null) { this.setUserId(result.id); - Accounts._setLoginToken(this.session.id, result.token); + Accounts._setLoginToken(this.connection.id, result.token); } return result; }, logout: function() { - var token = Accounts._getLoginToken(this.session.id); - Accounts._setLoginToken(this.session.id, null); + var token = Accounts._getLoginToken(this.connection.id); + Accounts._setLoginToken(this.connection.id, null); if (token && this.userId) removeLoginToken(this.userId, token); this.setUserId(null); @@ -143,27 +143,27 @@ Meteor.methods({ /// ACCOUNT DATA /// -// sessionId -> {session, loginToken, srpChallenge} +// connectionId -> {connection, loginToken, srpChallenge} var accountData = {}; -Accounts._getAccountData = function (sessionId, field) { - var data = accountData[sessionId]; +Accounts._getAccountData = function (connectionId, field) { + var data = accountData[connectionId]; return data && data[field]; }; -Accounts._setAccountData = function (sessionId, field, value) { - var data = accountData[sessionId]; +Accounts._setAccountData = function (connectionId, field, value) { + var data = accountData[connectionId]; if (data === undefined) delete data[field]; else 
data[field] = value; }; -Meteor.server.onConnection(function (session) { - accountData[session.id] = {session: session}; - session.onClose(function () { - removeSessionFromToken(session.id); - delete accountData[session.id]; +Meteor.server.onConnection(function (connection) { + accountData[connection.id] = {connection: connection}; + connection.onClose(function () { + removeConnectionFromToken(connection.id); + delete accountData[connection.id]; }); }); @@ -173,52 +173,52 @@ Meteor.server.onConnection(function (session) { /// /// support reconnecting using a meteor login token -// token -> list of session ids -var sessionsByLoginToken = {}; +// token -> list of connection ids +var connectionsByLoginToken = {}; // test hook -Accounts._getTokenSessions = function (token) { - return sessionsByLoginToken[token]; +Accounts._getTokenConnections = function (token) { + return connectionsByLoginToken[token]; }; -// Remove the session from the list of open sessions for the token. -var removeSessionFromToken = function (sessionId) { - var token = Accounts._getLoginToken(sessionId); +// Remove the connection from the list of open connections for the token. 
+var removeConnectionFromToken = function (connectionId) { + var token = Accounts._getLoginToken(connectionId); if (token) { - sessionsByLoginToken[token] = _.without( - sessionsByLoginToken[token], - sessionId + connectionsByLoginToken[token] = _.without( + connectionsByLoginToken[token], + connectionId ); - if (_.isEmpty(sessionsByLoginToken[token])) - delete sessionsByLoginToken[token]; + if (_.isEmpty(connectionsByLoginToken[token])) + delete connectionsByLoginToken[token]; } }; -Accounts._getLoginToken = function (sessionId) { - return Accounts._getAccountData(sessionId, 'loginToken'); +Accounts._getLoginToken = function (connectionId) { + return Accounts._getAccountData(connectionId, 'loginToken'); }; -Accounts._setLoginToken = function (sessionId, newToken) { - removeSessionFromToken(sessionId); +Accounts._setLoginToken = function (connectionId, newToken) { + removeConnectionFromToken(connectionId); - Accounts._setAccountData(sessionId, 'loginToken', newToken); + Accounts._setAccountData(connectionId, 'loginToken', newToken); if (newToken) { - if (! _.has(sessionsByLoginToken, newToken)) - sessionsByLoginToken[newToken] = []; - sessionsByLoginToken[newToken].push(sessionId); + if (! _.has(connectionsByLoginToken, newToken)) + connectionsByLoginToken[newToken] = []; + connectionsByLoginToken[newToken].push(connectionId); } }; -// Close all open sessions associated with any of the tokens in +// Close all open connections associated with any of the tokens in // `tokens`. 
-var closeSessionsForTokens = function (tokens) { +var closeConnectionsForTokens = function (tokens) { _.each(tokens, function (token) { - if (_.has(sessionsByLoginToken, token)) { - _.each(sessionsByLoginToken[token], function (sessionId) { - var session = Accounts._getAccountData(sessionId, 'session'); - if (session) - session.close(); + if (_.has(connectionsByLoginToken, token)) { + _.each(connectionsByLoginToken[token], function (connectionId) { + var connection = Accounts._getAccountData(connectionId, 'connection'); + if (connection) + connection.close(); }); } }); @@ -727,7 +727,7 @@ Meteor.startup(function () { /// var closeTokensForUser = function (userTokens) { - closeSessionsForTokens(_.pluck(userTokens, "token")); + closeConnectionsForTokens(_.pluck(userTokens, "token")); }; // Like _.difference, but uses EJSON.equals to compute which values to return. diff --git a/packages/accounts-base/accounts_tests.js b/packages/accounts-base/accounts_tests.js index 8f99c40da6..dcd9cd28ec 100644 --- a/packages/accounts-base/accounts_tests.js +++ b/packages/accounts-base/accounts_tests.js @@ -211,20 +211,20 @@ Tinytest.addAsync('accounts - expire numeric token', function (test, onComplete) Tinytest.addAsync( - 'accounts - session data cleaned up', + 'accounts - connection data cleaned up', function (test, onComplete) { makeTestConnection( test, - function (connection, session) { + function (clientConn, serverConn) { // onClose callbacks are called in order, so we run after the // close callback in accounts. 
- session.onClose(function () { - test.isFalse(Accounts._getAccountData(session.id, 'session')); + serverConn.onClose(function () { + test.isFalse(Accounts._getAccountData(serverConn.id, 'connection')); onComplete(); }); - test.isTrue(Accounts._getAccountData(session.id, 'session')); - session.close(); + test.isTrue(Accounts._getAccountData(serverConn.id, 'connection')); + serverConn.close(); }, onComplete ); diff --git a/packages/accounts-password/password_server.js b/packages/accounts-password/password_server.js index 9e8e9070e7..f65ae8798f 100644 --- a/packages/accounts-password/password_server.js +++ b/packages/accounts-password/password_server.js @@ -66,7 +66,7 @@ Meteor.methods({beginPasswordExchange: function (request) { var challenge = srp.issueChallenge({A: request.A}); // Save results so we can verify them later. - Accounts._setAccountData(this.session.id, 'srpChallenge', + Accounts._setAccountData(this.connection.id, 'srpChallenge', { userId: user._id, M: srp.M, HAMK: srp.HAMK } ); return challenge; @@ -82,11 +82,11 @@ Accounts.registerLoginHandler(function (options) { // we're always called from within a 'login' method, so this should // be safe. var currentInvocation = DDP._CurrentInvocation.get(); - var serialized = Accounts._getAccountData(currentInvocation.session.id, 'srpChallenge'); + var serialized = Accounts._getAccountData(currentInvocation.connection.id, 'srpChallenge'); if (!serialized || serialized.M !== options.srp.M) throw new Meteor.Error(403, "Incorrect password"); // Only can use challenges once. 
- Accounts._setAccountData(currentInvocation.session.id, 'srpChallenge', undefined); + Accounts._setAccountData(currentInvocation.connection.id, 'srpChallenge', undefined); var userId = serialized.userId; var user = Meteor.users.findOne(userId); @@ -167,14 +167,14 @@ Meteor.methods({changePassword: function (options) { password: Match.Optional(String) }); - var serialized = Accounts._getAccountData(this.session.id, 'srpChallenge'); + var serialized = Accounts._getAccountData(this.connection.id, 'srpChallenge'); if (!serialized || serialized.M !== options.M) throw new Meteor.Error(403, "Incorrect password"); if (serialized.userId !== this.userId) // No monkey business! throw new Meteor.Error(403, "Incorrect password"); // Only can use challenges once. - Accounts._setAccountData(this.session.id, 'srpChallenge', undefined); + Accounts._setAccountData(this.connection.id, 'srpChallenge', undefined); var verifier = options.srp; if (!verifier && options.password) { @@ -319,8 +319,8 @@ Meteor.methods({resetPassword: function (token, newVerifier) { // logged in as. Make sure to avoid logging ourselves out if this // happens. But also make sure not to leave the connection in a state // of having a bad token set if things fail. - var oldToken = Accounts._getLoginToken(this.session.id); - Accounts._setLoginToken(this.session.id, null); + var oldToken = Accounts._getLoginToken(this.connection.id); + Accounts._setLoginToken(this.connection.id, null); try { // Update the user record by: @@ -337,11 +337,11 @@ Meteor.methods({resetPassword: function (token, newVerifier) { }); } catch (err) { // update failed somehow. reset to old token. 
- Accounts._setLoginToken(this.session.id, oldToken); + Accounts._setLoginToken(this.connection.id, oldToken); throw err; } - Accounts._setLoginToken(this.session.id, stampedLoginToken.token); + Accounts._setLoginToken(this.connection.id, stampedLoginToken.token); this.setUserId(user._id); return { @@ -435,7 +435,7 @@ Meteor.methods({verifyEmail: function (token) { $push: {'services.resume.loginTokens': stampedLoginToken}}); this.setUserId(user._id); - Accounts._setLoginToken(this.session.id, stampedLoginToken.token); + Accounts._setLoginToken(this.connection.id, stampedLoginToken.token); return { token: stampedLoginToken.token, tokenExpires: Accounts._tokenExpiration(stampedLoginToken.when), @@ -514,7 +514,7 @@ Meteor.methods({createUser: function (options) { // client gets logged in as the new user afterwards. this.setUserId(result.id); - Accounts._setLoginToken(this.session.id, result.token); + Accounts._setLoginToken(this.connection.id, result.token); return result; }}); diff --git a/packages/accounts-password/password_tests.js b/packages/accounts-password/password_tests.js index e1a7c47445..1da7dff9a4 100644 --- a/packages/accounts-password/password_tests.js +++ b/packages/accounts-password/password_tests.js @@ -555,21 +555,23 @@ if (Meteor.isServer) (function () { makeTestConnection( test, - function (connection, session) { - session.onClose(function () { - test.isFalse(_.contains(Accounts._getTokenSessions(token), session.id)); + function (clientConn, serverConn) { + serverConn.onClose(function () { + test.isFalse(_.contains( + Accounts._getTokenConnections(token), serverConn.id)); onComplete(); }); - var result = connection.call('login', { + var result = clientConn.call('login', { user: {username: username}, password: 'password' }); test.isTrue(result); - var token = Accounts._getAccountData(session.id, 'loginToken'); + var token = Accounts._getAccountData(serverConn.id, 'loginToken'); test.isTrue(token); test.equal(result.token, token); - 
test.isTrue(_.contains(Accounts._getTokenSessions(token), session.id)); - connection.disconnect(); + test.isTrue(_.contains( + Accounts._getTokenConnections(token), serverConn.id)); + clientConn.disconnect(); }, onComplete ); diff --git a/packages/livedata/livedata_common.js b/packages/livedata/livedata_common.js index cd63594040..9c3d31bc78 100644 --- a/packages/livedata/livedata_common.js +++ b/packages/livedata/livedata_common.js @@ -29,8 +29,8 @@ MethodInvocation = function (options) { // reruns subscriptions this._setUserId = options.setUserId || function () {}; - // On the server, the session this method call came in on. - this.session = options.session; + // On the server, the connection this method call came in on. + this.connection = options.connection; }; _.extend(MethodInvocation.prototype, { diff --git a/packages/livedata/livedata_server.js b/packages/livedata/livedata_server.js index 3e2c604dec..07c475b814 100644 --- a/packages/livedata/livedata_server.js +++ b/packages/livedata/livedata_server.js @@ -247,19 +247,19 @@ var Session = function (server, version, socket) { // we want to buffer up for when we are done rerunning subscriptions self._pendingReady = []; - // List of callbacks to call when this session is closed. + // List of callbacks to call when this connection is closed. self._closeCallbacks = []; - // The `SessionHandle` for this session, passed to + // The `ConnectionHandle` for this session, passed to // `Meteor.server.onConnection` callbacks. 
- self.sessionHandle = { + self.connectionHandle = { id: self.id, close: function () { self.server._closeSession(self); }, onClose: function (fn) { self._closeCallbacks.push( - Meteor.bindEnvironment(fn, "connection session onClose callback") + Meteor.bindEnvironment(fn, "connection onClose callback") ); } }; @@ -554,7 +554,7 @@ _.extend(Session.prototype, { userId: self.userId, setUserId: setUserId, unblock: unblock, - session: self.sessionHandle + connection: self.connectionHandle }); try { var result = DDPServer._CurrentWriteFence.withValue(fence, function () { @@ -728,7 +728,7 @@ var Subscription = function ( session, handler, subscriptionId, params, name) { var self = this; self._session = session; // type is Session - self.session = session.sessionHandle; // public API object + self.connection = session.connectionHandle; // public API object self._handler = handler; @@ -1087,7 +1087,7 @@ _.extend(Server.prototype, { self.sessions[socket._meteorSession.id] = socket._meteorSession; _.each(self.connectionCallbacks, function (callback) { if (socket._meteorSession) - callback(socket._meteorSession.sessionHandle); + callback(socket._meteorSession.connectionHandle); }); } else if (!msg.version) { // connect message without a version. 
This means an old (pre-pre1) @@ -1244,21 +1244,21 @@ _.extend(Server.prototype, { var setUserId = function() { throw new Error("Can't call setUserId on a server initiated method call"); }; - var session = null; + var connection = null; var currentInvocation = DDP._CurrentInvocation.get(); if (currentInvocation) { userId = currentInvocation.userId; setUserId = function(userId) { currentInvocation.setUserId(userId); }; - session = currentInvocation.session; + connection = currentInvocation.connection; } var invocation = new MethodInvocation({ isSimulation: false, userId: userId, setUserId: setUserId, - session: session + connection: connection }); try { var result = DDP._CurrentInvocation.withValue(invocation, function () { diff --git a/packages/livedata/livedata_server_tests.js b/packages/livedata/livedata_server_tests.js index b22f8456ca..5c8eb4023f 100644 --- a/packages/livedata/livedata_server_tests.js +++ b/packages/livedata/livedata_server_tests.js @@ -2,17 +2,17 @@ var Fiber = Npm.require('fibers'); Tinytest.addAsync( - "livedata server - sessionHandle.onClose()", + "livedata server - connectionHandle.onClose()", function (test, onComplete) { makeTestConnection( test, - function (connection, session) { + function (clientConn, serverConn) { // On the server side, wait for the connection to be closed. - session.onClose(function () { + serverConn.onClose(function () { onComplete(); }); // Close the connection from the client. - connection.disconnect(); + clientConn.disconnect(); }, onComplete ); @@ -21,15 +21,15 @@ Tinytest.addAsync( Tinytest.addAsync( - "livedata server - sessionHandle.close()", + "livedata server - connectionHandle.close()", function (test, onComplete) { makeTestConnection( test, - function (connection, session) { + function (clientConn, serverConn) { // Wait for the connection to be closed from the server side. simplePoll( function () { - return ! connection.status().connected; + return ! 
clientConn.status().connected; }, onComplete, function () { @@ -39,7 +39,7 @@ Tinytest.addAsync( ); // Close the connection from the server. - session.close(); + serverConn.close(); }, onComplete ); @@ -49,7 +49,7 @@ Tinytest.addAsync( Meteor.methods({ livedata_server_test_inner: function () { - return this.session.id; + return this.connection.id; }, livedata_server_test_outer: function () { @@ -59,14 +59,14 @@ Meteor.methods({ Tinytest.addAsync( - "livedata server - session in method invocation", + "livedata server - connection in method invocation", function (test, onComplete) { makeTestConnection( test, - function (connection, session) { - var res = connection.call('livedata_server_test_inner'); - test.equal(res, session.id); - connection.disconnect(); + function (clientConn, serverConn) { + var res = clientConn.call('livedata_server_test_inner'); + test.equal(res, serverConn.id); + clientConn.disconnect(); onComplete(); }, onComplete @@ -76,14 +76,14 @@ Tinytest.addAsync( Tinytest.addAsync( - "livedata server - session in nested method invocation", + "livedata server - connection in nested method invocation", function (test, onComplete) { makeTestConnection( test, - function (connection, session) { - var res = connection.call('livedata_server_test_outer'); - test.equal(res, session.id); - connection.disconnect(); + function (clientConn, serverConn) { + var res = clientConn.call('livedata_server_test_outer'); + test.equal(res, serverConn.id); + clientConn.disconnect(); onComplete(); }, onComplete @@ -92,11 +92,11 @@ Tinytest.addAsync( ); -// sessionId -> callback +// connectionId -> callback var onSubscription = {}; -Meteor.publish("livedata_server_test_sub", function (sessionId) { - var callback = onSubscription[sessionId]; +Meteor.publish("livedata_server_test_sub", function (connectionId) { + var callback = onSubscription[connectionId]; if (callback) callback(this); this.stop(); @@ -104,18 +104,18 @@ Meteor.publish("livedata_server_test_sub", function 
(sessionId) { Tinytest.addAsync( - "livedata server - session in publish function", + "livedata server - connection in publish function", function (test, onComplete) { makeTestConnection( test, - function (connection, session) { - onSubscription[session.id] = function (subscription) { - delete onSubscription[session.id]; - test.equal(subscription.session.id, session.id); - connection.disconnect(); + function (clientConn, serverConn) { + onSubscription[serverConn.id] = function (subscription) { + delete onSubscription[serverConn.id]; + test.equal(subscription.connection.id, serverConn.id); + clientConn.disconnect(); onComplete(); }; - connection.subscribe("livedata_server_test_sub", session.id); + clientConn.subscribe("livedata_server_test_sub", serverConn.id); } ); } diff --git a/packages/test-helpers/connection.js b/packages/test-helpers/connection.js index ef11bd14e4..4da2e73f72 100644 --- a/packages/test-helpers/connection.js +++ b/packages/test-helpers/connection.js @@ -1,52 +1,52 @@ // Establish a connection from the server to the server, and wait // until the client side of the connection has received the session // id. On success call `succeeded` with two arguments, the client -// side `connection` and the server side `session`. Call `failed` on +// side connection and the server side connection handle. Call `failed` on // failure. makeTestConnection = function (test, succeeded, failed) { // The connection from the client side. - var connection; + var clientConn; - // Track incoming sessions server side until we know which one is + // Track incoming connections server side until we know which one is // ours. - var sessions = {}; + var serverConns = {}; - // Add incoming sessions to `sessions`. - var onConnectionHandle = Meteor.onConnection(function (session) { - test.isTrue(_.isString(session.id), "session handle id exists and is a string"); - if (sessions[session.id]) { + // Add incoming connections to `serverConns`. 
+ var onConnectionHandle = Meteor.onConnection(function (serverConn) { + test.isTrue(_.isString(serverConn.id), "connection handle id exists and is a string"); + if (serverConns[serverConn.id]) { test.fail("onConnection callback called multiple times for same session id"); failed(); } else { - sessions[session.id] = session; + serverConns[serverConn.id] = serverConn; } }); // We've succeeded when we get the session id on the client side. var onClientSessionId = function (sessionId) { - test.isTrue(connection.status().connected); - var session = sessions[sessionId]; - if (! session) { + test.isTrue(clientConn.status().connected); + var serverConn = serverConns[sessionId]; + if (! serverConn) { test.fail("No onConnection received server side for connected client"); failed(); } else { onConnectionHandle.stop(); - succeeded(connection, session); + succeeded(clientConn, serverConn); } }; // Connect and wait until the connection receives its session id. // Disable retries so that when the connection is closed we don't // automatically keep reconnecting on the client side. - connection = DDP.connect(Meteor.absoluteUrl(), {retry: false}); + clientConn = DDP.connect(Meteor.absoluteUrl(), {retry: false}); simplePoll( function () { - return connection._lastSessionId; + return clientConn._lastSessionId; }, function () { - onClientSessionId(connection._lastSessionId); + onClientSessionId(clientConn._lastSessionId); }, function () { test.fail("client side of connection did not receive a session id"); From bd978adcbd95e72cc51bbaa7ddcb445134e7b379 Mon Sep 17 00:00:00 2001 From: Nick Martin Date: Wed, 4 Dec 2013 02:25:53 -0800 Subject: [PATCH 174/190] History note for session hooks. --- History.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/History.md b/History.md index be3919064f..6df859ccae 100644 --- a/History.md +++ b/History.md @@ -6,6 +6,11 @@ client code changes; server only code changes will not cause the page to reload. 
+* Add `Meteor.onConnection` and add `this.connection` to method + invocations and publish functions. These can be used to store data + associated with individual clients between subscriptions and method + calls. See http://docs.meteor.com/#meteor_onconnection for details. + * Bundler failures cause non-zero exit code in `meteor run`. #1515 * Fix `meteor run` with settings files containing non-ASCII characters. #1497 From e253ecd46a8185d7bb2e92eed20da7983ce55b95 Mon Sep 17 00:00:00 2001 From: Andrew Wilcox Date: Wed, 4 Dec 2013 14:56:37 -0500 Subject: [PATCH 175/190] Another session -> connection renaming. --- docs/client/api.html | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/client/api.html b/docs/client/api.html index ff54f3d4b9..34777678f7 100644 --- a/docs/client/api.html +++ b/docs/client/api.html @@ -475,8 +475,8 @@ updates are not required. be called on new connections. The callback is called with a single argument, the server-side -[session](#ddp_session) representing the connection from the client. - +`connection` representing the connection from the client. This object +contains the following fields:
{{#dtdd name="id" type="String"}} From 79f845f7352eaacc63d580171d014386db1739d4 Mon Sep 17 00:00:00 2001 From: Andrew Wilcox Date: Wed, 4 Dec 2013 15:09:07 -0500 Subject: [PATCH 176/190] Clarify note on client reconnections. --- docs/client/api.html | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/docs/client/api.html b/docs/client/api.html index 34777678f7..131135c82e 100644 --- a/docs/client/api.html +++ b/docs/client/api.html @@ -495,12 +495,15 @@ Register a callback to be called when the connection is closed. {{#note}} Currently when a client reconnects to the server (such as after -temporarily losing its Internet connection), it will get a new session -each time. - -In the future, when session reconnection is implemented, clients will be -able to reconnect and resume the same session. +temporarily losing its Internet connection), it will get a new +connection each time. The `onConnection` callbacks will be called +again, and the new connection will have a new connection `id`. +In the future, when client reconnection is fully implemented, +reconnecting from the client will reconnect to the same connection on +the server: the `onConnection` callback won't be called for that +connection again, and the connection will still have the same +connection `id`. {{/note}} From 496936d21adf74332cc0ba3c16fc544f86e55167 Mon Sep 17 00:00:00 2001 From: Nick Martin Date: Wed, 4 Dec 2013 20:43:58 -0800 Subject: [PATCH 177/190] review part 1: docs, style, etc. --- docs/client/api.js | 4 ++-- packages/livedata/livedata_connection.js | 3 ++- packages/test-helpers/package.js | 4 +++- 3 files changed, 7 insertions(+), 4 deletions(-) diff --git a/docs/client/api.js b/docs/client/api.js index e6292200ec..5dd26f8685 100644 --- a/docs/client/api.js +++ b/docs/client/api.js @@ -393,7 +393,7 @@ Template.api.method_invocation_connection = { id: "method_connection", name: "this.connection", locus: "Server", - descr: ["Access inside a method invocation. 
The [connection](#meteor_onconnection) this method was received on."] + descr: ["Access inside a method invocation. The [connection](#meteor_onconnection) this method was received on. `null` if the method is not associated with a connection, eg. a server initiated method call."] }; Template.api.error = { @@ -497,7 +497,7 @@ Template.api.connect = { Template.api.onConnection = { id: "meteor_onconnection", name: "Meteor.onConnection(callback)", - locus: "server", + locus: "Server", descr: ["Register a callback to be called when a new DDP connection is made to the server."], args: [ {name: "callback", diff --git a/packages/livedata/livedata_connection.js b/packages/livedata/livedata_connection.js index 7622b9361a..40f21bae09 100644 --- a/packages/livedata/livedata_connection.js +++ b/packages/livedata/livedata_connection.js @@ -628,7 +628,8 @@ _.extend(Connection.prototype, { }; var invocation = new MethodInvocation({ isSimulation: true, - userId: self.userId(), setUserId: setUserId + userId: self.userId(), + setUserId: setUserId }); if (!alreadyInSimulation) diff --git a/packages/test-helpers/package.js b/packages/test-helpers/package.js index d99cf175bd..73076cab80 100644 --- a/packages/test-helpers/package.js +++ b/packages/test-helpers/package.js @@ -11,7 +11,9 @@ Package.on_use(function (api) { // XXX for connection.js. Not sure this really belongs in // test-helpers. It probably would be better off in livedata. But it's // unclear how to put it in livedata so that it can both be used by - // other package tests and not included in the non-test bundle. + // other package tests and not included in the non-test bundle. One + // idea would be to make a new separate package 'ddp-test-helpers' or + // the like. api.use('livedata'); From 0a0059d0818c7b48531abab1688555870f737f6e Mon Sep 17 00:00:00 2001 From: Nick Martin Date: Wed, 4 Dec 2013 20:44:56 -0800 Subject: [PATCH 178/190] typo. 
--- packages/accounts-base/accounts_server.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/accounts-base/accounts_server.js b/packages/accounts-base/accounts_server.js index de84a23160..b26d988fe9 100644 --- a/packages/accounts-base/accounts_server.js +++ b/packages/accounts-base/accounts_server.js @@ -153,7 +153,7 @@ Accounts._getAccountData = function (connectionId, field) { Accounts._setAccountData = function (connectionId, field, value) { var data = accountData[connectionId]; - if (data === undefined) + if (value === undefined) delete data[field]; else data[field] = value; From d4761610da08e7f93d33296fff6f55353c8bd7d0 Mon Sep 17 00:00:00 2001 From: Nick Martin Date: Wed, 4 Dec 2013 20:54:11 -0800 Subject: [PATCH 179/190] need to pass through variable that is used. --- packages/livedata/stream_client_nodejs.js | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/livedata/stream_client_nodejs.js b/packages/livedata/stream_client_nodejs.js index d5d6405c97..7a1aceca44 100644 --- a/packages/livedata/stream_client_nodejs.js +++ b/packages/livedata/stream_client_nodejs.js @@ -96,7 +96,7 @@ _.extend(LivedataTest.ClientStream.prototype, { } var onError = Meteor.bindEnvironment( - function (_this) { + function (_this, error) { if (self.currentConnection !== _this) return; @@ -110,7 +110,7 @@ _.extend(LivedataTest.ClientStream.prototype, { connection.on('error', function (error) { // We have to pass in `this` explicitly because bindEnvironment // doesn't propagate it for us. - onError(this); + onError(this, error); }); var onClose = Meteor.bindEnvironment( From 8ca859b786061453c44c11ebaad52fe4f90cdebc Mon Sep 17 00:00:00 2001 From: Nick Martin Date: Wed, 4 Dec 2013 20:54:36 -0800 Subject: [PATCH 180/190] safety belts. 
--- packages/accounts-base/accounts_server.js | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/packages/accounts-base/accounts_server.js b/packages/accounts-base/accounts_server.js index b26d988fe9..1dde7a7788 100644 --- a/packages/accounts-base/accounts_server.js +++ b/packages/accounts-base/accounts_server.js @@ -153,6 +153,12 @@ Accounts._getAccountData = function (connectionId, field) { Accounts._setAccountData = function (connectionId, field, value) { var data = accountData[connectionId]; + + // safety belt. shouldn't happen. accountData is set in onConnection, + // we don't have a connectionId until it is set. + if (!data) + return; + if (value === undefined) delete data[field]; else @@ -215,10 +221,13 @@ Accounts._setLoginToken = function (connectionId, newToken) { var closeConnectionsForTokens = function (tokens) { _.each(tokens, function (token) { if (_.has(connectionsByLoginToken, token)) { - _.each(connectionsByLoginToken[token], function (connectionId) { - var connection = Accounts._getAccountData(connectionId, 'connection'); - if (connection) - connection.close(); + // safety belt. close should defer potentially yielding callbacks. + Meteor._noYieldsAllowed(function () { + _.each(connectionsByLoginToken[token], function (connectionId) { + var connection = Accounts._getAccountData(connectionId, 'connection'); + if (connection) + connection.close(); + }); }); } }); From 8f05510b59028ecedcb16f0f5a9ac96b72486e34 Mon Sep 17 00:00:00 2001 From: Nick Martin Date: Wed, 4 Dec 2013 21:10:09 -0800 Subject: [PATCH 181/190] Make onClose fire immediately instead of never on already closed connection. 
--- docs/client/api.html | 3 ++- packages/livedata/livedata_server.js | 13 ++++++++++--- packages/livedata/livedata_server_tests.js | 7 ++++++- 3 files changed, 18 insertions(+), 5 deletions(-) diff --git a/docs/client/api.html b/docs/client/api.html index 131135c82e..f171cfba81 100644 --- a/docs/client/api.html +++ b/docs/client/api.html @@ -489,7 +489,8 @@ receive a different connection with a new `id` if it does. {{/dtdd}} {{#dtdd name="onClose" type="Function"}} -Register a callback to be called when the connection is closed. +Register a callback to be called when the connection is closed. If the +connection is already closed, the callback will be called immediately. {{/dtdd}}
diff --git a/packages/livedata/livedata_server.js b/packages/livedata/livedata_server.js index 07c475b814..b740b3fc8c 100644 --- a/packages/livedata/livedata_server.js +++ b/packages/livedata/livedata_server.js @@ -222,7 +222,10 @@ var Session = function (server, version, socket) { self.initialized = false; self.socket = socket; + // set to null when the session is destroyed. multiple places below + // use this to determine if the session is alive or not. self.inQueue = []; + self.blocked = false; self.workerRunning = false; @@ -258,9 +261,13 @@ var Session = function (server, version, socket) { self.server._closeSession(self); }, onClose: function (fn) { - self._closeCallbacks.push( - Meteor.bindEnvironment(fn, "connection onClose callback") - ); + var cb = Meteor.bindEnvironment(fn, "connection onClose callback"); + if (self.inQueue) { + self._closeCallbacks.push(cb); + } else { + // if we're already closed, call the callback. + Meteor.defer(cb); + } } }; diff --git a/packages/livedata/livedata_server_tests.js b/packages/livedata/livedata_server_tests.js index 5c8eb4023f..8ee882414e 100644 --- a/packages/livedata/livedata_server_tests.js +++ b/packages/livedata/livedata_server_tests.js @@ -9,7 +9,12 @@ Tinytest.addAsync( function (clientConn, serverConn) { // On the server side, wait for the connection to be closed. serverConn.onClose(function () { - onComplete(); + test.isTrue(true); + // Add a new onClose after the connection is already + // closed. See that it fires. + serverConn.onClose(function () { + onComplete(); + }); }); // Close the connection from the client. clientConn.disconnect(); From 6a2c952cd56df2df85c843e8ee92b062589b3625 Mon Sep 17 00:00:00 2001 From: Nick Martin Date: Wed, 4 Dec 2013 21:52:45 -0800 Subject: [PATCH 182/190] Use an object instead of an array to store connection callbacks. This way we can ensure a callback is never called after its stop handle is called. 
--- packages/livedata/livedata_server.js | 20 +++++++++----- packages/livedata/livedata_server_tests.js | 32 ++++++++++++++++++++++ 2 files changed, 45 insertions(+), 7 deletions(-) diff --git a/packages/livedata/livedata_server.js b/packages/livedata/livedata_server.js index b740b3fc8c..036e426a26 100644 --- a/packages/livedata/livedata_server.js +++ b/packages/livedata/livedata_server.js @@ -995,9 +995,12 @@ _.extend(Subscription.prototype, { Server = function () { var self = this; - // List of callbacks to call when a new connection comes in to the - // server and completes DDP version negotiation. - self.connectionCallbacks = []; + // Map of callbacks to call when a new connection comes in to the + // server and completes DDP version negotiation. Use an object instead + // of an array so we can safely remove one from the list while + // iterating over it. + self.connectionCallbacks = {}; + self.nextConnectionCallbackId = 0; self.publish_handlers = {}; self.universal_publish_handlers = []; @@ -1073,11 +1076,12 @@ _.extend(Server.prototype, { fn = Meteor.bindEnvironment(fn, "onConnection callback"); - self.connectionCallbacks.push(fn); + var id = self.nextConnectionCallbackId++; + self.connectionCallbacks[id] = fn; return { stop: function () { - self.connectionCallbacks = _.without(self.connectionCallbacks, fn); + delete self.connectionCallbacks[id]; } }; }, @@ -1092,9 +1096,11 @@ _.extend(Server.prototype, { // Creating a new session socket._meteorSession = new Session(self, version, socket); self.sessions[socket._meteorSession.id] = socket._meteorSession; - _.each(self.connectionCallbacks, function (callback) { - if (socket._meteorSession) + _.each(_.keys(self.connectionCallbacks), function (id) { + if (_.has(self.connectionCallbacks, id) && socket._meteorSession) { + var callback = self.connectionCallbacks[id]; callback(socket._meteorSession.connectionHandle); + } }); } else if (!msg.version) { // connect message without a version. 
This means an old (pre-pre1) diff --git a/packages/livedata/livedata_server_tests.js b/packages/livedata/livedata_server_tests.js index 8ee882414e..4fe5dec901 100644 --- a/packages/livedata/livedata_server_tests.js +++ b/packages/livedata/livedata_server_tests.js @@ -52,6 +52,38 @@ Tinytest.addAsync( ); +testAsyncMulti( + "livedata server - onConnection doesn't get callback after stop.", + [function (test, expect) { + var afterStop = false; + var expectStop1 = expect(); + var stopHandle1 = Meteor.onConnection(function (conn) { + stopHandle2.stop(); + stopHandle1.stop(); + afterStop = true; + // yield to the event loop for a moment to see that no other calls + // to listener2 are called. + Meteor.setTimeout(expectStop1, 10); + }); + var stopHandle2 = Meteor.onConnection(function (conn) { + test.isFalse(afterStop); + }); + + // trigger a connection + var expectConnection = expect(); + makeTestConnection( + test, + function (clientConn, serverConn) { + // Close the connection from the client. + clientConn.disconnect(); + expectConnection(); + }, + expectConnection + ); + }] +); + + Meteor.methods({ livedata_server_test_inner: function () { return this.connection.id; From 6eccf8cbbb074bd750851a3f75595011aa7533ed Mon Sep 17 00:00:00 2001 From: Emily Stark Date: Thu, 5 Dec 2013 17:51:26 -0800 Subject: [PATCH 183/190] Add an optional prefix for bundled js and css files. Set it with `WebAppInternals.setBundledJsCssPrefix(prefix)`. 
--- packages/webapp/webapp_server.js | 10 ++++++++++ tools/bundler.js | 4 ++-- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/packages/webapp/webapp_server.js b/packages/webapp/webapp_server.js index 51a07ab9aa..66ae581ca4 100644 --- a/packages/webapp/webapp_server.js +++ b/packages/webapp/webapp_server.js @@ -18,6 +18,7 @@ var LONG_SOCKET_TIMEOUT = 120*1000; WebApp = {}; WebAppInternals = {}; +var bundledJsCssPrefix; var makeAppNamePathPrefix = function (appName) { return encodeURIComponent(appName).replace(/\./g, '_'); @@ -528,6 +529,11 @@ var runWebAppServer = function () { /##ROOT_URL_PATH_PREFIX##/g, __meteor_runtime_config__.ROOT_URL_PATH_PREFIX || ""); + boilerplateHtml = boilerplateHtml.replace( + /##BUNDLED_JS_CSS_PREFIX##/g, + bundledJsCssPrefix || + __meteor_runtime_config__.ROOT_URL_PATH_PREFIX || ""); + // only start listening after all the startup code has run. var localPort = parseInt(process.env.PORT) || 0; var host = process.env.BIND_IP; @@ -707,3 +713,7 @@ WebAppInternals.inlineScriptsAllowed = function () { WebAppInternals.setInlineScriptsAllowed = function (value) { inlineScriptsAllowed = value; }; + +WebAppInternals.setBundledJsCssPrefix = function (prefix) { + bundledJsCssPrefix = prefix; +}; diff --git a/tools/bundler.js b/tools/bundler.js index 54c54b735e..966deb6610 100644 --- a/tools/bundler.js +++ b/tools/bundler.js @@ -782,13 +782,13 @@ _.extend(ClientTarget.prototype, { '\n' + '\n'); _.each(self.css, function (css) { - html.push(' \n'); }); html.push('\n\n##RUNTIME_CONFIG##\n\n'); _.each(self.js, function (js) { - html.push(' \n'); }); From d16cd33242b23a86f4ccf6cb23002d9ba5f4eddd Mon Sep 17 00:00:00 2001 From: David Glasser Date: Fri, 6 Dec 2013 13:38:51 -0800 Subject: [PATCH 184/190] Improve bundler comment --- tools/bundler.js | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tools/bundler.js b/tools/bundler.js index 966deb6610..07e21fc625 100644 --- a/tools/bundler.js +++ b/tools/bundler.js @@ 
-60,9 +60,9 @@ // - format: "browser-program-pre1" for this version // // - page: path to the template for the HTML to serve when a browser -// loads a page that is part of the application. In the file -// ##HTML_ATTRIBUTES## and ##RUNTIME_CONFIG## will be replaced with -// appropriate values at runtime. +// loads a page that is part of the application. In the file, +// some strings of the format ##FOO## will be replaced with +// appropriate values at runtime by the webapp package. // // - manifest: array of resources to serve with HTTP, each an object: // - path: path of file relative to program.json From 5a8fcee9570a31d3bb8caea38e5485e55d0c6ed0 Mon Sep 17 00:00:00 2001 From: Denis Gorbachev Date: Tue, 26 Nov 2013 11:30:34 +0300 Subject: [PATCH 185/190] Fix docs paragraph --- docs/client/api.html | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/docs/client/api.html b/docs/client/api.html index f171cfba81..d60d29bdbb 100644 --- a/docs/client/api.html +++ b/docs/client/api.html @@ -927,7 +927,9 @@ proposed update.) Return `true` to permit the change. `fieldNames` is an array of the (top-level) fields in `doc` that the client wants to modify, for example -`['name',` `'score']`. `modifier` is the raw Mongo modifier that +`['name',` `'score']`. + +`modifier` is the raw Mongo modifier that the client wants to execute, for example `{$set: {'name.first': "Alice"}, $inc: {score: 1}}`. From 51b786c64c5a3be76f498a21df710350784a61eb Mon Sep 17 00:00:00 2001 From: David Glasser Date: Fri, 6 Dec 2013 13:40:59 -0800 Subject: [PATCH 186/190] docs: Fix line break and punctuation --- docs/client/api.html | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/client/api.html b/docs/client/api.html index d60d29bdbb..9789740397 100644 --- a/docs/client/api.html +++ b/docs/client/api.html @@ -930,8 +930,8 @@ client wants to modify, for example `['name',` `'score']`. 
`modifier` is the raw Mongo modifier that -the client wants to execute, for example `{$set: {'name.first': -"Alice"}, $inc: {score: 1}}`. +the client wants to execute; for example, +`{$set: {'name.first': "Alice"}, $inc: {score: 1}}`. Only Mongo modifiers are supported (operations like `$set` and `$push`). If the user tries to replace the entire document rather than use From 1eff62f0f3f449c4d852e572a3a4a7224d7ace84 Mon Sep 17 00:00:00 2001 From: Nick Martin Date: Fri, 6 Dec 2013 14:10:19 -0800 Subject: [PATCH 187/190] Only count files that actually go in the cache in the cache size check. Fixes #1653. --- packages/appcache/appcache-server.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/appcache/appcache-server.js b/packages/appcache/appcache-server.js index 89f27ea321..833acc6880 100644 --- a/packages/appcache/appcache-server.js +++ b/packages/appcache/appcache-server.js @@ -178,7 +178,7 @@ WebApp.connectHandlers.use(function(req, res, next) { var sizeCheck = function() { var totalSize = 0; _.each(WebApp.clientProgram.manifest, function (resource) { - if (resource.where === 'client') { + if (resource.cacheable && resource.where === 'client') { totalSize += resource.size; } }); From b556a474d3a704bc3065552e9c8513eef32d0cd0 Mon Sep 17 00:00:00 2001 From: Dan Dascalescu Date: Wed, 4 Dec 2013 02:53:39 -0800 Subject: [PATCH 188/190] Mention Meteor.bindEnvironment Google searches for the error fail to find the Meteor docs, and there was no mention of bindEnvironment therein. --- docs/client/api.html | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/docs/client/api.html b/docs/client/api.html index 9789740397..5a52591d58 100644 --- a/docs/client/api.html +++ b/docs/client/api.html @@ -2511,7 +2511,9 @@ these variables have the right values, you need to use instead of `setInterval`. These functions work just like their native JavaScript equivalents. -You'll get an error if you call the native function. 
+If you call the native function, you'll get an error stating that Meteor +code must always run within a Fiber, and advising to use +`Meteor.bindEnvironment`. {{> api_box setTimeout}} From 30e709006eaa8d3c7de286398cd0510951a046b7 Mon Sep 17 00:00:00 2001 From: Mitar Date: Thu, 5 Dec 2013 02:47:10 -0800 Subject: [PATCH 189/190] Fix so that it is really possible to pass null to disable transformation in validators. --- packages/mongo-livedata/collection.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/mongo-livedata/collection.js b/packages/mongo-livedata/collection.js index 3ce91b36c8..ea68480470 100644 --- a/packages/mongo-livedata/collection.js +++ b/packages/mongo-livedata/collection.js @@ -549,7 +549,7 @@ Meteor.Collection.ObjectID = LocalCollection._ObjectID; if (!(options[name] instanceof Function)) { throw new Error(allowOrDeny + ": Value for `" + name + "` must be a function"); } - if (self._transform) + if (self._transform && options.transform !== null) options[name].transform = self._transform; if (options.transform) options[name].transform = Deps._makeNonreactive(options.transform); From 1ebcd8481199414b428ac21a528241c3b400826d Mon Sep 17 00:00:00 2001 From: David Glasser Date: Fri, 6 Dec 2013 14:27:12 -0800 Subject: [PATCH 190/190] Test for #1659 --- packages/mongo-livedata/allow_tests.js | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/packages/mongo-livedata/allow_tests.js b/packages/mongo-livedata/allow_tests.js index bc1bd941f3..f30dfc17fe 100644 --- a/packages/mongo-livedata/allow_tests.js +++ b/packages/mongo-livedata/allow_tests.js @@ -83,6 +83,14 @@ if (Meteor.isServer) { return doc.bar === "bar"; } }); + restrictedCollectionWithTransform.allow({ + // transform: null means that doc here is the top level, not the 'a' + // element. + transform: null, + insert: function (userId, doc) { + return !!doc.topLevelField; + } + }); // two calls to allow to verify that either validator is sufficient. 
var allows = [{ @@ -332,6 +340,13 @@ if (Meteor.isClient) { }, expect(function (e, res) { test.isTrue(e); })); + restrictedCollectionWithTransform.insert({ + a: {foo: "bar"}, + topLevelField: true + }, expect(function (e, res) { + test.isFalse(e); + test.isTrue(res); + })); }, function (test, expect) { test.equal(