Merge remote-tracking branch 'origin/devel' into sso

This commit is contained in:
Emily Stark
2014-01-30 10:18:42 -08:00
55 changed files with 6167 additions and 5274 deletions

View File

@@ -17,6 +17,19 @@
* Upgrade `jquery-waypoints` package from 1.1.7 to 2.0.3. (Contains
backward-incompatible changes).
* Add `frame-src` to `browser-policy-content` and account for
cross-browser CSP disparities.
* Transform functions must return objects and may not change the `_id` field
  (though they may leave it out).
* Upgrade jQuery from 1.8.2 to 1.10.2.
XXX see http://jquery.com/upgrade-guide/1.9/ for incompatibilities
XXX consider taking 1.11 instead, which was released this week
* `force-ssl`: don't require SSL during `meteor run` in IPv6 environments.
## v0.7.0.1
* Two fixes to `meteor run` Mongo startup bugs that could lead to hangs with the

View File

@@ -285,7 +285,6 @@ optimist: https://github.com/substack/node-optimist
mkdirp: https://github.com/substack/node-mkdirp
wordwrap: https://github.com/substack/node-wordwrap
archy: https://github.com/substack/node-archy
shell-quote: https://github.com/substack/node-shell-quote
deep-equal: https://github.com/substack/node-deep-equal
editor: https://github.com/substack/node-editor
minimist: https://github.com/substack/minimist

View File

@@ -654,7 +654,8 @@ methods, documents are passed through the `transform` function before being
returned or passed to callbacks. This allows you to add methods or otherwise
modify the contents of your collection from their database representation. You
can also specify `transform` on a particular `find`, `findOne`, `allow`, or
`deny` call.
`deny` call. Transform functions must return an object and they may not change
the value of the document's `_id` field (though it's OK to leave it out).
// An Animal class that takes a document in its constructor
Animal = function (doc) {
@@ -1753,11 +1754,11 @@ On the client, this function logs in as the newly created user on
successful completion. On the server, it returns the newly created user
id.
On the client, you must pass `password` and one of `username` or `email`
— enough information for the user to be able to log in again
later. On the server, you can pass any subset of these options, but the
user will not be able to log in until it has an identifier and a
password.
On the client, you must pass `password` and at least one of `username` or
`email` — enough information for the user to be able to log in again
later. On the server, you do not need to specify `password`, but the user will
not be able to log in until it has a password (eg, set with
[`Accounts.setPassword`](#accounts_setpassword)).
To create an account without a password on the server and still let the
user pick their own password, call `createUser` with the `email` option
@@ -1937,7 +1938,7 @@ effects of `created`. It fires once and is the last callback to fire.
{{> api_box template_events}}
Declare event handers for instances of this template. Multiple calls add
Declare event handlers for instances of this template. Multiple calls add
new event handlers in addition to the existing ones.
See [Event Maps](#eventmaps) for a detailed description of the event

View File

@@ -826,10 +826,11 @@ To get started, run
This command will generate a fully-contained Node.js application in the form of
a tarball. To run this application, you need to provide Node.js 0.10 and a
MongoDB server. (The current release of Meteor has been tested with Node
0.10.22, and is recommended for use with 0.10.22 through 0.10.24 only.) You can
then run the application by invoking node, specifying the HTTP port for the
application to listen on, and the MongoDB endpoint. If you don't already have a
MongoDB server, we can recommend our friends at [MongoHQ](http://mongohq.com).
0.10.25; older versions contain a serious bug that can cause production servers
to stall.) You can then run the application by invoking node, specifying the
HTTP port for the application to listen on, and the MongoDB endpoint. If
you don't already have a MongoDB server, we can recommend our friends at
[MongoHQ](http://mongohq.com).
$ PORT=3000 MONGO_URL=mongodb://localhost:27017/myapp node bundle/main.js

View File

@@ -111,7 +111,7 @@ Disallows inline CSS.
Finally, you can configure a whitelist of allowed requests that various types of
content can make. The following functions are defined for the content types
script, object, image, media, font, and connect.
script, object, image, media, font, frame, and connect.
<dl class="callbacks">
{{#dtdd "BrowserPolicy.content.allow&lt;ContentType&gt;Origin(origin)"}}
@@ -119,8 +119,11 @@ Allows this type of content to be loaded from the given origin. `origin` is a
string and can include an optional scheme (such as `http` or `https`), an
optional wildcard at the beginning, and an optional port which can be a
wildcard. Examples include `example.com`, `https://*.example.com`, and
`example.com:*`. You can call these functions multiple times with different
origins to specify a whitelist of allowed origins.
`example.com:*`. You can call these functions multiple times with
different origins to specify a whitelist of allowed origins. Origins
that don't specify a protocol will allow content over both HTTP and
HTTPS: passing `example.com` will allow content from both
`http://example.com` and `https://example.com`.
{{/dtdd}}
{{#dtdd "BrowserPolicy.content.allow&lt;ContentType&gt;DataUrl()"}}
@@ -159,6 +162,12 @@ allows images to have their `src` attributes point to images served from
`https://example.com`.
* `BrowserPolicy.content.allowConnectOrigin("https://example.com")` allows XMLHttpRequest
and WebSocket connections to `https://example.com`.
* `BrowserPolicy.content.allowFrameOrigin("https://example.com")` allows
your site to load the origin `https://example.com` in a frame or
iframe. The `BrowserPolicy.framing` API allows you to control which
sites can frame your site, while
`BrowserPolicy.content.allowFrameOrigin` allows you to control which
sites can be loaded inside frames on your site.
{{/better_markdown}}

View File

@@ -1,5 +1,3 @@
* { padding: 0; margin: 0; }
html, body { height: 100%; }
#controlpane {

18
meteor
View File

@@ -1,6 +1,8 @@
#!/bin/bash
BUNDLE_VERSION=0.3.26
# danger will robinson! mother:config/download-dev-bundles.sh only goes up to
# 0.3.30!
BUNDLE_VERSION=0.3.29
# OS Check. Put here because here is where we download the precompiled
# bundles that are arch specific.
@@ -51,15 +53,25 @@ function install_dev_bundle {
rm -rf "$BUNDLE_TMPDIR"
mkdir "$BUNDLE_TMPDIR"
# fyi: URL duplicated in packages/dev-bundle-fetcher/dev-bundle
DEV_BUNDLE_URL_ROOT="https://d3sqy0vbqsdhku.cloudfront.net/"
# If you set $USE_TEST_DEV_BUNDLE_SERVER then we will download
# dev bundles copied by copy-dev-bundle-from-jenkins.sh without --prod.
# It still only does this if the version number has changed
# (setting it won't cause it to automatically delete a prod dev bundle).
if [ -n "$USE_TEST_DEV_BUNDLE_SERVER" ] ; then
DEV_BUNDLE_URL_ROOT="https://com.meteor.static.s3.amazonaws.com/test/"
fi
if [ -f "$SCRIPT_DIR/$TARBALL" ] ; then
echo "Skipping download and installing kit from $SCRIPT_DIR/$TARBALL" >&2
tar -xzf "$SCRIPT_DIR/$TARBALL" -C "$BUNDLE_TMPDIR"
elif [ -n "$SAVE_DEV_BUNDLE_TARBALL" ] ; then
# URL duplicated in tools/server/target.sh.in
curl -# "https://d3sqy0vbqsdhku.cloudfront.net/$TARBALL" >"$SCRIPT_DIR/$TARBALL"
curl -# "$DEV_BUNDLE_URL_ROOT$TARBALL" >"$SCRIPT_DIR/$TARBALL"
tar -xzf "$SCRIPT_DIR/$TARBALL" -C "$BUNDLE_TMPDIR"
else
curl -# "https://d3sqy0vbqsdhku.cloudfront.net/$TARBALL" | tar -xzf - -C "$BUNDLE_TMPDIR"
curl -# "$DEV_BUNDLE_URL_ROOT$TARBALL" | tar -xzf - -C "$BUNDLE_TMPDIR"
fi
test -x "${BUNDLE_TMPDIR}/bin/node" # bomb out if it didn't work, eg no net

View File

@@ -32,43 +32,6 @@
-webkit-box-shadow: @arguments; // For Android
}
////////// Display: Inline-block
.display-inline-block () {
display: inline-block;
// IE 7 hacks (disabled)
//*display: inline;
//*zoom: 1;
}
////////// Gradients
.vertical-gradient (@topColor: #fff, @bottomColor: #000) {
// Fallback in absence of gradients
background-color: mix(@topColor, @bottomColor, 60%);
// FF 3.6+
background-image: -moz-linear-gradient(top, @topColor, @bottomColor);
// Safari 4+, Chrome 2+
background-image: -webkit-gradient(linear, 0 0, 0 100%, from(@topColor), to(@bottomColor));
// Safari 5.1+, Chrome 10+
background-image: -webkit-linear-gradient(top, @topColor, @bottomColor);
// Opera 11.10
background-image: -o-linear-gradient(top, @topColor, @bottomColor);
// Standard, IE10
background-image: linear-gradient(to bottom, @topColor, @bottomColor);
background-repeat: repeat-x;
// IE9 and down
// XXX This gradient hack causes gradients to overflow the rounded corners
// in IE9. We make the same call as Bootstrap here: keep the rounded
// corners and withhold the gradients.
// filter: e(%("progid:DXImageTransform.Microsoft.gradient(startColorstr='%d', endColorstr='%d', GradientType=0)",argb(@topColor),argb(@bottomColor)));
}
.reset-ie-gradient () {
filter: e(%("progid:DXImageTransform.Microsoft.gradient(enabled = false)"));
}
////////// Unselectable
.unselectable () {
@@ -86,9 +49,19 @@
//////////////////// LOGIN BUTTONS
@login-buttons-accounts-dialog-width: 198px;
@login-buttons-color: #596595;
@login-buttons-color-border: darken(@login-buttons-color, 10%);
@login-buttons-color-active: lighten(@login-buttons-color, 10%);
@login-buttons-color-active-border: darken(@login-buttons-color-active, 10%);
@login-buttons-config-color: darken(#f53, 10%);
@login-buttons-config-color-border: darken(@login-buttons-config-color, 10%);
@login-buttons-config-color-active: lighten(@login-buttons-config-color, 10%);
@login-buttons-config-color-active-border: darken(@login-buttons-config-color-active, 10%);
#login-buttons {
.display-inline-block();
display: inline-block;
margin-right: 0.2px; // Fixes display on IE8: http://www.compsoft.co.uk/Blog/2009/11/inline-block-not-quite-inline-blocking.html
// This seems to keep the height of the line from
@@ -107,6 +80,7 @@
display: inline-block;
}
}
.login-display-name {
display: inline-block;
padding-right: 2px;
@@ -133,26 +107,25 @@
text-align: center;
color: #fff;
text-shadow: 0px -1px 1px rgba(0, 0, 0, 0.5);
@topColor: #a5acc9;
@bottomColor: darken(@topColor, 25%);
background: @login-buttons-color;
border: 1px solid @login-buttons-color-border;
.vertical-gradient(@topColor, @bottomColor);
border-radius: 4px;
border: 1px solid mix(@bottomColor, #000, 30%);
.box-shadow(0 1px 3px rgba(0,0,0,0.5));
&:hover {
background: @login-buttons-color-active;
}
&:active {
.box-shadow(none);
.vertical-gradient(mix(@bottomColor, @topColor, 30%),
mix(@bottomColor, #000, 80%));
background: @login-buttons-color-active;
.box-shadow(0 2px 3px 0 rgba(0, 0, 0, 0.2) inset);
}
&.login-button-disabled, &.login-button-disabled:active {
color: #ddd;
background: #aaa;
border: 1px solid lighten(#aaa, 10%);
.box-shadow(none);
background: #999;
}
}
@@ -173,6 +146,8 @@
line-height: inherit;
color: inherit;
font: inherit;
font-family: 'Helvetica Neue', Helvetica, Arial, default;
}
.accounts-dialog .login-button {
@@ -187,12 +162,15 @@
}
.login-display-name { margin-right: 4px; }
.configure-button {
background: @login-buttons-config-color;
border-color: @login-buttons-config-color-border;
.vertical-gradient(#f53, darken(#f53, 15%));
.box-shadow(0 1px 3px rgba(0,0,0,0.5));
&:active { background: #b10; .box-shadow(0 1px 3px rgba(0,0,0,0.5) inset); }
&:active, &:hover {
background: @login-buttons-config-color-active;
border-color: @login-buttons-config-color-active-border;
}
}
.login-image {
@@ -254,11 +232,9 @@
@meteor-accounts-dialog-border-width: 1px;
.accounts-dialog {
border: @meteor-accounts-dialog-border-width solid #777;
border: @meteor-accounts-dialog-border-width solid #ccc;
z-index: 1000;
background: white;
.box-shadow(0 3px 6px 1px rgba(0, 0, 0, 0.3));
border-radius: 4px;
padding: 8px 12px;
@@ -266,6 +242,8 @@
width: @login-buttons-accounts-dialog-width;
.box-shadow(0 0 3px 0 rgba(0, 0, 0, 0.2));
// Labels and links inherit app's font with this line commented out:
//font-family: 'Helvetica Neue', Helvetica, Arial, default;
font-size: 16px;
@@ -276,11 +254,16 @@
// the "Close" link, which we want to have the same line-height
// as the "Sign in" link.
& > * { line-height: 1.6; }
& > .login-close-text { line-height: inherit; }
& > .login-close-text {
line-height: inherit;
font-size: inherit;
font-family: inherit;
}
label, .title {
font-weight: bold;
font-size: 80%;
margin-top: 7px;
margin-bottom: -2px;
}
input {
// Be pixel-accurate in IE 8+ regardless of our borders and
@@ -302,7 +285,7 @@
}
.login-button-form-submit { margin-top: 8px; }
.message { font-size: 80%; margin-top: 2px; line-height: 1.3; }
.message { font-size: 80%; margin-top: 8px; line-height: 1.3; }
.error-message { color: red; }
.info-message { color: green; }
.additional-link { font-size: 75%; }
@@ -418,7 +401,7 @@
#login-buttons, .accounts-dialog {
input[type=text], input[type=email], input[type=password] {
padding: 4px;
border: 1px solid #999;
border: 1px solid #aaa;
border-radius: 3px;
line-height: 1;
}

View File

@@ -37,10 +37,12 @@ var cspSrcs;
var cachedCsp; // Avoid constructing the header out of cspSrcs when possible.
// CSP keywords have to be single-quoted.
var unsafeInline = "'unsafe-inline'";
var unsafeEval = "'unsafe-eval'";
var selfKeyword = "'self'";
var noneKeyword = "'none'";
var keywords = {
unsafeInline: "'unsafe-inline'",
unsafeEval: "'unsafe-eval'",
self: "'self'",
none: "'none'"
};
BrowserPolicy.content = {};
@@ -52,7 +54,7 @@ var parseCsp = function (csp) {
policy = policy.substring(0, policy.length - 1);
var srcs = policy.split(" ");
var directive = srcs[0];
if (_.indexOf(srcs, noneKeyword) !== -1)
if (_.indexOf(srcs, keywords.none) !== -1)
cspSrcs[directive] = null;
else
cspSrcs[directive] = srcs.slice(1);
@@ -81,6 +83,38 @@ var prepareForCspDirective = function (directive) {
cspSrcs[directive] = _.clone(cspSrcs["default-src"]);
};
// Add `src` to the list of allowed sources for `directive`, normalizing
// plain origins before adding them:
//  - An origin with no protocol is expanded to both http://<src> and
//    https://<src>. This masks differing cross-browser behavior: some
//    browsers interpret a protocol-less origin as http only, others as
//    both http and https.
//  - Trailing slashes are stripped, since some browsers treat
//    "foo.com/" as "foo.com" and some don't.
// CSP keywords (e.g. 'self', 'none') are added through unchanged.
var addSourceForDirective = function (directive, src) {
  if (_.contains(_.values(keywords), src)) {
    cspSrcs[directive].push(src);
    return;
  }

  var origin = src.toLowerCase().replace(/\/+$/, '');
  var hasProtocol = /^([a-z0-9.+-]+:)/.test(origin);
  var sources = hasProtocol ?
        [origin] :
        ["http://" + origin, "https://" + origin];
  _.each(sources, function (s) {
    cspSrcs[directive].push(s);
  });
};
var setDefaultPolicy = function () {
// By default, unsafe inline scripts and styles are allowed, since we expect
// many apps will use them for analytics, etc. Unsafe eval is disallowed, and
@@ -111,7 +145,7 @@ _.extend(BrowserPolicy.content, {
var header = _.map(cspSrcs, function (srcs, directive) {
srcs = srcs || [];
if (_.isEmpty(srcs))
srcs = [noneKeyword];
srcs = [keywords.none];
var directiveCsp = _.uniq(srcs).join(" ");
return directive + " " + directiveCsp + ";";
});
@@ -129,7 +163,7 @@ _.extend(BrowserPolicy.content, {
cachedCsp = null;
parseCsp(csp);
setWebAppInlineScripts(
BrowserPolicy.content._keywordAllowed("script-src", unsafeInline)
BrowserPolicy.content._keywordAllowed("script-src", keywords.unsafeInline)
);
},
@@ -142,34 +176,34 @@ _.extend(BrowserPolicy.content, {
allowInlineScripts: function () {
prepareForCspDirective("script-src");
cspSrcs["script-src"].push(unsafeInline);
cspSrcs["script-src"].push(keywords.unsafeInline);
setWebAppInlineScripts(true);
},
disallowInlineScripts: function () {
prepareForCspDirective("script-src");
removeCspSrc("script-src", unsafeInline);
removeCspSrc("script-src", keywords.unsafeInline);
setWebAppInlineScripts(false);
},
allowEval: function () {
prepareForCspDirective("script-src");
cspSrcs["script-src"].push(unsafeEval);
cspSrcs["script-src"].push(keywords.unsafeEval);
},
disallowEval: function () {
prepareForCspDirective("script-src");
removeCspSrc("script-src", unsafeEval);
removeCspSrc("script-src", keywords.unsafeEval);
},
allowInlineStyles: function () {
prepareForCspDirective("style-src");
cspSrcs["style-src"].push(unsafeInline);
cspSrcs["style-src"].push(keywords.unsafeInline);
},
disallowInlineStyles: function () {
prepareForCspDirective("style-src");
removeCspSrc("style-src", unsafeInline);
removeCspSrc("style-src", keywords.unsafeInline);
},
// Functions for setting defaults
allowSameOriginForAll: function () {
BrowserPolicy.content.allowOriginForAll(selfKeyword);
BrowserPolicy.content.allowOriginForAll(keywords.self);
},
allowDataUrlForAll: function () {
BrowserPolicy.content.allowOriginForAll("data:");
@@ -177,7 +211,7 @@ _.extend(BrowserPolicy.content, {
allowOriginForAll: function (origin) {
prepareForCspDirective("default-src");
_.each(_.keys(cspSrcs), function (directive) {
cspSrcs[directive].push(origin);
addSourceForDirective(directive, origin);
});
},
disallowAll: function () {
@@ -192,7 +226,7 @@ _.extend(BrowserPolicy.content, {
// allow<Resource>Origin, allow<Resource>Data, allow<Resource>self, and
// disallow<Resource> methods for each type of resource.
_.each(["script", "object", "img", "media",
"font", "connect", "style"],
"font", "connect", "style", "frame"],
function (resource) {
var directive = resource + "-src";
var methodResource;
@@ -214,7 +248,7 @@ _.each(["script", "object", "img", "media",
BrowserPolicy.content[allowMethodName] = function (src) {
prepareForCspDirective(directive);
cspSrcs[directive].push(src);
addSourceForDirective(directive, src);
};
if (resource === "script") {
BrowserPolicy.content[disallowMethodName] = function () {
@@ -230,7 +264,7 @@ _.each(["script", "object", "img", "media",
};
BrowserPolicy.content[allowSelfMethodName] = function () {
prepareForCspDirective(directive);
cspSrcs[directive].push(selfKeyword);
cspSrcs[directive].push(keywords.self);
};
});

View File

@@ -112,6 +112,30 @@ Tinytest.add("browser-policy - csp", function (test) {
BrowserPolicy.content.disallowObject();
test.isTrue(cspsEqual(BrowserPolicy.content._constructCsp(),
"default-src 'self'; object-src 'none';"));
// Allow foo.com; it should allow both http://foo.com and
// https://foo.com.
BrowserPolicy.content.allowImageOrigin("foo.com");
test.isTrue(cspsEqual(BrowserPolicy.content._constructCsp(),
"default-src 'self'; object-src 'none'; " +
"img-src 'self' http://foo.com https://foo.com;"));
// "Disallow all <object>" followed by "allow foo.com for all" results
// in <object> srcs from foo.com.
BrowserPolicy.content.allowOriginForAll("foo.com");
test.isTrue(cspsEqual(BrowserPolicy.content._constructCsp(),
"default-src 'self' http://foo.com https://foo.com; " +
"object-src http://foo.com https://foo.com; " +
"img-src 'self' http://foo.com https://foo.com;"));
// Check that trailing slashes are trimmed from origins.
BrowserPolicy.content.disallowAll();
BrowserPolicy.content.allowFrameOrigin("https://foo.com/");
test.isTrue(cspsEqual(BrowserPolicy.content._constructCsp(),
"default-src 'none'; frame-src https://foo.com;"));
BrowserPolicy.content.allowObjectOrigin("foo.com//");
test.isTrue(cspsEqual(BrowserPolicy.content._constructCsp(),
"default-src 'none'; frame-src https://foo.com; " +
"object-src http://foo.com https://foo.com;"));
});
Tinytest.add("browser-policy - x-frame-options", function (test) {

View File

@@ -156,9 +156,8 @@ var handler = function (compileStep, isLiterate) {
var literateHandler = function (compileStep) {
return handler(compileStep, true);
}
};
Plugin.registerSourceHandler("coffee", handler);
Plugin.registerSourceHandler("litcoffee", literateHandler);
Plugin.registerSourceHandler("coffee.md", literateHandler);

View File

@@ -22,11 +22,12 @@ httpServer.addListener('request', function (req, res) {
// Determine if the connection is only over localhost. Both we
// received it on localhost, and all proxies involved received on
// localhost.
var localhostRegexp = /^\s*(127\.0\.0\.1|::1)\s*$/;
var isLocal = (
remoteAddress === "127.0.0.1" &&
localhostRegexp.test(remoteAddress) &&
(!req.headers['x-forwarded-for'] ||
_.all(req.headers['x-forwarded-for'].split(','), function (x) {
return /\s*127\.0\.0\.1\s*/.test(x);
return localhostRegexp.test(x);
})));
// Determine if the connection was over SSL at any point. Either we

File diff suppressed because it is too large Load Diff

View File

@@ -1,6 +1,4 @@
// A "crossbar" is a class that provides structured notification registration.
// The "invalidation crossbar" is a specific instance used by the DDP server to
// implement write fence notifications.
DDPServer._Crossbar = function (options) {
var self = this;
@@ -17,10 +15,8 @@ DDPServer._Crossbar = function (options) {
_.extend(DDPServer._Crossbar.prototype, {
// Listen for notification that match 'trigger'. A notification
// matches if it has the key-value pairs in trigger as a
// subset. When a notification matches, call 'callback', passing two
// arguments, the actual notification and an acknowledgement
// function. The callback should call the acknowledgement function
// when it is finished processing the notification.
// subset. When a notification matches, call 'callback', passing
// the actual notification.
//
// Returns a listen handle, which is an object with a method
// stop(). Call stop() to stop listening.
@@ -48,38 +44,29 @@ _.extend(DDPServer._Crossbar.prototype, {
// Fire the provided 'notification' (an object whose attribute
// values are all JSON-compatibile) -- inform all matching listeners
// (registered with listen()), and once they have all acknowledged
// the notification, call onComplete with no arguments.
// (registered with listen()).
//
// If fire() is called inside a write fence, then each of the
// listener callbacks will be called inside the write fence as well.
//
// The listeners may be invoked in parallel, rather than serially.
fire: function (notification, onComplete) {
fire: function (notification) {
var self = this;
var callbacks = [];
// Listener callbacks can yield, so we need to first find all the ones that
// match in a single iteration over self.listeners (which can't be mutated
// during this iteration), and then invoke the matching callbacks, checking
// before each call to ensure they are still in self.listeners.
var matchingCallbacks = {};
// XXX consider refactoring to "index" on "collection"
_.each(self.listeners, function (l) {
_.each(self.listeners, function (l, id) {
if (self._matches(notification, l.trigger))
callbacks.push(l.callback);
matchingCallbacks[id] = l.callback;
});
if (onComplete)
onComplete = Meteor.bindEnvironment(
onComplete,
"Crossbar fire complete callback");
var outstanding = callbacks.length;
if (!outstanding)
onComplete && onComplete();
else {
_.each(callbacks, function (c) {
c(notification, function () {
if (--outstanding === 0)
onComplete && onComplete();
});
});
}
_.each(matchingCallbacks, function (c, id) {
if (_.has(self.listeners, id))
c(notification);
});
},
// A notification matches a trigger if all keys that exist in both are equal.
@@ -107,6 +94,11 @@ _.extend(DDPServer._Crossbar.prototype, {
}
});
// The "invalidation crossbar" is a specific instance used by the DDP server to
// implement write fence notifications. Listener callbacks on this crossbar
// should call beginWrite on the current write fence before they return, if they
// want to delay the write fence from firing (ie, the DDP method-data-updated
// message from being sent).
DDPServer._InvalidationCrossbar = new DDPServer._Crossbar({
factName: "invalidation-crossbar-listeners"
});

View File

@@ -18,4 +18,32 @@ Tinytest.add('livedata - crossbar', function (test) {
test.isFalse(crossbar._matches({collection: "C", id: "X"},
{collection: "C", id: "Y"}));
// Test that stopped listens definitely don't fire.
var calledFirst = false;
var calledSecond = false;
var trigger = {collection: "C"};
var secondHandle;
crossbar.listen(trigger, function (notification) {
// This test assumes that listeners will be called in the order
// registered. It's not wrong for the crossbar to do something different,
// but the test won't be valid in that case, so make it fail so we notice.
calledFirst = true;
if (calledSecond) {
test.fail({
type: "test_assumption_failed",
message: "test assumed that listeners would be called in the order registered"
});
} else {
secondHandle.stop();
}
});
secondHandle = crossbar.listen(trigger, function (notification) {
// This should not get invoked, because it should be stopped by the other
// listener!
calledSecond = true;
});
crossbar.fire(trigger);
test.isTrue(calledFirst);
test.isFalse(calledSecond);
});

View File

@@ -96,7 +96,7 @@ var Connection = function (url, options) {
// methods whose stub wrote at least one document, and whose data-done message
// has not yet been received.
self._documentsWrittenByStub = {};
// collection -> id -> "server document" object. A "server document" has:
// collection -> IdMap of "server document" object. A "server document" has:
// - "document": the version of the document according the
// server (ie, the snapshot before a stub wrote it, amended by any changes
// received from the server)
@@ -767,11 +767,14 @@ _.extend(Connection.prototype, {
var docsWritten = [];
_.each(self._stores, function (s, collection) {
var originals = s.retrieveOriginals();
_.each(originals, function (doc, id) {
if (typeof id !== 'string')
throw new Error("id is not a string");
// not all stores define retrieveOriginals
if (!originals)
return;
originals.forEach(function (doc, id) {
docsWritten.push({collection: collection, id: id});
var serverDoc = Meteor._ensure(self._serverDocuments, collection, id);
if (!_.has(self._serverDocuments, collection))
self._serverDocuments[collection] = new LocalCollection._IdMap;
var serverDoc = self._serverDocuments[collection].setDefault(id, {});
if (serverDoc.writtenByStubs) {
// We're not the first stub to write this doc. Just add our method ID
// to the record.
@@ -1059,17 +1062,24 @@ _.extend(Connection.prototype, {
updates[collection].push(msg);
},
_getServerDoc: function (collection, id) {
var self = this;
if (!_.has(self._serverDocuments, collection))
return null;
var serverDocsForCollection = self._serverDocuments[collection];
return serverDocsForCollection.get(id) || null;
},
_process_added: function (msg, updates) {
var self = this;
var serverDoc = Meteor._get(self._serverDocuments, msg.collection, msg.id);
var id = LocalCollection._idParse(msg.id);
var serverDoc = self._getServerDoc(msg.collection, id);
if (serverDoc) {
// Some outstanding stub wrote here.
if (serverDoc.document !== undefined) {
throw new Error("It doesn't make sense to be adding something we know exists: "
+ msg.id);
}
if (serverDoc.document !== undefined)
throw new Error("Server sent add for existing id: " + msg.id);
serverDoc.document = msg.fields || {};
serverDoc.document._id = LocalCollection._idParse(msg.id);
serverDoc.document._id = id;
} else {
self._pushUpdate(updates, msg.collection, msg);
}
@@ -1077,12 +1087,11 @@ _.extend(Connection.prototype, {
_process_changed: function (msg, updates) {
var self = this;
var serverDoc = Meteor._get(self._serverDocuments, msg.collection, msg.id);
var serverDoc = self._getServerDoc(
msg.collection, LocalCollection._idParse(msg.id));
if (serverDoc) {
if (serverDoc.document === undefined) {
throw new Error("It doesn't make sense to be changing something we don't think exists: "
+ msg.id);
}
if (serverDoc.document === undefined)
throw new Error("Server sent changed for nonexisting id: " + msg.id);
LocalCollection._applyChanges(serverDoc.document, msg.fields);
} else {
self._pushUpdate(updates, msg.collection, msg);
@@ -1091,14 +1100,12 @@ _.extend(Connection.prototype, {
_process_removed: function (msg, updates) {
var self = this;
var serverDoc = Meteor._get(
self._serverDocuments, msg.collection, msg.id);
var serverDoc = self._getServerDoc(
msg.collection, LocalCollection._idParse(msg.id));
if (serverDoc) {
// Some outstanding stub wrote here.
if (serverDoc.document === undefined) {
throw new Error("It doesn't make sense to be deleting something we don't know exists: "
+ msg.id);
}
if (serverDoc.document === undefined)
throw new Error("Server sent removed for nonexisting id:" + msg.id);
serverDoc.document = undefined;
} else {
self._pushUpdate(updates, msg.collection, {
@@ -1114,8 +1121,7 @@ _.extend(Connection.prototype, {
// Process "method done" messages.
_.each(msg.methods, function (methodId) {
_.each(self._documentsWrittenByStub[methodId], function (written) {
var serverDoc = Meteor._get(self._serverDocuments,
written.collection, written.id);
var serverDoc = self._getServerDoc(written.collection, written.id);
if (!serverDoc)
throw new Error("Lost serverDoc for " + JSON.stringify(written));
if (!serverDoc.writtenByStubs[methodId])
@@ -1128,11 +1134,12 @@ _.extend(Connection.prototype, {
// change if the server did not write to this object, or applying the
// server's writes if it did).
// This is a fake ddp 'replace' message. It's just for talking between
// livedata connections and minimongo.
// This is a fake ddp 'replace' message. It's just for talking
// between livedata connections and minimongo. (We have to stringify
// the ID because it's supposed to look like a wire message.)
self._pushUpdate(updates, written.collection, {
msg: 'replace',
id: written.id,
id: LocalCollection._idStringify(written.id),
replace: serverDoc.document
});
// Call all flush callbacks.
@@ -1141,9 +1148,9 @@ _.extend(Connection.prototype, {
});
// Delete this completed serverDocument. Don't bother to GC empty
// objects inside self._serverDocuments, since there probably aren't
// IdMaps inside self._serverDocuments, since there probably aren't
// many collections and they'll be written repeatedly.
delete self._serverDocuments[written.collection][written.id];
self._serverDocuments[written.collection].remove(written.id);
}
});
delete self._documentsWrittenByStub[methodId];
@@ -1197,7 +1204,7 @@ _.extend(Connection.prototype, {
}
};
_.each(self._serverDocuments, function (collectionDocs) {
_.each(collectionDocs, function (serverDoc) {
collectionDocs.forEach(function (serverDoc) {
var writtenByStubForAMethodWithSentMessage = _.any(
serverDoc.writtenByStubs, function (dummy, methodId) {
var invoker = self._methodInvokers[methodId];

View File

@@ -10,16 +10,7 @@ if (Package.webapp) {
Meteor.server = new Server;
Meteor.refresh = function (notification) {
var fence = DDPServer._CurrentWriteFence.get();
if (fence) {
// Block the write fence until all of the invalidations have
// landed.
var proxy_write = fence.beginWrite();
}
DDPServer._InvalidationCrossbar.fire(notification, function () {
if (proxy_write)
proxy_write.committed();
});
DDPServer._InvalidationCrossbar.fire(notification);
};
// Proxy the public methods of Meteor.server so they can

View File

@@ -53,6 +53,8 @@ _.extend(DDPServer._WriteFence.prototype, {
// uncommitted writes, it will activate.
arm: function () {
var self = this;
if (self === DDPServer._CurrentWriteFence.get())
throw Error("Can't arm the current fence");
self.armed = true;
self._maybeFire();
},

View File

@@ -6,71 +6,3 @@
// Pass-through implementation: invoke `f` immediately and return its
// result. (Presumably the stub for environments where code cannot
// actually yield — confirm against the fiber-based counterpart.)
Meteor._noYieldsAllowed = function (f) {
  return f();
};
// An even simpler queue of tasks than the fiber-enabled one. This one just
// runs all the tasks when you call runTask or flush, synchronously.
//
Meteor._SynchronousQueue = function () {
  var self = this;
  // FIFO list of tasks queued via queueTask that have not yet run.
  self._tasks = [];
  // True while runTask is draining the queue; used to reject reentrant
  // calls (see safeToRunTask).
  self._running = false;
};

_.extend(Meteor._SynchronousQueue.prototype, {
  // Synchronously run `task`, after first running any previously queued
  // tasks. Throws if called from inside a task that is already running
  // on this queue.
  runTask: function (task) {
    var self = this;
    if (!self.safeToRunTask())
      throw new Error("Could not synchronously run a task from a running task");
    self._tasks.push(task);
    // Swap out the pending list so that tasks queued *while* we drain go
    // into a fresh array (to be handled by their own later flush).
    var tasks = self._tasks;
    self._tasks = [];
    self._running = true;
    try {
      while (!_.isEmpty(tasks)) {
        var t = tasks.shift();
        try {
          t();
        } catch (e) {
          if (_.isEmpty(tasks)) {
            // this was the last task, that is, the one we're calling runTask
            // for.
            throw e;
          } else {
            // An earlier queued task failed: log and keep going so the
            // task passed to runTask still gets its chance to run.
            Meteor._debug("Exception in queued task: " + e.stack);
          }
        }
      }
    } finally {
      self._running = false;
    }
  },

  // Schedule `task` to run later (on a zero setTimeout), after any
  // previously queued tasks.
  queueTask: function (task) {
    var self = this;
    var wasEmpty = _.isEmpty(self._tasks);
    self._tasks.push(task);
    // Intentionally not using Meteor.setTimeout, because it doesn't like
    // running in stubs for now. Only schedule a flush if one isn't
    // already pending (i.e. the queue was empty before this push).
    if (wasEmpty)
      setTimeout(_.bind(self.flush, self), 0);
  },

  // Synchronously run everything currently queued, by running a no-op
  // task through runTask (which drains the queue first).
  flush: function () {
    var self = this;
    self.runTask(function () {});
  },

  // Repeatedly flush until the queue stays empty. Silently does nothing
  // if called from inside a running task.
  drain: function () {
    var self = this;
    if (!self.safeToRunTask())
      return;
    while (!_.isEmpty(self._tasks)) {
      self.flush();
    }
  },

  // True unless a task on this queue is currently running.
  safeToRunTask: function () {
    var self = this;
    return !self._running;
  }
});

View File

@@ -1,120 +0,0 @@
// Temporary workaround for https://github.com/joyent/node/issues/6506
// Our fix involves replicating a bunch of functions in order to change
// a single line.
//
// If we are not running one of the Node versions this patch was written
// against, monkey-patching stream internals would be unsafe, so instead we
// fall back to disabling websockets via the DISABLE_WEBSOCKETS env var.
var PATCH_VERSIONS = ['v0.10.22', 'v0.10.23', 'v0.10.24'];
if (!_.contains(PATCH_VERSIONS, process.version)) {
  if (!process.env.DISABLE_WEBSOCKETS) {
    console.error("This version of Meteor contains a patch for a bug in Node v0.10.");
    console.error("The patch is against only versions 0.10.22 through 0.10.24.");
    console.error("You are using version " + process.version + " instead, so we cannot apply the patch.");
    console.error("To mitigate the most common effect of the bug, websockets will be disabled.");
    console.error("To enable websockets, use Node v0.10.22 through v0.10.24, or upgrade to a later version of Meteor (if available).");
    process.env.DISABLE_WEBSOCKETS = 't';
  }
} else {
  // This code is all copied from Node's lib/_stream_writable.js, git tag
  // v0.10.22, with one change (see "BUGFIX").
  var Writable = Npm.require('_stream_writable');
  var Duplex = Npm.require('_stream_duplex');
  // Replacement for Writable.prototype.write. Identical to upstream except
  // that writeOrBuffer (below) computes needDrain differently.
  Writable.prototype.write = function(chunk, encoding, cb) {
    var state = this._writableState;
    var ret = false;
    if (typeof encoding === 'function') {
      // write(chunk, cb) form: encoding was omitted.
      cb = encoding;
      encoding = null;
    }
    if (Buffer.isBuffer(chunk))
      encoding = 'buffer';
    else if (!encoding)
      encoding = state.defaultEncoding;
    if (typeof cb !== 'function')
      cb = function() {};
    if (state.ended)
      writeAfterEnd(this, state, cb);
    else if (validChunk(this, state, chunk, cb))
      ret = writeOrBuffer(this, state, chunk, encoding, cb);
    return ret;
  };
  // Duplex doesn't directly inherit from Writable: it copies over this function
  // explicitly. So we have to do it too.
  Duplex.prototype.write = Writable.prototype.write;
  // Emit an error (and defer it to the callback) when write() is called
  // after end(). Copied from upstream.
  function writeAfterEnd(stream, state, cb) {
    var er = new Error('write after end');
    // TODO: defer error events consistently everywhere, not just the cb
    stream.emit('error', er);
    process.nextTick(function() {
      cb(er);
    });
  }
  // Validate a chunk: in non-object mode only Buffers, strings, null and
  // undefined are acceptable. Emits/defers a TypeError otherwise. Copied
  // from upstream.
  function validChunk(stream, state, chunk, cb) {
    var valid = true;
    if (!Buffer.isBuffer(chunk) &&
        'string' !== typeof chunk &&
        chunk !== null &&
        chunk !== undefined &&
        !state.objectMode) {
      var er = new TypeError('Invalid non-string/buffer chunk');
      stream.emit('error', er);
      process.nextTick(function() {
        cb(er);
      });
      valid = false;
    }
    return valid;
  }
  // Either hand the chunk to _write immediately or buffer it if a write is
  // already in flight. Copied from upstream except for the BUGFIX line.
  function writeOrBuffer(stream, state, chunk, encoding, cb) {
    chunk = decodeChunk(state, chunk, encoding);
    if (Buffer.isBuffer(chunk))
      encoding = 'buffer';
    var len = state.objectMode ? 1 : chunk.length;
    state.length += len;
    var ret = state.length < state.highWaterMark;
    // This next line is the BUGFIX:
    // (upstream assigns needDrain unconditionally, which appears to be able
    // to clear a previously-set flag -- see joyent/node#6506; OR-ing in the
    // old value makes the flag sticky until 'drain' fires)
    state.needDrain = state.needDrain || !ret;
    if (state.writing)
      state.buffer.push(new WriteReq(chunk, encoding, cb));
    else
      doWrite(stream, state, len, chunk, encoding, cb);
    return ret;
  }
  // Convert a string chunk to a Buffer when the stream decodes strings.
  // Copied from upstream.
  function decodeChunk(state, chunk, encoding) {
    if (!state.objectMode &&
        state.decodeStrings !== false &&
        typeof chunk === 'string') {
      chunk = new Buffer(chunk, encoding);
    }
    return chunk;
  }
  // Record of a buffered write awaiting its turn. Copied from upstream.
  function WriteReq(chunk, encoding, cb) {
    this.chunk = chunk;
    this.encoding = encoding;
    this.callback = cb;
  }
  // Dispatch a chunk to the stream's _write implementation, tracking the
  // in-flight state. Copied from upstream.
  function doWrite(stream, state, len, chunk, encoding, cb) {
    state.writelen = len;
    state.writecb = cb;
    state.writing = true;
    state.sync = true;
    stream._write(chunk, encoding, state.onwrite);
    state.sync = false;
  }
}

View File

@@ -15,9 +15,6 @@ Package.on_use(function (api) {
api.export('Meteor');
// Workaround for https://github.com/joyent/node/issues/6506
api.add_files('node-issue-6506-workaround.js', 'server');
api.add_files('client_environment.js', 'client');
api.add_files('server_environment.js', 'server');
api.add_files('helpers.js', ['client', 'server']);
@@ -26,6 +23,7 @@ Package.on_use(function (api) {
api.add_files('errors.js', ['client', 'server']);
api.add_files('fiber_helpers.js', 'server');
api.add_files('fiber_stubs_client.js', 'client');
api.add_files('unyielding_queue.js');
api.add_files('startup_client.js', ['client']);
api.add_files('startup_server.js', ['server']);
api.add_files('debug.js', ['client', 'server']);

View File

@@ -0,0 +1,72 @@
// A simpler version of Meteor._SynchronousQueue with the same external
// interface. It runs on both client and server, unlike _SynchronousQueue which
// only runs on the server. When used on the server, tasks may not yield. This
// one just runs all the tasks when you call runTask or flush, synchronously.
// It itself also does not yield.
//
Meteor._UnyieldingQueue = function () {
  var self = this;
  // Tasks waiting to be executed, in FIFO order.
  self._tasks = [];
  // True while a task is executing (guards against re-entrant runTask).
  self._running = false;
};

_.extend(Meteor._UnyieldingQueue.prototype, {
  // Append `task` and synchronously run every queued task, including it.
  // Tasks run inside Meteor._noYieldsAllowed. Throws if invoked from inside
  // a task that is already running.
  runTask: function (task) {
    var self = this;
    if (!self.safeToRunTask())
      throw new Error("Could not synchronously run a task from a running task");
    self._tasks.push(task);
    var pending = self._tasks;
    self._tasks = [];
    self._running = true;
    try {
      while (pending.length > 0) {
        var next = pending.shift();
        try {
          Meteor._noYieldsAllowed(function () {
            next();
          });
        } catch (err) {
          if (pending.length === 0) {
            // This was the last task -- the one runTask was called for --
            // so let its exception propagate to our caller.
            throw err;
          }
          // Otherwise report the failure and keep draining the queue.
          Meteor._debug("Exception in queued task: " + err.stack);
        }
      }
    } finally {
      self._running = false;
    }
  },

  // Append `task` to be run later; schedules an asynchronous flush when the
  // queue transitions from empty to non-empty.
  queueTask: function (task) {
    var self = this;
    var needsFlush = self._tasks.length === 0;
    self._tasks.push(task);
    // Intentionally not using Meteor.setTimeout, because it doesn't like
    // running in stubs for now.
    if (needsFlush)
      setTimeout(function () { self.flush(); }, 0);
  },

  // Synchronously run everything currently queued.
  flush: function () {
    this.runTask(function () {});
  },

  // Repeatedly flush until the queue stays empty; a no-op when called from
  // inside a running task.
  drain: function () {
    var self = this;
    if (!self.safeToRunTask())
      return;
    while (self._tasks.length > 0)
      self.flush();
  },

  // True unless a task is currently executing.
  safeToRunTask: function () {
    return !this._running;
  }
});

View File

@@ -2,7 +2,7 @@
// ordered: bool.
// old_results and new_results: collections of documents.
// if ordered, they are arrays.
// if unordered, they are maps {_id: doc}.
// if unordered, they are IdMaps
LocalCollection._diffQueryChanges = function (ordered, oldResults, newResults,
observer) {
if (ordered)
@@ -14,28 +14,29 @@ LocalCollection._diffQueryChanges = function (ordered, oldResults, newResults,
};
LocalCollection._diffQueryUnorderedChanges = function (oldResults, newResults,
observer) {
observer) {
if (observer.movedBefore) {
throw new Error("_diffQueryUnordered called with a movedBefore observer!");
}
_.each(newResults, function (newDoc) {
if (_.has(oldResults, newDoc._id)) {
var oldDoc = oldResults[newDoc._id];
newResults.forEach(function (newDoc, id) {
var oldDoc = oldResults.get(id);
if (oldDoc) {
if (observer.changed && !EJSON.equals(oldDoc, newDoc)) {
observer.changed(newDoc._id, LocalCollection._makeChangedFields(newDoc, oldDoc));
observer.changed(
id, LocalCollection._makeChangedFields(newDoc, oldDoc));
}
} else {
} else if (observer.added) {
var fields = EJSON.clone(newDoc);
delete fields._id;
observer.added && observer.added(newDoc._id, fields);
observer.added(newDoc._id, fields);
}
});
if (observer.removed) {
_.each(oldResults, function (oldDoc) {
if (!_.has(newResults, oldDoc._id))
observer.removed(oldDoc._id);
oldResults.forEach(function (oldDoc, id) {
if (!newResults.has(id))
observer.removed(id);
});
}
};

View File

@@ -37,14 +37,22 @@ _.extend(LocalCollection._IdMap.prototype, {
var self = this;
self._map = {};
},
// Iterates over the items in the map. Return `false` to break the loop.
forEach: function (iterator) {
var self = this;
_.each(self._map, function (value, key, obj) {
var context = this;
iterator.call(context, value, LocalCollection._idParse(key), obj);
});
// don't use _.each, because we can't break out of it.
var keys = _.keys(self._map);
for (var i = 0; i < keys.length; i++) {
var breakIfFalse = iterator.call(null, self._map[keys[i]],
LocalCollection._idParse(keys[i]));
if (breakIfFalse === false)
return;
}
},
size: function () {
var self = this;
return _.size(self._map);
},
// XXX used?
setDefault: function (id, def) {
var self = this;
var key = LocalCollection._idStringify(id);
@@ -52,5 +60,15 @@ _.extend(LocalCollection._IdMap.prototype, {
return self._map[key];
self._map[key] = def;
return def;
},
// Assumes that values are EJSON-cloneable, and that we don't need to clone
// IDs (ie, that nobody is going to mutate an ObjectId).
clone: function () {
var self = this;
var clone = new LocalCollection._IdMap;
self.forEach(function (value, id) {
clone.set(id, EJSON.clone(value));
});
return clone;
}
});

View File

@@ -7,28 +7,52 @@
// ObserveHandle: the return value of a live query.
LocalCollection = function (name) {
this.name = name;
this.docs = {}; // _id -> document (also containing id)
LocalCollection = function (options) {
var self = this;
options = options || {};
this._observeQueue = new Meteor._SynchronousQueue();
self.name = options.name;
// _id -> document (also containing id)
self._docs = new LocalCollection._IdMap;
this.next_qid = 1; // live query id generator
// When writing to this collection, we batch all observeChanges callbacks
// until the end of the write, and run them at this point. On the server, we
// use a single SynchronousQueue to do so, so that we never deliver callbacks
// out of order even if other writes occur during a yield. On the client, or
// on the server if we promise that our callbacks will never yield via an
// undocumented option, we use the simpler UnyieldingQueue.
//
// (What is the _observeCallbacksWillNeverYield option for? In some cases, it
// can be nice (on the server) to be able to write to a LocalCollection
// without yielding (eg, in a _noYieldsAllowed block). It's necessary to
// provide non-yielding allow callbacks in that case, but just doing that
// wouldn't be good enough if we always used SynchronousQueue on the server,
// since it tends to yield in order to run even non-yielding callbacks.)
var queueClass;
if (Meteor._SynchronousQueue && !options._observeCallbacksWillNeverYield) {
queueClass = Meteor._SynchronousQueue;
} else {
queueClass = Meteor._UnyieldingQueue;
}
self._observeQueue = new queueClass();
self.next_qid = 1; // live query id generator
// qid -> live query object. keys:
// ordered: bool. ordered queries have addedBefore/movedBefore callbacks.
// results: array (ordered) or object (unordered) of current results
// results_snapshot: snapshot of results. null if not paused.
// (aliased with self._docs!)
// resultsSnapshot: snapshot of results. null if not paused.
// cursor: Cursor object for the query.
// selector, sorter, (callbacks): functions
this.queries = {};
self.queries = {};
// null if not saving originals; a map from id to original document value if
// saving originals. See comments before saveOriginals().
this._savedOriginals = null;
// null if not saving originals; an IdMap from id to original document value
// if saving originals. See comments before saveOriginals().
self._savedOriginals = null;
// True when observers are paused and we should not send callbacks.
this.paused = false;
self.paused = false;
};
Minimongo = {};
@@ -90,11 +114,11 @@ LocalCollection.Cursor = function (collection, selector, options) {
if (LocalCollection._selectorIsId(selector)) {
// stash for fast path
self.selector_id = LocalCollection._idStringify(selector);
self._selectorId = selector;
self.matcher = new Minimongo.Matcher(selector, self);
self.sorter = undefined;
} else {
self.selector_id = undefined;
self._selectorId = undefined;
self.matcher = new Minimongo.Matcher(selector, self);
self.sorter = (self.matcher.hasGeoQuery() || options.sort) ?
new Sorter(options.sort || []) : null;
@@ -104,15 +128,12 @@ LocalCollection.Cursor = function (collection, selector, options) {
self.fields = options.fields;
if (self.fields)
self.projection_f = LocalCollection._compileProjection(self.fields);
self.projectionFn = LocalCollection._compileProjection(self.fields);
if (options.transform && typeof Deps !== "undefined")
self._transform = Deps._makeNonreactive(options.transform);
else
self._transform = options.transform;
self._transform = LocalCollection.wrapTransform(options.transform);
// db_objects is a list of the objects that match the cursor. (It's always a
// list, never an object: LocalCollection.Cursor is always ordered.)
// db_objects is an array of the objects that match the cursor. (It's always
// an array, never an IdMap: LocalCollection.Cursor is always ordered.)
self.db_objects = null;
self.cursor_pos = 0;
@@ -149,7 +170,7 @@ LocalCollection.Cursor.prototype.forEach = function (callback, thisArg) {
var self = this;
if (self.db_objects === null)
self.db_objects = self._getRawObjects(true);
self.db_objects = self._getRawObjects({ordered: true});
if (self.reactive)
self._depend({
@@ -160,8 +181,8 @@ LocalCollection.Cursor.prototype.forEach = function (callback, thisArg) {
while (self.cursor_pos < self.db_objects.length) {
var elt = EJSON.clone(self.db_objects[self.cursor_pos]);
if (self.projection_f)
elt = self.projection_f(elt);
if (self.projectionFn)
elt = self.projectionFn(elt);
if (self._transform)
elt = self._transform(elt);
callback.call(thisArg, elt, self.cursor_pos, self);
@@ -170,8 +191,7 @@ LocalCollection.Cursor.prototype.forEach = function (callback, thisArg) {
};
LocalCollection.Cursor.prototype.getTransform = function () {
var self = this;
return self._transform;
return this._transform;
};
LocalCollection.Cursor.prototype.map = function (callback, thisArg) {
@@ -200,7 +220,7 @@ LocalCollection.Cursor.prototype.count = function () {
true /* allow the observe to be unordered */);
if (self.db_objects === null)
self.db_objects = self._getRawObjects(true);
self.db_objects = self._getRawObjects({ordered: true});
return self.db_objects.length;
};
@@ -267,6 +287,10 @@ _.extend(LocalCollection.Cursor.prototype, {
var ordered = LocalCollection._observeChangesCallbacksAreOrdered(options);
// there are several places that assume you aren't combining skip/limit with
// unordered observe. eg, update's EJSON.clone, and the "there are several"
// comment in _modifyAndNotify
// XXX allow skip/limit with unordered observe
if (!options._allow_unordered && !ordered && (self.skip || self.limit))
throw new Error("must use ordered observe with skip or limit");
@@ -278,12 +302,10 @@ _.extend(LocalCollection.Cursor.prototype, {
sorter: ordered && self.sorter,
distances: (
self.matcher.hasGeoQuery() && ordered && new LocalCollection._IdMap),
results_snapshot: null,
resultsSnapshot: null,
ordered: ordered,
cursor: self,
observeChanges: options.observeChanges,
fields: self.fields,
projection_f: self.projection_f
projectionFn: self.projectionFn
};
var qid;
@@ -293,9 +315,10 @@ _.extend(LocalCollection.Cursor.prototype, {
qid = self.collection.next_qid++;
self.collection.queries[qid] = query;
}
query.results = self._getRawObjects(ordered, query.distances);
query.results = self._getRawObjects({
ordered: ordered, distances: query.distances});
if (self.collection.paused)
query.results_snapshot = (ordered ? [] : {});
query.resultsSnapshot = (ordered ? [] : new LocalCollection._IdMap);
// wrap callbacks we were passed. callbacks only fire when not paused and
// are never undefined
@@ -311,17 +334,18 @@ _.extend(LocalCollection.Cursor.prototype, {
var context = this;
var args = arguments;
if (fieldsIndex !== undefined && self.projection_f) {
args[fieldsIndex] = self.projection_f(args[fieldsIndex]);
if (self.collection.paused)
return;
if (fieldsIndex !== undefined && self.projectionFn) {
args[fieldsIndex] = self.projectionFn(args[fieldsIndex]);
if (ignoreEmptyFields && _.isEmpty(args[fieldsIndex]))
return;
}
if (!self.collection.paused) {
self.collection._observeQueue.queueTask(function () {
f.apply(context, args);
});
}
self.collection._observeQueue.queueTask(function () {
f.apply(context, args);
});
};
};
query.added = wrapCallback(options.added, 1);
@@ -333,7 +357,11 @@ _.extend(LocalCollection.Cursor.prototype, {
}
if (!options._suppress_initial && !self.collection.paused) {
_.each(query.results, function (doc, i) {
// XXX unify ordered and unordered interface
var each = ordered
? _.bind(_.each, null, query.results)
: _.bind(query.results.forEach, query.results);
each(function (doc) {
var fields = EJSON.clone(doc);
delete fields._id;
@@ -385,26 +413,28 @@ _.extend(LocalCollection.Cursor.prototype, {
// argument, this function will clear it and use it for this purpose (otherwise
// it will just create its own _IdMap). The observeChanges implementation uses
// this to remember the distances after this function returns.
LocalCollection.Cursor.prototype._getRawObjects = function (ordered,
distances) {
LocalCollection.Cursor.prototype._getRawObjects = function (options) {
var self = this;
options = options || {};
var results = ordered ? [] : {};
// XXX use OrderedDict instead of array, and make IdMap and OrderedDict
// compatible
var results = options.ordered ? [] : new LocalCollection._IdMap;
// fast path for single ID value
if (self.selector_id) {
if (self._selectorId !== undefined) {
// If you have non-zero skip and ask for a single id, you get
// nothing. This is so it matches the behavior of the '{_id: foo}'
// path.
if (self.skip)
return results;
if (_.has(self.collection.docs, self.selector_id)) {
var selectedDoc = self.collection.docs[self.selector_id];
if (ordered)
var selectedDoc = self.collection._docs.get(self._selectorId);
if (selectedDoc) {
if (options.ordered)
results.push(selectedDoc);
else
results[self.selector_id] = selectedDoc;
results.set(self._selectorId, selectedDoc);
}
return results;
}
@@ -414,33 +444,36 @@ LocalCollection.Cursor.prototype._getRawObjects = function (ordered,
// in the observeChanges case, distances is actually part of the "query" (ie,
// live results set) object. in other cases, distances is only used inside
// this function.
if (self.matcher.hasGeoQuery() && ordered) {
if (distances)
var distances;
if (self.matcher.hasGeoQuery() && options.ordered) {
if (options.distances) {
distances = options.distances;
distances.clear();
else
} else {
distances = new LocalCollection._IdMap();
}
}
for (var idStringified in self.collection.docs) {
var doc = self.collection.docs[idStringified];
var id = LocalCollection._idParse(idStringified); // XXX use more idmaps
self.collection._docs.forEach(function (doc, id) {
var matchResult = self.matcher.documentMatches(doc);
if (matchResult.result) {
if (ordered) {
if (options.ordered) {
results.push(doc);
if (distances && matchResult.distance !== undefined)
distances.set(id, matchResult.distance);
} else {
results[idStringified] = doc;
results.set(id, doc);
}
}
// Fast path for limited unsorted queries.
// XXX 'length' check here seems wrong for ordered
if (self.limit && !self.skip && !self.sorter &&
results.length === self.limit)
return results;
}
return false; // break
return true; // continue
});
if (!ordered)
if (!options.ordered)
return results;
if (self.sorter) {
@@ -492,13 +525,13 @@ LocalCollection.prototype.insert = function (doc, callback) {
doc._id = LocalCollection._useOID ? new LocalCollection._ObjectID()
: Random.id();
}
var id = LocalCollection._idStringify(doc._id);
var id = doc._id;
if (_.has(self.docs, id))
throw MinimongoError("Duplicate _id '" + doc._id + "'");
if (self._docs.has(id))
throw MinimongoError("Duplicate _id '" + id + "'");
self._saveOriginal(id, undefined);
self.docs[id] = doc;
self._docs.set(id, doc);
var queriesToRecompute = [];
// trigger live queries that match
@@ -507,7 +540,7 @@ LocalCollection.prototype.insert = function (doc, callback) {
var matchResult = query.matcher.documentMatches(doc);
if (matchResult.result) {
if (query.distances && matchResult.distance !== undefined)
query.distances.set(doc._id, matchResult.distance);
query.distances.set(id, matchResult.distance);
if (query.cursor.skip || query.cursor.limit)
queriesToRecompute.push(qid);
else
@@ -525,42 +558,69 @@ LocalCollection.prototype.insert = function (doc, callback) {
// immediately.
if (callback)
Meteor.defer(function () {
callback(null, doc._id);
callback(null, id);
});
return doc._id;
return id;
};
// Iterates over a subset of documents that could match selector; calls
// f(doc, id) on each of them. Specifically, if selector specifies
// specific _id's, it only looks at those. doc is *not* cloned: it is the
// same object that is in _docs.
LocalCollection.prototype._eachPossiblyMatchingDoc = function (selector, f) {
var self = this;
var specificIds = LocalCollection._idsMatchedBySelector(selector);
if (specificIds) {
for (var i = 0; i < specificIds.length; ++i) {
var id = specificIds[i];
var doc = self._docs.get(id);
if (doc) {
var breakIfFalse = f(doc, id);
if (breakIfFalse === false)
break;
}
}
} else {
self._docs.forEach(f);
}
};
LocalCollection.prototype.remove = function (selector, callback) {
var self = this;
var remove = [];
var queriesToRecompute = [];
var matcher = new Minimongo.Matcher(selector, self);
// Avoid O(n) for "remove a single doc by ID".
var specificIds = LocalCollection._idsMatchedBySelector(selector);
if (specificIds) {
_.each(specificIds, function (id) {
var strId = LocalCollection._idStringify(id);
// We still have to run matcher, in case it's something like
// {_id: "X", a: 42}
if (_.has(self.docs, strId)
&& matcher.documentMatches(self.docs[strId]).result)
remove.push(strId);
});
} else {
for (var strId in self.docs) {
var doc = self.docs[strId];
if (matcher.documentMatches(doc).result) {
remove.push(strId);
// Easy special case: if we're not calling observeChanges callbacks and we're
// not saving originals and we got asked to remove everything, then just empty
// everything directly.
if (self.paused && !self._savedOriginals && EJSON.equals(selector, {})) {
var result = self._docs.size();
self._docs.clear();
_.each(self.queries, function (query) {
if (query.ordered) {
query.results = [];
} else {
query.results.clear();
}
});
if (callback) {
Meteor.defer(function () {
callback(null, result);
});
}
return result;
}
var matcher = new Minimongo.Matcher(selector, self);
var remove = [];
self._eachPossiblyMatchingDoc(selector, function (doc, id) {
if (matcher.documentMatches(doc).result)
remove.push(id);
});
var queriesToRecompute = [];
var queryRemove = [];
for (var i = 0; i < remove.length; i++) {
var removeId = remove[i];
var removeDoc = self.docs[removeId];
var removeDoc = self._docs.get(removeId);
_.each(self.queries, function (query, qid) {
if (query.matcher.documentMatches(removeDoc).result) {
if (query.cursor.skip || query.cursor.limit)
@@ -570,7 +630,7 @@ LocalCollection.prototype.remove = function (selector, callback) {
}
});
self._saveOriginal(removeId, removeDoc);
delete self.docs[removeId];
self._docs.remove(removeId);
}
// run live query callbacks _after_ we've removed the documents.
@@ -587,7 +647,7 @@ LocalCollection.prototype.remove = function (selector, callback) {
LocalCollection._recomputeResults(query);
});
self._observeQueue.drain();
var result = remove.length;
result = remove.length;
if (callback)
Meteor.defer(function () {
callback(null, result);
@@ -610,10 +670,12 @@ LocalCollection.prototype.update = function (selector, mod, options, callback) {
// Save the original results of any query that we might need to
// _recomputeResults on, because _modifyAndNotify will mutate the objects in
// it. (We don't need to save the original results of paused queries because
// they already have a results_snapshot and we won't be diffing in
// they already have a resultsSnapshot and we won't be diffing in
// _recomputeResults.)
var qidToOriginalResults = {};
_.each(self.queries, function (query, qid) {
// XXX for now, skip/limit implies ordered observe, so query.results is
// always an array
if ((query.cursor.skip || query.cursor.limit) && !query.paused)
qidToOriginalResults[qid] = EJSON.clone(query.results);
});
@@ -621,8 +683,7 @@ LocalCollection.prototype.update = function (selector, mod, options, callback) {
var updateCount = 0;
for (var id in self.docs) {
var doc = self.docs[id];
self._eachPossiblyMatchingDoc(selector, function (doc, id) {
var queryResult = matcher.documentMatches(doc);
if (queryResult.result) {
// XXX Should we save the original even if mod ends up being a no-op?
@@ -630,9 +691,10 @@ LocalCollection.prototype.update = function (selector, mod, options, callback) {
self._modifyAndNotify(doc, mod, recomputeQids, queryResult.arrayIndex);
++updateCount;
if (!options.multi)
break;
return false; // break
}
}
return true;
});
_.each(recomputeQids, function (dummy, qid) {
var query = self.queries[qid];
@@ -703,8 +765,7 @@ LocalCollection.prototype._modifyAndNotify = function (
} else {
// Because we don't support skip or limit (yet) in unordered queries, we
// can just do a direct lookup.
matched_before[qid] = _.has(query.results,
LocalCollection._idStringify(doc._id));
matched_before[qid] = query.results.has(doc._id);
}
}
@@ -767,7 +828,7 @@ LocalCollection._insertInResults = function (query, doc) {
query.added(doc._id, fields);
} else {
query.added(doc._id, fields);
query.results[LocalCollection._idStringify(doc._id)] = doc;
query.results.set(doc._id, doc);
}
};
@@ -777,9 +838,9 @@ LocalCollection._removeFromResults = function (query, doc) {
query.removed(doc._id);
query.results.splice(i, 1);
} else {
var id = LocalCollection._idStringify(doc._id); // in case callback mutates doc
var id = doc._id; // in case callback mutates doc
query.removed(doc._id);
delete query.results[id];
query.results.remove(id);
}
};
@@ -790,7 +851,7 @@ LocalCollection._updateInResults = function (query, doc, old_doc) {
if (!query.ordered) {
if (!_.isEmpty(changedFields)) {
query.changed(doc._id, changedFields);
query.results[LocalCollection._idStringify(doc._id)] = doc;
query.results.set(doc._id, doc);
}
return;
}
@@ -831,7 +892,8 @@ LocalCollection._recomputeResults = function (query, oldResults) {
oldResults = query.results;
if (query.distances)
query.distances.clear();
query.results = query.cursor._getRawObjects(query.ordered, query.distances);
query.results = query.cursor._getRawObjects({
ordered: query.ordered, distances: query.distances});
if (!query.paused) {
LocalCollection._diffQueryChanges(
@@ -888,7 +950,7 @@ LocalCollection.prototype.saveOriginals = function () {
var self = this;
if (self._savedOriginals)
throw new Error("Called saveOriginals twice without retrieveOriginals");
self._savedOriginals = {};
self._savedOriginals = new LocalCollection._IdMap;
};
LocalCollection.prototype.retrieveOriginals = function () {
var self = this;
@@ -908,9 +970,9 @@ LocalCollection.prototype._saveOriginal = function (id, doc) {
// Have we previously mutated the original (and so 'doc' is not actually
// original)? (Note the 'has' check rather than truth: we store undefined
// here for inserted docs!)
if (_.has(self._savedOriginals, id))
if (self._savedOriginals.has(id))
return;
self._savedOriginals[id] = EJSON.clone(doc);
self._savedOriginals.set(id, EJSON.clone(doc));
};
// Pause the observers. No callbacks from observers will fire until
@@ -927,7 +989,7 @@ LocalCollection.prototype.pauseObservers = function () {
for (var qid in this.queries) {
var query = this.queries[qid];
query.results_snapshot = EJSON.clone(query.results);
query.resultsSnapshot = EJSON.clone(query.results);
}
};
@@ -950,8 +1012,8 @@ LocalCollection.prototype.resumeObservers = function () {
// Diff the current results against the snapshot and send to observers.
// pass the query object for its observer callbacks.
LocalCollection._diffQueryChanges(
query.ordered, query.results_snapshot, query.results, query);
query.results_snapshot = null;
query.ordered, query.resultsSnapshot, query.results, query);
query.resultsSnapshot = null;
}
self._observeQueue.drain();
};

View File

@@ -343,6 +343,12 @@ Tinytest.add("minimongo - selector and projection combination", function (test)
// (are absent)
// - tests with $-operators in the selector (are incomplete and test "not
// ideal" implementation)
// * gives up on $-operators with non-scalar values ({$ne: {x: 1}})
// * analyses $in
// * analyses $nin/$ne
// * analyses $gt, $gte, $lt, $lte
// * gives up on a combination of $gt/$gte/$lt/$lte and $ne/$nin
// * doesn't support $eq properly
var test = null; // set this global in the beginning of every test
// T - should return true
@@ -465,5 +471,66 @@ Tinytest.add("minimongo - selector and projection combination", function (test)
F({ 'a.b.c': 1 }, { $set: { 'a.b': 222 } }, "a simple scalar selector and simple set a wrong type");
});
// "Can selector become true by modifier" analysis, exercised on selectors
// built from $-operators with scalar arguments ($lt/$gt/$lte/$gte, $ne, $in).
// Per the harness above: T(selector, modifier, message) expects the analysis
// to answer true (the modifier may make the selector match); F(...) expects
// false.
Tinytest.add("minimongo - can selector become true by modifier - $-scalar selectors and simple tests", function (t) {
  test = t;

  // Range operators on a nested path, alone and combined.
  T({ 'a.b.c': { $lt: 5 } }, { $set: { 'a.b': { c: 4 } } }, "nested $lt");
  F({ 'a.b.c': { $lt: 5 } }, { $set: { 'a.b': { c: 5 } } }, "nested $lt");
  F({ 'a.b.c': { $lt: 5 } }, { $set: { 'a.b': { c: 6 } } }, "nested $lt");
  F({ 'a.b.c': { $lt: 5 } }, { $set: { 'a.b.d': 7 } }, "nested $lt, the change doesn't matter");
  F({ 'a.b.c': { $lt: 5 } }, { $set: { 'a.b': { d: 7 } } }, "nested $lt, the key disappears");
  T({ 'a.b.c': { $lt: 5 } }, { $set: { 'a.b': { d: 7, c: -1 } } }, "nested $lt");

  // Combined $lt/$gt and $lte/$gte ranges, including impossible ranges and
  // infinities.
  F({ a: { $lt: 10, $gt: 3 } }, { $unset: { a: 1 } }, "unset $lt");
  T({ a: { $lt: 10, $gt: 3 } }, { $set: { a: 4 } }, "set between x and y");
  F({ a: { $lt: 10, $gt: 3 } }, { $set: { a: 3 } }, "set between x and y");
  F({ a: { $lt: 10, $gt: 3 } }, { $set: { a: 10 } }, "set between x and y");
  F({ a: { $gt: 10, $lt: 3 } }, { $set: { a: 9 } }, "impossible statement");
  T({ a: { $lte: 10, $gte: 3 } }, { $set: { a: 3 } }, "set between x and y");
  T({ a: { $lte: 10, $gte: 3 } }, { $set: { a: 10 } }, "set between x and y");
  F({ a: { $lte: 10, $gte: 3 } }, { $set: { a: -10 } }, "set between x and y");
  T({ a: { $lte: 10, $gte: 3, $gt: 3, $lt: 10 } }, { $set: { a: 4 } }, "set between x and y");
  F({ a: { $lte: 10, $gte: 3, $gt: 3, $lt: 10 } }, { $set: { a: 3 } }, "set between x and y");
  F({ a: { $lte: 10, $gte: 3, $gt: 3, $lt: 10 } }, { $set: { a: 10 } }, "set between x and y");
  F({ a: { $lte: 10, $gte: 3, $gt: 3, $lt: 10 } }, { $set: { a: Infinity } }, "set between x and y");
  T({ a: { $lte: 10, $gte: 3, $gt: 3, $lt: 10 }, x: 1 }, { $set: { x: 1 } }, "set between x and y - dummy");
  F({ a: { $lte: 10, $gte: 13, $gt: 3, $lt: 9 }, x: 1 }, { $set: { x: 1 } }, "set between x and y - dummy - impossible");
  F({ a: { $lte: 10 } }, { $set: { a: Infinity } }, "Infinity <= 10?");
  T({ a: { $lte: 10 } }, { $set: { a: -Infinity } }, "-Infinity <= 10?");

  // Floating-point precision near range boundaries.
  // XXX is this sufficient?
  T({ a: { $gt: 9.99999999999999, $lt: 10 }, x: 1 }, { $set: { x: 1 } }, "very close $gt and $lt");
  // XXX this test should be F, but since it is so hard to be precise in
  // floating point math, the current implementation falls back to T
  T({ a: { $gt: 9.999999999999999, $lt: 10 }, x: 1 }, { $set: { x: 1 } }, "very close $gt and $lt");

  // $ne with scalar arguments of various types.
  T({ a: { $ne: 5 } }, { $unset: { a: 1 } }, "unset of $ne");
  T({ a: { $ne: 5 } }, { $set: { a: 1 } }, "set of $ne");
  T({ a: { $ne: "some string" }, x: 1 }, { $set: { x: 1 } }, "$ne dummy");
  T({ a: { $ne: true }, x: 1 }, { $set: { x: 1 } }, "$ne dummy");
  T({ a: { $ne: false }, x: 1 }, { $set: { x: 1 } }, "$ne dummy");
  T({ a: { $ne: null }, x: 1 }, { $set: { x: 1 } }, "$ne dummy");
  T({ a: { $ne: Infinity }, x: 1 }, { $set: { x: 1 } }, "$ne dummy");
  T({ a: { $ne: 5 } }, { $set: { a: -10 } }, "set of $ne");

  // $in, alone and combined with range operators.
  T({ a: { $in: [1, 3, 5, 7] } }, { $set: { a: 5 } }, "$in checks");
  F({ a: { $in: [1, 3, 5, 7] } }, { $set: { a: -5 } }, "$in checks");
  T({ a: { $in: [1, 3, 5, 7], $gt: 6 }, x: 1 }, { $set: { x: 1 } }, "$in combination with $gt");

  // Modifiers touching subfields of paths the selector expects to be scalar.
  F({ a: { $lte: 10, $gte: 3 } }, { $set: { 'a.b': -10 } }, "sel between x and y, set its subfield");
  F({ b: { $in: [1, 3, 5, 7] } }, { $set: { 'b.c': 2 } }, "sel $in, set subfield");
  T({ b: { $in: [1, 3, 5, 7] } }, { $set: { 'bd.c': 2, b: 3 } }, "sel $in, set similar subfield");
  F({ 'b.c': { $in: [1, 3, 5, 7] } }, { $set: { b: 2 } }, "sel subfield of set scalar");

  // If modifier tries to set a sub-field of a path expected to be a scalar.
  F({ 'a.b': { $gt: 5, $lt: 7}, x: 1 }, { $set: { 'a.b.c': 3, x: 1 } }, "set sub-field of $gt,$lt operator (scalar expected)");
  F({ 'a.b': { $gt: 5, $lt: 7}, x: 1 }, { $set: { x: 1 }, $unset: { 'a.b.c': 1 } }, "unset sub-field of $gt,$lt operator (scalar expected)");
});
Tinytest.add("minimongo - can selector become true by modifier - $-nonscalar selectors and simple tests", function (t) {
test = t;
T({ a: { $ne: { x: 5 } } }, { $set: { 'a.x': 3 } }, "set of $ne");
// XXX this test should be F, but it is not implemented yet
T({ a: { $ne: { x: 5 } } }, { $set: { 'a.x': 5 } }, "set of $ne");
T({ a: { $in: [{ b: 1 }, { b: 3 }] } }, { $set: { a: { b: 3 } } }, "$in checks");
// XXX this test should be F, but it is not implemented yet
T({ a: { $in: [{ b: 1 }, { b: 3 }] } }, { $set: { a: { v: 3 } } }, "$in checks");
T({ a: { $ne: { a: 2 } }, x: 1 }, { $set: { x: 1 } }, "$ne dummy");
// XXX this test should be F, but it is not implemented yet
T({ a: { $ne: { a: 2 } } }, { $set: { a: { a: 2 } } }, "$ne object");
});
})();

View File

@@ -216,6 +216,21 @@ Tinytest.add("minimongo - cursors", function (test) {
test.equal(c.findOne(id).i, 2);
});
Tinytest.add("minimongo - transform", function (test) {
var c = new LocalCollection;
c.insert({});
// transform functions must return objects
var invalidTransform = function (doc) { return doc._id; };
test.throws(function () {
c.findOne({}, {transform: invalidTransform});
});
// transformed documents get _id field transplanted if not present
var transformWithoutId = function (doc) { return _.omit(doc, '_id'); };
test.equal(c.findOne({}, {transform: transformWithoutId})._id,
c.findOne()._id);
});
Tinytest.add("minimongo - misc", function (test) {
// deepcopy
var a = {a: [1, 2, 3], b: "x", c: true, d: {x: 12, y: [12]},
@@ -2155,7 +2170,8 @@ Tinytest.add("minimongo - observe ordered", function (test) {
handle.stop();
// test _suppress_initial
handle = c.find({}, {sort: {a: -1}}).observe(_.extend(cbs, {_suppress_initial: true}));
handle = c.find({}, {sort: {a: -1}}).observe(_.extend({
_suppress_initial: true}, cbs));
test.equal(operations.shift(), undefined);
c.insert({a:100});
test.equal(operations.shift(), ['added', {a:100}, 0, idA2]);
@@ -2182,6 +2198,21 @@ Tinytest.add("minimongo - observe ordered", function (test) {
test.equal(operations.shift(), ['changed', {a:3.5}, 0, {a:3}]);
handle.stop();
// test observe limit with pre-existing docs
c.remove({});
c.insert({a: 1});
c.insert({_id: 'two', a: 2});
c.insert({a: 3});
handle = c.find({}, {sort: {a: 1}, limit: 2}).observe(cbs);
test.equal(operations.shift(), ['added', {a:1}, 0, null]);
test.equal(operations.shift(), ['added', {a:2}, 1, null]);
test.equal(operations.shift(), undefined);
c.remove({a: 2});
test.equal(operations.shift(), ['removed', 'two', 1, {a:2}]);
test.equal(operations.shift(), ['added', {a:3}, 1, null]);
test.equal(operations.shift(), undefined);
handle.stop();
// test _no_indices
c.remove({});
@@ -2451,15 +2482,15 @@ Tinytest.add("minimongo - saveOriginals", function (test) {
// Verify the originals.
var originals = c.retrieveOriginals();
var affected = ['bar', 'baz', 'quux', 'whoa', 'hooray'];
test.equal(_.size(originals), _.size(affected));
test.equal(originals.size(), _.size(affected));
_.each(affected, function (id) {
test.isTrue(_.has(originals, id));
test.isTrue(originals.has(id));
});
test.equal(originals.bar, {_id: 'bar', x: 'updateme'});
test.equal(originals.baz, {_id: 'baz', x: 'updateme'});
test.equal(originals.quux, {_id: 'quux', y: 'removeme'});
test.equal(originals.whoa, {_id: 'whoa', y: 'removeme'});
test.equal(originals.hooray, undefined);
test.equal(originals.get('bar'), {_id: 'bar', x: 'updateme'});
test.equal(originals.get('baz'), {_id: 'baz', x: 'updateme'});
test.equal(originals.get('quux'), {_id: 'quux', y: 'removeme'});
test.equal(originals.get('whoa'), {_id: 'whoa', y: 'removeme'});
test.equal(originals.get('hooray'), undefined);
// Verify that changes actually occured.
test.equal(c.find().count(), 4);
@@ -2472,16 +2503,16 @@ Tinytest.add("minimongo - saveOriginals", function (test) {
c.saveOriginals();
originals = c.retrieveOriginals();
test.isTrue(originals);
test.isTrue(_.isEmpty(originals));
test.isTrue(originals.empty());
// Insert and remove a document during the period.
c.saveOriginals();
c.insert({_id: 'temp', q: 8});
c.remove('temp');
originals = c.retrieveOriginals();
test.equal(_.size(originals), 1);
test.isTrue(_.has(originals, 'temp'));
test.equal(originals.temp, undefined);
test.equal(originals.size(), 1);
test.isTrue(originals.has('temp'));
test.equal(originals.get('temp'), undefined);
});
Tinytest.add("minimongo - saveOriginals errors", function (test) {
@@ -2550,6 +2581,14 @@ Tinytest.add("minimongo - pause", function (test) {
test.equal(operations.shift(), ['changed', {a:3}, 0, {a:1}]);
test.length(operations, 0);
// test special case for remove({})
c.pauseObservers();
test.equal(c.remove({}), 1);
test.length(operations, 0);
c.resumeObservers();
test.equal(operations.shift(), ['removed', 1, 0, {a:3}]);
test.length(operations, 0);
h.stop();
});

View File

@@ -175,5 +175,33 @@ LocalCollection._observeFromObserveChanges = function (cursor, observeCallbacks)
{callbacks: observeChangesCallbacks});
var handle = cursor.observeChanges(changeObserver.applyChange);
suppressed = false;
if (changeObserver.ordered) {
// Fetches the current list of documents, in order, as an array. Can be
// called at any time. Internal API assumed by the `observe-sequence`
// package (used by Meteor UI for `#each` blocks). Only defined on ordered
// observes (those that listen on `addedAt` or similar). Continues to work
// after `stop()` is called on the handle.
//
// Because we already materialize the full OrderedDict of all documents, it
// seems nice to provide access to the view rather than making the data
// consumer reconstitute it. This gives the consumer a shot at doing
// something smart with the feed like proxying it, since firing callbacks
// like `changed` and `movedTo` basically requires omniscience (knowing old
// and new documents, old and new indices, and the correct value for
// `before`).
//
// NOTE: If called from an observe callback for a certain change, the result
// is *not* guaranteed to be a snapshot of the cursor up to that
// change. This is because the callbacks are invoked before updating docs.
handle._fetch = function () {
var docsArray = [];
changeObserver.docs.forEach(function (doc) {
docsArray.push(transform(EJSON.clone(doc)));
});
return docsArray;
};
}
return handle;
};

View File

@@ -13,6 +13,7 @@ Package.on_use(function (api) {
api.use('geojson-utils');
api.add_files([
'minimongo.js',
'wrap_transform.js',
'helpers.js',
'selector.js',
'sort.js',
@@ -37,5 +38,6 @@ Package.on_test(function (api) {
api.use(['tinytest', 'underscore', 'ejson', 'ordered-dict',
'random', 'deps']);
api.add_files('minimongo_tests.js', 'client');
api.add_files('wrap_transform_tests.js');
api.add_files('minimongo_server_tests.js', 'server');
});

View File

@@ -28,9 +28,13 @@ Minimongo.Matcher = function (selector) {
self._hasGeoQuery = false;
// Set to true if compilation finds a $where.
self._hasWhere = false;
// Set to false if compilation finds anything other than a simple equality on
// some fields.
self._isEquality = true;
// Set to false if compilation finds anything other than a simple equality or
// one or more of '$gt', '$gte', '$lt', '$lte', '$ne', '$in', '$nin' used with
// scalars as operands.
self._isSimple = true;
// Set to a dummy document which always matches this Matcher. Or set to null
// if such document is too hard to find.
self._matchingDocument = undefined;
// A clone of the original selector. Used by canBecomeTrueByModifier.
self._selector = null;
self._docMatcher = self._compileSelector(selector);
@@ -46,8 +50,8 @@ _.extend(Minimongo.Matcher.prototype, {
hasWhere: function () {
return this._hasWhere;
},
isEquality: function () {
return this._isEquality;
isSimple: function () {
return this._isSimple;
},
// Given a selector, return a function that takes one argument, a
@@ -56,7 +60,7 @@ _.extend(Minimongo.Matcher.prototype, {
var self = this;
// you can pass a literal function instead of a selector
if (selector instanceof Function) {
self._isEquality = false;
self._isSimple = false;
self._selector = selector;
self._recordPathUsed('');
return function (doc) {
@@ -77,7 +81,7 @@ _.extend(Minimongo.Matcher.prototype, {
// likely programmer error, and not what you want, particularly for
// destructive operations.
if (!selector || (('_id' in selector) && !selector._id)) {
self._isEquality = null;
self._isSimple = false;
return nothingMatcher;
}
@@ -116,7 +120,7 @@ var compileDocumentSelector = function (docSelector, matcher, options) {
// this function), or $where.
if (!_.has(LOGICAL_OPERATORS, key))
throw new Error("Unrecognized logical operator: " + key);
matcher._isEquality = false;
matcher._isSimple = false;
docMatchers.push(LOGICAL_OPERATORS[key](subSelector, matcher,
options.inElemMatch));
} else {
@@ -144,7 +148,7 @@ var compileDocumentSelector = function (docSelector, matcher, options) {
// [branched value]->result object.
var compileValueSelector = function (valueSelector, matcher, isRoot) {
if (valueSelector instanceof RegExp) {
matcher._isEquality = false;
matcher._isSimple = false;
return convertElementMatcherToBranchedMatcher(
regexpElementMatcher(valueSelector));
} else if (isOperatorObject(valueSelector)) {
@@ -235,8 +239,16 @@ var operatorBranchedMatcher = function (valueSelector, matcher, isRoot) {
var operatorMatchers = [];
_.each(valueSelector, function (operand, operator) {
// XXX we should actually implement $eq, which is new in 2.6
if (operator !== '$eq')
matcher._isEquality = false;
var simpleRange = _.contains(['$lt', '$lte', '$gt', '$gte'], operator) &&
_.isNumber(operand);
var simpleInequality = operator === '$ne' && !_.isObject(operand);
var simpleInclusion = _.contains(['$in', '$nin'], operator) &&
_.isArray(operand) && !_.any(operand, _.isObject);
if (! (operator === '$eq' || simpleRange ||
simpleInclusion || simpleInequality)) {
matcher._isSimple = false;
}
if (_.has(VALUE_OPERATORS, operator)) {
operatorMatchers.push(

View File

@@ -48,6 +48,9 @@ Minimongo.Matcher.prototype.affectedByModifier = function (modifier) {
// only. (assumed to come from oplog)
// @returns - Boolean: if after applying the modifier, selector can start
// accepting the modified value.
// NOTE: assumes that document affected by modifier didn't match this Matcher
// before, so if modifier can't convince selector in a positive change it would
// stay 'false'.
// Currently doesn't support $-operators and numeric indices precisely.
Minimongo.Matcher.prototype.canBecomeTrueByModifier = function (modifier) {
var self = this;
@@ -55,24 +58,42 @@ Minimongo.Matcher.prototype.canBecomeTrueByModifier = function (modifier) {
return false;
modifier = _.extend({$set:{}, $unset:{}}, modifier);
var modifierPaths = _.keys(modifier.$set).concat(_.keys(modifier.$unset));
if (!self.isEquality())
if (!self.isSimple())
return true;
if (_.any(self._getPaths(), pathHasNumericKeys) ||
_.any(_.keys(modifier.$unset), pathHasNumericKeys) ||
_.any(_.keys(modifier.$set), pathHasNumericKeys))
_.any(modifierPaths, pathHasNumericKeys))
return true;
// convert a selector into an object matching the selector
// { 'a.b': { ans: 42 }, 'foo.bar': null, 'foo.baz': "something" }
// => { a: { b: { ans: 42 } }, foo: { bar: null, baz: "something" } }
var doc = pathsToTree(self._getPaths(),
function (path) { return self._selector[path]; },
_.identity /*conflict resolution is no resolution*/);
// check if there is a $set or $unset that indicates something is an
// object rather than a scalar in the actual object where we saw $-operator
// NOTE: it is correct since we allow only scalars in $-operators
// Example: for selector {'a.b': {$gt: 5}} the modifier {'a.b.c':7} would
// definitely set the result to false as 'a.b' appears to be an object.
var expectedScalarIsObject = _.any(self._selector, function (sel, path) {
if (! isOperatorObject(sel))
return false;
return _.any(modifierPaths, function (modifierPath) {
return startsWith(modifierPath, path + '.');
});
});
if (expectedScalarIsObject)
return false;
// See if we can apply the modifier on the ideally matching object. If it
// still matches the selector, then the modifier could have turned the real
// object in the database into something matching.
var matchingDocument = EJSON.clone(self.matchingDocument());
// The selector is too complex, anything can happen.
if (matchingDocument === null)
return true;
try {
LocalCollection._modify(doc, modifier);
LocalCollection._modify(matchingDocument, modifier);
} catch (e) {
// Couldn't set a property on a field which is a scalar or null in the
// selector.
@@ -89,7 +110,74 @@ Minimongo.Matcher.prototype.canBecomeTrueByModifier = function (modifier) {
throw e;
}
return self.documentMatches(doc).result;
return self.documentMatches(matchingDocument).result;
};
// Returns an object that would match the selector if possible or null if the
// selector is too complex for us to analyze
// { 'a.b': { ans: 42 }, 'foo.bar': null, 'foo.baz': "something" }
// => { a: { b: { ans: 42 } }, foo: { bar: null, baz: "something" } }
Minimongo.Matcher.prototype.matchingDocument = function () {
var self = this;
// check if it was computed before
if (self._matchingDocument !== undefined)
return self._matchingDocument;
// If the analysis of this selector is too hard for our implementation
// fallback to "YES"
var fallback = false;
self._matchingDocument = pathsToTree(self._getPaths(),
function (path) {
var valueSelector = self._selector[path];
if (isOperatorObject(valueSelector)) {
// if there is a strict equality, there is a good
// chance we can use one of those as "matching"
// dummy value
if (valueSelector.$in) {
var matcher = new Minimongo.Matcher({ placeholder: valueSelector });
// Return anything from $in that matches the whole selector for this
// path. If nothing matches, returns `undefined` as nothing can make
// this selector into `true`.
return _.find(valueSelector.$in, function (x) {
return matcher.documentMatches({ placeholder: x }).result;
});
} else if (onlyContainsKeys(valueSelector, ['$gt', '$gte', '$lt', '$lte'])) {
var lowerBound = -Infinity, upperBound = Infinity;
_.each(['$lte', '$lt'], function (op) {
if (_.has(valueSelector, op) && valueSelector[op] < upperBound)
upperBound = valueSelector[op];
});
_.each(['$gte', '$gt'], function (op) {
if (_.has(valueSelector, op) && valueSelector[op] > lowerBound)
lowerBound = valueSelector[op];
});
var middle = (lowerBound + upperBound) / 2;
var matcher = new Minimongo.Matcher({ placeholder: valueSelector });
if (!matcher.documentMatches({ placeholder: middle }).result &&
(middle === lowerBound || middle === upperBound))
fallback = true;
return middle;
} else if (onlyContainsKeys(valueSelector, ['$nin',' $ne'])) {
// Since self._isSimple makes sure $nin and $ne are not combined with
// objects or arrays, we can confidently return an empty object as it
// never matches any scalar.
return {};
} else {
fallback = true;
}
}
return self._selector[path];
},
_.identity /*conflict resolution is no resolution*/);
if (fallback)
self._matchingDocument = null;
return self._matchingDocument;
};
var getPaths = function (sel) {
@@ -106,7 +194,20 @@ var getPaths = function (sel) {
}).flatten().uniq().value();
};
function pathHasNumericKeys (path) {
// A helper to ensure object has only certain keys
var onlyContainsKeys = function (obj, keys) {
return _.all(obj, function (v, k) {
return _.contains(keys, k);
});
};
var pathHasNumericKeys = function (path) {
return _.any(path.split('.'), isNumericKey);
}
// XXX from Underscore.String (http://epeli.github.com/underscore.string/)
var startsWith = function(str, starts) {
return str.length >= starts.length &&
str.substring(0, starts.length) === starts;
};

View File

@@ -48,11 +48,19 @@ Sorter = function (spec) {
// min/max.)
//
// XXX This is actually wrong! In fact, the whole attempt to compile sort
// functions independently of selectors is wrong. In MongoDB, if you have
// documents {_id: 'x', a: [1, 10]} and {_id: 'y', a: [5, 15]},
// then C.find({}, {sort: {a: 1}}) puts x before y (1 comes before 5).
// But C.find({a: {$gt: 3}}, {sort: {a: 1}}) puts y before x (1 does not match
// the selector, and 5 comes before 10).
// functions independently of selectors is wrong. In MongoDB, if you have
// documents {_id: 'x', a: [1, 10]} and {_id: 'y', a: [5, 15]}, then
// C.find({}, {sort: {a: 1}}) puts x before y (1 comes before 5). But
// C.find({a: {$gt: 3}}, {sort: {a: 1}}) puts y before x (1 does not match
// the selector, and 5 comes before 10).
//
// The way this works is pretty subtle! For example, if the documents are
// instead {_id: 'x', a: [{x: 1}, {x: 10}]}) and
// {_id: 'y', a: [{x: 5}, {x: 15}]}),
// then C.find({'a.x': {$gt: 3}}, {sort: {'a.x': 1}}) and
// C.find({a: {$elemMatch: {x: {$gt: 3}}}}, {sort: {'a.x': 1}})
// both follow this rule (y before x). ie, you do have to apply this
// through $elemMatch.
var reduceValue = function (branchValues, findMin) {
// Expand any leaf arrays that we find, and ignore those arrays themselves.
branchValues = expandArraysInBranches(branchValues, true);

View File

@@ -0,0 +1,41 @@
// Wrap a transform function to return objects that have the _id field
// of the untransformed document. This ensures that subsystems such as
// the observe-sequence package that call `observe` can keep track of
// the documents identities.
//
// - Require that it returns objects
// - If the return value has an _id field, verify that it matches the
// original _id field
// - If the return value doesn't have an _id field, add it back.
LocalCollection.wrapTransform = function (transform) {
if (!transform)
return null;
return function (doc) {
if (!_.has(doc, '_id')) {
// XXX do we ever have a transform on the oplog's collection? because that
// collection has no _id.
throw new Error("can only transform documents with _id");
}
var id = doc._id;
// XXX consider making deps a weak dependency and checking Package.deps here
var transformed = Deps.nonreactive(function () {
return transform(doc);
});
if (!isPlainObject(transformed)) {
throw new Error("transform must return object");
}
if (_.has(transformed, '_id')) {
if (!EJSON.equals(transformed._id, id)) {
throw new Error("transformed document can't have different _id");
}
} else {
transformed._id = id;
}
return transformed;
};
};

View File

@@ -0,0 +1,58 @@
Tinytest.add("minimongo - wrapTransform", function (test) {
var wrap = LocalCollection.wrapTransform;
// Transforming no function gives falsey.
test.isFalse(wrap(undefined));
test.isFalse(wrap(null));
// It's OK if you don't change the ID.
var validTransform = function (doc) {
delete doc.x;
doc.y = 42;
doc.z = function () { return 43; };
return doc;
};
var transformed = wrap(validTransform)({_id: "asdf", x: 54});
test.equal(_.keys(transformed), ['_id', 'y', 'z']);
test.equal(transformed.y, 42);
test.equal(transformed.z(), 43);
// Ensure that ObjectIDs work (even if the _ids in question are not ===-equal)
var oid1 = new LocalCollection._ObjectID();
var oid2 = new LocalCollection._ObjectID(oid1.toHexString());
test.equal(wrap(function () {return {_id: oid2};})({_id: oid1}),
{_id: oid2});
// transform functions must return objects
var invalidObjects = [
"asdf", new LocalCollection._ObjectID(), false, null, true,
27, [123], /adsf/, new Date, function () {}, undefined
];
_.each(invalidObjects, function (invalidObject) {
var wrapped = wrap(function () { return invalidObject; });
test.throws(function () {
wrapped({_id: "asdf"});
});
}, /transform must return object/);
// transform functions may not change _ids
var wrapped = wrap(function (doc) { doc._id = 'x'; return doc; });
test.throws(function () {
wrapped({_id: 'y'});
}, /can't have different _id/);
// transform functions may remove _ids
test.equal({_id: 'a', x: 2},
wrap(function (d) {delete d._id; return d;})({_id: 'a', x: 2}));
// test that wrapped transform functions are nonreactive
var unwrapped = function (doc) {
test.isFalse(Deps.active);
return doc;
};
var handle = Deps.autorun(function () {
test.isTrue(Deps.active);
wrap(unwrapped)({_id: "xxx"});
});
handle.stop();
});

View File

@@ -70,6 +70,8 @@ if (Meteor.isServer) {
"withTransform", false, function (doc) {
return doc.a;
});
var restrictedCollectionForInvalidTransformTest = defineCollection(
"collection-restictedForInvalidTransform", false /*insecure*/);
if (needToConfigure) {
restrictedCollectionWithTransform.allow({
@@ -91,6 +93,11 @@ if (Meteor.isServer) {
return !!doc.topLevelField;
}
});
restrictedCollectionForInvalidTransformTest.allow({
// transform must return an object which is not a mongo id
transform: function (doc) { return doc._id; },
insert: function () { return true; }
});
// two calls to allow to verify that either validator is sufficient.
var allows = [{
@@ -253,7 +260,8 @@ if (Meteor.isClient) {
"withTransform", function (doc) {
return doc.a;
});
var restrictedCollectionForInvalidTransformTest = defineCollection(
"collection-restictedForInvalidTransform");
// test that if allow is called once then the collection is
// restricted, and that other mutations aren't allowed
@@ -350,7 +358,7 @@ if (Meteor.isClient) {
},
function (test, expect) {
test.equal(
restrictedCollectionWithTransform.findOne({"a.bar": "bar"}),
_.omit(restrictedCollectionWithTransform.findOne({"a.bar": "bar"}), '_id'),
{foo: "foo", bar: "bar", baz: "baz"});
restrictedCollectionWithTransform.remove(item1, expect(function (e, res) {
test.isFalse(e);
@@ -421,6 +429,7 @@ if (Meteor.isClient) {
{$set: {updated: true}},
expect(function (err, res) {
test.isFalse(err);
test.equal(res, 1);
test.equal(collection.find({updated: true}).count(), 1);
}));
},
@@ -431,6 +440,7 @@ if (Meteor.isClient) {
{$set: {updated: true}},
expect(function (err, res) {
test.isFalse(err);
test.equal(res, 1);
test.equal(collection.find({updated: true}).count(), 2);
}));
},
@@ -603,6 +613,7 @@ if (Meteor.isClient) {
canUpdateId, {$set: {"dotted.field": 1}},
expect(function (err, res) {
test.isFalse(err);
test.equal(res, 1);
test.equal(collection.findOne(canUpdateId).dotted.field, 1);
}));
},
@@ -622,6 +633,7 @@ if (Meteor.isClient) {
{$set: {updated: true}},
expect(function (err, res) {
test.isFalse(err);
test.equal(res, 0);
// nothing has changed
test.equal(collection.find().count(), 3);
test.equal(collection.find({updated: true}).count(), 0);
@@ -670,6 +682,7 @@ if (Meteor.isClient) {
{$set: {updated: true}},
expect(function (err, res) {
test.isFalse(err);
test.equal(res, 1);
test.equal(collection.find({updated: true}).count(), 1);
}));
},
@@ -701,6 +714,7 @@ if (Meteor.isClient) {
{$set: {cantRemove: false, canUpdate2: true}},
expect(function (err, res) {
test.isFalse(err);
test.equal(res, 1);
test.equal(collection.find({cantRemove: true}).count(), 0);
}));
},
@@ -710,11 +724,23 @@ if (Meteor.isClient) {
collection.remove(canRemoveId,
expect(function (err, res) {
test.isFalse(err);
test.equal(res, 1);
// successfully removed
test.equal(collection.find().count(), 2);
}));
},
// try to remove a doc that doesn't exist. see we remove no docs.
function (test, expect) {
collection.remove('some-random-id-that-never-matches',
expect(function (err, res) {
test.isFalse(err);
test.equal(res, 0);
// nothing removed
test.equal(collection.find().count(), 2);
}));
},
// methods can still bypass restrictions
function (test, expect) {
collection.callClearMethod(
@@ -726,6 +752,13 @@ if (Meteor.isClient) {
}
]);
});
testAsyncMulti(
"collection - allow/deny transform must return object, " + idGeneration,
[function (test, expect) {
restrictedCollectionForInvalidTransformTest.insert({}, expect(function (err, res) {
test.isTrue(err);
}));
}]);
}); // end idGeneration loop
} // end if isClient

View File

@@ -38,10 +38,7 @@ Meteor.Collection = function (name, options) {
break;
}
if (options.transform)
self._transform = Deps._makeNonreactive(options.transform);
else
self._transform = null;
self._transform = LocalCollection.wrapTransform(options.transform);
if (!name && (name !== null)) {
Meteor._debug("Warning: creating anonymous collection. It will not be " +
@@ -554,10 +551,17 @@ Meteor.Collection.ObjectID = LocalCollection._ObjectID;
if (!(options[name] instanceof Function)) {
throw new Error(allowOrDeny + ": Value for `" + name + "` must be a function");
}
if (self._transform && options.transform !== null)
options[name].transform = self._transform;
if (options.transform)
options[name].transform = Deps._makeNonreactive(options.transform);
// If the transform is specified at all (including as 'null') in this
// call, then take that; otherwise, take the transform from the
// collection.
if (options.transform === undefined) {
options[name].transform = self._transform; // already wrapped
} else {
options[name].transform = LocalCollection.wrapTransform(
options.transform);
}
self._validators[name][allowOrDeny].push(options[name]);
}
});
@@ -780,7 +784,7 @@ Meteor.Collection.prototype._validatedUpdate = function(
var doc = self._collection.findOne(selector, findOptions);
if (!doc) // none satisfied!
return;
return 0;
var factoriedDoc;
@@ -813,7 +817,7 @@ Meteor.Collection.prototype._validatedUpdate = function(
// avoid races, but since selector is guaranteed to already just be an ID, we
// don't have to any more.
self._collection.update.call(
return self._collection.update.call(
self._collection, selector, mutator, options);
};
@@ -843,7 +847,7 @@ Meteor.Collection.prototype._validatedRemove = function(userId, selector) {
var doc = self._collection.findOne(selector, findOptions);
if (!doc)
return;
return 0;
// call user validators.
// Any deny returns true means denied.
@@ -864,5 +868,5 @@ Meteor.Collection.prototype._validatedRemove = function(userId, selector) {
// Mongo to avoid races, but since selector is guaranteed to already just be
// an ID, we don't have to any more.
self._collection.remove.call(self._collection, selector);
return self._collection.remove.call(self._collection, selector);
};

View File

@@ -5,7 +5,7 @@ LocalCollectionDriver = function () {
var ensureCollection = function (name, collections) {
if (!(name in collections))
collections[name] = new LocalCollection(name);
collections[name] = new LocalCollection({name: name});
return collections[name];
};

View File

@@ -698,8 +698,7 @@ _.each(['forEach', 'map', 'rewind', 'fetch', 'count'], function (method) {
});
Cursor.prototype.getTransform = function () {
var self = this;
return self._cursorDescription.options.transform;
return this._cursorDescription.options.transform;
};
// When you call Meteor.publish() with a function that returns a Cursor, we need
@@ -779,9 +778,8 @@ var SynchronousCursor = function (dbCursor, cursorDescription, options) {
// inside a user-visible Cursor, we want to provide the outer cursor!
self._selfForIteration = options.selfForIteration || self;
if (options.useTransform && cursorDescription.options.transform) {
self._transform = Deps._makeNonreactive(
cursorDescription.options.transform
);
self._transform = LocalCollection.wrapTransform(
cursorDescription.options.transform);
} else {
self._transform = null;
}
@@ -792,7 +790,7 @@ var SynchronousCursor = function (dbCursor, cursorDescription, options) {
self._synchronousNextObject = Future.wrap(
dbCursor.nextObject.bind(dbCursor), 0);
self._synchronousCount = Future.wrap(dbCursor.count.bind(dbCursor));
self._visitedIds = {};
self._visitedIds = new LocalCollection._IdMap;
};
_.extend(SynchronousCursor.prototype, {
@@ -812,9 +810,8 @@ _.extend(SynchronousCursor.prototype, {
// because we want to maintain O(1) memory usage. And if there isn't _id
// for some reason (maybe it's the oplog), then we don't do this either.
// (Be careful to do this for falsey but existing _id, though.)
var strId = LocalCollection._idStringify(doc._id);
if (self._visitedIds[strId]) continue;
self._visitedIds[strId] = true;
if (self._visitedIds.has(doc._id)) continue;
self._visitedIds.set(doc._id, true);
}
if (self._transform)
@@ -854,7 +851,7 @@ _.extend(SynchronousCursor.prototype, {
// known to be synchronous
self._dbCursor.rewind();
self._visitedIds = {};
self._visitedIds = new LocalCollection._IdMap;
},
// Mostly usable for tailable cursors.
@@ -880,9 +877,9 @@ _.extend(SynchronousCursor.prototype, {
if (ordered) {
return self.fetch();
} else {
var results = {};
var results = new LocalCollection._IdMap;
self.forEach(function (doc) {
results[doc._id] = doc;
results.set(doc._id, doc);
});
return results;
}

View File

@@ -876,15 +876,32 @@ testAsyncMulti('mongo-livedata - document goes through a transform, ' + idGenera
test.isTrue(id);
self.id2 = id;
}));
},
}
]);
testAsyncMulti('mongo-livedata - transform sets _id if not present, ' + idGeneration, [
function (test, expect) {
var self = this;
// Test that a transform that returns something other than a document with
// an _id (eg, a number) works. Regression test for #974.
test.equal(self.coll.find({}, {
transform: function (doc) { return doc.d.getSeconds(); },
sort: {d: 1}
}).fetch(), [50, 51]);
var justId = function (doc) {
return _.omit(doc, '_id');
};
TRANSFORMS["justId"] = justId;
var collectionOptions = {
idGeneration: idGeneration,
transform: justId,
transformName: "justId"
};
var collectionName = Random.id();
if (Meteor.isClient) {
Meteor.call('createInsecureCollection', collectionName, collectionOptions);
Meteor.subscribe('c-' + collectionName);
}
self.coll = new Meteor.Collection(collectionName, collectionOptions);
self.coll.insert({}, expect(function (err, id) {
test.isFalse(err);
test.isTrue(id);
test.equal(self.coll.findOne()._id, id);
}));
}
]);

View File

@@ -2,9 +2,9 @@ var Fiber = Npm.require('fibers');
var Future = Npm.require('fibers/future');
var PHASE = {
QUERYING: 1,
FETCHING: 2,
STEADY: 3
QUERYING: "QUERYING",
FETCHING: "FETCHING",
STEADY: "STEADY"
};
// OplogObserveDriver is an alternative to PollingObserveDriver which follows
@@ -28,15 +28,26 @@ OplogObserveDriver = function (options) {
Package.facts && Package.facts.Facts.incrementServerFact(
"mongo-livedata", "observe-drivers-oplog", 1);
self._phase = PHASE.QUERYING;
self._registerPhaseChange(PHASE.QUERYING);
// A minimongo LocalCollection containing the docs that match the selector,
// and maybe more. It is guaranteed to contain all the fields needed for the
// selector and the projection, and may have other fields too. (In the future
// we may try to make this collection be shared between multiple
// OplogObserveDrivers, but not currently.)
self._collection =
new LocalCollection({_observeCallbacksWillNeverYield: true});
// XXX think about what all the options are
var minimongoCursor = self._collection.find(
self._cursorDescription.selector, self._cursorDescription.options);
self._stopHandles.push(minimongoCursor.observeChanges(self._multiplexer));
self._published = new LocalCollection._IdMap;
var selector = self._cursorDescription.selector;
self._matcher = options.matcher;
var projection = self._cursorDescription.options.fields || {};
self._projectionFn = LocalCollection._compileProjection(projection);
// Projection function, result of combining important fields for selector and
// existing fields projection
var projection = self._cursorDescription.options.fields || {};
self._sharedProjection = self._matcher.combineIntoProjection(projection);
self._sharedProjectionFn = LocalCollection._compileProjection(
self._sharedProjection);
@@ -51,32 +62,32 @@ OplogObserveDriver = function (options) {
forEachTrigger(self._cursorDescription, function (trigger) {
self._stopHandles.push(self._mongoHandle._oplogHandle.onOplogEntry(
trigger, function (notification) {
var op = notification.op;
if (notification.dropCollection) {
// Note: this call is not allowed to block on anything (especially on
// waiting for oplog entries to catch up) because that will block
// onOplogEntry!
self._needToPollQuery();
} else {
// All other operators should be handled depending on phase
if (self._phase === PHASE.QUERYING)
self._handleOplogEntryQuerying(op);
else
self._handleOplogEntrySteadyOrFetching(op);
}
Meteor._noYieldsAllowed(function () {
var op = notification.op;
if (notification.dropCollection) {
// Note: this call is not allowed to block on anything (especially
// on waiting for oplog entries to catch up) because that will block
// onOplogEntry!
self._needToPollQuery();
} else {
// All other operators should be handled depending on phase
if (self._phase === PHASE.QUERYING)
self._handleOplogEntryQuerying(op);
else
self._handleOplogEntrySteadyOrFetching(op);
}
});
}
));
});
// XXX ordering w.r.t. everything else?
self._stopHandles.push(listenAll(
self._cursorDescription, function (notification, complete) {
self._cursorDescription, function (notification) {
// If we're not in a write fence, we don't have to do anything.
var fence = DDPServer._CurrentWriteFence.get();
if (!fence) {
complete();
if (!fence)
return;
}
var write = fence.beginWrite();
// This write cannot complete until we've caught up to "this point" in the
// oplog, and then made it back to the steady state.
@@ -96,7 +107,6 @@ OplogObserveDriver = function (options) {
self._writesToCommitWhenWeReachSteady.push(write);
}
});
complete();
}
));
@@ -110,100 +120,109 @@ OplogObserveDriver = function (options) {
_.extend(OplogObserveDriver.prototype, {
_add: function (doc) {
var self = this;
var id = doc._id;
var fields = _.clone(doc);
delete fields._id;
if (self._published.has(id))
throw Error("tried to add something already published " + id);
self._published.set(id, self._sharedProjectionFn(fields));
self._multiplexer.added(id, self._projectionFn(fields));
doc = self._sharedProjectionFn(doc);
// XXX does _sharedProjection always preserve id?
if (!_.has(doc, '_id'))
throw Error("Can't add doc without _id");
self._collection.insert(doc);
},
_remove: function (id) {
_remove: function (id, options) {
var self = this;
if (!self._published.has(id))
options = options || {};
var removed = self._collection.remove({_id: id});
if (options.mustExist && removed !== 1)
throw Error("tried to remove something unpublished " + id);
self._published.remove(id);
self._multiplexer.removed(id);
},
_handleDoc: function (id, newDoc, mustMatchNow) {
var self = this;
newDoc = _.clone(newDoc);
newDoc = _.clone(newDoc); // *shallow* clone
// XXX this is just about "matching selector", not about skip/limit
var matchesNow = newDoc && self._matcher.documentMatches(newDoc).result;
if (mustMatchNow && !matchesNow) {
throw Error("expected " + EJSON.stringify(newDoc) + " to match "
+ EJSON.stringify(self._cursorDescription));
}
var matchedBefore = self._published.has(id);
var inCollection = !!self._collection.find(id).count();
if (matchesNow && !matchedBefore) {
if (matchesNow && !inCollection) {
// It matches the selector and it isn't in our collection, so add it.
// XXX once we add skip/limit, this may not always send an added, and
// we may need to do some GC
self._add(newDoc);
} else if (matchedBefore && !matchesNow) {
self._remove(id);
} else if (inCollection && !matchesNow) {
// We remove this from the collection to achieve two goals: (a) causing
// the observeChanges to fire removed() and (b) saving memory. That said,
// it would be legitimate (if !!newDoc) to update the collection instead
// of removing, if we thought we might need this doc again soon.
self._remove(id, {mustExist: true});
} else if (matchesNow) {
var oldDoc = self._published.get(id);
if (!oldDoc)
throw Error("thought that " + id + " was there!");
delete newDoc._id;
self._published.set(id, self._sharedProjectionFn(newDoc));
var changed = LocalCollection._makeChangedFields(_.clone(newDoc), oldDoc);
changed = self._projectionFn(changed);
if (!_.isEmpty(changed))
self._multiplexer.changed(id, changed);
// Replace the doc inside our collection, which may trigger a changed
// callback.
newDoc = self._sharedProjectionFn(newDoc);
// XXX does _sharedProjection always preserve id?
if (!_.has(newDoc, '_id'))
throw Error("Can't add newDoc without _id");
self._collection.update(id, newDoc);
}
},
_fetchModifiedDocuments: function () {
var self = this;
self._phase = PHASE.FETCHING;
while (!self._stopped && !self._needToFetch.empty()) {
if (self._phase !== PHASE.FETCHING)
throw new Error("phase in fetchModifiedDocuments: " + self._phase);
self._registerPhaseChange(PHASE.FETCHING);
// Defer, because nothing called from the oplog entry handler may yield, but
// fetch() yields.
Meteor.defer(function () {
while (!self._stopped && !self._needToFetch.empty()) {
if (self._phase !== PHASE.FETCHING)
throw new Error("phase in fetchModifiedDocuments: " + self._phase);
self._currentlyFetching = self._needToFetch;
var thisGeneration = ++self._fetchGeneration;
self._needToFetch = new LocalCollection._IdMap;
var waiting = 0;
var anyError = null;
var fut = new Future;
// This loop is safe, because _currentlyFetching will not be updated
// during this loop (in fact, it is never mutated).
self._currentlyFetching.forEach(function (cacheKey, id) {
waiting++;
self._mongoHandle._docFetcher.fetch(
self._cursorDescription.collectionName, id, cacheKey,
function (err, doc) {
if (err) {
if (!anyError)
anyError = err;
} else if (!self._stopped && self._phase === PHASE.FETCHING
&& self._fetchGeneration === thisGeneration) {
// We re-check the generation in case we've had an explicit
// _pollQuery call which should effectively cancel this round of
// fetches. (_pollQuery increments the generation.)
self._handleDoc(id, doc);
}
waiting--;
// Because fetch() never calls its callback synchronously, this is
// safe (ie, we won't call fut.return() before the forEach is done).
if (waiting === 0)
fut.return();
});
});
fut.wait();
// XXX do this even if we've switched to PHASE.QUERYING?
if (anyError)
throw anyError;
// Exit now if we've had a _pollQuery call.
if (self._phase === PHASE.QUERYING)
return;
self._currentlyFetching = null;
}
self._beSteady();
self._currentlyFetching = self._needToFetch;
var thisGeneration = ++self._fetchGeneration;
self._needToFetch = new LocalCollection._IdMap;
var waiting = 0;
var anyError = null;
var fut = new Future;
// This loop is safe, because _currentlyFetching will not be updated
// during this loop (in fact, it is never mutated).
self._currentlyFetching.forEach(function (cacheKey, id) {
waiting++;
self._mongoHandle._docFetcher.fetch(
self._cursorDescription.collectionName, id, cacheKey,
function (err, doc) {
if (err) {
if (!anyError)
anyError = err;
} else if (!self._stopped && self._phase === PHASE.FETCHING
&& self._fetchGeneration === thisGeneration) {
// We re-check the generation in case we've had an explicit
// _pollQuery call which should effectively cancel this round of
// fetches. (_pollQuery increments the generation.)
self._handleDoc(id, doc);
}
waiting--;
// Because fetch() never calls its callback synchronously, this is
// safe (ie, we won't call fut.return() before the forEach is
// done).
if (waiting === 0)
fut.return();
});
});
fut.wait();
// XXX do this even if we've switched to PHASE.QUERYING?
if (anyError)
throw anyError;
// Exit now if we've had a _pollQuery call.
if (self._phase === PHASE.QUERYING)
return;
self._currentlyFetching = null;
}
self._beSteady();
});
},
_beSteady: function () {
var self = this;
self._phase = PHASE.STEADY;
self._registerPhaseChange(PHASE.STEADY);
var writes = self._writesToCommitWhenWeReachSteady;
self._writesToCommitWhenWeReachSteady = [];
self._multiplexer.onFlush(function () {
@@ -222,16 +241,16 @@ _.extend(OplogObserveDriver.prototype, {
// If we're already fetching this one, or about to, we can't optimize; make
// sure that we fetch it again if necessary.
if (self._phase === PHASE.FETCHING &&
(self._currentlyFetching.has(id) || self._needToFetch.has(id))) {
((self._currentlyFetching && self._currentlyFetching.has(id)) ||
self._needToFetch.has(id))) {
self._needToFetch.set(id, op.ts.toString());
return;
}
if (op.op === 'd') {
if (self._published.has(id))
self._remove(id);
self._remove(id);
} else if (op.op === 'i') {
if (self._published.has(id))
if (self._collection.find(id).count())
throw new Error("insert found for already-existing ID");
// XXX what if selector yields? for now it can't but later it could have
@@ -253,18 +272,24 @@ _.extend(OplogObserveDriver.prototype, {
if (isReplace) {
self._handleDoc(id, _.extend({_id: id}, op.o));
} else if (self._published.has(id) && canDirectlyModifyDoc) {
// Oh great, we actually know what the document is, so we can apply
// this directly.
var newDoc = EJSON.clone(self._published.get(id));
newDoc._id = id;
LocalCollection._modify(newDoc, op.o);
self._handleDoc(id, self._sharedProjectionFn(newDoc));
} else if (!canDirectlyModifyDoc ||
self._matcher.canBecomeTrueByModifier(op.o)) {
self._needToFetch.set(id, op.ts.toString());
if (self._phase === PHASE.STEADY)
self._fetchModifiedDocuments();
} else {
var newDoc = self._collection.findOne(id);
if (newDoc && canDirectlyModifyDoc) {
// Oh great, we actually know what the document is, so we can apply
// this directly.
// XXX just send the modifier to _collection.update? but then
// we don't necessarily get to GC
// We can avoid another deep clone here since the findOne above would
// return a copy anyways
LocalCollection._modify(newDoc, op.o);
self._handleDoc(id, newDoc);
} else if (!canDirectlyModifyDoc ||
self._matcher.canBecomeTrueByModifier(op.o)) {
self._needToFetch.set(id, op.ts.toString());
if (self._phase === PHASE.STEADY)
self._fetchModifiedDocuments();
}
}
} else {
throw Error("XXX SURPRISING OPERATION: " + op);
@@ -312,19 +337,20 @@ _.extend(OplogObserveDriver.prototype, {
self._needToFetch = new LocalCollection._IdMap;
self._currentlyFetching = null;
++self._fetchGeneration; // ignore any in-flight fetches
self._phase = PHASE.QUERYING;
self._registerPhaseChange(PHASE.QUERYING);
self._collection.pauseObservers();
// XXX this won't be quite correct for skip/limit
self._collection.remove({});
// Defer so that we don't block.
Meteor.defer(function () {
// subtle note: _published does not contain _id fields, but newResults
// does
var newResults = new LocalCollection._IdMap;
var cursor = self._cursorForQuery();
cursor.forEach(function (doc) {
newResults.set(doc._id, doc);
// Insert all the documents currently found by the query.
self._cursorForQuery().forEach(function (doc) {
self._collection.insert(doc);
});
self._publishNewResults(newResults);
// Allow observe callbacks (ie multiplexer invocations) to fire.
self._collection.resumeObservers();
self._doneQuerying();
});
@@ -394,34 +420,6 @@ _.extend(OplogObserveDriver.prototype, {
},
// Replace self._published with newResults (both are IdMaps), invoking observe
// callbacks on the multiplexer.
//
// XXX This is very similar to LocalCollection._diffQueryUnorderedChanges. We
// should really: (a) Unify IdMap and OrderedDict into Unordered/OrderedDict (b)
// Rewrite diff.js to use these classes instead of arrays and objects.
_publishNewResults: function (newResults) {
var self = this;
// First remove anything that's gone. Be careful not to modify
// self._published while iterating over it.
var idsToRemove = [];
self._published.forEach(function (doc, id) {
if (!newResults.has(id))
idsToRemove.push(id);
});
_.each(idsToRemove, function (id) {
self._remove(id);
});
// Now do adds and changes.
newResults.forEach(function (doc, id) {
// "true" here means to throw if we think this doc doesn't match the
// selector.
self._handleDoc(id, doc, true);
});
},
// This stop function is invoked from the onStop of the ObserveMultiplexer, so
// it shouldn't actually be possible to call it until the multiplexer is
// ready.
@@ -445,7 +443,6 @@ _.extend(OplogObserveDriver.prototype, {
self._writesToCommitWhenWeReachSteady = null;
// Proactively drop references to potentially big things.
self._published = null;
self._needToFetch = null;
self._currentlyFetching = null;
self._oplogEntryHandle = null;
@@ -453,6 +450,23 @@ _.extend(OplogObserveDriver.prototype, {
Package.facts && Package.facts.Facts.incrementServerFact(
"mongo-livedata", "observe-drivers-oplog", -1);
},
_registerPhaseChange: function (phase) {
var self = this;
var now = new Date;
if (phase === self._phase)
return;
if (self._phase) {
var timeDiff = now - self._phaseStartTime;
Package.facts && Package.facts.Facts.incrementServerFact(
"mongo-livedata", "time-spent-in-" + self._phase + "-phase", timeDiff);
}
self._phase = phase;
self._phaseStartTime = now;
}
});

View File

@@ -74,13 +74,9 @@ _.extend(OplogHandle.prototype, {
self._readyFuture.wait();
var originalCallback = callback;
callback = Meteor.bindEnvironment(function (notification, onComplete) {
callback = Meteor.bindEnvironment(function (notification) {
// XXX can we avoid this clone by making oplog.js careful?
try {
originalCallback(EJSON.clone(notification));
} finally {
onComplete();
}
originalCallback(EJSON.clone(notification));
}, function (err) {
Meteor._debug("Error in oplog callback", err.stack);
});
@@ -208,9 +204,7 @@ _.extend(OplogHandle.prototype, {
trigger.id = idForOp(doc);
}
var f = new Future;
self._crossbar.fire(trigger, f.resolver());
f.wait();
self._crossbar.fire(trigger);
// Now that we've processed this operation, process pending sequencers.
if (!doc.ts)

View File

@@ -34,7 +34,7 @@ PollingObserveDriver = function (options) {
self._taskQueue = new Meteor._SynchronousQueue();
var listenersHandle = listenAll(
self._cursorDescription, function (notification, complete) {
self._cursorDescription, function (notification) {
// When someone does a transaction that might affect us, schedule a poll
// of the database. If that transaction happens inside of a write fence,
// block the fence until we've polled and notified observers.
@@ -46,7 +46,6 @@ PollingObserveDriver = function (options) {
// lead to us calling it unnecessarily in 50ms).
if (self._pollsScheduledButNotStarted === 0)
self._ensurePollIsScheduled();
complete();
}
);
self._stopCallbacks.push(function () { listenersHandle.stop(); });
@@ -129,8 +128,8 @@ _.extend(PollingObserveDriver.prototype, {
var first = false;
if (!self._results) {
first = true;
// XXX maybe use _IdMap/OrderedDict instead?
self._results = self._ordered ? [] : {};
// XXX maybe use OrderedDict instead?
self._results = self._ordered ? [] : new LocalCollection._IdMap;
}
self._testOnlyPollCallback && self._testOnlyPollCallback();

1
packages/underscore-tests/.gitignore vendored Normal file
View File

@@ -0,0 +1 @@
.build*

View File

@@ -0,0 +1,45 @@
// Regression tests for Meteor's patched _.each: objects that merely *look*
// like arrays (numeric `length`) or like `arguments` (a `callee` key) must
// still be iterated as plain objects, not as array-likes.
Tinytest.add("underscore - each", function (test) {
// arrays
_.each([42], function (val, index) {
test.equal(index, 0);
test.equal(val, 42);
});
// objects with 'length' field aren't treated as arrays
_.each({length: 42}, function (val, key) {
test.equal(key, 'length');
test.equal(val, 42);
});
// The special 'arguments' variable is treated as an
// array
(function () {
_.each(arguments, function (val, index) {
test.equal(index, 0);
test.equal(val, 42);
});
})(42);
// An object with a 'callee' field isn't treated as arguments
_.each({callee: 42}, function (val, key) {
test.equal(key, 'callee');
test.equal(val, 42);
});
// An object with both 'length' and 'callee' fields is still iterated as a
// plain object (both keys visited), not mistaken for an arguments object
_.each({length: 4, callee: 42}, function (val, key) {
if (key === 'callee')
test.equal(val, 42);
else if (key === 'length')
test.equal(val, 4);
else
test.fail({message: 'unexpected key: ' + key});
});
// NOTE: An object with a numeric 'length' field *and* a function
// 'callee' field will be treated as an array in IE. This may or may
// not be fixable, but isn't a big deal since: (1) 'callee' is a
// pretty rare key, and (2) JSON objects can't have functions
// anyways, which is the main use-case for _.each.
});

View File

@@ -0,0 +1,10 @@
// Meteor package manifest for the underscore test package. Declared as a
// separate package because Tinytest itself depends on underscore, so the
// tests can't live inside the underscore package without a dependency cycle.
Package.describe({
// These tests can't be directly in the underscore packages since
// Tinytest depends on underscore
summary: "Tests for the underscore package"
});
// Register the test entry point: pull in the test framework plus the package
// under test, then load the each() regression tests.
Package.on_test(function (api) {
api.use(['tinytest', 'underscore']);
api.add_files('each_test.js');
});

View File

@@ -70,6 +70,21 @@
// Collection Functions
// --------------------
// METEOR CHANGE: Define _isArguments instead of depending on
// _.isArguments which is defined using each. In looksLikeArray
// (which each depends on), we then use _isArguments instead of
// _.isArguments.
var _isArguments = function (obj) {
return toString.call(obj) === '[object Arguments]';
};
// Define a fallback version of the method in browsers (ahem, IE), where
// there isn't any inspectable "Arguments" type.
if (!_isArguments(arguments)) {
_isArguments = function (obj) {
return !!(obj && hasOwnProperty.call(obj, 'callee') && typeof obj.callee === 'function');
};
}
// METEOR CHANGE: _.each({length: 5}) should be treated like an object, not an
// array. This looksLikeArray function is introduced by Meteor, and replaces
// all instances of `obj.length === +obj.length`.
@@ -77,7 +92,8 @@
// https://github.com/jashkenas/underscore/issues/770
var looksLikeArray = function (obj) {
return (obj.length === +obj.length
&& (_.isArguments(obj) || obj.constructor !== Object));
// _.isArguments not yet necessarily defined here
&& (_isArguments(obj) || obj.constructor !== Object));
};
// The cornerstone, an `each` implementation, aka `forEach`.

View File

@@ -78,9 +78,7 @@ var checkReleaseDoesNotExistYet = function (release) {
// Writes out a JSON file, pretty-printed and read-only.
var writeJSONFile = function (path, jsonObject) {
fs.writeFileSync(path, JSON.stringify(jsonObject, null, 2));
// In 0.10 we can pass a mode to writeFileSync, but not yet...
fs.chmodSync(path, 0444);
fs.writeFileSync(path, JSON.stringify(jsonObject, null, 2), {mode: 0444});
};
var readJSONFile = function (path) {
return JSON.parse(fs.readFileSync(path));

View File

@@ -41,7 +41,8 @@ fi
FIRST_RUN=true # keep track to place commas correctly
cd packages
for PACKAGE in *
SORTED_PACKAGE_LIST=$(LC_ALL=C ls) # Ensure consistent order between platforms
for PACKAGE in $SORTED_PACKAGE_LIST
do
if [ -a "$PACKAGE/package.js" ]; then
if [ $FIRST_RUN == false ]; then

View File

@@ -76,7 +76,7 @@ cd node
# When upgrading node versions, also update the values of MIN_NODE_VERSION at
# the top of tools/meteor.js and tools/server/boot.js, and the text in
# docs/client/concepts.html and the README in tools/bundler.js.
git checkout v0.10.22
git checkout v0.10.25
./configure --prefix="$DIR"
make -j4
@@ -98,23 +98,18 @@ which npm
cd "$DIR/lib/node_modules"
npm install optimist@0.6.0
npm install semver@2.1.0
npm install request@2.27.0
npm install semver@2.2.1
npm install request@2.33.0
npm install keypress@0.2.1
npm install underscore@1.5.2
npm install fstream@0.1.24
npm install tar@0.1.18
npm install kexec@0.1.1
npm install shell-quote@0.0.1 # now at 1.3.3, which adds plenty of options to parse but doesn't change quote
npm install eachline@2.3.3
npm install source-map@0.1.30
npm install source-map-support@0.2.3
npm install fstream@0.1.25
npm install tar@0.1.19
npm install kexec@0.2.0
npm install eachline@2.4.0
npm install source-map@0.1.31
npm install source-map-support@0.2.5
npm install bcrypt@0.7.7
# Using the unreleased "caronte" branch rewrite of http-proxy (which will become
# 1.0.0), plus this PR:
# https://github.com/nodejitsu/node-http-proxy/pull/495
npm install https://github.com/meteor/node-http-proxy/tarball/f17186f781c6f00b359d25df424ad74922cd1977
npm install http-proxy@1.0.2
# Using the unreleased 1.1 branch. We can probably switch to a built NPM version
# when it gets released.
@@ -141,7 +136,7 @@ cd ../..
# particular version of openssl on the host system.
cd "$DIR/build"
OPENSSL="openssl-1.0.1e"
OPENSSL="openssl-1.0.1f"
OPENSSL_URL="http://www.openssl.org/source/$OPENSSL.tar.gz"
wget $OPENSSL_URL || curl -O $OPENSSL_URL
tar xzf $OPENSSL.tar.gz
@@ -160,7 +155,7 @@ make install
# click 'changelog' under the current version, then 'release notes' in
# the upper right.
cd "$DIR/build"
MONGO_VERSION="2.4.8"
MONGO_VERSION="2.4.9"
# We use Meteor fork since we added some changes to the building script.
# Our patches allow us to link most of the libraries statically.

View File

@@ -1476,9 +1476,7 @@ var writeSiteArchive = function (targets, outputPath, options) {
builder.write('README', { data: new Buffer(
"This is a Meteor application bundle. It has only one dependency:\n" +
"Node.js 0.10 (with the 'fibers' package). The current release of Meteor\n" +
"has been tested with Node 0.10.22 and works best with 0.10.22 through\n" +
"0.10.24. To run the application:\n" +
"Node.js 0.10.25 or newer, plus the 'fibers' module. To run the application:\n" +
"\n" +
" $ rm -r programs/server/node_modules/fibers\n" +
" $ npm install fibers@1.0.1\n" +

View File

@@ -38,7 +38,7 @@ Fiber(function () {
var Future = require('fibers/future');
// This code is duplicated in tools/server/boot.js.
var MIN_NODE_VERSION = 'v0.10.22';
var MIN_NODE_VERSION = 'v0.10.25';
if (require('semver').lt(process.version, MIN_NODE_VERSION)) {
process.stderr.write(
'Meteor requires Node ' + MIN_NODE_VERSION + ' or later.\n');
@@ -1416,10 +1416,8 @@ Fiber(function () {
if (extraArgs)
newArgv.push.apply(newArgv, extraArgs);
// Now shell quote this (because kexec wants to use /bin/sh -c) and execvp.
// XXX fork kexec and make it take an array instead of using shell
var quotedArgv = require('shell-quote').quote(newArgv);
require('kexec')(quotedArgv);
// Now exec; we're not coming back.
require('kexec')(newArgv[0], newArgv);
};
// Implements --version. Note that we only print to stdout and exit 0 if

View File

@@ -951,7 +951,7 @@ _.extend(Package.prototype, {
return;
var Plugin = {
// 'extension' is a file extension without the separation dot
// 'extension' is a file extension without the separation dot
// (eg 'js', 'coffee', 'coffee.md')
//
// 'handler' is a function that takes a single argument, a
@@ -1366,6 +1366,9 @@ _.extend(Package.prototype, {
// used. Can also take literal package objects, if you have
// anonymous packages you want to use (eg, app packages)
//
// @param where 'client', 'server', or an array of those.
// The default is ['client', 'server'].
//
// options can include:
//
// - role: defaults to "use", but you could pass something
@@ -1471,7 +1474,8 @@ _.extend(Package.prototype, {
// Export symbols from this package.
//
// @param symbols String (eg "Foo") or array of String
// @param where 'client', 'server', or an array of those
// @param where 'client', 'server', or an array of those.
// The default is ['client', 'server'].
// @param options 'testOnly', boolean.
export: function (symbols, where, options) {
if (role === "test") {

View File

@@ -110,14 +110,15 @@ var startProxy = function (outerPort, innerPort, callback) {
callback = callback || function () {};
var http = require('http');
// Note: this uses the pre-release 1.0.0 API.
var net = require('net');
var httpProxy = require('http-proxy');
var proxy = httpProxy.createProxyServer({
// agent is required to handle keep-alive, and http-proxy 1.0 is a little
// buggy without it: https://github.com/nodejitsu/node-http-proxy/pull/488
agent: new http.Agent({maxSockets: 100}),
xfwd: true
xfwd: true,
target: 'http://127.0.0.1:' + innerPort
});
var server = http.createServer(function (req, res) {
@@ -140,7 +141,7 @@ var startProxy = function (outerPort, innerPort, callback) {
return;
}
var proxyIt = function () {
proxy.web(req, res, {target: 'http://127.0.0.1:' + innerPort});
proxy.web(req, res);
};
if (Status.listening) {
// server is listening. things are hunky dory!
@@ -154,7 +155,7 @@ var startProxy = function (outerPort, innerPort, callback) {
// requests
server.on('upgrade', function(req, socket, head) {
var proxyIt = function () {
proxy.ws(req, socket, head, { target: 'http://127.0.0.1:' + innerPort});
proxy.ws(req, socket, head);
};
if (Status.listening) {
// server is listening. things are hunky dory!
@@ -183,14 +184,20 @@ var startProxy = function (outerPort, innerPort, callback) {
// don't crash if the app doesn't respond. instead return an error
// immediately. This shouldn't happen much since we try to not send requests
// if the app is down.
proxy.ee.on('http-proxy:outgoing:web:error', function (err, req, res) {
res.writeHead(503, {
'Content-Type': 'text/plain'
});
res.end('Unexpected error.');
});
proxy.ee.on('http-proxy:outgoing:ws:error', function (err, req, socket) {
socket.end();
//
// Currently, this error is emitted if the proxy->server connection has an
// error (whether in HTTP or websocket proxying). It is not emitted if the
// client->proxy connection has an error, though this may change; see
// discussion at https://github.com/nodejitsu/node-http-proxy/pull/488
proxy.on('error', function (err, req, resOrSocket) {
if (resOrSocket instanceof http.ServerResponse) {
resOrSocket.writeHead(503, {
'Content-Type': 'text/plain'
});
resOrSocket.end('Unexpected error.');
} else if (resOrSocket instanceof net.Socket) {
resOrSocket.end();
}
});
server.listen(outerPort, callback);

View File

@@ -6,7 +6,7 @@ var _ = require('underscore');
var sourcemap_support = require('source-map-support');
// This code is duplicated in tools/meteor.js.
var MIN_NODE_VERSION = 'v0.10.22';
var MIN_NODE_VERSION = 'v0.10.25';
if (require('semver').lt(process.version, MIN_NODE_VERSION)) {
process.stderr.write(

View File

@@ -4,7 +4,7 @@ if (Meteor.isClient) {
};
Template.hello.events({
'click input' : function () {
'click input': function () {
// template data, if any, is available in 'this'
if (typeof console !== 'undefined')
console.log("You pressed the button");