// Mirror of https://github.com/meteor/meteor.git
// synced 2026-05-02 03:01:46 -04:00
// (750 lines, 25 KiB, JavaScript)
var fs = require('fs');
|
|
var path = require('path');
|
|
var Future = require('fibers/future');
|
|
var _ = require('underscore');
|
|
var auth = require('./auth.js');
|
|
var config = require('./config.js');
|
|
var httpHelpers = require('./http-helpers.js');
|
|
var release = require('./release.js');
|
|
var files = require('./files.js');
|
|
var ServiceConnection = require('./service-connection.js');
|
|
var utils = require('./utils.js');
|
|
var buildmessage = require('./buildmessage.js');
|
|
var compiler = require('./compiler.js');
|
|
var uniload = require('./uniload.js');
|
|
|
|
// Use uniload to load the packages that we need to open a connection to the
// current package server and use minimongo in memory:
//
// - meteor: base package and prerequisite for all others.
// - ddp: DDP client interface to make a connection to the package server.
var getLoadedPackages = function () {
  var requiredPackages = ['meteor', 'ddp'];
  return uniload.load({ packages: requiredPackages });
};
|
|
|
|
// Opens a DDP connection to a package server. Loads the packages needed for a
// DDP connection, then calls DDP connect to `packageServerUrl` (or the
// package server URL from config), using a current user-agent header composed
// by http-helpers.js.
var openPackageServerConnection = function (packageServerUrl) {
  var serverUrl = packageServerUrl || config.getPackageServerUrl();
  var connectionOptions = {
    headers: {"User-Agent": httpHelpers.getUserAgent()},
    _dontPrintErrors: true
  };
  return new ServiceConnection(
    getLoadedPackages(), serverUrl, connectionOptions);
};
|
|
|
|
// Returns the "never synced" sentinel state: an empty sync token (asks the
// server for everything from the beginning of time) and no cached
// collections.
var emptyCachedServerDataJson = function () {
  var initialState = {
    syncToken: {},
    collections: null
  };
  return initialState;
};
|
|
|
|
// Load the package data that was saved in the local data.json
|
|
// collection from the last time we did a sync to the server. Takes an
|
|
// optional `packageStorageFile` argument (defaults to
|
|
// `config.getPackageStorage()`). This return object consists of
|
|
//
|
|
// - collections: an object keyed by the name of server collections, with the
|
|
// records as an array of javascript objects.
|
|
// - syncToken: a syncToken object representing the last time that we talked to
|
|
// the server, to pass into the getRemotePackageData to get the latest
|
|
// updates.
|
|
// If there is no data.json file, or the file cannot be parsed, return null for
|
|
// the collections and a default syncToken to ask the server for all the data
|
|
// from the beginning of time.
|
|
exports.loadCachedServerData = function (packageStorageFile) {
|
|
var noDataToken = emptyCachedServerDataJson();
|
|
|
|
packageStorageFile = packageStorageFile || config.getPackageStorage();
|
|
|
|
try {
|
|
var data = fs.readFileSync(packageStorageFile, 'utf8');
|
|
} catch (e) {
|
|
if (e.code == 'ENOENT') {
|
|
return noDataToken;
|
|
}
|
|
// XXX we should probably return an error to the caller here to
|
|
// figure out how to handle it
|
|
process.stderr.write("ERROR " + e.message + "\n");
|
|
process.exit(1);
|
|
}
|
|
var ret = noDataToken;
|
|
try {
|
|
ret = JSON.parse(data);
|
|
} catch (err) {
|
|
// XXX error handling
|
|
process.stderr.write(
|
|
"ERROR: Could not parse JSON for local package-metadata cache. \n");
|
|
// This should only happen if you decided to manually edit this or
|
|
// whatever. Regardless, go on and treat this as an empty file.
|
|
}
|
|
return ret;
|
|
};
|
|
|
|
// Requests and returns one page of new package data that we haven't cached on
// disk. We assume that data is cached chronologically, so essentially, we are
// asking for a diff from the last time that we did this.
// Takes in:
// - conn: the connection to use (does not have to be logged in)
// - syncToken: a syncToken object to be sent to the server that
//   represents the last time that we talked to the server.
// - _optionsForTest:
//    - useShortPages (Boolean). Ask the server for pages of ~3 records
//      instead of ~100, for testing pagination.
//
// Returns an object, containing the following fields:
// - syncToken: a new syncToken object, that we can pass to the server in the
//   future.
// - collections: an object keyed by the name of server collections, with the
//   records as an array of javascript objects.
var loadRemotePackageData = function (conn, syncToken, _optionsForTest) {
  _optionsForTest = _optionsForTest || {};

  // Only send the extra options argument when a test actually asked for
  // short pages, so that normal calls stay two-argument. (This replaces the
  // previous duplicated if/else around conn.call and the redundant
  // `_optionsForTest &&` re-check after the defaulting above.)
  var args = ['syncNewPackageData', syncToken];
  if (_optionsForTest.useShortPages) {
    args.push({ shortPagesForTest: _optionsForTest.useShortPages });
  }
  return conn.call.apply(conn, args);
};
|
|
|
|
// Take in an ordered list of javascript objects representing collections of
// package data. In each object, the server-side names of collections are keys
// and the values are the mongo records for that collection stored as an
// array. Goes through the list in order and merges it into a single
// object, with collection names as keys and the arrays of records as
// corresponding values. The inputs list is ordered and records in the later
// collections will override the records (matched by _id) in the earlier
// collections.
var mergeCollections = function (sources) {
  var collections = {}; // map from collection to _id to object

  // Native Object/Array iteration is sufficient here; no need to go through
  // underscore for these simple loops.
  sources.forEach(function (source) {
    Object.keys(source).forEach(function (collectionName) {
      if (! Object.prototype.hasOwnProperty.call(collections, collectionName))
        collections[collectionName] = {};

      // Later sources overwrite earlier records with the same _id.
      source[collectionName].forEach(function (record) {
        collections[collectionName][record._id] = record;
      });
    });
  });

  // Flatten each per-_id map back into an array of records.
  var ret = {};
  Object.keys(collections).forEach(function (collectionName) {
    var byId = collections[collectionName];
    ret[collectionName] = Object.keys(byId).map(function (id) {
      return byId[id];
    });
  });

  return ret;
};
|
|
|
|
// Writes the cached package data to the on-disk cache.
//
// Returns nothing, but
// XXXX: Does what on errors?
//
// options include:
// - packageStorageFile: String. A file to write the data to instead of
//   `config.getPackageStorage()`.
//
// Note: the `syncToken` parameter is not read here; callers are expected to
// have already embedded the token inside `data`.
var writePackageDataToDisk = function (syncToken, data, options) {
  var overridePath = options && options.packageStorageFile;
  var storagePath = overridePath ? overridePath : config.getPackageStorage();
  // XXX think about permissions?
  files.mkdir_p(path.dirname(storagePath));
  var serialized = JSON.stringify(data, null, 2);
  files.writeFileAtomically(storagePath, serialized);
};
|
|
|
|
// Contacts the package server to get the latest diff and writes changes to
|
|
// disk.
|
|
//
|
|
// Takes in cachedServerData, which is the processed contents of data.json. Uses
|
|
// those to talk to the server and get the latest updates. Applies the diff from
|
|
// the server to the in-memory version of the on-disk data, then writes the new
|
|
// file to disk as the new data.json.
|
|
//
|
|
// Returns null if contacting the server times out. Otherwise, returns
|
|
// all the data.
|
|
//
|
|
// options can include:
|
|
// - packageStorageFile: String. The file to write the data to (overrides
|
|
// `config.getPackageStorage()`)
|
|
// - packageServerUrl: String. The package server (overrides
|
|
// `config.getPackageServerUrl()`)
|
|
// - useShortPages: Boolean. Request short pages of ~3 records from the
|
|
// server, instead of ~100 that it would send otherwise
|
|
exports.updateServerPackageData = function (cachedServerData, options) {
|
|
var self = this;
|
|
options = options || {};
|
|
cachedServerData = cachedServerData || emptyCachedServerDataJson();
|
|
|
|
var done = false;
|
|
var ret = {resetData: false};
|
|
|
|
try {
|
|
var conn = openPackageServerConnection(options.packageServerUrl);
|
|
} catch (err) {
|
|
self.handlePackageServerConnectionError(err);
|
|
ret.data = null;
|
|
return ret;
|
|
}
|
|
|
|
var getSomeData = function () {
|
|
var syncToken = cachedServerData.syncToken;
|
|
var remoteData;
|
|
try {
|
|
remoteData = loadRemotePackageData(conn, syncToken, {
|
|
useShortPages: options.useShortPages
|
|
});
|
|
} catch (err) {
|
|
exports.handlePackageServerConnectionError(err);
|
|
if (err.errorType === "DDP.ConnectionError") {
|
|
cachedServerData = null;
|
|
done = true;
|
|
return;
|
|
} else {
|
|
throw err;
|
|
}
|
|
}
|
|
|
|
// Is the remote server telling us to ignore everything we've heard before?
|
|
// OK, we can do that.
|
|
if (remoteData.resetData) {
|
|
cachedServerData.collections = null;
|
|
// The caller may want to take this as a cue to delete packages from the
|
|
// tropohouse.
|
|
ret.resetData = true;
|
|
}
|
|
|
|
// If there is no new data from the server, don't bother writing things to
|
|
// disk (unless we were just told to reset everything).
|
|
if (!remoteData.resetData && _.isEqual(remoteData.collections, {})) {
|
|
done = true;
|
|
return;
|
|
}
|
|
|
|
var sources = [];
|
|
if (cachedServerData.collections) {
|
|
sources.push(cachedServerData.collections);
|
|
}
|
|
sources.push(remoteData.collections);
|
|
var allCollections = mergeCollections(sources);
|
|
var data = {
|
|
syncToken: remoteData.syncToken,
|
|
formatVersion: "1.0",
|
|
collections: allCollections
|
|
};
|
|
writePackageDataToDisk(remoteData.syncToken, data, {
|
|
packageStorageFile: options.packageStorageFile
|
|
});
|
|
|
|
cachedServerData = data;
|
|
if (remoteData.upToDate)
|
|
done = true;
|
|
};
|
|
|
|
try {
|
|
while (!done) {
|
|
getSomeData();
|
|
}
|
|
} finally {
|
|
conn.close();
|
|
}
|
|
|
|
ret.data = cachedServerData;
|
|
return ret;
|
|
};
|
|
|
|
// Sentinel error type: thrown when a user-facing message has already been
// printed, so handlePackageServerConnectionError knows to stay silent.
var AlreadyPrintedMessageError = function () {};
|
|
|
// Returns a logged-in DDP connection to the package server, or null if
|
|
// we cannot log in. If an error unrelated to login occurs
|
|
// (e.g. connection to package server times out), then it will be
|
|
// thrown.
|
|
exports.loggedInPackagesConnection = function () {
|
|
// Make sure that we are logged in with Meteor Accounts so that we can
|
|
// do an OAuth flow.
|
|
|
|
if (auth.maybePrintRegistrationLink({onlyAllowIfRegistered: true})) {
|
|
// Oops, we're logged in but with a deferred-registration account.
|
|
// Message has already been printed.
|
|
throw new AlreadyPrintedMessageError;
|
|
}
|
|
|
|
if (! auth.isLoggedIn()) {
|
|
// XXX we should have a better account signup page.
|
|
process.stderr.write(
|
|
"Please log in with your Meteor developer account. If you don't have one,\n" +
|
|
"you can quickly create one at www.meteor.com.\n");
|
|
auth.doUsernamePasswordLogin({ retry: true });
|
|
}
|
|
|
|
var conn = openPackageServerConnection();
|
|
|
|
var accountsConfiguration = auth.getAccountsConfiguration(conn);
|
|
|
|
try {
|
|
auth.loginWithTokenOrOAuth(
|
|
conn,
|
|
accountsConfiguration,
|
|
config.getPackageServerUrl(),
|
|
config.getPackageServerDomain(),
|
|
"package-server"
|
|
);
|
|
} catch (err) {
|
|
if (err.message === "access-denied") {
|
|
// Maybe we thought we were logged in, but our token had been
|
|
// revoked.
|
|
process.stderr.write(
|
|
"It looks like you have been logged out! Please log in with your Meteor\n" +
|
|
"developer account. If you don't have one, you can quickly create one\n" +
|
|
"at www.meteor.com.\n");
|
|
auth.doUsernamePasswordLogin({ retry: true });
|
|
auth.loginWithTokenOrOAuth(
|
|
conn,
|
|
accountsConfiguration,
|
|
config.getPackageServerUrl(),
|
|
config.getPackageServerDomain(),
|
|
"package-server"
|
|
);
|
|
} else {
|
|
throw err;
|
|
}
|
|
}
|
|
return conn;
|
|
};
|
|
|
|
// XXX this is missing a few things:
//    - locking down build-time dependencies: tools version, versions
//      of all (not-built-from-source) plugins used
// in general, we need to include all the stuff that goes into the watchSet
// We include npm-shrinkwrap which does not go in the watchSet but
// probably should.
//
// In retrospect a better approach here might be to actually make "save source
// somewhere else" or perhaps "add source to tarball" be part of the package
// build itself...
var bundleSource = function (unipackage, includeSources, packageDir) {
  var name = unipackage.name;

  // Lay out <tempDir>/source/<name>-<version>-source/<name>/ so the tarball
  // ends up with the directory structure we want.
  var tempDir = files.mkdtemp('build-source-package-');
  var packageTarName = name + '-' + unipackage.version + '-source';
  var dirToTar = path.join(tempDir, 'source', packageTarName);
  var sourcePackageDir = path.join(dirToTar, name);
  if (! files.mkdir_p(sourcePackageDir)) {
    process.stderr.write('Failed to create temporary source directory: ' +
                         sourcePackageDir);
    return null;
  }

  // Every package ships its package.js. Also include npm-shrinkwrap files
  // (for the package itself and for each plugin) when they exist on disk.
  // Note: this deliberately appends to the caller's includeSources array.
  includeSources.push('package.js');
  var packageShrinkwrap = '.npm/package/npm-shrinkwrap.json';
  if (fs.existsSync(path.join(packageDir, packageShrinkwrap))) {
    includeSources.push(packageShrinkwrap);
  }
  Object.keys(unipackage.plugins || {}).forEach(function (pluginName) {
    var pluginShrinkwrap = path.join('.npm/plugin/', pluginName,
                                     'npm-shrinkwrap.json');
    if (fs.existsSync(path.join(packageDir, pluginShrinkwrap))) {
      includeSources.push(pluginShrinkwrap);
    }
  });

  // We copy source files into a temp directory and then tar up the temp
  // directory. It would be great if we could avoid the copy, but as far
  // as we can tell, this is the only way to get a tarball with the
  // directory structure that we want (<package name>-<version>-source/
  // at the top level).
  includeSources.forEach(function (relPath) {
    files.copyFile(path.join(packageDir, relPath),
                   path.join(sourcePackageDir, relPath));
  });

  // We put this inside the temp dir because mkdtemp makes sure that the
  // temp dir gets cleaned up on process exit, so we don't have to worry
  // about cleaning up our tarball (or our copied source files)
  // ourselves.
  var sourceTarball = path.join(tempDir, packageTarName + '.tgz');
  files.createTarball(dirToTar, sourceTarball);

  var tarballHash = files.fileHash(sourceTarball);
  var treeHash = files.treeHash(dirToTar);

  return {
    sourceTarball: sourceTarball,
    tarballHash: tarballHash,
    treeHash: treeHash
  };
};
|
|
|
|
// PUT `tarball` (a file on disk) to `putUrl`, streaming it rather than
// buffering the whole file in memory. httpHelpers.getUrl throws on HTTP
// 4xx/5xx responses.
var uploadTarball = function (putUrl, tarball) {
  var tarballSize = fs.statSync(tarball).size;
  var tarballStream = fs.createReadStream(tarball);
  try {
    // Use getUrl instead of request, to throw on 4xx/5xx.
    httpHelpers.getUrl({
      method: 'PUT',
      url: putUrl,
      headers: {
        'content-length': tarballSize,
        'content-type': 'application/octet-stream',
        'x-amz-acl': 'public-read'
      },
      bodyStream: tarballStream
    });
  } finally {
    // Always release the file descriptor, even if the upload failed.
    tarballStream.close();
  }
};

exports.uploadTarball = uploadTarball;
|
|
|
|
// Tars up a built unipackage (without its buildinfo.json) and returns the
// tarball path plus its file hash and the tree hash of the tarred directory.
// Must be called inside a buildmessage job.
var bundleBuild = function (unipackage) {
  buildmessage.assertInJob();

  var stagingDir = files.mkdtemp('build-package-');
  var packageTarName = unipackage.tarballName();
  var tarInputDir = path.join(stagingDir, packageTarName);

  unipackage.saveToPath(tarInputDir, {
    // Don't upload buildinfo.json. It's only of interest locally (for example,
    // it contains a watchset with local paths). (This also means we don't
    // need to specify a catalog, yay.)
    elideBuildInfo: true
  });

  var buildTarball = path.join(stagingDir, packageTarName + '.tgz');
  files.createTarball(tarInputDir, buildTarball);

  var tarballHash = files.fileHash(buildTarball);
  var treeHash = files.treeHash(tarInputDir, {
    // We don't include any package.json from an npm module in the tree hash,
    // because npm isn't super consistent about what it puts in there (eg, does
    // it include the "readme" field)? This ends up leading to spurious
    // differences. The tree hash will still notice any actual CODE changes in
    // the npm packages.
    ignore: function (relativePath) {
      var pieces = relativePath.split(path.sep);
      if (! pieces.length)
        return false;
      return _.last(pieces) === 'package.json' && _.contains(pieces, 'npm');
    }
  });

  return {
    buildTarball: buildTarball,
    tarballHash: tarballHash,
    treeHash: treeHash
  };
};

exports.bundleBuild = bundleBuild;
|
|
|
|
// Bundles the built unipackage, registers the build with the package server,
// uploads the build tarball, and publishes the build record, reporting
// progress on stdout. Returns early (without publishing) if bundling
// produced build messages. Must be called inside a buildmessage job.
var createAndPublishBuiltPackage = function (conn, unipackage) {
  buildmessage.assertInJob();

  // Note: we really want to do this before createPackageBuild, because the URL
  // we get from createPackageBuild will expire!
  process.stdout.write('Bundling build...\n');
  var bundleResult = bundleBuild(unipackage);
  if (buildmessage.jobHasMessages())
    return;

  process.stdout.write('Creating package build...\n');
  var buildUpload = conn.call('createPackageBuild', {
    packageName: unipackage.name,
    version: unipackage.version,
    buildArchitectures: unipackage.buildArchitectures()
  });

  process.stdout.write('Uploading build...\n');
  uploadTarball(buildUpload.uploadUrl, bundleResult.buildTarball);

  process.stdout.write('Publishing package build...\n');
  conn.call('publishPackageBuild',
            buildUpload.uploadToken,
            bundleResult.tarballHash,
            bundleResult.treeHash);

  process.stdout.write('Published ' + unipackage.name +
                       ', version ' + unipackage.version);

  process.stdout.write('\nDone!\n');
};

exports.createAndPublishBuiltPackage = createAndPublishBuiltPackage;
|
|
|
|
exports.handlePackageServerConnectionError = function (error) {
|
|
if (error instanceof AlreadyPrintedMessageError) {
|
|
// do nothing
|
|
} else if (error.errorType === 'Meteor.Error') {
|
|
process.stderr.write("Error from package server");
|
|
if (error.message) {
|
|
process.stderr.write(": " + error.message);
|
|
}
|
|
process.stderr.write("\n");
|
|
} else if (error.errorType === "DDP.ConnectionError") {
|
|
process.stderr.write("Error connecting to package server: "
|
|
+ error.message + "\n");
|
|
} else {
|
|
throw error;
|
|
}
|
|
};
|
|
|
|
// Publish the package information into the server catalog. Create new records
// for the package (if needed), the version and the build; upload source and
// unipackage.
//
// packageSource: the packageSource for this package.
// compileResult: the compiled unipackage and various source files.
// conn: the open, logged-in connection over which we should talk to the package
//       server. DO NOT CLOSE this connection here.
// options:
//      new: this package is new, we should call createPackage to create a new
//           package record.
//      existingVersion: we expect the version to exist already, and for us
//           to merely be providing a new build of the same source
//
// Returns 0 on success and a nonzero error code otherwise (1 for validation
// failures, 3 for server-call failures); may also exit the process for
// unconstrained dependencies, or throw for unexpected errors.
exports.publishPackage = function (packageSource, compileResult, conn, options) {
  buildmessage.assertInJob();

  options = options || {};

  // `new` and `existingVersion` are mutually exclusive: a brand-new package
  // cannot already have a published version.
  if (options.new && options.existingVersion)
    throw Error("is it new or does it exist?!?");

  var name = packageSource.name;
  var version = packageSource.version;

  // Check that the package name is valid.
  try {
    utils.validatePackageName(name);
  } catch (e) {
    // Only swallow name-validation errors; anything else is unexpected.
    if (!e.versionParserError)
      throw e;
    process.stderr.write(e.error + "\n");
    return 1;
  }

  // Check that we have a version.
  if (! version) {
    process.stderr.write(
      "That package cannot be published because it doesn't have a version.\n");
    return 1;
  }

  // Check that the version description is under the character limit. (We check
  // all string limits on the server, but this is the one that is most likely
  // to be wrong)
  if (!packageSource.metadata.summary) {
    process.stderr.write("Please describe what your package does. \n");
    process.stderr.write("Set a summary in Package.describe in package.js. \n");
    return 1;
  }

  if (packageSource.metadata.summary &&
      packageSource.metadata.summary.length > 100) {
    process.stderr.write("Description must be under 100 chars. \n");
    process.stderr.write("Publish failed. \n");
    return 1;
  }

  // NOTE(review): required here rather than at the top of the file —
  // presumably to avoid a circular module dependency; confirm before moving.
  var catalog = require('./catalog.js');

  // Check that we are an authorized maintainer of this package.
  if (!options['new']) {
    var packRecord = catalog.official.getPackage(name);
    if (!packRecord) {
      process.stderr.write('There is no package named ' + name +
                           '. If you are creating a new package, use the --create flag. \n');
      process.stderr.write("Publish failed. \n");
      return 1;
    }

    if (!exports.amIAuthorized(name, conn, false)) {
      process.stderr.write('You are not an authorized maintainer of ' + name + ".\n");
      process.stderr.write('Only authorized maintainers may publish new versions. \n');
      return 1;
    }
  }

  // Check that the package does not have any unconstrained references.
  var packageDeps = packageSource.getDependencyMetadata();
  var badConstraints = [];
  _.each(packageDeps, function(refs, label) {
    // HACK: we automatically include the meteor package and there is no way for
    // anyone to set its dependency data correctly, so I guess we shouldn't
    // penalize the user for not doing that. It will be resolved at runtime
    // anyway.
    if (label !== "meteor" &&
        refs.constraint == null) {
      badConstraints.push(label);
    }
  });

  // If we are not a core package and some of our constraints are unspecified,
  // then we should force the user to specify them. This is because we are not
  // sure about pre-0.90 package versions yet.
  if (!packageSource.isCore && !_.isEqual(badConstraints, [])) {
    process.stderr.write(
      "You must specify a version constraint for the following packages:");
    _.each(badConstraints, function(bad) {
      process.stderr.write(" " + bad);
    });
    process.stderr.write(". \n" );
    // Hard exit rather than an error code: unconstrained deps are fatal.
    process.exit(1);
  }

  // We need to build the test package to get all of its sources.
  var testFiles = [];
  var messages = buildmessage.capture(
    { title: "getting test sources" },
    function () {
      var testName = packageSource.testName;
      if (testName) {
        var PackageSource = require('./package-source.js');
        var compiler = require('./compiler.js');

        var testSource = new PackageSource(catalog.complete);
        // We need to pass in the name of the test package in order to
        // initialize it. Otherwise, the default behaviour will be to
        // initialize the base package.
        testSource.initFromPackageDir(packageSource.sourceRoot, {
          name: testName
        });
        if (buildmessage.jobHasMessages())
          return; // already have errors, so skip the build

        var testUnipackage = compiler.compile(testSource, { officialBuild: true });
        testFiles = testUnipackage.sources;
      }
    });

  if (messages.hasMessages()) {
    process.stderr.write(messages.formatMessages());
    return 1;
  }

  process.stdout.write('Bundling source...\n');

  // The source bundle includes the package's own sources plus its test's.
  var sources = _.union(compileResult.sources, testFiles);

  // Send the versions lock file over to the server! We should make sure to use
  // the same version lock file when we build this source elsewhere (ex:
  // publish-for-arch).
  // But see also #PackageVersionFilesHack
  var versionsFile = packageSource.versionsFilePath();
  if (versionsFile && fs.existsSync(versionsFile)) {
    sources.push("versions.json");
  }
  var sourceBundleResult = bundleSource(
    compileResult.unipackage, sources, packageSource.sourceRoot);

  // Create the package. Check that the metadata exists.
  if (options.new) {
    process.stdout.write('Creating package...\n');
    try {
      var packageId = conn.call('createPackage', {
        name: packageSource.name
      });
    } catch (err) {
      process.stderr.write(err.message + "\n");
      return 3;
    }

  }

  if (options.existingVersion) {
    // publish-for-arch path: the version record must already exist, and our
    // source must match it exactly (by tree hash).
    var existingRecord = catalog.official.getVersion(name, version);
    if (!existingRecord) {
      process.stderr.write("Version does not exist.\n");
      return 1;
    }
    if (existingRecord.source.treeHash !== sourceBundleResult.treeHash) {
      process.stderr.write(
        "Package source differs from the existing version.\n");
      return 1;
    }

    // XXX check that we're actually providing something new?
  } else {
    process.stdout.write('Creating package version...\n');
    var uploadRec = {
      packageName: packageSource.name,
      version: version,
      description: packageSource.metadata.summary,
      git: packageSource.metadata.git,
      earliestCompatibleVersion: packageSource.earliestCompatibleVersion,
      compilerVersion: compiler.BUILT_BY,
      containsPlugins: packageSource.containsPlugins(),
      dependencies: packageDeps
    };
    try {
      var uploadInfo = conn.call('createPackageVersion', uploadRec);
    } catch (err) {
      process.stderr.write("ERROR " + err.message + "\n");
      return 3;
    }

    // XXX If package version already exists, print a nice error message
    // telling them to try 'meteor publish-for-arch' if they want to
    // publish a new build.

    process.stdout.write('Uploading source...\n');
    uploadTarball(uploadInfo.uploadUrl, sourceBundleResult.sourceTarball);

    process.stdout.write('Publishing package version...\n');
    try {
      conn.call('publishPackageVersion',
                uploadInfo.uploadToken,
                { tarballHash: sourceBundleResult.tarballHash,
                  treeHash: sourceBundleResult.treeHash });
    } catch (err) {
      process.stderr.write("ERROR " + err.message + "\n");
      return 3;
    }

  }

  // Finally, publish the built unipackage for this version.
  createAndPublishBuiltPackage(conn, compileResult.unipackage);

  return 0;
};
|
|
|
|
// Call the server to ask if we are authorized to update this release or
|
|
// package. This is a way to save time before sending data to the server. It
|
|
// will mostly ignore most errors (just in case we have a flaky network connection or
|
|
// something) and let the method deal with those.
|
|
//
|
|
// If this returns FALSE, then we are NOT authorized.
|
|
// Otherwise, return true.
|
|
exports.amIAuthorized = function (name, conn, isRelease) {
|
|
var methodName = "amIAuthorized" +
|
|
(isRelease ? "Release" : "Package");
|
|
|
|
try {
|
|
conn.call(methodName, name);
|
|
} catch (err) {
|
|
if (err.error === 401) {
|
|
return false;
|
|
}
|
|
|
|
// We don't know what this error is. Probably we can't contact the server,
|
|
// or the like. It would be a pity to fail all operations with the server
|
|
// just because a preliminary check fails, so return true for now.
|
|
return true;
|
|
}
|
|
return true;
|
|
};
|