diff --git a/atom.sh b/atom.sh
index 0655e343e..a8c30fa19 100755
--- a/atom.sh
+++ b/atom.sh
@@ -28,7 +28,7 @@ while getopts ":wtfvh-:" opt; do
REDIRECT_STDERR=1
EXPECT_OUTPUT=1
;;
- foreground|test)
+ foreground|benchmark|benchmark-test|test)
EXPECT_OUTPUT=1
;;
esac
diff --git a/benchmarks/benchmark-runner.js b/benchmarks/benchmark-runner.js
new file mode 100644
index 000000000..30b23ffbf
--- /dev/null
+++ b/benchmarks/benchmark-runner.js
@@ -0,0 +1,73 @@
+/** @babel */
+
+import Chart from 'chart.js'
+import glob from 'glob'
+import fs from 'fs-plus'
+import path from 'path'
+
+export default async function ({test, benchmarkPaths}) {
+ document.body.style.backgroundColor = '#ffffff'
+ document.body.style.overflow = 'auto'
+
+ let paths = []
+ for (const benchmarkPath of benchmarkPaths) {
+ if (fs.isDirectorySync(benchmarkPath)) {
+ paths = paths.concat(glob.sync(path.join(benchmarkPath, '**', '*.bench.js')))
+ } else {
+ paths.push(benchmarkPath)
+ }
+ }
+
+ while (paths.length > 0) {
+ const benchmark = require(paths.shift())({test})
+ let results
+ if (benchmark instanceof Promise) {
+ results = await benchmark
+ } else {
+ results = benchmark
+ }
+
+ const dataByBenchmarkName = {}
+ for (const {name, duration, x} of results) {
+ dataByBenchmarkName[name] = dataByBenchmarkName[name] || {points: []}
+ dataByBenchmarkName[name].points.push({x, y: duration})
+ }
+
+ const benchmarkContainer = document.createElement('div')
+ document.body.appendChild(benchmarkContainer)
+ for (const key in dataByBenchmarkName) {
+ const data = dataByBenchmarkName[key]
+ if (data.points.length > 1) {
+ const canvas = document.createElement('canvas')
+ benchmarkContainer.appendChild(canvas)
+ const chart = new Chart(canvas, {
+ type: 'line',
+ data: {
+ datasets: [{label: key, fill: false, data: data.points}]
+ },
+ options: {
+ showLines: false,
+ scales: {xAxes: [{type: 'linear', position: 'bottom'}]}
+ }
+ })
+
+ const textualOutput = `${key}:\n\n` + data.points.map((p) => `${p.x}\t${p.y}`).join('\n')
+ console.log(textualOutput)
+ } else {
+ const title = document.createElement('h2')
+ title.textContent = key
+ benchmarkContainer.appendChild(title)
+ const duration = document.createElement('p')
+ duration.textContent = `${data.points[0].y}ms`
+ benchmarkContainer.appendChild(duration)
+
+ const textualOutput = `${key}: ${data.points[0].y}`
+ console.log(textualOutput)
+ }
+
+ global.atom.reset()
+ }
+ }
+
+ return 0
+}
diff --git a/benchmarks/text-editor-large-file-construction.bench.js b/benchmarks/text-editor-large-file-construction.bench.js
new file mode 100644
index 000000000..0e92973f4
--- /dev/null
+++ b/benchmarks/text-editor-large-file-construction.bench.js
@@ -0,0 +1,19 @@
+/** @babel */
+
+import fs from 'fs'
+import temp from 'temp'
+import {TextEditor, TextBuffer} from 'atom'
+
+export default function ({test}) {
+ const text = 'Lorem ipsum dolor sit amet\n'.repeat(test ? 10 : 500000)
+ const t0 = window.performance.now()
+ const buffer = new TextBuffer(text)
+ const editor = new TextEditor({buffer, largeFileMode: true})
+ editor.element.style.height = "600px"
+ document.body.appendChild(editor.element)
+ const t1 = window.performance.now()
+ editor.element.remove()
+ editor.destroy()
+
+ return [{name: 'Opening and rendering a large file', duration: t1 - t0}]
+}
diff --git a/menus/darwin.cson b/menus/darwin.cson
index ccbb5f7a6..b967220c0 100644
--- a/menus/darwin.cson
+++ b/menus/darwin.cson
@@ -147,6 +147,7 @@
{ label: 'Open In Dev Mode…', command: 'application:open-dev' }
{ label: 'Reload Window', command: 'window:reload' }
{ label: 'Run Package Specs', command: 'window:run-package-specs' }
+ { label: 'Run Benchmarks', command: 'window:run-benchmarks' }
{ label: 'Toggle Developer Tools', command: 'window:toggle-dev-tools' }
]
}
diff --git a/package.json b/package.json
index 2590727f1..4a849125b 100644
--- a/package.json
+++ b/package.json
@@ -15,11 +15,12 @@
"electronVersion": "1.3.6",
"dependencies": {
"async": "0.2.6",
- "atom-keymap": "7.0.2",
+ "atom-keymap": "7.0.3",
"atom-ui": "0.4.1",
"babel-core": "5.8.38",
"cached-run-in-this-context": "0.4.1",
"chai": "3.5.0",
+ "chart.js": "^2.3.0",
"clear-cut": "^2.0.1",
"coffee-script": "1.11.1",
"color": "^0.7.3",
@@ -32,6 +33,7 @@
"fstream": "0.1.24",
"fuzzaldrin": "^2.1",
"git-utils": "^4.1.2",
+ "glob": "^7.1.1",
"grim": "1.5.0",
"jasmine-json": "~0.0",
"jasmine-tagged": "^1.1.4",
@@ -82,7 +84,7 @@
"autocomplete-atom-api": "0.10.0",
"autocomplete-css": "0.13.1",
"autocomplete-html": "0.7.2",
- "autocomplete-plus": "2.31.4",
+ "autocomplete-plus": "2.32.0",
"autocomplete-snippets": "1.11.0",
"autoflow": "0.27.0",
"autosave": "0.23.1",
@@ -115,7 +117,7 @@
"status-bar": "1.6.0",
"styleguide": "0.47.2",
"symbols-view": "0.113.1",
- "tabs": "0.102.2",
+ "tabs": "0.103.0",
"timecop": "0.33.2",
"tree-view": "0.210.0",
"update-package-dependencies": "0.10.0",
@@ -161,8 +163,6 @@
"test": "node script/test"
},
"standard": {
- "ignore": [],
- "parser": "babel-eslint",
"globals": [
"atom",
"afterEach",
diff --git a/resources/win/atom.cmd b/resources/win/atom.cmd
index 73c4ddb01..43ec8ebe3 100644
--- a/resources/win/atom.cmd
+++ b/resources/win/atom.cmd
@@ -5,14 +5,16 @@ SET WAIT=
SET PSARGS=%*
FOR %%a IN (%*) DO (
- IF /I "%%a"=="-f" SET EXPECT_OUTPUT=YES
- IF /I "%%a"=="--foreground" SET EXPECT_OUTPUT=YES
- IF /I "%%a"=="-h" SET EXPECT_OUTPUT=YES
- IF /I "%%a"=="--help" SET EXPECT_OUTPUT=YES
- IF /I "%%a"=="-t" SET EXPECT_OUTPUT=YES
- IF /I "%%a"=="--test" SET EXPECT_OUTPUT=YES
- IF /I "%%a"=="-v" SET EXPECT_OUTPUT=YES
- IF /I "%%a"=="--version" SET EXPECT_OUTPUT=YES
+ IF /I "%%a"=="-f" SET EXPECT_OUTPUT=YES
+ IF /I "%%a"=="--foreground" SET EXPECT_OUTPUT=YES
+ IF /I "%%a"=="-h" SET EXPECT_OUTPUT=YES
+ IF /I "%%a"=="--help" SET EXPECT_OUTPUT=YES
+ IF /I "%%a"=="-t" SET EXPECT_OUTPUT=YES
+ IF /I "%%a"=="--test" SET EXPECT_OUTPUT=YES
+ IF /I "%%a"=="--benchmark" SET EXPECT_OUTPUT=YES
+ IF /I "%%a"=="--benchmark-test" SET EXPECT_OUTPUT=YES
+ IF /I "%%a"=="-v" SET EXPECT_OUTPUT=YES
+ IF /I "%%a"=="--version" SET EXPECT_OUTPUT=YES
IF /I "%%a"=="-w" (
SET EXPECT_OUTPUT=YES
SET WAIT=YES
diff --git a/script/lib/copy-assets.js b/script/lib/copy-assets.js
index 3c33ad13a..28357ee9a 100644
--- a/script/lib/copy-assets.js
+++ b/script/lib/copy-assets.js
@@ -12,6 +12,7 @@ const includePathInPackagedApp = require('./include-path-in-packaged-app')
module.exports = function () {
console.log(`Copying assets to ${CONFIG.intermediateAppPath}`);
let srcPaths = [
+ path.join(CONFIG.repositoryRootPath, 'benchmarks', 'benchmark-runner.js'),
path.join(CONFIG.repositoryRootPath, 'dot-atom'),
path.join(CONFIG.repositoryRootPath, 'exports'),
path.join(CONFIG.repositoryRootPath, 'node_modules'),
diff --git a/script/lib/transpile-babel-paths.js b/script/lib/transpile-babel-paths.js
index 0a987e124..3c440a4bd 100644
--- a/script/lib/transpile-babel-paths.js
+++ b/script/lib/transpile-babel-paths.js
@@ -25,6 +25,7 @@ module.exports = function () {
function getPathsToTranspile () {
let paths = []
+ paths = paths.concat(glob.sync(path.join(CONFIG.intermediateAppPath, 'benchmarks', '**', '*.js')))
paths = paths.concat(glob.sync(path.join(CONFIG.intermediateAppPath, 'exports', '**', '*.js')))
paths = paths.concat(glob.sync(path.join(CONFIG.intermediateAppPath, 'src', '**', '*.js')))
for (let packageName of Object.keys(CONFIG.appMetadata.packageDependencies)) {
diff --git a/script/package.json b/script/package.json
index 0ddc54928..1f4cf782f 100644
--- a/script/package.json
+++ b/script/package.json
@@ -4,7 +4,6 @@
"dependencies": {
"async": "2.0.1",
"babel-core": "5.8.38",
- "babel-eslint": "6.1.2",
"coffeelint": "1.15.7",
"colors": "1.1.2",
"csslint": "1.0.2",
@@ -24,7 +23,7 @@
"runas": "3.1.1",
"season": "5.3.0",
"semver": "5.3.0",
- "standard": "6.0.0",
+ "standard": "8.4.0",
"sync-request": "3.0.1",
"tello": "1.0.5",
"webdriverio": "2.4.5",
diff --git a/script/test b/script/test
index ccb5dce86..38568207f 100755
--- a/script/test
+++ b/script/test
@@ -83,9 +83,19 @@ for (let packageName in CONFIG.appMetadata.packageDependencies) {
})
}
+function runBenchmarkTests (callback) {
+ const benchmarksPath = path.join(CONFIG.repositoryRootPath, 'benchmarks')
+ const testArguments = ['--benchmark-test', benchmarksPath]
+
+ console.log('Executing benchmark tests'.bold.green)
+ const cp = childProcess.spawn(executablePath, testArguments, {stdio: 'inherit'})
+ cp.on('error', error => { callback(error) })
+ cp.on('close', exitCode => { callback(null, exitCode) })
+}
+
let testSuitesToRun
if (process.platform === 'darwin') {
- testSuitesToRun = [runCoreMainProcessTests, runCoreRenderProcessTests].concat(packageTestSuites)
+ testSuitesToRun = [runCoreMainProcessTests, runCoreRenderProcessTests, runBenchmarkTests].concat(packageTestSuites)
} else {
testSuitesToRun = [runCoreMainProcessTests]
}
diff --git a/spec/text-editor-registry-spec.js b/spec/text-editor-registry-spec.js
index 86bb71a6f..51027e63c 100644
--- a/spec/text-editor-registry-spec.js
+++ b/spec/text-editor-registry-spec.js
@@ -198,13 +198,13 @@ describe('TextEditorRegistry', function () {
registry.maintainConfig(editor2)
await initialPackageActivation
- expect(editor.getRootScopeDescriptor().getScopesArray()).toEqual(['text.plain'])
+ expect(editor.getRootScopeDescriptor().getScopesArray()).toEqual(['text.plain.null-grammar'])
expect(editor2.getRootScopeDescriptor().getScopesArray()).toEqual(['source.js'])
expect(editor.getEncoding()).toBe('utf8')
expect(editor2.getEncoding()).toBe('utf8')
- atom.config.set('core.fileEncoding', 'utf16le', {scopeSelector: '.text.plain'})
+ atom.config.set('core.fileEncoding', 'utf16le', {scopeSelector: '.text.plain.null-grammar'})
atom.config.set('core.fileEncoding', 'utf16be', {scopeSelector: '.source.js'})
expect(editor.getEncoding()).toBe('utf16le')
diff --git a/spec/token-iterator-spec.coffee b/spec/token-iterator-spec.coffee
index f876d30d1..6ae01cd30 100644
--- a/spec/token-iterator-spec.coffee
+++ b/spec/token-iterator-spec.coffee
@@ -29,7 +29,7 @@ describe "TokenIterator", ->
})
tokenizedBuffer.setGrammar(grammar)
- tokenIterator = tokenizedBuffer.tokenizedLineForRow(1).getTokenIterator()
+ tokenIterator = tokenizedBuffer.tokenizedLines[1].getTokenIterator()
tokenIterator.next()
expect(tokenIterator.getBufferStart()).toBe 0
diff --git a/spec/tokenized-buffer-spec.coffee b/spec/tokenized-buffer-spec.coffee
index ad9fa0ee7..6558d42b4 100644
--- a/spec/tokenized-buffer-spec.coffee
+++ b/spec/tokenized-buffer-spec.coffee
@@ -1,3 +1,4 @@
+NullGrammar = require '../src/null-grammar'
TokenizedBuffer = require '../src/tokenized-buffer'
{Point} = TextBuffer = require 'text-buffer'
_ = require 'underscore-plus'
@@ -32,15 +33,8 @@ describe "TokenizedBuffer", ->
atom.packages.activatePackage('language-coffee-script')
it "deserializes it searching among the buffers in the current project", ->
- tokenizedBufferA = new TokenizedBuffer({
- buffer, grammarRegistry: atom.grammars, packageManager: atom.packages,
- assert: atom.assert, tabLength: 2,
- })
- tokenizedBufferB = TokenizedBuffer.deserialize(
- JSON.parse(JSON.stringify(tokenizedBufferA.serialize())),
- atom
- )
-
+ tokenizedBufferA = new TokenizedBuffer({buffer, tabLength: 2})
+ tokenizedBufferB = TokenizedBuffer.deserialize(JSON.parse(JSON.stringify(tokenizedBufferA.serialize())), atom)
expect(tokenizedBufferB.buffer).toBe(tokenizedBufferA.buffer)
describe "when the underlying buffer has no path", ->
@@ -48,25 +42,14 @@ describe "TokenizedBuffer", ->
buffer = atom.project.bufferForPathSync(null)
it "deserializes it searching among the buffers in the current project", ->
- tokenizedBufferA = new TokenizedBuffer({
- buffer, grammarRegistry: atom.grammars, packageManager: atom.packages,
- assert: atom.assert, tabLength: 2,
- })
- tokenizedBufferB = TokenizedBuffer.deserialize(
- JSON.parse(JSON.stringify(tokenizedBufferA.serialize())),
- atom
- )
-
+ tokenizedBufferA = new TokenizedBuffer({buffer, tabLength: 2})
+ tokenizedBufferB = TokenizedBuffer.deserialize(JSON.parse(JSON.stringify(tokenizedBufferA.serialize())), atom)
expect(tokenizedBufferB.buffer).toBe(tokenizedBufferA.buffer)
describe "when the buffer is destroyed", ->
beforeEach ->
buffer = atom.project.bufferForPathSync('sample.js')
- tokenizedBuffer = new TokenizedBuffer({
- buffer, grammarRegistry: atom.grammars, packageManager: atom.packages,
- assert: atom.assert, tabLength: 2,
- })
- tokenizedBuffer.setGrammar(atom.grammars.grammarForScopeName('source.js'))
+ tokenizedBuffer = new TokenizedBuffer({buffer, grammar: atom.grammars.grammarForScopeName('source.js'), tabLength: 2})
startTokenizing(tokenizedBuffer)
it "stops tokenization", ->
@@ -78,11 +61,7 @@ describe "TokenizedBuffer", ->
describe "when the buffer contains soft-tabs", ->
beforeEach ->
buffer = atom.project.bufferForPathSync('sample.js')
- tokenizedBuffer = new TokenizedBuffer({
- buffer, grammarRegistry: atom.grammars, packageManager: atom.packages,
- assert: atom.assert, tabLength: 2,
- })
- tokenizedBuffer.setGrammar(atom.grammars.grammarForScopeName('source.js'))
+ tokenizedBuffer = new TokenizedBuffer({buffer, grammar: atom.grammars.grammarForScopeName('source.js'), tabLength: 2})
startTokenizing(tokenizedBuffer)
afterEach ->
@@ -90,32 +69,29 @@ describe "TokenizedBuffer", ->
buffer.release()
describe "on construction", ->
- it "initially creates un-tokenized screen lines, then tokenizes lines chunk at a time in the background", ->
- line0 = tokenizedBuffer.tokenizedLineForRow(0)
- expect(line0.tokens).toEqual([value: line0.text, scopes: ['source.js']])
+ it "tokenizes lines chunk at a time in the background", ->
+ line0 = tokenizedBuffer.tokenizedLines[0]
+ expect(line0).toBeUndefined()
- line11 = tokenizedBuffer.tokenizedLineForRow(11)
- expect(line11.tokens).toEqual([value: " return sort(Array.apply(this, arguments));", scopes: ['source.js']])
-
- # background tokenization has not begun
- expect(tokenizedBuffer.tokenizedLineForRow(0).ruleStack).toBeUndefined()
+ line11 = tokenizedBuffer.tokenizedLines[11]
+ expect(line11).toBeUndefined()
# tokenize chunk 1
advanceClock()
- expect(tokenizedBuffer.tokenizedLineForRow(0).ruleStack?).toBeTruthy()
- expect(tokenizedBuffer.tokenizedLineForRow(4).ruleStack?).toBeTruthy()
- expect(tokenizedBuffer.tokenizedLineForRow(5).ruleStack?).toBeFalsy()
+ expect(tokenizedBuffer.tokenizedLines[0].ruleStack?).toBeTruthy()
+ expect(tokenizedBuffer.tokenizedLines[4].ruleStack?).toBeTruthy()
+ expect(tokenizedBuffer.tokenizedLines[5]).toBeUndefined()
# tokenize chunk 2
advanceClock()
- expect(tokenizedBuffer.tokenizedLineForRow(5).ruleStack?).toBeTruthy()
- expect(tokenizedBuffer.tokenizedLineForRow(9).ruleStack?).toBeTruthy()
- expect(tokenizedBuffer.tokenizedLineForRow(10).ruleStack?).toBeFalsy()
+ expect(tokenizedBuffer.tokenizedLines[5].ruleStack?).toBeTruthy()
+ expect(tokenizedBuffer.tokenizedLines[9].ruleStack?).toBeTruthy()
+ expect(tokenizedBuffer.tokenizedLines[10]).toBeUndefined()
# tokenize last chunk
advanceClock()
- expect(tokenizedBuffer.tokenizedLineForRow(10).ruleStack?).toBeTruthy()
- expect(tokenizedBuffer.tokenizedLineForRow(12).ruleStack?).toBeTruthy()
+ expect(tokenizedBuffer.tokenizedLines[10].ruleStack?).toBeTruthy()
+ expect(tokenizedBuffer.tokenizedLines[12].ruleStack?).toBeTruthy()
describe "when the buffer is partially tokenized", ->
beforeEach ->
@@ -152,8 +128,8 @@ describe "TokenizedBuffer", ->
it "does not attempt to tokenize the lines in the change, and preserves the existing invalid row", ->
expect(tokenizedBuffer.firstInvalidRow()).toBe 5
buffer.setTextInRange([[6, 0], [7, 0]], "\n\n\n")
- expect(tokenizedBuffer.tokenizedLineForRow(6).ruleStack?).toBeFalsy()
- expect(tokenizedBuffer.tokenizedLineForRow(7).ruleStack?).toBeFalsy()
+ expect(tokenizedBuffer.tokenizedLines[6]).toBeUndefined()
+ expect(tokenizedBuffer.tokenizedLines[7]).toBeUndefined()
expect(tokenizedBuffer.firstInvalidRow()).toBe 5
describe "when the buffer is fully tokenized", ->
@@ -165,101 +141,101 @@ describe "TokenizedBuffer", ->
it "updates tokens to reflect the change", ->
buffer.setTextInRange([[0, 0], [2, 0]], "foo()\n7\n")
- expect(tokenizedBuffer.tokenizedLineForRow(0).tokens[1]).toEqual(value: '(', scopes: ['source.js', 'meta.function-call.js', 'meta.arguments.js', 'punctuation.definition.arguments.begin.bracket.round.js'])
- expect(tokenizedBuffer.tokenizedLineForRow(1).tokens[0]).toEqual(value: '7', scopes: ['source.js', 'constant.numeric.decimal.js'])
+ expect(tokenizedBuffer.tokenizedLines[0].tokens[1]).toEqual(value: '(', scopes: ['source.js', 'meta.function-call.js', 'meta.arguments.js', 'punctuation.definition.arguments.begin.bracket.round.js'])
+ expect(tokenizedBuffer.tokenizedLines[1].tokens[0]).toEqual(value: '7', scopes: ['source.js', 'constant.numeric.decimal.js'])
# line 2 is unchanged
- expect(tokenizedBuffer.tokenizedLineForRow(2).tokens[1]).toEqual(value: 'if', scopes: ['source.js', 'keyword.control.js'])
+ expect(tokenizedBuffer.tokenizedLines[2].tokens[1]).toEqual(value: 'if', scopes: ['source.js', 'keyword.control.js'])
describe "when the change invalidates the tokenization of subsequent lines", ->
it "schedules the invalidated lines to be tokenized in the background", ->
buffer.insert([5, 30], '/* */')
buffer.insert([2, 0], '/*')
- expect(tokenizedBuffer.tokenizedLineForRow(3).tokens[0].scopes).toEqual ['source.js']
+ expect(tokenizedBuffer.tokenizedLines[3].tokens[0].scopes).toEqual ['source.js']
advanceClock()
- expect(tokenizedBuffer.tokenizedLineForRow(3).tokens[0].scopes).toEqual ['source.js', 'comment.block.js']
- expect(tokenizedBuffer.tokenizedLineForRow(4).tokens[0].scopes).toEqual ['source.js', 'comment.block.js']
- expect(tokenizedBuffer.tokenizedLineForRow(5).tokens[0].scopes).toEqual ['source.js', 'comment.block.js']
+ expect(tokenizedBuffer.tokenizedLines[3].tokens[0].scopes).toEqual ['source.js', 'comment.block.js']
+ expect(tokenizedBuffer.tokenizedLines[4].tokens[0].scopes).toEqual ['source.js', 'comment.block.js']
+ expect(tokenizedBuffer.tokenizedLines[5].tokens[0].scopes).toEqual ['source.js', 'comment.block.js']
it "resumes highlighting with the state of the previous line", ->
buffer.insert([0, 0], '/*')
buffer.insert([5, 0], '*/')
buffer.insert([1, 0], 'var ')
- expect(tokenizedBuffer.tokenizedLineForRow(1).tokens[0].scopes).toEqual ['source.js', 'comment.block.js']
+ expect(tokenizedBuffer.tokenizedLines[1].tokens[0].scopes).toEqual ['source.js', 'comment.block.js']
describe "when lines are both updated and removed", ->
it "updates tokens to reflect the change", ->
buffer.setTextInRange([[1, 0], [3, 0]], "foo()")
# previous line 0 remains
- expect(tokenizedBuffer.tokenizedLineForRow(0).tokens[0]).toEqual(value: 'var', scopes: ['source.js', 'storage.type.var.js'])
+ expect(tokenizedBuffer.tokenizedLines[0].tokens[0]).toEqual(value: 'var', scopes: ['source.js', 'storage.type.var.js'])
# previous line 3 should be combined with input to form line 1
- expect(tokenizedBuffer.tokenizedLineForRow(1).tokens[0]).toEqual(value: 'foo', scopes: ['source.js', 'meta.function-call.js', 'entity.name.function.js'])
- expect(tokenizedBuffer.tokenizedLineForRow(1).tokens[6]).toEqual(value: '=', scopes: ['source.js', 'keyword.operator.assignment.js'])
+ expect(tokenizedBuffer.tokenizedLines[1].tokens[0]).toEqual(value: 'foo', scopes: ['source.js', 'meta.function-call.js', 'entity.name.function.js'])
+ expect(tokenizedBuffer.tokenizedLines[1].tokens[6]).toEqual(value: '=', scopes: ['source.js', 'keyword.operator.assignment.js'])
# lines below deleted regions should be shifted upward
- expect(tokenizedBuffer.tokenizedLineForRow(2).tokens[1]).toEqual(value: 'while', scopes: ['source.js', 'keyword.control.js'])
- expect(tokenizedBuffer.tokenizedLineForRow(3).tokens[1]).toEqual(value: '=', scopes: ['source.js', 'keyword.operator.assignment.js'])
- expect(tokenizedBuffer.tokenizedLineForRow(4).tokens[1]).toEqual(value: '<', scopes: ['source.js', 'keyword.operator.comparison.js'])
+ expect(tokenizedBuffer.tokenizedLines[2].tokens[1]).toEqual(value: 'while', scopes: ['source.js', 'keyword.control.js'])
+ expect(tokenizedBuffer.tokenizedLines[3].tokens[1]).toEqual(value: '=', scopes: ['source.js', 'keyword.operator.assignment.js'])
+ expect(tokenizedBuffer.tokenizedLines[4].tokens[1]).toEqual(value: '<', scopes: ['source.js', 'keyword.operator.comparison.js'])
describe "when the change invalidates the tokenization of subsequent lines", ->
it "schedules the invalidated lines to be tokenized in the background", ->
buffer.insert([5, 30], '/* */')
buffer.setTextInRange([[2, 0], [3, 0]], '/*')
- expect(tokenizedBuffer.tokenizedLineForRow(2).tokens[0].scopes).toEqual ['source.js', 'comment.block.js', 'punctuation.definition.comment.js']
- expect(tokenizedBuffer.tokenizedLineForRow(3).tokens[0].scopes).toEqual ['source.js']
+ expect(tokenizedBuffer.tokenizedLines[2].tokens[0].scopes).toEqual ['source.js', 'comment.block.js', 'punctuation.definition.comment.js']
+ expect(tokenizedBuffer.tokenizedLines[3].tokens[0].scopes).toEqual ['source.js']
advanceClock()
- expect(tokenizedBuffer.tokenizedLineForRow(3).tokens[0].scopes).toEqual ['source.js', 'comment.block.js']
- expect(tokenizedBuffer.tokenizedLineForRow(4).tokens[0].scopes).toEqual ['source.js', 'comment.block.js']
+ expect(tokenizedBuffer.tokenizedLines[3].tokens[0].scopes).toEqual ['source.js', 'comment.block.js']
+ expect(tokenizedBuffer.tokenizedLines[4].tokens[0].scopes).toEqual ['source.js', 'comment.block.js']
describe "when lines are both updated and inserted", ->
it "updates tokens to reflect the change", ->
buffer.setTextInRange([[1, 0], [2, 0]], "foo()\nbar()\nbaz()\nquux()")
# previous line 0 remains
- expect(tokenizedBuffer.tokenizedLineForRow(0).tokens[0]).toEqual( value: 'var', scopes: ['source.js', 'storage.type.var.js'])
+ expect(tokenizedBuffer.tokenizedLines[0].tokens[0]).toEqual( value: 'var', scopes: ['source.js', 'storage.type.var.js'])
# 3 new lines inserted
- expect(tokenizedBuffer.tokenizedLineForRow(1).tokens[0]).toEqual(value: 'foo', scopes: ['source.js', 'meta.function-call.js', 'entity.name.function.js'])
- expect(tokenizedBuffer.tokenizedLineForRow(2).tokens[0]).toEqual(value: 'bar', scopes: ['source.js', 'meta.function-call.js', 'entity.name.function.js'])
- expect(tokenizedBuffer.tokenizedLineForRow(3).tokens[0]).toEqual(value: 'baz', scopes: ['source.js', 'meta.function-call.js', 'entity.name.function.js'])
+ expect(tokenizedBuffer.tokenizedLines[1].tokens[0]).toEqual(value: 'foo', scopes: ['source.js', 'meta.function-call.js', 'entity.name.function.js'])
+ expect(tokenizedBuffer.tokenizedLines[2].tokens[0]).toEqual(value: 'bar', scopes: ['source.js', 'meta.function-call.js', 'entity.name.function.js'])
+ expect(tokenizedBuffer.tokenizedLines[3].tokens[0]).toEqual(value: 'baz', scopes: ['source.js', 'meta.function-call.js', 'entity.name.function.js'])
# previous line 2 is joined with quux() on line 4
- expect(tokenizedBuffer.tokenizedLineForRow(4).tokens[0]).toEqual(value: 'quux', scopes: ['source.js', 'meta.function-call.js', 'entity.name.function.js'])
- expect(tokenizedBuffer.tokenizedLineForRow(4).tokens[4]).toEqual(value: 'if', scopes: ['source.js', 'keyword.control.js'])
+ expect(tokenizedBuffer.tokenizedLines[4].tokens[0]).toEqual(value: 'quux', scopes: ['source.js', 'meta.function-call.js', 'entity.name.function.js'])
+ expect(tokenizedBuffer.tokenizedLines[4].tokens[4]).toEqual(value: 'if', scopes: ['source.js', 'keyword.control.js'])
# previous line 3 is pushed down to become line 5
- expect(tokenizedBuffer.tokenizedLineForRow(5).tokens[3]).toEqual(value: '=', scopes: ['source.js', 'keyword.operator.assignment.js'])
+ expect(tokenizedBuffer.tokenizedLines[5].tokens[3]).toEqual(value: '=', scopes: ['source.js', 'keyword.operator.assignment.js'])
describe "when the change invalidates the tokenization of subsequent lines", ->
it "schedules the invalidated lines to be tokenized in the background", ->
buffer.insert([5, 30], '/* */')
buffer.insert([2, 0], '/*\nabcde\nabcder')
- expect(tokenizedBuffer.tokenizedLineForRow(2).tokens[0].scopes).toEqual ['source.js', 'comment.block.js', 'punctuation.definition.comment.js']
- expect(tokenizedBuffer.tokenizedLineForRow(3).tokens[0].scopes).toEqual ['source.js', 'comment.block.js']
- expect(tokenizedBuffer.tokenizedLineForRow(4).tokens[0].scopes).toEqual ['source.js', 'comment.block.js']
- expect(tokenizedBuffer.tokenizedLineForRow(5).tokens[0].scopes).toEqual ['source.js']
+ expect(tokenizedBuffer.tokenizedLines[2].tokens[0].scopes).toEqual ['source.js', 'comment.block.js', 'punctuation.definition.comment.js']
+ expect(tokenizedBuffer.tokenizedLines[3].tokens[0].scopes).toEqual ['source.js', 'comment.block.js']
+ expect(tokenizedBuffer.tokenizedLines[4].tokens[0].scopes).toEqual ['source.js', 'comment.block.js']
+ expect(tokenizedBuffer.tokenizedLines[5].tokens[0].scopes).toEqual ['source.js']
advanceClock() # tokenize invalidated lines in background
- expect(tokenizedBuffer.tokenizedLineForRow(5).tokens[0].scopes).toEqual ['source.js', 'comment.block.js']
- expect(tokenizedBuffer.tokenizedLineForRow(6).tokens[0].scopes).toEqual ['source.js', 'comment.block.js']
- expect(tokenizedBuffer.tokenizedLineForRow(7).tokens[0].scopes).toEqual ['source.js', 'comment.block.js']
- expect(tokenizedBuffer.tokenizedLineForRow(8).tokens[0].scopes).not.toBe ['source.js', 'comment.block.js']
+ expect(tokenizedBuffer.tokenizedLines[5].tokens[0].scopes).toEqual ['source.js', 'comment.block.js']
+ expect(tokenizedBuffer.tokenizedLines[6].tokens[0].scopes).toEqual ['source.js', 'comment.block.js']
+ expect(tokenizedBuffer.tokenizedLines[7].tokens[0].scopes).toEqual ['source.js', 'comment.block.js']
+ expect(tokenizedBuffer.tokenizedLines[8].tokens[0].scopes).not.toBe ['source.js', 'comment.block.js']
describe "when there is an insertion that is larger than the chunk size", ->
it "tokenizes the initial chunk synchronously, then tokenizes the remaining lines in the background", ->
commentBlock = _.multiplyString("// a comment\n", tokenizedBuffer.chunkSize + 2)
buffer.insert([0, 0], commentBlock)
- expect(tokenizedBuffer.tokenizedLineForRow(0).ruleStack?).toBeTruthy()
- expect(tokenizedBuffer.tokenizedLineForRow(4).ruleStack?).toBeTruthy()
- expect(tokenizedBuffer.tokenizedLineForRow(5).ruleStack?).toBeFalsy()
+ expect(tokenizedBuffer.tokenizedLines[0].ruleStack?).toBeTruthy()
+ expect(tokenizedBuffer.tokenizedLines[4].ruleStack?).toBeTruthy()
+ expect(tokenizedBuffer.tokenizedLines[5]).toBeUndefined()
advanceClock()
- expect(tokenizedBuffer.tokenizedLineForRow(5).ruleStack?).toBeTruthy()
- expect(tokenizedBuffer.tokenizedLineForRow(6).ruleStack?).toBeTruthy()
+ expect(tokenizedBuffer.tokenizedLines[5].ruleStack?).toBeTruthy()
+ expect(tokenizedBuffer.tokenizedLines[6].ruleStack?).toBeTruthy()
it "does not break out soft tabs across a scope boundary", ->
waitsForPromise ->
@@ -284,11 +260,7 @@ describe "TokenizedBuffer", ->
runs ->
buffer = atom.project.bufferForPathSync('sample-with-tabs.coffee')
- tokenizedBuffer = new TokenizedBuffer({
- buffer, grammarRegistry: atom.grammars, packageManager: atom.packages,
- assert: atom.assert, tabLength: 2,
- })
- tokenizedBuffer.setGrammar(atom.grammars.grammarForScopeName('source.coffee'))
+ tokenizedBuffer = new TokenizedBuffer({buffer, grammar: atom.grammars.grammarForScopeName('source.coffee'), tabLength: 2})
startTokenizing(tokenizedBuffer)
afterEach ->
@@ -352,7 +324,6 @@ describe "TokenizedBuffer", ->
expect(tokenizedHandler.callCount).toBe(1)
it "retokenizes the buffer", ->
-
waitsForPromise ->
atom.packages.activatePackage('language-ruby-on-rails')
@@ -362,14 +333,10 @@ describe "TokenizedBuffer", ->
runs ->
buffer = atom.project.bufferForPathSync()
buffer.setText "
<%= User.find(2).full_name %>
"
- tokenizedBuffer = new TokenizedBuffer({
- buffer, grammarRegistry: atom.grammars, packageManager: atom.packages,
- assert: atom.assert, tabLength: 2,
- })
- tokenizedBuffer.setGrammar(atom.grammars.selectGrammar('test.erb'))
+ tokenizedBuffer = new TokenizedBuffer({buffer, grammar: atom.grammars.selectGrammar('test.erb'), tabLength: 2})
fullyTokenize(tokenizedBuffer)
- {tokens} = tokenizedBuffer.tokenizedLineForRow(0)
+ {tokens} = tokenizedBuffer.tokenizedLines[0]
expect(tokens[0]).toEqual value: "", scopes: ["text.html.ruby"]
waitsForPromise ->
@@ -377,7 +344,7 @@ describe "TokenizedBuffer", ->
runs ->
fullyTokenize(tokenizedBuffer)
- {tokens} = tokenizedBuffer.tokenizedLineForRow(0)
+ {tokens} = tokenizedBuffer.tokenizedLines[0]
expect(tokens[0]).toEqual value: '<', scopes: ["text.html.ruby", "meta.tag.block.any.html", "punctuation.definition.tag.begin.html"]
describe ".tokenForPosition(position)", ->
@@ -387,11 +354,7 @@ describe "TokenizedBuffer", ->
it "returns the correct token (regression)", ->
buffer = atom.project.bufferForPathSync('sample.js')
- tokenizedBuffer = new TokenizedBuffer({
- buffer, grammarRegistry: atom.grammars, packageManager: atom.packages,
- assert: atom.assert, tabLength: 2,
- })
- tokenizedBuffer.setGrammar(atom.grammars.grammarForScopeName('source.js'))
+ tokenizedBuffer = new TokenizedBuffer({buffer, grammar: atom.grammars.grammarForScopeName('source.js'), tabLength: 2})
fullyTokenize(tokenizedBuffer)
expect(tokenizedBuffer.tokenForPosition([1, 0]).scopes).toEqual ["source.js"]
expect(tokenizedBuffer.tokenForPosition([1, 1]).scopes).toEqual ["source.js"]
@@ -400,16 +363,12 @@ describe "TokenizedBuffer", ->
describe ".bufferRangeForScopeAtPosition(selector, position)", ->
beforeEach ->
buffer = atom.project.bufferForPathSync('sample.js')
- tokenizedBuffer = new TokenizedBuffer({
- buffer, grammarRegistry: atom.grammars, packageManager: atom.packages,
- assert: atom.assert, tabLength: 2,
- })
- tokenizedBuffer.setGrammar(atom.grammars.grammarForScopeName('source.js'))
+ tokenizedBuffer = new TokenizedBuffer({buffer, grammar: atom.grammars.grammarForScopeName('source.js'), tabLength: 2})
fullyTokenize(tokenizedBuffer)
describe "when the selector does not match the token at the position", ->
it "returns a falsy value", ->
- expect(tokenizedBuffer.bufferRangeForScopeAtPosition('.bogus', [0, 1])).toBeFalsy()
+ expect(tokenizedBuffer.bufferRangeForScopeAtPosition('.bogus', [0, 1])).toBeUndefined()
describe "when the selector matches a single token at the position", ->
it "returns the range covered by the token", ->
@@ -423,11 +382,7 @@ describe "TokenizedBuffer", ->
describe ".indentLevelForRow(row)", ->
beforeEach ->
buffer = atom.project.bufferForPathSync('sample.js')
- tokenizedBuffer = new TokenizedBuffer({
- buffer, grammarRegistry: atom.grammars, packageManager: atom.packages,
- assert: atom.assert, tabLength: 2,
- })
- tokenizedBuffer.setGrammar(atom.grammars.grammarForScopeName('source.js'))
+ tokenizedBuffer = new TokenizedBuffer({buffer, grammar: atom.grammars.grammarForScopeName('source.js'), tabLength: 2})
fullyTokenize(tokenizedBuffer)
describe "when the line is non-empty", ->
@@ -469,7 +424,7 @@ describe "TokenizedBuffer", ->
buffer.insert([12, 0], ' ')
expect(tokenizedBuffer.indentLevelForRow(13)).toBe 2
- expect(tokenizedBuffer.tokenizedLineForRow(14)).not.toBeDefined()
+ expect(tokenizedBuffer.tokenizedLines[14]).not.toBeDefined()
it "updates the indentLevel of empty lines surrounding a change that inserts lines", ->
buffer.insert([7, 0], '\n\n')
@@ -503,11 +458,7 @@ describe "TokenizedBuffer", ->
buffer = atom.project.bufferForPathSync('sample.js')
buffer.insert [10, 0], " // multi-line\n // comment\n // block\n"
buffer.insert [0, 0], "// multi-line\n// comment\n// block\n"
- tokenizedBuffer = new TokenizedBuffer({
- buffer, grammarRegistry: atom.grammars, packageManager: atom.packages,
- assert: atom.assert, tabLength: 2,
- })
- tokenizedBuffer.setGrammar(atom.grammars.grammarForScopeName('source.js'))
+ tokenizedBuffer = new TokenizedBuffer({buffer, grammar: atom.grammars.grammarForScopeName('source.js'), tabLength: 2})
fullyTokenize(tokenizedBuffer)
it "includes the first line of multi-line comments", ->
@@ -570,40 +521,69 @@ describe "TokenizedBuffer", ->
expect(tokenizedBuffer.isFoldableAtRow(7)).toBe false
expect(tokenizedBuffer.isFoldableAtRow(8)).toBe false
+ describe "::tokenizedLineForRow(row)", ->
+ it "returns the tokenized line for a row, or a placeholder line if it hasn't been tokenized yet", ->
+ buffer = atom.project.bufferForPathSync('sample.js')
+ grammar = atom.grammars.grammarForScopeName('source.js')
+ tokenizedBuffer = new TokenizedBuffer({buffer, grammar, tabLength: 2})
+ line0 = buffer.lineForRow(0)
+
+ jsScopeStartId = grammar.startIdForScope(grammar.scopeName)
+ jsScopeEndId = grammar.endIdForScope(grammar.scopeName)
+ startTokenizing(tokenizedBuffer)
+ expect(tokenizedBuffer.tokenizedLines[0]).toBeUndefined()
+ expect(tokenizedBuffer.tokenizedLineForRow(0).text).toBe(line0)
+ expect(tokenizedBuffer.tokenizedLineForRow(0).tags).toEqual([jsScopeStartId, line0.length, jsScopeEndId])
+ advanceClock(1)
+ expect(tokenizedBuffer.tokenizedLines[0]).not.toBeUndefined()
+ expect(tokenizedBuffer.tokenizedLineForRow(0).text).toBe(line0)
+ expect(tokenizedBuffer.tokenizedLineForRow(0).tags).not.toEqual([jsScopeStartId, line0.length, jsScopeEndId])
+
+ nullScopeStartId = NullGrammar.startIdForScope(NullGrammar.scopeName)
+ nullScopeEndId = NullGrammar.endIdForScope(NullGrammar.scopeName)
+ tokenizedBuffer.setGrammar(NullGrammar)
+ startTokenizing(tokenizedBuffer)
+ expect(tokenizedBuffer.tokenizedLines[0]).toBeUndefined()
+ expect(tokenizedBuffer.tokenizedLineForRow(0).text).toBe(line0)
+ expect(tokenizedBuffer.tokenizedLineForRow(0).tags).toEqual([nullScopeStartId, line0.length, nullScopeEndId])
+ advanceClock(1)
+ expect(tokenizedBuffer.tokenizedLineForRow(0).text).toBe(line0)
+ expect(tokenizedBuffer.tokenizedLineForRow(0).tags).toEqual([nullScopeStartId, line0.length, nullScopeEndId])
+
+ it "returns undefined if the requested row is outside the buffer range", ->
+ buffer = atom.project.bufferForPathSync('sample.js')
+ grammar = atom.grammars.grammarForScopeName('source.js')
+ tokenizedBuffer = new TokenizedBuffer({buffer, grammar, tabLength: 2})
+ fullyTokenize(tokenizedBuffer)
+ expect(tokenizedBuffer.tokenizedLineForRow(999)).toBeUndefined()
+
describe "when the buffer is configured with the null grammar", ->
- it "uses the placeholder tokens and does not actually tokenize using the grammar", ->
- spyOn(atom.grammars.nullGrammar, 'tokenizeLine').andCallThrough()
+ it "does not actually tokenize using the grammar", ->
+ spyOn(NullGrammar, 'tokenizeLine').andCallThrough()
buffer = atom.project.bufferForPathSync('sample.will-use-the-null-grammar')
buffer.setText('a\nb\nc')
-
- tokenizedBuffer = new TokenizedBuffer({
- buffer, grammarRegistry: atom.grammars, packageManager: atom.packages,
- assert: atom.assert, tabLength: 2,
- })
+ tokenizedBuffer = new TokenizedBuffer({buffer, tabLength: 2})
tokenizeCallback = jasmine.createSpy('onDidTokenize')
tokenizedBuffer.onDidTokenize(tokenizeCallback)
+ expect(tokenizedBuffer.tokenizedLines[0]).toBeUndefined()
+ expect(tokenizedBuffer.tokenizedLines[1]).toBeUndefined()
+ expect(tokenizedBuffer.tokenizedLines[2]).toBeUndefined()
+ expect(tokenizeCallback.callCount).toBe(0)
+ expect(NullGrammar.tokenizeLine).not.toHaveBeenCalled()
+
fullyTokenize(tokenizedBuffer)
-
- expect(tokenizeCallback.callCount).toBe 1
- expect(atom.grammars.nullGrammar.tokenizeLine.callCount).toBe 0
-
- expect(tokenizedBuffer.tokenizedLineForRow(0).tokens.length).toBe 1
- expect(tokenizedBuffer.tokenizedLineForRow(0).tokens[0].value).toBe 'a'
- expect(tokenizedBuffer.tokenizedLineForRow(1).tokens.length).toBe 1
- expect(tokenizedBuffer.tokenizedLineForRow(1).tokens[0].value).toBe 'b'
- expect(tokenizedBuffer.tokenizedLineForRow(2).tokens.length).toBe 1
- expect(tokenizedBuffer.tokenizedLineForRow(2).tokens[0].value).toBe 'c'
+ expect(tokenizedBuffer.tokenizedLines[0]).toBeUndefined()
+ expect(tokenizedBuffer.tokenizedLines[1]).toBeUndefined()
+ expect(tokenizedBuffer.tokenizedLines[2]).toBeUndefined()
+ expect(tokenizeCallback.callCount).toBe(0)
+ expect(NullGrammar.tokenizeLine).not.toHaveBeenCalled()
describe "text decoration layer API", ->
describe "iterator", ->
it "iterates over the syntactic scope boundaries", ->
buffer = new TextBuffer(text: "var foo = 1 /*\nhello*/var bar = 2\n")
- tokenizedBuffer = new TokenizedBuffer({
- buffer, grammarRegistry: atom.grammars, packageManager: atom.packages,
- assert: atom.assert, tabLength: 2,
- })
- tokenizedBuffer.setGrammar(atom.grammars.selectGrammar(".js"))
+ tokenizedBuffer = new TokenizedBuffer({buffer, grammar: atom.grammars.grammarForScopeName("source.js"), tabLength: 2})
fullyTokenize(tokenizedBuffer)
iterator = tokenizedBuffer.buildIterator()
@@ -655,11 +635,7 @@ describe "TokenizedBuffer", ->
runs ->
buffer = new TextBuffer(text: "# hello\n# world")
- tokenizedBuffer = new TokenizedBuffer({
- buffer, grammarRegistry: atom.grammars, packageManager: atom.packages,
- assert: atom.assert, tabLength: 2,
- })
- tokenizedBuffer.setGrammar(atom.grammars.selectGrammar(".coffee"))
+ tokenizedBuffer = new TokenizedBuffer({buffer, grammar: atom.grammars.grammarForScopeName("source.coffee"), tabLength: 2})
fullyTokenize(tokenizedBuffer)
iterator = tokenizedBuffer.buildIterator()
@@ -688,11 +664,7 @@ describe "TokenizedBuffer", ->
})
buffer = new TextBuffer(text: 'start x\nend x\nx')
- tokenizedBuffer = new TokenizedBuffer({
- buffer, grammarRegistry: atom.grammars, packageManager: atom.packages,
- assert: atom.assert, tabLength: 2,
- })
- tokenizedBuffer.setGrammar(grammar)
+ tokenizedBuffer = new TokenizedBuffer({buffer, grammar, tabLength: 2})
fullyTokenize(tokenizedBuffer)
iterator = tokenizedBuffer.buildIterator()
diff --git a/src/initialize-benchmark-window.js b/src/initialize-benchmark-window.js
new file mode 100644
index 000000000..e4be4420b
--- /dev/null
+++ b/src/initialize-benchmark-window.js
@@ -0,0 +1,111 @@
+/** @babel */
+
+import {remote} from 'electron'
+import path from 'path'
+import ipcHelpers from './ipc-helpers'
+import util from 'util'
+
+export default async function () {
+ const {getWindowLoadSettings} = require('./window-load-settings-helpers')
+ const {test, headless, resourcePath, benchmarkPaths} = getWindowLoadSettings()
+ try {
+ const Clipboard = require('../src/clipboard')
+ const ApplicationDelegate = require('../src/application-delegate')
+ const AtomEnvironment = require('../src/atom-environment')
+ const TextEditor = require('../src/text-editor')
+
+ const exportsPath = path.join(resourcePath, 'exports')
+ require('module').globalPaths.push(exportsPath) // Add 'exports' to module search path.
+ process.env.NODE_PATH = exportsPath // Set NODE_PATH env variable since tasks may need it.
+
+ document.title = 'Benchmarks'
+ // Allow `document.title` to be assigned in benchmarks without actually changing the window title.
+ let documentTitle = null
+ Object.defineProperty(document, 'title', {
+ get () { return documentTitle },
+ set (title) { documentTitle = title }
+ })
+
+ window.addEventListener('keydown', (event) => {
+ // Reload: cmd-r / ctrl-r
+ if ((event.metaKey || event.ctrlKey) && event.keyCode === 82) {
+ ipcHelpers.call('window-method', 'reload')
+ }
+
+ // Toggle Dev Tools: cmd-alt-i (Mac) / ctrl-shift-i (Linux/Windows)
+ if (event.keyCode === 73) {
+ const isDarwin = process.platform === 'darwin'
+ if ((isDarwin && event.metaKey && event.altKey) || (!isDarwin && event.ctrlKey && event.shiftKey)) {
+ ipcHelpers.call('window-method', 'toggleDevTools')
+ }
+ }
+
+ // Close: cmd-w / ctrl-w
+ if ((event.metaKey || event.ctrlKey) && event.keyCode === 87) {
+ ipcHelpers.call('window-method', 'close')
+ }
+
+ // Copy: cmd-c / ctrl-c
+ if ((event.metaKey || event.ctrlKey) && event.keyCode === 67) {
+ ipcHelpers.call('window-method', 'copy')
+ }
+ }, true)
+
+ const clipboard = new Clipboard()
+ TextEditor.setClipboard(clipboard)
+
+ const applicationDelegate = new ApplicationDelegate()
+ global.atom = new AtomEnvironment({
+ applicationDelegate,
+ window,
+ document,
+ clipboard,
+ configDirPath: process.env.ATOM_HOME,
+ enablePersistence: false
+ })
+
+ // Prevent benchmarks from modifying application menus
+ global.atom.menu.sendToBrowserProcess = function () { }
+
+ if (headless) {
+ Object.defineProperties(process, {
+ stdout: { value: remote.process.stdout },
+ stderr: { value: remote.process.stderr }
+ })
+
+ console.log = function (...args) {
+ const formatted = util.format(...args)
+ process.stdout.write(formatted + '\n')
+ }
+ console.warn = function (...args) {
+ const formatted = util.format(...args)
+ process.stderr.write(formatted + '\n')
+ }
+ console.error = function (...args) {
+ const formatted = util.format(...args)
+ process.stderr.write(formatted + '\n')
+ }
+ } else {
+ remote.getCurrentWindow().show()
+ }
+
+ const benchmarkRunner = require('../benchmarks/benchmark-runner')
+ const statusCode = await benchmarkRunner({test, benchmarkPaths})
+ if (headless) {
+ exitWithStatusCode(statusCode)
+ }
+ } catch (error) {
+ if (headless) {
+ console.error(error.stack || error)
+ exitWithStatusCode(1)
+ } else {
+ ipcHelpers.call('window-method', 'openDevTools')
+ throw error
+ }
+ }
+}
+
+function exitWithStatusCode (statusCode) {
+ remote.app.emit('will-quit')
+ remote.process.exit(statusCode)
+}
diff --git a/src/language-mode.coffee b/src/language-mode.coffee
index ad038d7db..bb9f339c4 100644
--- a/src/language-mode.coffee
+++ b/src/language-mode.coffee
@@ -148,19 +148,19 @@ class LanguageMode
rowRange
rowRangeForCommentAtBufferRow: (bufferRow) ->
- return unless @editor.tokenizedBuffer.tokenizedLineForRow(bufferRow).isComment()
+ return unless @editor.tokenizedBuffer.tokenizedLines[bufferRow]?.isComment()
startRow = bufferRow
endRow = bufferRow
if bufferRow > 0
for currentRow in [bufferRow-1..0] by -1
- break unless @editor.tokenizedBuffer.tokenizedLineForRow(currentRow).isComment()
+ break unless @editor.tokenizedBuffer.tokenizedLines[currentRow]?.isComment()
startRow = currentRow
if bufferRow < @buffer.getLastRow()
for currentRow in [bufferRow+1..@buffer.getLastRow()] by 1
- break unless @editor.tokenizedBuffer.tokenizedLineForRow(currentRow).isComment()
+ break unless @editor.tokenizedBuffer.tokenizedLines[currentRow]?.isComment()
endRow = currentRow
return [startRow, endRow] if startRow isnt endRow
@@ -189,7 +189,7 @@ class LanguageMode
# row is a comment.
isLineCommentedAtBufferRow: (bufferRow) ->
return false unless 0 <= bufferRow <= @editor.getLastBufferRow()
- @editor.tokenizedBuffer.tokenizedLineForRow(bufferRow).isComment()
+ @editor.tokenizedBuffer.tokenizedLines[bufferRow]?.isComment()
# Find a row range for a 'paragraph' around specified bufferRow. A paragraph
# is a block of text bounded by an empty line or a block of text that is not
@@ -246,10 +246,7 @@ class LanguageMode
@suggestedIndentForTokenizedLineAtBufferRow(bufferRow, line, tokenizedLine, options)
suggestedIndentForLineAtBufferRow: (bufferRow, line, options) ->
- if @editor.largeFileMode or @editor.tokenizedBuffer.grammar is NullGrammar
- tokenizedLine = @editor.tokenizedBuffer.buildPlaceholderTokenizedLineForRowWithText(bufferRow, line)
- else
- tokenizedLine = @editor.tokenizedBuffer.buildTokenizedLineForRowWithText(bufferRow, line)
+ tokenizedLine = @editor.tokenizedBuffer.buildTokenizedLineForRowWithText(bufferRow, line)
@suggestedIndentForTokenizedLineAtBufferRow(bufferRow, line, tokenizedLine, options)
suggestedIndentForTokenizedLineAtBufferRow: (bufferRow, line, tokenizedLine, options) ->
diff --git a/src/main-process/atom-application.coffee b/src/main-process/atom-application.coffee
index 21f138eed..aaceebffe 100644
--- a/src/main-process/atom-application.coffee
+++ b/src/main-process/atom-application.coffee
@@ -42,7 +42,7 @@ class AtomApplication
# take a few seconds to trigger 'error' event, it could be a bug of node
# or atom-shell, before it's fixed we check the existence of socketPath to
# speedup startup.
- if (process.platform isnt 'win32' and not fs.existsSync options.socketPath) or options.test
+ if (process.platform isnt 'win32' and not fs.existsSync options.socketPath) or options.test or options.benchmark or options.benchmarkTest
new AtomApplication(options).initialize(options)
return
@@ -64,7 +64,7 @@ class AtomApplication
constructor: (options) ->
{@resourcePath, @devResourcePath, @version, @devMode, @safeMode, @socketPath, @logFile, @setPortable, @userDataDir} = options
- @socketPath = null if options.test
+ @socketPath = null if options.test or options.benchmark or options.benchmarkTest
@pidsToOpenWindows = {}
@windows = []
@@ -86,7 +86,9 @@ class AtomApplication
@config.onDidChange 'core.useCustomTitleBar', @promptForRestart.bind(this)
- @autoUpdateManager = new AutoUpdateManager(@version, options.test, @resourcePath, @config)
+ @autoUpdateManager = new AutoUpdateManager(
+ @version, options.test or options.benchmark or options.benchmarkTest, @resourcePath, @config
+ )
@applicationMenu = new ApplicationMenu(@version, @autoUpdateManager)
@atomProtocolHandler = new AtomProtocolHandler(@resourcePath, @safeMode)
@@ -103,23 +105,41 @@ class AtomApplication
Promise.all(windowsClosePromises).then(=> @disposable.dispose())
launch: (options) ->
- if options.pathsToOpen?.length > 0 or options.urlsToOpen?.length > 0 or options.test
+ if options.pathsToOpen?.length > 0 or options.urlsToOpen?.length > 0 or options.test or options.benchmark or options.benchmarkTest
@openWithOptions(options)
else
@loadState(options) or @openPath(options)
- openWithOptions: ({initialPaths, pathsToOpen, executedFrom, urlsToOpen, test, pidToKillWhenClosed, devMode, safeMode, newWindow, logFile, profileStartup, timeout, clearWindowState, addToLastWindow, env}) ->
+ openWithOptions: (options) ->
+ {
+ initialPaths, pathsToOpen, executedFrom, urlsToOpen, benchmark,
+ benchmarkTest, test, pidToKillWhenClosed, devMode, safeMode, newWindow,
+ logFile, profileStartup, timeout, clearWindowState, addToLastWindow, env
+ } = options
+
app.focus()
if test
- @runTests({headless: true, devMode, @resourcePath, executedFrom, pathsToOpen, logFile, timeout, env})
+ @runTests({
+ headless: true, devMode, @resourcePath, executedFrom, pathsToOpen,
+ logFile, timeout, env
+ })
+ else if benchmark or benchmarkTest
+ @runBenchmarks({headless: true, test: benchmarkTest, @resourcePath, executedFrom, pathsToOpen, timeout, env})
else if pathsToOpen.length > 0
- @openPaths({initialPaths, pathsToOpen, executedFrom, pidToKillWhenClosed, newWindow, devMode, safeMode, profileStartup, clearWindowState, addToLastWindow, env})
+ @openPaths({
+ initialPaths, pathsToOpen, executedFrom, pidToKillWhenClosed, newWindow,
+ devMode, safeMode, profileStartup, clearWindowState, addToLastWindow, env
+ })
else if urlsToOpen.length > 0
- @openUrl({urlToOpen, devMode, safeMode, env}) for urlToOpen in urlsToOpen
+ for urlToOpen in urlsToOpen
+ @openUrl({urlToOpen, devMode, safeMode, env})
else
# Always open an editor window if this is the first instance of Atom.
- @openPath({initialPaths, pidToKillWhenClosed, newWindow, devMode, safeMode, profileStartup, clearWindowState, addToLastWindow, env})
+ @openPath({
+ initialPaths, pidToKillWhenClosed, newWindow, devMode, safeMode, profileStartup,
+ clearWindowState, addToLastWindow, env
+ })
# Public: Removes the {AtomWindow} from the global window list.
removeWindow: (window) ->
@@ -280,6 +300,9 @@ class AtomApplication
@disposable.add ipcHelpers.on ipcMain, 'run-package-specs', (event, packageSpecPath) =>
@runTests({resourcePath: @devResourcePath, pathsToOpen: [packageSpecPath], headless: false})
+ @disposable.add ipcHelpers.on ipcMain, 'run-benchmarks', (event, benchmarksPath) =>
+ @runBenchmarks({resourcePath: @devResourcePath, pathsToOpen: [benchmarksPath], headless: false, test: false})
+
@disposable.add ipcHelpers.on ipcMain, 'command', (event, command) =>
@emit(command)
@@ -651,6 +674,29 @@ class AtomApplication
safeMode ?= false
new AtomWindow(this, @fileRecoveryService, {windowInitializationScript, resourcePath, headless, isSpec, devMode, testRunnerPath, legacyTestRunnerPath, testPaths, logFile, safeMode, env})
+ runBenchmarks: ({headless, test, resourcePath, executedFrom, pathsToOpen, env}) ->
+ if resourcePath isnt @resourcePath and not fs.existsSync(resourcePath)
+ resourcePath = @resourcePath
+
+ try
+ windowInitializationScript = require.resolve(path.resolve(@devResourcePath, 'src', 'initialize-benchmark-window'))
+ catch error
+ windowInitializationScript = require.resolve(path.resolve(__dirname, '..', '..', 'src', 'initialize-benchmark-window'))
+
+ benchmarkPaths = []
+ if pathsToOpen?
+ for pathToOpen in pathsToOpen
+ benchmarkPaths.push(path.resolve(executedFrom, fs.normalize(pathToOpen)))
+
+ if benchmarkPaths.length is 0
+ process.stderr.write 'Error: Specify at least one benchmark path.\n\n'
+ process.exit(1)
+
+ devMode = true
+ isSpec = true
+ safeMode = false
+ new AtomWindow(this, @fileRecoveryService, {windowInitializationScript, resourcePath, headless, test, isSpec, devMode, benchmarkPaths, safeMode, env})
+
resolveTestRunnerPath: (testPath) ->
FindParentDir ?= require 'find-parent-dir'
diff --git a/src/main-process/main.js b/src/main-process/main.js
index 28871b661..7ccd1a6c3 100644
--- a/src/main-process/main.js
+++ b/src/main-process/main.js
@@ -19,7 +19,7 @@ if (args.resourcePath) {
const stableResourcePath = path.dirname(path.dirname(__dirname))
const defaultRepositoryPath = path.join(electron.app.getPath('home'), 'github', 'atom')
- if (args.dev || args.test) {
+ if (args.dev || args.test || args.benchmark || args.benchmarkTest) {
if (process.env.ATOM_DEV_RESOURCE_PATH) {
resourcePath = process.env.ATOM_DEV_RESOURCE_PATH
} else if (fs.statSyncNoException(defaultRepositoryPath)) {
diff --git a/src/main-process/parse-command-line.js b/src/main-process/parse-command-line.js
index 07b2b49f8..68a18fa30 100644
--- a/src/main-process/parse-command-line.js
+++ b/src/main-process/parse-command-line.js
@@ -45,6 +45,8 @@ module.exports = function parseCommandLine (processArgs) {
'portable',
'Set portable mode. Copies the ~/.atom folder to be a sibling of the installed Atom location if a .atom folder is not already there.'
)
+ options.boolean('benchmark').describe('benchmark', 'Open a new window that runs the specified benchmarks.')
+ options.boolean('benchmark-test').describe('benchmark-test', 'Run a faster version of the benchmarks in headless mode.')
options.alias('t', 'test').boolean('t').describe('t', 'Run the specified specs and exit with error code on failures.')
options.alias('m', 'main-process').boolean('m').describe('m', 'Run the specified specs in the main process.')
options.string('timeout').describe(
@@ -78,6 +80,8 @@ module.exports = function parseCommandLine (processArgs) {
const addToLastWindow = args['add']
const safeMode = args['safe']
const pathsToOpen = args._
+ const benchmark = args['benchmark']
+ const benchmarkTest = args['benchmark-test']
const test = args['test']
const mainProcess = args['main-process']
const timeout = args['timeout']
@@ -132,10 +136,29 @@ module.exports = function parseCommandLine (processArgs) {
devResourcePath = normalizeDriveLetterName(devResourcePath)
return {
- resourcePath, devResourcePath, pathsToOpen, urlsToOpen, executedFrom, test,
- version, pidToKillWhenClosed, devMode, safeMode, newWindow, logFile, socketPath,
- userDataDir, profileStartup, timeout, setPortable, clearWindowState,
- addToLastWindow, mainProcess, env: process.env
+ resourcePath,
+ devResourcePath,
+ pathsToOpen,
+ urlsToOpen,
+ executedFrom,
+ test,
+ version,
+ pidToKillWhenClosed,
+ devMode,
+ safeMode,
+ newWindow,
+ logFile,
+ socketPath,
+ userDataDir,
+ profileStartup,
+ timeout,
+ setPortable,
+ clearWindowState,
+ addToLastWindow,
+ mainProcess,
+ benchmark,
+ benchmarkTest,
+ env: process.env
}
}
diff --git a/src/main-process/start.js b/src/main-process/start.js
index 125f3ed54..84ae9b8c2 100644
--- a/src/main-process/start.js
+++ b/src/main-process/start.js
@@ -57,7 +57,7 @@ module.exports = function start (resourcePath, startTime) {
if (args.userDataDir != null) {
app.setPath('userData', args.userDataDir)
- } else if (args.test) {
+ } else if (args.test || args.benchmark || args.benchmarkTest) {
app.setPath('userData', temp.mkdirSync('atom-test-data'))
}
diff --git a/src/native-compile-cache.js b/src/native-compile-cache.js
index 50fa71fc3..a9857fc0c 100644
--- a/src/native-compile-cache.js
+++ b/src/native-compile-cache.js
@@ -45,7 +45,7 @@ class NativeCompileCache {
Module.prototype._compile = function (content, filename) {
let moduleSelf = this
// remove shebang
- content = content.replace(/^\#\!.*/, '')
+ content = content.replace(/^#!.*/, '')
function require (path) {
return moduleSelf.require(path)
}
diff --git a/src/null-grammar.js b/src/null-grammar.js
index 0ca3f83f1..fe9c3889e 100644
--- a/src/null-grammar.js
+++ b/src/null-grammar.js
@@ -2,12 +2,39 @@
import {Disposable} from 'event-kit'
-export default Object.freeze({
+export default {
name: 'Null Grammar',
- scopeName: 'text.plain',
+ scopeName: 'text.plain.null-grammar',
+ scopeForId (id) {
+ if (id === -1 || id === -2) {
+ return this.scopeName
+ } else {
+ return null
+ }
+ },
+ startIdForScope (scopeName) {
+ if (scopeName === this.scopeName) {
+ return -1
+ } else {
+ return null
+ }
+ },
+ endIdForScope (scopeName) {
+ if (scopeName === this.scopeName) {
+ return -2
+ } else {
+ return null
+ }
+ },
+ tokenizeLine (text) {
+ return {
+ tags: [this.startIdForScope(this.scopeName), text.length, this.endIdForScope(this.scopeName)],
+ ruleStack: null
+ }
+ },
onDidUpdate (callback) {
return new Disposable(noop)
}
-})
+}
function noop () {}
diff --git a/src/register-default-commands.coffee b/src/register-default-commands.coffee
index 3ba50d497..8196d9237 100644
--- a/src/register-default-commands.coffee
+++ b/src/register-default-commands.coffee
@@ -54,6 +54,7 @@ module.exports = ({commandRegistry, commandInstaller, config, notificationManage
'application:open-your-stylesheet': -> ipcRenderer.send('command', 'application:open-your-stylesheet')
'application:open-license': -> @getModel().openLicense()
'window:run-package-specs': -> @runPackageSpecs()
+ 'window:run-benchmarks': -> @runBenchmarks()
'window:focus-next-pane': -> @getModel().activateNextPane()
'window:focus-previous-pane': -> @getModel().activatePreviousPane()
'window:focus-pane-above': -> @focusPaneViewAbove()
diff --git a/src/text-editor-presenter.coffee b/src/text-editor-presenter.coffee
index 580229b7a..bbefc91c0 100644
--- a/src/text-editor-presenter.coffee
+++ b/src/text-editor-presenter.coffee
@@ -9,7 +9,7 @@ class TextEditorPresenter
startBlinkingCursorsAfterDelay: null
stoppedScrollingTimeoutId: null
mouseWheelScreenRow: null
- overlayDimensions: {}
+ overlayDimensions: null
minimumReflowInterval: 200
constructor: (params) ->
@@ -31,6 +31,7 @@ class TextEditorPresenter
@lineDecorationsByScreenRow = {}
@lineNumberDecorationsByScreenRow = {}
@customGutterDecorationsByGutterName = {}
+ @overlayDimensions = {}
@observedBlockDecorations = new Set()
@invalidatedDimensionsByBlockDecoration = new Set()
@invalidateAllBlockDecorationsDimensions = false
diff --git a/src/text-editor.coffee b/src/text-editor.coffee
index 816db07fe..6907db8fe 100644
--- a/src/text-editor.coffee
+++ b/src/text-editor.coffee
@@ -2887,7 +2887,7 @@ class TextEditor extends Model
# whitespace.
usesSoftTabs: ->
for bufferRow in [0..@buffer.getLastRow()]
- continue if @tokenizedBuffer.tokenizedLineForRow(bufferRow).isComment()
+ continue if @tokenizedBuffer.tokenizedLines[bufferRow]?.isComment()
line = @buffer.lineForRow(bufferRow)
return true if line[0] is ' '
diff --git a/src/tokenized-buffer.coffee b/src/tokenized-buffer.coffee
index 80358f23d..ce56e0388 100644
--- a/src/tokenized-buffer.coffee
+++ b/src/tokenized-buffer.coffee
@@ -36,7 +36,6 @@ class TokenizedBuffer extends Model
@tokenIterator = new TokenIterator(this)
@disposables.add @buffer.registerTextDecorationLayer(this)
- @rootScopeDescriptor = new ScopeDescriptor(scopes: ['text.plain'])
@setGrammar(grammar ? NullGrammar)
@@ -95,14 +94,17 @@ class TokenizedBuffer extends Model
false
retokenizeLines: ->
- lastRow = @buffer.getLastRow()
@fullyTokenized = false
- @tokenizedLines = new Array(lastRow + 1)
+ @tokenizedLines = new Array(@buffer.getLineCount())
@invalidRows = []
- @invalidateRow(0)
+ if @largeFileMode or @grammar.name is 'Null Grammar'
+ @markTokenizationComplete()
+ else
+ @invalidateRow(0)
setVisible: (@visible) ->
- @tokenizeInBackground() if @visible
+ if @visible and @grammar.name isnt 'Null Grammar' and not @largeFileMode
+ @tokenizeInBackground()
getTabLength: -> @tabLength
@@ -117,12 +119,6 @@ class TokenizedBuffer extends Model
@tokenizeNextChunk() if @isAlive() and @buffer.isAlive()
tokenizeNextChunk: ->
- # Short circuit null grammar which can just use the placeholder tokens
- if (@grammar.name is 'Null Grammar') and @firstInvalidRow()?
- @invalidRows = []
- @markTokenizationComplete()
- return
-
rowsRemaining = @chunkSize
while @firstInvalidRow()? and rowsRemaining > 0
@@ -167,8 +163,6 @@ class TokenizedBuffer extends Model
return
invalidateRow: (row) ->
- return if @largeFileMode
-
@invalidRows.push(row)
@invalidRows.sort (a, b) -> a - b
@tokenizeInBackground()
@@ -189,18 +183,19 @@ class TokenizedBuffer extends Model
start = oldRange.start.row
end = oldRange.end.row
delta = newRange.end.row - oldRange.end.row
+ oldLineCount = oldRange.end.row - oldRange.start.row + 1
+ newLineCount = newRange.end.row - newRange.start.row + 1
@updateInvalidRows(start, end, delta)
previousEndStack = @stackForRow(end) # used in spill detection below
- if @largeFileMode or @grammar is NullGrammar
- newTokenizedLines = @buildPlaceholderTokenizedLinesForRows(start, end + delta)
+ if @largeFileMode or @grammar.name is 'Null Grammar'
+ _.spliceWithArray(@tokenizedLines, start, oldLineCount, new Array(newLineCount))
else
newTokenizedLines = @buildTokenizedLinesForRows(start, end + delta, @stackForRow(start - 1), @openScopesForRow(start))
- _.spliceWithArray(@tokenizedLines, start, end - start + 1, newTokenizedLines)
-
- newEndStack = @stackForRow(end + delta)
- if newEndStack and not _.isEqual(newEndStack, previousEndStack)
- @invalidateRow(end + delta + 1)
+ _.spliceWithArray(@tokenizedLines, start, oldLineCount, newTokenizedLines)
+ newEndStack = @stackForRow(end + delta)
+ if newEndStack and not _.isEqual(newEndStack, previousEndStack)
+ @invalidateRow(end + delta + 1)
isFoldableAtRow: (row) ->
if @largeFileMode
@@ -211,46 +206,39 @@ class TokenizedBuffer extends Model
# Returns a {Boolean} indicating whether the given buffer row starts
# a foldable row range due to the code's indentation patterns.
isFoldableCodeAtRow: (row) ->
- # Investigating an exception that's occurring here due to the line being
- # undefined. This should paper over the problem but we want to figure out
- # what is happening:
- tokenizedLine = @tokenizedLineForRow(row)
- @assert tokenizedLine?, "TokenizedLine is undefined", (error) =>
- error.metadata = {
- row: row
- rowCount: @tokenizedLines.length
- tokenizedBufferChangeCount: @changeCount
- bufferChangeCount: @buffer.changeCount
- }
-
- return false unless tokenizedLine?
-
- return false if @buffer.isRowBlank(row) or tokenizedLine.isComment()
- nextRow = @buffer.nextNonBlankRow(row)
- return false unless nextRow?
-
- @indentLevelForRow(nextRow) > @indentLevelForRow(row)
+ if 0 <= row <= @buffer.getLastRow()
+ nextRow = @buffer.nextNonBlankRow(row)
+ tokenizedLine = @tokenizedLines[row]
+ if @buffer.isRowBlank(row) or tokenizedLine?.isComment() or not nextRow?
+ false
+ else
+ @indentLevelForRow(nextRow) > @indentLevelForRow(row)
+ else
+ false
isFoldableCommentAtRow: (row) ->
previousRow = row - 1
nextRow = row + 1
- return false if nextRow > @buffer.getLastRow()
-
- (row is 0 or not @tokenizedLineForRow(previousRow).isComment()) and
- @tokenizedLineForRow(row).isComment() and
- @tokenizedLineForRow(nextRow).isComment()
+ if nextRow > @buffer.getLastRow()
+ false
+ else
+ Boolean(
+ not (@tokenizedLines[previousRow]?.isComment()) and
+ @tokenizedLines[row]?.isComment() and
+ @tokenizedLines[nextRow]?.isComment()
+ )
buildTokenizedLinesForRows: (startRow, endRow, startingStack, startingopenScopes) ->
ruleStack = startingStack
openScopes = startingopenScopes
stopTokenizingAt = startRow + @chunkSize
- tokenizedLines = for row in [startRow..endRow]
+ tokenizedLines = for row in [startRow..endRow] by 1
if (ruleStack or row is 0) and row < stopTokenizingAt
tokenizedLine = @buildTokenizedLineForRow(row, ruleStack, openScopes)
ruleStack = tokenizedLine.ruleStack
openScopes = @scopesFromTags(openScopes, tokenizedLine.tags)
else
- tokenizedLine = @buildPlaceholderTokenizedLineForRow(row, openScopes)
+ tokenizedLine = undefined
tokenizedLine
if endRow >= stopTokenizingAt
@@ -259,21 +247,6 @@ class TokenizedBuffer extends Model
tokenizedLines
- buildPlaceholderTokenizedLinesForRows: (startRow, endRow) ->
- @buildPlaceholderTokenizedLineForRow(row) for row in [startRow..endRow] by 1
-
- buildPlaceholderTokenizedLineForRow: (row) ->
- @buildPlaceholderTokenizedLineForRowWithText(row, @buffer.lineForRow(row))
-
- buildPlaceholderTokenizedLineForRowWithText: (row, text) ->
- if @grammar isnt NullGrammar
- openScopes = [@grammar.startIdForScope(@grammar.scopeName)]
- else
- openScopes = []
- tags = [text.length]
- lineEnding = @buffer.lineEndingForRow(row)
- new TokenizedLine({openScopes, text, tags, lineEnding, @tokenIterator})
-
buildTokenizedLineForRow: (row, ruleStack, openScopes) ->
@buildTokenizedLineForRowWithText(row, @buffer.lineForRow(row), ruleStack, openScopes)
@@ -283,8 +256,14 @@ class TokenizedBuffer extends Model
new TokenizedLine({openScopes, text, tags, ruleStack, lineEnding, @tokenIterator})
tokenizedLineForRow: (bufferRow) ->
- if 0 <= bufferRow < @tokenizedLines.length
- @tokenizedLines[bufferRow] ?= @buildPlaceholderTokenizedLineForRow(bufferRow)
+ if 0 <= bufferRow <= @buffer.getLastRow()
+ if tokenizedLine = @tokenizedLines[bufferRow]
+ tokenizedLine
+ else
+ text = @buffer.lineForRow(bufferRow)
+ lineEnding = @buffer.lineEndingForRow(bufferRow)
+ tags = [@grammar.startIdForScope(@grammar.scopeName), text.length, @grammar.endIdForScope(@grammar.scopeName)]
+ @tokenizedLines[bufferRow] = new TokenizedLine({openScopes: [], text, tags, lineEnding, @tokenIterator})
tokenizedLinesForRows: (startRow, endRow) ->
for row in [startRow..endRow] by 1
@@ -294,8 +273,7 @@ class TokenizedBuffer extends Model
@tokenizedLines[bufferRow]?.ruleStack
openScopesForRow: (bufferRow) ->
- if bufferRow > 0
- precedingLine = @tokenizedLineForRow(bufferRow - 1)
+ if precedingLine = @tokenizedLines[bufferRow - 1]
@scopesFromTags(precedingLine.openScopes, precedingLine.tags)
else
[]
@@ -448,7 +426,7 @@ class TokenizedBuffer extends Model
logLines: (start=0, end=@buffer.getLastRow()) ->
for row in [start..end]
- line = @tokenizedLineForRow(row).text
+ line = @tokenizedLines[row].text
console.log row, line, line.length
return
diff --git a/src/workspace-element.coffee b/src/workspace-element.coffee
index ab8e39532..be0af81ed 100644
--- a/src/workspace-element.coffee
+++ b/src/workspace-element.coffee
@@ -142,4 +142,13 @@ class WorkspaceElement extends HTMLElement
ipcRenderer.send('run-package-specs', specPath)
+ runBenchmarks: ->
+ if activePath = @workspace.getActivePaneItem()?.getPath?()
+ [projectPath] = @project.relativizePath(activePath)
+ else
+ [projectPath] = @project.getPaths()
+
+ if projectPath
+ ipcRenderer.send('run-benchmarks', path.join(projectPath, 'benchmarks'))
+
module.exports = WorkspaceElement = document.registerElement 'atom-workspace', prototype: WorkspaceElement.prototype