Merge pull request #12933 from atom/mb-remove-placeholder-tokenized-lines

Don't construct placeholder tokenized lines
Antonio Scandurra committed 2016-10-14 16:30:36 +02:00 (committed via GitHub)
7 changed files with 206 additions and 232 deletions
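In short: TokenizedBuffer no longer eagerly fills tokenizedLines with placeholder TokenizedLine objects. The array is now sparse (rows the background tokenizer has not reached are undefined), and tokenizedLineForRow builds a single-run placeholder on demand. A minimal CoffeeScript sketch of the caller-visible difference, assuming a partially tokenized tokenizedBuffer (the variable is hypothetical):

    # Direct indexing reflects tokenization progress: untokenized rows are holes.
    tokenizedBuffer.tokenizedLines[10]               # may be undefined
    tokenizedBuffer.tokenizedLines[10]?.isComment()  # guarded: an untokenized row reads as "not a comment"

    # The accessor still always answers for in-range rows, lazily caching a
    # placeholder line whose tags describe one run spanning the whole row:
    line = tokenizedBuffer.tokenizedLineForRow(10)
    line.tags  # e.g. [startScopeId, line.text.length, endScopeId]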

spec/text-editor-registry-spec.js

@@ -198,13 +198,13 @@ describe('TextEditorRegistry', function () {
       registry.maintainConfig(editor2)
       await initialPackageActivation
-      expect(editor.getRootScopeDescriptor().getScopesArray()).toEqual(['text.plain'])
+      expect(editor.getRootScopeDescriptor().getScopesArray()).toEqual(['text.plain.null-grammar'])
       expect(editor2.getRootScopeDescriptor().getScopesArray()).toEqual(['source.js'])
       expect(editor.getEncoding()).toBe('utf8')
       expect(editor2.getEncoding()).toBe('utf8')
-      atom.config.set('core.fileEncoding', 'utf16le', {scopeSelector: '.text.plain'})
+      atom.config.set('core.fileEncoding', 'utf16le', {scopeSelector: '.text.plain.null-grammar'})
       atom.config.set('core.fileEncoding', 'utf16be', {scopeSelector: '.source.js'})
       expect(editor.getEncoding()).toBe('utf16le')

spec/token-iterator-spec.coffee

@@ -29,7 +29,7 @@ describe "TokenIterator", ->
     })
     tokenizedBuffer.setGrammar(grammar)
-    tokenIterator = tokenizedBuffer.tokenizedLineForRow(1).getTokenIterator()
+    tokenIterator = tokenizedBuffer.tokenizedLines[1].getTokenIterator()
     tokenIterator.next()
     expect(tokenIterator.getBufferStart()).toBe 0

spec/tokenized-buffer-spec.coffee

@@ -1,3 +1,4 @@
+NullGrammar = require '../src/null-grammar'
 TokenizedBuffer = require '../src/tokenized-buffer'
 {Point} = TextBuffer = require 'text-buffer'
 _ = require 'underscore-plus'
@@ -32,15 +33,8 @@ describe "TokenizedBuffer", ->
         atom.packages.activatePackage('language-coffee-script')

       it "deserializes it searching among the buffers in the current project", ->
-        tokenizedBufferA = new TokenizedBuffer({
-          buffer, grammarRegistry: atom.grammars, packageManager: atom.packages,
-          assert: atom.assert, tabLength: 2,
-        })
-        tokenizedBufferB = TokenizedBuffer.deserialize(
-          JSON.parse(JSON.stringify(tokenizedBufferA.serialize())),
-          atom
-        )
+        tokenizedBufferA = new TokenizedBuffer({buffer, tabLength: 2})
+        tokenizedBufferB = TokenizedBuffer.deserialize(JSON.parse(JSON.stringify(tokenizedBufferA.serialize())), atom)
         expect(tokenizedBufferB.buffer).toBe(tokenizedBufferA.buffer)

     describe "when the underlying buffer has no path", ->
@@ -48,25 +42,14 @@ describe "TokenizedBuffer", ->
         buffer = atom.project.bufferForPathSync(null)

       it "deserializes it searching among the buffers in the current project", ->
-        tokenizedBufferA = new TokenizedBuffer({
-          buffer, grammarRegistry: atom.grammars, packageManager: atom.packages,
-          assert: atom.assert, tabLength: 2,
-        })
-        tokenizedBufferB = TokenizedBuffer.deserialize(
-          JSON.parse(JSON.stringify(tokenizedBufferA.serialize())),
-          atom
-        )
+        tokenizedBufferA = new TokenizedBuffer({buffer, tabLength: 2})
+        tokenizedBufferB = TokenizedBuffer.deserialize(JSON.parse(JSON.stringify(tokenizedBufferA.serialize())), atom)
         expect(tokenizedBufferB.buffer).toBe(tokenizedBufferA.buffer)

   describe "when the buffer is destroyed", ->
     beforeEach ->
       buffer = atom.project.bufferForPathSync('sample.js')
-      tokenizedBuffer = new TokenizedBuffer({
-        buffer, grammarRegistry: atom.grammars, packageManager: atom.packages,
-        assert: atom.assert, tabLength: 2,
-      })
-      tokenizedBuffer.setGrammar(atom.grammars.grammarForScopeName('source.js'))
+      tokenizedBuffer = new TokenizedBuffer({buffer, grammar: atom.grammars.grammarForScopeName('source.js'), tabLength: 2})
       startTokenizing(tokenizedBuffer)

     it "stops tokenization", ->
@@ -78,11 +61,7 @@ describe "TokenizedBuffer", ->
describe "when the buffer contains soft-tabs", ->
beforeEach ->
buffer = atom.project.bufferForPathSync('sample.js')
tokenizedBuffer = new TokenizedBuffer({
buffer, grammarRegistry: atom.grammars, packageManager: atom.packages,
assert: atom.assert, tabLength: 2,
})
tokenizedBuffer.setGrammar(atom.grammars.grammarForScopeName('source.js'))
tokenizedBuffer = new TokenizedBuffer({buffer, grammar: atom.grammars.grammarForScopeName('source.js'), tabLength: 2})
startTokenizing(tokenizedBuffer)
afterEach ->
@@ -90,32 +69,29 @@ describe "TokenizedBuffer", ->
       buffer.release()

     describe "on construction", ->
-      it "initially creates un-tokenized screen lines, then tokenizes lines chunk at a time in the background", ->
-        line0 = tokenizedBuffer.tokenizedLineForRow(0)
-        expect(line0.tokens).toEqual([value: line0.text, scopes: ['source.js']])
+      it "tokenizes lines chunk at a time in the background", ->
+        line0 = tokenizedBuffer.tokenizedLines[0]
+        expect(line0).toBeUndefined()

-        line11 = tokenizedBuffer.tokenizedLineForRow(11)
-        expect(line11.tokens).toEqual([value: "  return sort(Array.apply(this, arguments));", scopes: ['source.js']])
-
-        # background tokenization has not begun
-        expect(tokenizedBuffer.tokenizedLineForRow(0).ruleStack).toBeUndefined()
+        line11 = tokenizedBuffer.tokenizedLines[11]
+        expect(line11).toBeUndefined()

         # tokenize chunk 1
         advanceClock()
-        expect(tokenizedBuffer.tokenizedLineForRow(0).ruleStack?).toBeTruthy()
-        expect(tokenizedBuffer.tokenizedLineForRow(4).ruleStack?).toBeTruthy()
-        expect(tokenizedBuffer.tokenizedLineForRow(5).ruleStack?).toBeFalsy()
+        expect(tokenizedBuffer.tokenizedLines[0].ruleStack?).toBeTruthy()
+        expect(tokenizedBuffer.tokenizedLines[4].ruleStack?).toBeTruthy()
+        expect(tokenizedBuffer.tokenizedLines[5]).toBeUndefined()

         # tokenize chunk 2
         advanceClock()
-        expect(tokenizedBuffer.tokenizedLineForRow(5).ruleStack?).toBeTruthy()
-        expect(tokenizedBuffer.tokenizedLineForRow(9).ruleStack?).toBeTruthy()
-        expect(tokenizedBuffer.tokenizedLineForRow(10).ruleStack?).toBeFalsy()
+        expect(tokenizedBuffer.tokenizedLines[5].ruleStack?).toBeTruthy()
+        expect(tokenizedBuffer.tokenizedLines[9].ruleStack?).toBeTruthy()
+        expect(tokenizedBuffer.tokenizedLines[10]).toBeUndefined()

         # tokenize last chunk
         advanceClock()
-        expect(tokenizedBuffer.tokenizedLineForRow(10).ruleStack?).toBeTruthy()
-        expect(tokenizedBuffer.tokenizedLineForRow(12).ruleStack?).toBeTruthy()
+        expect(tokenizedBuffer.tokenizedLines[10].ruleStack?).toBeTruthy()
+        expect(tokenizedBuffer.tokenizedLines[12].ruleStack?).toBeTruthy()

     describe "when the buffer is partially tokenized", ->
       beforeEach ->
@@ -152,8 +128,8 @@ describe "TokenizedBuffer", ->
it "does not attempt to tokenize the lines in the change, and preserves the existing invalid row", ->
expect(tokenizedBuffer.firstInvalidRow()).toBe 5
buffer.setTextInRange([[6, 0], [7, 0]], "\n\n\n")
expect(tokenizedBuffer.tokenizedLineForRow(6).ruleStack?).toBeFalsy()
expect(tokenizedBuffer.tokenizedLineForRow(7).ruleStack?).toBeFalsy()
expect(tokenizedBuffer.tokenizedLines[6]).toBeUndefined()
expect(tokenizedBuffer.tokenizedLines[7]).toBeUndefined()
expect(tokenizedBuffer.firstInvalidRow()).toBe 5
describe "when the buffer is fully tokenized", ->
@@ -165,101 +141,101 @@ describe "TokenizedBuffer", ->
it "updates tokens to reflect the change", ->
buffer.setTextInRange([[0, 0], [2, 0]], "foo()\n7\n")
expect(tokenizedBuffer.tokenizedLineForRow(0).tokens[1]).toEqual(value: '(', scopes: ['source.js', 'meta.function-call.js', 'meta.arguments.js', 'punctuation.definition.arguments.begin.bracket.round.js'])
expect(tokenizedBuffer.tokenizedLineForRow(1).tokens[0]).toEqual(value: '7', scopes: ['source.js', 'constant.numeric.decimal.js'])
expect(tokenizedBuffer.tokenizedLines[0].tokens[1]).toEqual(value: '(', scopes: ['source.js', 'meta.function-call.js', 'meta.arguments.js', 'punctuation.definition.arguments.begin.bracket.round.js'])
expect(tokenizedBuffer.tokenizedLines[1].tokens[0]).toEqual(value: '7', scopes: ['source.js', 'constant.numeric.decimal.js'])
# line 2 is unchanged
expect(tokenizedBuffer.tokenizedLineForRow(2).tokens[1]).toEqual(value: 'if', scopes: ['source.js', 'keyword.control.js'])
expect(tokenizedBuffer.tokenizedLines[2].tokens[1]).toEqual(value: 'if', scopes: ['source.js', 'keyword.control.js'])
describe "when the change invalidates the tokenization of subsequent lines", ->
it "schedules the invalidated lines to be tokenized in the background", ->
buffer.insert([5, 30], '/* */')
buffer.insert([2, 0], '/*')
expect(tokenizedBuffer.tokenizedLineForRow(3).tokens[0].scopes).toEqual ['source.js']
expect(tokenizedBuffer.tokenizedLines[3].tokens[0].scopes).toEqual ['source.js']
advanceClock()
expect(tokenizedBuffer.tokenizedLineForRow(3).tokens[0].scopes).toEqual ['source.js', 'comment.block.js']
expect(tokenizedBuffer.tokenizedLineForRow(4).tokens[0].scopes).toEqual ['source.js', 'comment.block.js']
expect(tokenizedBuffer.tokenizedLineForRow(5).tokens[0].scopes).toEqual ['source.js', 'comment.block.js']
expect(tokenizedBuffer.tokenizedLines[3].tokens[0].scopes).toEqual ['source.js', 'comment.block.js']
expect(tokenizedBuffer.tokenizedLines[4].tokens[0].scopes).toEqual ['source.js', 'comment.block.js']
expect(tokenizedBuffer.tokenizedLines[5].tokens[0].scopes).toEqual ['source.js', 'comment.block.js']
it "resumes highlighting with the state of the previous line", ->
buffer.insert([0, 0], '/*')
buffer.insert([5, 0], '*/')
buffer.insert([1, 0], 'var ')
expect(tokenizedBuffer.tokenizedLineForRow(1).tokens[0].scopes).toEqual ['source.js', 'comment.block.js']
expect(tokenizedBuffer.tokenizedLines[1].tokens[0].scopes).toEqual ['source.js', 'comment.block.js']
describe "when lines are both updated and removed", ->
it "updates tokens to reflect the change", ->
buffer.setTextInRange([[1, 0], [3, 0]], "foo()")
# previous line 0 remains
expect(tokenizedBuffer.tokenizedLineForRow(0).tokens[0]).toEqual(value: 'var', scopes: ['source.js', 'storage.type.var.js'])
expect(tokenizedBuffer.tokenizedLines[0].tokens[0]).toEqual(value: 'var', scopes: ['source.js', 'storage.type.var.js'])
# previous line 3 should be combined with input to form line 1
expect(tokenizedBuffer.tokenizedLineForRow(1).tokens[0]).toEqual(value: 'foo', scopes: ['source.js', 'meta.function-call.js', 'entity.name.function.js'])
expect(tokenizedBuffer.tokenizedLineForRow(1).tokens[6]).toEqual(value: '=', scopes: ['source.js', 'keyword.operator.assignment.js'])
expect(tokenizedBuffer.tokenizedLines[1].tokens[0]).toEqual(value: 'foo', scopes: ['source.js', 'meta.function-call.js', 'entity.name.function.js'])
expect(tokenizedBuffer.tokenizedLines[1].tokens[6]).toEqual(value: '=', scopes: ['source.js', 'keyword.operator.assignment.js'])
# lines below deleted regions should be shifted upward
expect(tokenizedBuffer.tokenizedLineForRow(2).tokens[1]).toEqual(value: 'while', scopes: ['source.js', 'keyword.control.js'])
expect(tokenizedBuffer.tokenizedLineForRow(3).tokens[1]).toEqual(value: '=', scopes: ['source.js', 'keyword.operator.assignment.js'])
expect(tokenizedBuffer.tokenizedLineForRow(4).tokens[1]).toEqual(value: '<', scopes: ['source.js', 'keyword.operator.comparison.js'])
expect(tokenizedBuffer.tokenizedLines[2].tokens[1]).toEqual(value: 'while', scopes: ['source.js', 'keyword.control.js'])
expect(tokenizedBuffer.tokenizedLines[3].tokens[1]).toEqual(value: '=', scopes: ['source.js', 'keyword.operator.assignment.js'])
expect(tokenizedBuffer.tokenizedLines[4].tokens[1]).toEqual(value: '<', scopes: ['source.js', 'keyword.operator.comparison.js'])
describe "when the change invalidates the tokenization of subsequent lines", ->
it "schedules the invalidated lines to be tokenized in the background", ->
buffer.insert([5, 30], '/* */')
buffer.setTextInRange([[2, 0], [3, 0]], '/*')
expect(tokenizedBuffer.tokenizedLineForRow(2).tokens[0].scopes).toEqual ['source.js', 'comment.block.js', 'punctuation.definition.comment.js']
expect(tokenizedBuffer.tokenizedLineForRow(3).tokens[0].scopes).toEqual ['source.js']
expect(tokenizedBuffer.tokenizedLines[2].tokens[0].scopes).toEqual ['source.js', 'comment.block.js', 'punctuation.definition.comment.js']
expect(tokenizedBuffer.tokenizedLines[3].tokens[0].scopes).toEqual ['source.js']
advanceClock()
expect(tokenizedBuffer.tokenizedLineForRow(3).tokens[0].scopes).toEqual ['source.js', 'comment.block.js']
expect(tokenizedBuffer.tokenizedLineForRow(4).tokens[0].scopes).toEqual ['source.js', 'comment.block.js']
expect(tokenizedBuffer.tokenizedLines[3].tokens[0].scopes).toEqual ['source.js', 'comment.block.js']
expect(tokenizedBuffer.tokenizedLines[4].tokens[0].scopes).toEqual ['source.js', 'comment.block.js']
describe "when lines are both updated and inserted", ->
it "updates tokens to reflect the change", ->
buffer.setTextInRange([[1, 0], [2, 0]], "foo()\nbar()\nbaz()\nquux()")
# previous line 0 remains
expect(tokenizedBuffer.tokenizedLineForRow(0).tokens[0]).toEqual( value: 'var', scopes: ['source.js', 'storage.type.var.js'])
expect(tokenizedBuffer.tokenizedLines[0].tokens[0]).toEqual( value: 'var', scopes: ['source.js', 'storage.type.var.js'])
# 3 new lines inserted
expect(tokenizedBuffer.tokenizedLineForRow(1).tokens[0]).toEqual(value: 'foo', scopes: ['source.js', 'meta.function-call.js', 'entity.name.function.js'])
expect(tokenizedBuffer.tokenizedLineForRow(2).tokens[0]).toEqual(value: 'bar', scopes: ['source.js', 'meta.function-call.js', 'entity.name.function.js'])
expect(tokenizedBuffer.tokenizedLineForRow(3).tokens[0]).toEqual(value: 'baz', scopes: ['source.js', 'meta.function-call.js', 'entity.name.function.js'])
expect(tokenizedBuffer.tokenizedLines[1].tokens[0]).toEqual(value: 'foo', scopes: ['source.js', 'meta.function-call.js', 'entity.name.function.js'])
expect(tokenizedBuffer.tokenizedLines[2].tokens[0]).toEqual(value: 'bar', scopes: ['source.js', 'meta.function-call.js', 'entity.name.function.js'])
expect(tokenizedBuffer.tokenizedLines[3].tokens[0]).toEqual(value: 'baz', scopes: ['source.js', 'meta.function-call.js', 'entity.name.function.js'])
# previous line 2 is joined with quux() on line 4
expect(tokenizedBuffer.tokenizedLineForRow(4).tokens[0]).toEqual(value: 'quux', scopes: ['source.js', 'meta.function-call.js', 'entity.name.function.js'])
expect(tokenizedBuffer.tokenizedLineForRow(4).tokens[4]).toEqual(value: 'if', scopes: ['source.js', 'keyword.control.js'])
expect(tokenizedBuffer.tokenizedLines[4].tokens[0]).toEqual(value: 'quux', scopes: ['source.js', 'meta.function-call.js', 'entity.name.function.js'])
expect(tokenizedBuffer.tokenizedLines[4].tokens[4]).toEqual(value: 'if', scopes: ['source.js', 'keyword.control.js'])
# previous line 3 is pushed down to become line 5
expect(tokenizedBuffer.tokenizedLineForRow(5).tokens[3]).toEqual(value: '=', scopes: ['source.js', 'keyword.operator.assignment.js'])
expect(tokenizedBuffer.tokenizedLines[5].tokens[3]).toEqual(value: '=', scopes: ['source.js', 'keyword.operator.assignment.js'])
describe "when the change invalidates the tokenization of subsequent lines", ->
it "schedules the invalidated lines to be tokenized in the background", ->
buffer.insert([5, 30], '/* */')
buffer.insert([2, 0], '/*\nabcde\nabcder')
expect(tokenizedBuffer.tokenizedLineForRow(2).tokens[0].scopes).toEqual ['source.js', 'comment.block.js', 'punctuation.definition.comment.js']
expect(tokenizedBuffer.tokenizedLineForRow(3).tokens[0].scopes).toEqual ['source.js', 'comment.block.js']
expect(tokenizedBuffer.tokenizedLineForRow(4).tokens[0].scopes).toEqual ['source.js', 'comment.block.js']
expect(tokenizedBuffer.tokenizedLineForRow(5).tokens[0].scopes).toEqual ['source.js']
expect(tokenizedBuffer.tokenizedLines[2].tokens[0].scopes).toEqual ['source.js', 'comment.block.js', 'punctuation.definition.comment.js']
expect(tokenizedBuffer.tokenizedLines[3].tokens[0].scopes).toEqual ['source.js', 'comment.block.js']
expect(tokenizedBuffer.tokenizedLines[4].tokens[0].scopes).toEqual ['source.js', 'comment.block.js']
expect(tokenizedBuffer.tokenizedLines[5].tokens[0].scopes).toEqual ['source.js']
advanceClock() # tokenize invalidated lines in background
expect(tokenizedBuffer.tokenizedLineForRow(5).tokens[0].scopes).toEqual ['source.js', 'comment.block.js']
expect(tokenizedBuffer.tokenizedLineForRow(6).tokens[0].scopes).toEqual ['source.js', 'comment.block.js']
expect(tokenizedBuffer.tokenizedLineForRow(7).tokens[0].scopes).toEqual ['source.js', 'comment.block.js']
expect(tokenizedBuffer.tokenizedLineForRow(8).tokens[0].scopes).not.toBe ['source.js', 'comment.block.js']
expect(tokenizedBuffer.tokenizedLines[5].tokens[0].scopes).toEqual ['source.js', 'comment.block.js']
expect(tokenizedBuffer.tokenizedLines[6].tokens[0].scopes).toEqual ['source.js', 'comment.block.js']
expect(tokenizedBuffer.tokenizedLines[7].tokens[0].scopes).toEqual ['source.js', 'comment.block.js']
expect(tokenizedBuffer.tokenizedLines[8].tokens[0].scopes).not.toBe ['source.js', 'comment.block.js']
describe "when there is an insertion that is larger than the chunk size", ->
it "tokenizes the initial chunk synchronously, then tokenizes the remaining lines in the background", ->
commentBlock = _.multiplyString("// a comment\n", tokenizedBuffer.chunkSize + 2)
buffer.insert([0, 0], commentBlock)
expect(tokenizedBuffer.tokenizedLineForRow(0).ruleStack?).toBeTruthy()
expect(tokenizedBuffer.tokenizedLineForRow(4).ruleStack?).toBeTruthy()
expect(tokenizedBuffer.tokenizedLineForRow(5).ruleStack?).toBeFalsy()
expect(tokenizedBuffer.tokenizedLines[0].ruleStack?).toBeTruthy()
expect(tokenizedBuffer.tokenizedLines[4].ruleStack?).toBeTruthy()
expect(tokenizedBuffer.tokenizedLines[5]).toBeUndefined()
advanceClock()
expect(tokenizedBuffer.tokenizedLineForRow(5).ruleStack?).toBeTruthy()
expect(tokenizedBuffer.tokenizedLineForRow(6).ruleStack?).toBeTruthy()
expect(tokenizedBuffer.tokenizedLines[5].ruleStack?).toBeTruthy()
expect(tokenizedBuffer.tokenizedLines[6].ruleStack?).toBeTruthy()
it "does not break out soft tabs across a scope boundary", ->
waitsForPromise ->
@@ -284,11 +260,7 @@ describe "TokenizedBuffer", ->
     runs ->
       buffer = atom.project.bufferForPathSync('sample-with-tabs.coffee')
-      tokenizedBuffer = new TokenizedBuffer({
-        buffer, grammarRegistry: atom.grammars, packageManager: atom.packages,
-        assert: atom.assert, tabLength: 2,
-      })
-      tokenizedBuffer.setGrammar(atom.grammars.grammarForScopeName('source.coffee'))
+      tokenizedBuffer = new TokenizedBuffer({buffer, grammar: atom.grammars.grammarForScopeName('source.coffee'), tabLength: 2})
       startTokenizing(tokenizedBuffer)

   afterEach ->
@@ -352,7 +324,6 @@ describe "TokenizedBuffer", ->
       expect(tokenizedHandler.callCount).toBe(1)

     it "retokenizes the buffer", ->
       waitsForPromise ->
         atom.packages.activatePackage('language-ruby-on-rails')
@@ -362,14 +333,10 @@ describe "TokenizedBuffer", ->
       runs ->
         buffer = atom.project.bufferForPathSync()
         buffer.setText "<div class='name'><%= User.find(2).full_name %></div>"
-        tokenizedBuffer = new TokenizedBuffer({
-          buffer, grammarRegistry: atom.grammars, packageManager: atom.packages,
-          assert: atom.assert, tabLength: 2,
-        })
-        tokenizedBuffer.setGrammar(atom.grammars.selectGrammar('test.erb'))
+        tokenizedBuffer = new TokenizedBuffer({buffer, grammar: atom.grammars.selectGrammar('test.erb'), tabLength: 2})
         fullyTokenize(tokenizedBuffer)
-        {tokens} = tokenizedBuffer.tokenizedLineForRow(0)
+        {tokens} = tokenizedBuffer.tokenizedLines[0]
         expect(tokens[0]).toEqual value: "<div class='name'>", scopes: ["text.html.ruby"]

       waitsForPromise ->
@@ -377,7 +344,7 @@ describe "TokenizedBuffer", ->
       runs ->
         fullyTokenize(tokenizedBuffer)
-        {tokens} = tokenizedBuffer.tokenizedLineForRow(0)
+        {tokens} = tokenizedBuffer.tokenizedLines[0]
         expect(tokens[0]).toEqual value: '<', scopes: ["text.html.ruby", "meta.tag.block.any.html", "punctuation.definition.tag.begin.html"]

   describe ".tokenForPosition(position)", ->
@@ -387,11 +354,7 @@ describe "TokenizedBuffer", ->
it "returns the correct token (regression)", ->
buffer = atom.project.bufferForPathSync('sample.js')
tokenizedBuffer = new TokenizedBuffer({
buffer, grammarRegistry: atom.grammars, packageManager: atom.packages,
assert: atom.assert, tabLength: 2,
})
tokenizedBuffer.setGrammar(atom.grammars.grammarForScopeName('source.js'))
tokenizedBuffer = new TokenizedBuffer({buffer, grammar: atom.grammars.grammarForScopeName('source.js'), tabLength: 2})
fullyTokenize(tokenizedBuffer)
expect(tokenizedBuffer.tokenForPosition([1, 0]).scopes).toEqual ["source.js"]
expect(tokenizedBuffer.tokenForPosition([1, 1]).scopes).toEqual ["source.js"]
@@ -400,16 +363,12 @@ describe "TokenizedBuffer", ->
describe ".bufferRangeForScopeAtPosition(selector, position)", ->
beforeEach ->
buffer = atom.project.bufferForPathSync('sample.js')
tokenizedBuffer = new TokenizedBuffer({
buffer, grammarRegistry: atom.grammars, packageManager: atom.packages,
assert: atom.assert, tabLength: 2,
})
tokenizedBuffer.setGrammar(atom.grammars.grammarForScopeName('source.js'))
tokenizedBuffer = new TokenizedBuffer({buffer, grammar: atom.grammars.grammarForScopeName('source.js'), tabLength: 2})
fullyTokenize(tokenizedBuffer)
describe "when the selector does not match the token at the position", ->
it "returns a falsy value", ->
expect(tokenizedBuffer.bufferRangeForScopeAtPosition('.bogus', [0, 1])).toBeFalsy()
expect(tokenizedBuffer.bufferRangeForScopeAtPosition('.bogus', [0, 1])).toBeUndefined()
describe "when the selector matches a single token at the position", ->
it "returns the range covered by the token", ->
@@ -423,11 +382,7 @@ describe "TokenizedBuffer", ->
describe ".indentLevelForRow(row)", ->
beforeEach ->
buffer = atom.project.bufferForPathSync('sample.js')
tokenizedBuffer = new TokenizedBuffer({
buffer, grammarRegistry: atom.grammars, packageManager: atom.packages,
assert: atom.assert, tabLength: 2,
})
tokenizedBuffer.setGrammar(atom.grammars.grammarForScopeName('source.js'))
tokenizedBuffer = new TokenizedBuffer({buffer, grammar: atom.grammars.grammarForScopeName('source.js'), tabLength: 2})
fullyTokenize(tokenizedBuffer)
describe "when the line is non-empty", ->
@@ -469,7 +424,7 @@ describe "TokenizedBuffer", ->
         buffer.insert([12, 0], ' ')
         expect(tokenizedBuffer.indentLevelForRow(13)).toBe 2
-        expect(tokenizedBuffer.tokenizedLineForRow(14)).not.toBeDefined()
+        expect(tokenizedBuffer.tokenizedLines[14]).not.toBeDefined()

       it "updates the indentLevel of empty lines surrounding a change that inserts lines", ->
         buffer.insert([7, 0], '\n\n')
@@ -503,11 +458,7 @@ describe "TokenizedBuffer", ->
       buffer = atom.project.bufferForPathSync('sample.js')
       buffer.insert [10, 0], "  // multi-line\n  // comment\n  // block\n"
       buffer.insert [0, 0], "// multi-line\n// comment\n// block\n"
-      tokenizedBuffer = new TokenizedBuffer({
-        buffer, grammarRegistry: atom.grammars, packageManager: atom.packages,
-        assert: atom.assert, tabLength: 2,
-      })
-      tokenizedBuffer.setGrammar(atom.grammars.grammarForScopeName('source.js'))
+      tokenizedBuffer = new TokenizedBuffer({buffer, grammar: atom.grammars.grammarForScopeName('source.js'), tabLength: 2})
       fullyTokenize(tokenizedBuffer)

     it "includes the first line of multi-line comments", ->
@@ -570,40 +521,69 @@ describe "TokenizedBuffer", ->
       expect(tokenizedBuffer.isFoldableAtRow(7)).toBe false
       expect(tokenizedBuffer.isFoldableAtRow(8)).toBe false

+  describe "::tokenizedLineForRow(row)", ->
+    it "returns the tokenized line for a row, or a placeholder line if it hasn't been tokenized yet", ->
+      buffer = atom.project.bufferForPathSync('sample.js')
+      grammar = atom.grammars.grammarForScopeName('source.js')
+      tokenizedBuffer = new TokenizedBuffer({buffer, grammar, tabLength: 2})
+      line0 = buffer.lineForRow(0)
+
+      jsScopeStartId = grammar.startIdForScope(grammar.scopeName)
+      jsScopeEndId = grammar.endIdForScope(grammar.scopeName)
+      startTokenizing(tokenizedBuffer)
+      expect(tokenizedBuffer.tokenizedLines[0]).toBeUndefined()
+      expect(tokenizedBuffer.tokenizedLineForRow(0).text).toBe(line0)
+      expect(tokenizedBuffer.tokenizedLineForRow(0).tags).toEqual([jsScopeStartId, line0.length, jsScopeEndId])
+      advanceClock(1)
+      expect(tokenizedBuffer.tokenizedLines[0]).not.toBeUndefined()
+      expect(tokenizedBuffer.tokenizedLineForRow(0).text).toBe(line0)
+      expect(tokenizedBuffer.tokenizedLineForRow(0).tags).not.toEqual([jsScopeStartId, line0.length, jsScopeEndId])
+
+      nullScopeStartId = NullGrammar.startIdForScope(NullGrammar.scopeName)
+      nullScopeEndId = NullGrammar.endIdForScope(NullGrammar.scopeName)
+      tokenizedBuffer.setGrammar(NullGrammar)
+      startTokenizing(tokenizedBuffer)
+      expect(tokenizedBuffer.tokenizedLines[0]).toBeUndefined()
+      expect(tokenizedBuffer.tokenizedLineForRow(0).text).toBe(line0)
+      expect(tokenizedBuffer.tokenizedLineForRow(0).tags).toEqual([nullScopeStartId, line0.length, nullScopeEndId])
+      advanceClock(1)
+      expect(tokenizedBuffer.tokenizedLineForRow(0).text).toBe(line0)
+      expect(tokenizedBuffer.tokenizedLineForRow(0).tags).toEqual([nullScopeStartId, line0.length, nullScopeEndId])
+
+    it "returns undefined if the requested row is outside the buffer range", ->
+      buffer = atom.project.bufferForPathSync('sample.js')
+      grammar = atom.grammars.grammarForScopeName('source.js')
+      tokenizedBuffer = new TokenizedBuffer({buffer, grammar, tabLength: 2})
+      fullyTokenize(tokenizedBuffer)
+      expect(tokenizedBuffer.tokenizedLineForRow(999)).toBeUndefined()
+
   describe "when the buffer is configured with the null grammar", ->
-    it "uses the placeholder tokens and does not actually tokenize using the grammar", ->
-      spyOn(atom.grammars.nullGrammar, 'tokenizeLine').andCallThrough()
+    it "does not actually tokenize using the grammar", ->
+      spyOn(NullGrammar, 'tokenizeLine').andCallThrough()
       buffer = atom.project.bufferForPathSync('sample.will-use-the-null-grammar')
       buffer.setText('a\nb\nc')
-      tokenizedBuffer = new TokenizedBuffer({
-        buffer, grammarRegistry: atom.grammars, packageManager: atom.packages,
-        assert: atom.assert, tabLength: 2,
-      })
+      tokenizedBuffer = new TokenizedBuffer({buffer, tabLength: 2})
       tokenizeCallback = jasmine.createSpy('onDidTokenize')
       tokenizedBuffer.onDidTokenize(tokenizeCallback)

+      expect(tokenizedBuffer.tokenizedLines[0]).toBeUndefined()
+      expect(tokenizedBuffer.tokenizedLines[1]).toBeUndefined()
+      expect(tokenizedBuffer.tokenizedLines[2]).toBeUndefined()
+      expect(tokenizeCallback.callCount).toBe(0)
+      expect(NullGrammar.tokenizeLine).not.toHaveBeenCalled()
+
       fullyTokenize(tokenizedBuffer)
-      expect(tokenizeCallback.callCount).toBe 1
-      expect(atom.grammars.nullGrammar.tokenizeLine.callCount).toBe 0
-      expect(tokenizedBuffer.tokenizedLineForRow(0).tokens.length).toBe 1
-      expect(tokenizedBuffer.tokenizedLineForRow(0).tokens[0].value).toBe 'a'
-      expect(tokenizedBuffer.tokenizedLineForRow(1).tokens.length).toBe 1
-      expect(tokenizedBuffer.tokenizedLineForRow(1).tokens[0].value).toBe 'b'
-      expect(tokenizedBuffer.tokenizedLineForRow(2).tokens.length).toBe 1
-      expect(tokenizedBuffer.tokenizedLineForRow(2).tokens[0].value).toBe 'c'
+      expect(tokenizedBuffer.tokenizedLines[0]).toBeUndefined()
+      expect(tokenizedBuffer.tokenizedLines[1]).toBeUndefined()
+      expect(tokenizedBuffer.tokenizedLines[2]).toBeUndefined()
+      expect(tokenizeCallback.callCount).toBe(0)
+      expect(NullGrammar.tokenizeLine).not.toHaveBeenCalled()

   describe "text decoration layer API", ->
     describe "iterator", ->
       it "iterates over the syntactic scope boundaries", ->
         buffer = new TextBuffer(text: "var foo = 1 /*\nhello*/var bar = 2\n")
-        tokenizedBuffer = new TokenizedBuffer({
-          buffer, grammarRegistry: atom.grammars, packageManager: atom.packages,
-          assert: atom.assert, tabLength: 2,
-        })
-        tokenizedBuffer.setGrammar(atom.grammars.selectGrammar(".js"))
+        tokenizedBuffer = new TokenizedBuffer({buffer, grammar: atom.grammars.grammarForScopeName("source.js"), tabLength: 2})
         fullyTokenize(tokenizedBuffer)

         iterator = tokenizedBuffer.buildIterator()
@@ -655,11 +635,7 @@ describe "TokenizedBuffer", ->
       runs ->
         buffer = new TextBuffer(text: "# hello\n# world")
-        tokenizedBuffer = new TokenizedBuffer({
-          buffer, grammarRegistry: atom.grammars, packageManager: atom.packages,
-          assert: atom.assert, tabLength: 2,
-        })
-        tokenizedBuffer.setGrammar(atom.grammars.selectGrammar(".coffee"))
+        tokenizedBuffer = new TokenizedBuffer({buffer, grammar: atom.grammars.grammarForScopeName("source.coffee"), tabLength: 2})
         fullyTokenize(tokenizedBuffer)

         iterator = tokenizedBuffer.buildIterator()
@@ -688,11 +664,7 @@ describe "TokenizedBuffer", ->
       })
       buffer = new TextBuffer(text: 'start x\nend x\nx')
-      tokenizedBuffer = new TokenizedBuffer({
-        buffer, grammarRegistry: atom.grammars, packageManager: atom.packages,
-        assert: atom.assert, tabLength: 2,
-      })
-      tokenizedBuffer.setGrammar(grammar)
+      tokenizedBuffer = new TokenizedBuffer({buffer, grammar, tabLength: 2})
       fullyTokenize(tokenizedBuffer)

       iterator = tokenizedBuffer.buildIterator()

src/language-mode.coffee

@@ -148,19 +148,19 @@ class LanguageMode
     rowRange

   rowRangeForCommentAtBufferRow: (bufferRow) ->
-    return unless @editor.tokenizedBuffer.tokenizedLineForRow(bufferRow).isComment()
+    return unless @editor.tokenizedBuffer.tokenizedLines[bufferRow]?.isComment()

     startRow = bufferRow
     endRow = bufferRow

     if bufferRow > 0
       for currentRow in [bufferRow-1..0] by -1
-        break unless @editor.tokenizedBuffer.tokenizedLineForRow(currentRow).isComment()
+        break unless @editor.tokenizedBuffer.tokenizedLines[currentRow]?.isComment()
         startRow = currentRow

     if bufferRow < @buffer.getLastRow()
       for currentRow in [bufferRow+1..@buffer.getLastRow()] by 1
-        break unless @editor.tokenizedBuffer.tokenizedLineForRow(currentRow).isComment()
+        break unless @editor.tokenizedBuffer.tokenizedLines[currentRow]?.isComment()
         endRow = currentRow

     return [startRow, endRow] if startRow isnt endRow
@@ -189,7 +189,7 @@ class LanguageMode
   # row is a comment.
   isLineCommentedAtBufferRow: (bufferRow) ->
     return false unless 0 <= bufferRow <= @editor.getLastBufferRow()
-    @editor.tokenizedBuffer.tokenizedLineForRow(bufferRow).isComment()
+    @editor.tokenizedBuffer.tokenizedLines[bufferRow]?.isComment()

   # Find a row range for a 'paragraph' around specified bufferRow. A paragraph
   # is a block of text bounded by and empty line or a block of text that is not
@@ -246,10 +246,7 @@ class LanguageMode
     @suggestedIndentForTokenizedLineAtBufferRow(bufferRow, line, tokenizedLine, options)

   suggestedIndentForLineAtBufferRow: (bufferRow, line, options) ->
-    if @editor.largeFileMode or @editor.tokenizedBuffer.grammar is NullGrammar
-      tokenizedLine = @editor.tokenizedBuffer.buildPlaceholderTokenizedLineForRowWithText(bufferRow, line)
-    else
-      tokenizedLine = @editor.tokenizedBuffer.buildTokenizedLineForRowWithText(bufferRow, line)
+    tokenizedLine = @editor.tokenizedBuffer.buildTokenizedLineForRowWithText(bufferRow, line)
     @suggestedIndentForTokenizedLineAtBufferRow(bufferRow, line, tokenizedLine, options)

   suggestedIndentForTokenizedLineAtBufferRow: (bufferRow, line, tokenizedLine, options) ->
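The LanguageMode edits above all follow the same rewrite: code that relied on tokenizedLineForRow always returning a line (possibly a placeholder) now indexes the sparse array and null-guards, treating untokenized rows as non-comments. A sketch of the pattern, with row standing in for any buffer row:

    # before: a placeholder line was always returned, so no guard was needed
    @editor.tokenizedBuffer.tokenizedLineForRow(row).isComment()
    # after: the entry may be a hole in the sparse array
    @editor.tokenizedBuffer.tokenizedLines[row]?.isComment()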

src/null-grammar.js

@@ -2,12 +2,39 @@
 import {Disposable} from 'event-kit'

-export default Object.freeze({
+export default {
   name: 'Null Grammar',
-  scopeName: 'text.plain',
+  scopeName: 'text.plain.null-grammar',
+  scopeForId (id) {
+    if (id === -1 || id === -2) {
+      return this.scopeName
+    } else {
+      return null
+    }
+  },
+  startIdForScope (scopeName) {
+    if (scopeName === this.scopeName) {
+      return -1
+    } else {
+      return null
+    }
+  },
+  endIdForScope (scopeName) {
+    if (scopeName === this.scopeName) {
+      return -2
+    } else {
+      return null
+    }
+  },
+  tokenizeLine (text) {
+    return {
+      tags: [this.startIdForScope(this.scopeName), text.length, this.endIdForScope(this.scopeName)],
+      ruleStack: null
+    }
+  },
   onDidUpdate (callback) {
     return new Disposable(noop)
   }
-})
+}

 function noop () {}
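For illustration, this is how the new NullGrammar methods compose; a sketch based only on the code above (the require path is an assumption):

    NullGrammar = require './null-grammar'

    {tags, ruleStack} = NullGrammar.tokenizeLine('hello')
    # tags is [-1, 5, -2]: open text.plain.null-grammar, five plain characters, close it
    # ruleStack is null, because the null grammar carries no parser state
    NullGrammar.scopeForId(-1)                              # => 'text.plain.null-grammar'
    NullGrammar.startIdForScope('text.plain.null-grammar')  # => -1
    NullGrammar.endIdForScope('text.plain.null-grammar')    # => -2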

src/text-editor.coffee

@@ -2868,7 +2868,7 @@ class TextEditor extends Model
   # whitespace.
   usesSoftTabs: ->
     for bufferRow in [0..@buffer.getLastRow()]
-      continue if @tokenizedBuffer.tokenizedLineForRow(bufferRow).isComment()
+      continue if @tokenizedBuffer.tokenizedLines[bufferRow]?.isComment()

       line = @buffer.lineForRow(bufferRow)
       return true if line[0] is ' '

src/tokenized-buffer.coffee

@@ -36,7 +36,6 @@ class TokenizedBuffer extends Model
     @tokenIterator = new TokenIterator(this)
     @disposables.add @buffer.registerTextDecorationLayer(this)
-    @rootScopeDescriptor = new ScopeDescriptor(scopes: ['text.plain'])
     @setGrammar(grammar ? NullGrammar)
@@ -95,14 +94,17 @@ class TokenizedBuffer extends Model
     false

   retokenizeLines: ->
-    lastRow = @buffer.getLastRow()
     @fullyTokenized = false
-    @tokenizedLines = new Array(lastRow + 1)
+    @tokenizedLines = new Array(@buffer.getLineCount())
     @invalidRows = []
-    @invalidateRow(0)
+    if @largeFileMode or @grammar.name is 'Null Grammar'
+      @markTokenizationComplete()
+    else
+      @invalidateRow(0)

   setVisible: (@visible) ->
-    @tokenizeInBackground() if @visible
+    if @visible and @grammar.name isnt 'Null Grammar' and not @largeFileMode
+      @tokenizeInBackground()

   getTabLength: -> @tabLength
@@ -117,12 +119,6 @@ class TokenizedBuffer extends Model
       @tokenizeNextChunk() if @isAlive() and @buffer.isAlive()

   tokenizeNextChunk: ->
-    # Short circuit null grammar which can just use the placeholder tokens
-    if (@grammar.name is 'Null Grammar') and @firstInvalidRow()?
-      @invalidRows = []
-      @markTokenizationComplete()
-      return
-
     rowsRemaining = @chunkSize

     while @firstInvalidRow()? and rowsRemaining > 0
@@ -167,8 +163,6 @@ class TokenizedBuffer extends Model
       return

   invalidateRow: (row) ->
-    return if @largeFileMode
-
     @invalidRows.push(row)
     @invalidRows.sort (a, b) -> a - b
     @tokenizeInBackground()
@@ -189,18 +183,19 @@ class TokenizedBuffer extends Model
     start = oldRange.start.row
     end = oldRange.end.row
     delta = newRange.end.row - oldRange.end.row
+    oldLineCount = oldRange.end.row - oldRange.start.row + 1
+    newLineCount = newRange.end.row - newRange.start.row + 1

     @updateInvalidRows(start, end, delta)
     previousEndStack = @stackForRow(end) # used in spill detection below
-    if @largeFileMode or @grammar is NullGrammar
-      newTokenizedLines = @buildPlaceholderTokenizedLinesForRows(start, end + delta)
+    if @largeFileMode or @grammar.name is 'Null Grammar'
+      _.spliceWithArray(@tokenizedLines, start, oldLineCount, new Array(newLineCount))
     else
       newTokenizedLines = @buildTokenizedLinesForRows(start, end + delta, @stackForRow(start - 1), @openScopesForRow(start))
-    _.spliceWithArray(@tokenizedLines, start, end - start + 1, newTokenizedLines)
-    newEndStack = @stackForRow(end + delta)
-    if newEndStack and not _.isEqual(newEndStack, previousEndStack)
-      @invalidateRow(end + delta + 1)
+      _.spliceWithArray(@tokenizedLines, start, oldLineCount, newTokenizedLines)
+      newEndStack = @stackForRow(end + delta)
+      if newEndStack and not _.isEqual(newEndStack, previousEndStack)
+        @invalidateRow(end + delta + 1)

   isFoldableAtRow: (row) ->
     if @largeFileMode
@@ -211,46 +206,39 @@ class TokenizedBuffer extends Model
   # Returns a {Boolean} indicating whether the given buffer row starts
   # a a foldable row range due to the code's indentation patterns.
   isFoldableCodeAtRow: (row) ->
-    # Investigating an exception that's occurring here due to the line being
-    # undefined. This should paper over the problem but we want to figure out
-    # what is happening:
-    tokenizedLine = @tokenizedLineForRow(row)
-    @assert tokenizedLine?, "TokenizedLine is undefined", (error) =>
-      error.metadata = {
-        row: row
-        rowCount: @tokenizedLines.length
-        tokenizedBufferChangeCount: @changeCount
-        bufferChangeCount: @buffer.changeCount
-      }
-
-    return false unless tokenizedLine?
-    return false if @buffer.isRowBlank(row) or tokenizedLine.isComment()
-    nextRow = @buffer.nextNonBlankRow(row)
-    return false unless nextRow?
-
-    @indentLevelForRow(nextRow) > @indentLevelForRow(row)
+    if 0 <= row <= @buffer.getLastRow()
+      nextRow = @buffer.nextNonBlankRow(row)
+      tokenizedLine = @tokenizedLines[row]
+      if @buffer.isRowBlank(row) or tokenizedLine?.isComment() or not nextRow?
+        false
+      else
+        @indentLevelForRow(nextRow) > @indentLevelForRow(row)
+    else
+      false

   isFoldableCommentAtRow: (row) ->
     previousRow = row - 1
     nextRow = row + 1
-    return false if nextRow > @buffer.getLastRow()
-
-    (row is 0 or not @tokenizedLineForRow(previousRow).isComment()) and
-      @tokenizedLineForRow(row).isComment() and
-      @tokenizedLineForRow(nextRow).isComment()
+    if nextRow > @buffer.getLastRow()
+      false
+    else
+      Boolean(
+        not (@tokenizedLines[previousRow]?.isComment()) and
+        @tokenizedLines[row]?.isComment() and
+        @tokenizedLines[nextRow]?.isComment()
+      )

   buildTokenizedLinesForRows: (startRow, endRow, startingStack, startingopenScopes) ->
     ruleStack = startingStack
     openScopes = startingopenScopes
     stopTokenizingAt = startRow + @chunkSize
-    tokenizedLines = for row in [startRow..endRow]
+    tokenizedLines = for row in [startRow..endRow] by 1
       if (ruleStack or row is 0) and row < stopTokenizingAt
         tokenizedLine = @buildTokenizedLineForRow(row, ruleStack, openScopes)
         ruleStack = tokenizedLine.ruleStack
         openScopes = @scopesFromTags(openScopes, tokenizedLine.tags)
       else
-        tokenizedLine = @buildPlaceholderTokenizedLineForRow(row, openScopes)
+        tokenizedLine = undefined
       tokenizedLine

     if endRow >= stopTokenizingAt
@@ -259,21 +247,6 @@ class TokenizedBuffer extends Model
     tokenizedLines

-  buildPlaceholderTokenizedLinesForRows: (startRow, endRow) ->
-    @buildPlaceholderTokenizedLineForRow(row) for row in [startRow..endRow] by 1
-
-  buildPlaceholderTokenizedLineForRow: (row) ->
-    @buildPlaceholderTokenizedLineForRowWithText(row, @buffer.lineForRow(row))
-
-  buildPlaceholderTokenizedLineForRowWithText: (row, text) ->
-    if @grammar isnt NullGrammar
-      openScopes = [@grammar.startIdForScope(@grammar.scopeName)]
-    else
-      openScopes = []
-    tags = [text.length]
-    lineEnding = @buffer.lineEndingForRow(row)
-    new TokenizedLine({openScopes, text, tags, lineEnding, @tokenIterator})
-
   buildTokenizedLineForRow: (row, ruleStack, openScopes) ->
     @buildTokenizedLineForRowWithText(row, @buffer.lineForRow(row), ruleStack, openScopes)
@@ -283,8 +256,14 @@ class TokenizedBuffer extends Model
     new TokenizedLine({openScopes, text, tags, ruleStack, lineEnding, @tokenIterator})

   tokenizedLineForRow: (bufferRow) ->
-    if 0 <= bufferRow < @tokenizedLines.length
-      @tokenizedLines[bufferRow] ?= @buildPlaceholderTokenizedLineForRow(bufferRow)
+    if 0 <= bufferRow <= @buffer.getLastRow()
+      if tokenizedLine = @tokenizedLines[bufferRow]
+        tokenizedLine
+      else
+        text = @buffer.lineForRow(bufferRow)
+        lineEnding = @buffer.lineEndingForRow(bufferRow)
+        tags = [@grammar.startIdForScope(@grammar.scopeName), text.length, @grammar.endIdForScope(@grammar.scopeName)]
+        @tokenizedLines[bufferRow] = new TokenizedLine({openScopes: [], text, tags, lineEnding, @tokenIterator})

   tokenizedLinesForRows: (startRow, endRow) ->
     for row in [startRow..endRow] by 1
@@ -294,8 +273,7 @@ class TokenizedBuffer extends Model
     @tokenizedLines[bufferRow]?.ruleStack

   openScopesForRow: (bufferRow) ->
-    if bufferRow > 0
-      precedingLine = @tokenizedLineForRow(bufferRow - 1)
+    if precedingLine = @tokenizedLines[bufferRow - 1]
       @scopesFromTags(precedingLine.openScopes, precedingLine.tags)
     else
       []
@@ -448,7 +426,7 @@ class TokenizedBuffer extends Model
   logLines: (start=0, end=@buffer.getLastRow()) ->
     for row in [start..end]
-      line = @tokenizedLineForRow(row).text
+      line = @tokenizedLines[row].text
       console.log row, line, line.length
     return
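Worked example of the splice arithmetic in bufferDidChange above, assuming an edit that replaces buffer rows 2..4 with two new rows (oldRange 2..4, newRange 2..3):

    oldLineCount = 4 - 2 + 1  # 3 stale entries to remove
    newLineCount = 3 - 2 + 1  # 2 entries to insert
    # Null-grammar / large-file path: splice in holes rather than placeholder lines;
    # the new rows stay untokenized until tokenizedLineForRow is asked for one.
    _.spliceWithArray(@tokenizedLines, 2, 3, new Array(2))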