From d20372a35f00b7cce786627011ae96772f0a08b8 Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Mon, 10 Oct 2016 20:59:39 -0700 Subject: [PATCH 01/11] Start on removing placeholder lines in TokenizedBuffer --- spec/tokenized-buffer-spec.coffee | 24 +++++------- src/text-editor.coffee | 2 +- src/tokenized-buffer-iterator.coffee | 18 ++++++--- src/tokenized-buffer.coffee | 58 +++++++++------------------- 4 files changed, 41 insertions(+), 61 deletions(-) diff --git a/spec/tokenized-buffer-spec.coffee b/spec/tokenized-buffer-spec.coffee index ad9fa0ee7..5a1eabe3d 100644 --- a/spec/tokenized-buffer-spec.coffee +++ b/spec/tokenized-buffer-spec.coffee @@ -2,7 +2,7 @@ TokenizedBuffer = require '../src/tokenized-buffer' {Point} = TextBuffer = require 'text-buffer' _ = require 'underscore-plus' -describe "TokenizedBuffer", -> +fdescribe "TokenizedBuffer", -> [tokenizedBuffer, buffer] = [] beforeEach -> @@ -90,27 +90,24 @@ describe "TokenizedBuffer", -> buffer.release() describe "on construction", -> - it "initially creates un-tokenized screen lines, then tokenizes lines chunk at a time in the background", -> + it "tokenizes lines chunk at a time in the background", -> line0 = tokenizedBuffer.tokenizedLineForRow(0) - expect(line0.tokens).toEqual([value: line0.text, scopes: ['source.js']]) + expect(line0).toBe(undefined) line11 = tokenizedBuffer.tokenizedLineForRow(11) - expect(line11.tokens).toEqual([value: " return sort(Array.apply(this, arguments));", scopes: ['source.js']]) - - # background tokenization has not begun - expect(tokenizedBuffer.tokenizedLineForRow(0).ruleStack).toBeUndefined() + expect(line11).toBe(undefined) # tokenize chunk 1 advanceClock() expect(tokenizedBuffer.tokenizedLineForRow(0).ruleStack?).toBeTruthy() expect(tokenizedBuffer.tokenizedLineForRow(4).ruleStack?).toBeTruthy() - expect(tokenizedBuffer.tokenizedLineForRow(5).ruleStack?).toBeFalsy() + expect(tokenizedBuffer.tokenizedLineForRow(5)).toBe(undefined) # tokenize chunk 2 advanceClock() expect(tokenizedBuffer.tokenizedLineForRow(5).ruleStack?).toBeTruthy() expect(tokenizedBuffer.tokenizedLineForRow(9).ruleStack?).toBeTruthy() - expect(tokenizedBuffer.tokenizedLineForRow(10).ruleStack?).toBeFalsy() + expect(tokenizedBuffer.tokenizedLineForRow(10)).toBe(undefined) # tokenize last chunk advanceClock() @@ -588,12 +585,9 @@ describe "TokenizedBuffer", -> expect(tokenizeCallback.callCount).toBe 1 expect(atom.grammars.nullGrammar.tokenizeLine.callCount).toBe 0 - expect(tokenizedBuffer.tokenizedLineForRow(0).tokens.length).toBe 1 - expect(tokenizedBuffer.tokenizedLineForRow(0).tokens[0].value).toBe 'a' - expect(tokenizedBuffer.tokenizedLineForRow(1).tokens.length).toBe 1 - expect(tokenizedBuffer.tokenizedLineForRow(1).tokens[0].value).toBe 'b' - expect(tokenizedBuffer.tokenizedLineForRow(2).tokens.length).toBe 1 - expect(tokenizedBuffer.tokenizedLineForRow(2).tokens[0].value).toBe 'c' + expect(tokenizedBuffer.tokenizedLineForRow(0)).toBe null + expect(tokenizedBuffer.tokenizedLineForRow(1)).toBe null + expect(tokenizedBuffer.tokenizedLineForRow(2)).toBe null describe "text decoration layer API", -> describe "iterator", -> diff --git a/src/text-editor.coffee b/src/text-editor.coffee index 02cce3daf..ca1f1a938 100644 --- a/src/text-editor.coffee +++ b/src/text-editor.coffee @@ -2868,7 +2868,7 @@ class TextEditor extends Model # whitespace. 
usesSoftTabs: -> for bufferRow in [0..@buffer.getLastRow()] - continue if @tokenizedBuffer.tokenizedLineForRow(bufferRow).isComment() + continue if @tokenizedBuffer.tokenizedLineForRow(bufferRow)?.isComment() line = @buffer.lineForRow(bufferRow) return true if line[0] is ' ' diff --git a/src/tokenized-buffer-iterator.coffee b/src/tokenized-buffer-iterator.coffee index 23b72d5a9..79217af5b 100644 --- a/src/tokenized-buffer-iterator.coffee +++ b/src/tokenized-buffer-iterator.coffee @@ -1,5 +1,7 @@ {Point} = require 'text-buffer' +EMPTY = Object.freeze([]) + module.exports = class TokenizedBufferIterator constructor: (@tokenizedBuffer) -> @@ -12,11 +14,17 @@ class TokenizedBufferIterator @closeTags = [] @tagIndex = null - currentLine = @tokenizedBuffer.tokenizedLineForRow(position.row) - @currentTags = currentLine.tags - @currentLineOpenTags = currentLine.openScopes - @currentLineLength = currentLine.text.length - @containingTags = @currentLineOpenTags.map (id) => @tokenizedBuffer.grammar.scopeForId(id) + if currentLine = @tokenizedBuffer.tokenizedLineForRow(position.row) + @currentTags = currentLine.tags + @currentLineOpenTags = currentLine.openScopes + @currentLineLength = currentLine.text.length + @containingTags = @currentLineOpenTags.map (id) => @tokenizedBuffer.grammar.scopeForId(id) + else + @currentTags = EMPTY + @currentLineOpenTags = EMPTY + @currentLineLength = @tokenizedBuffer.buffer.lineLengthForRow(position.row) + @containingTags = [] + currentColumn = 0 for tag, index in @currentTags diff --git a/src/tokenized-buffer.coffee b/src/tokenized-buffer.coffee index 80358f23d..d34e9ce68 100644 --- a/src/tokenized-buffer.coffee +++ b/src/tokenized-buffer.coffee @@ -211,18 +211,7 @@ class TokenizedBuffer extends Model # Returns a {Boolean} indicating whether the given buffer row starts # a a foldable row range due to the code's indentation patterns. isFoldableCodeAtRow: (row) -> - # Investigating an exception that's occurring here due to the line being - # undefined. This should paper over the problem but we want to figure out - # what is happening: tokenizedLine = @tokenizedLineForRow(row) - @assert tokenizedLine?, "TokenizedLine is undefined", (error) => - error.metadata = { - row: row - rowCount: @tokenizedLines.length - tokenizedBufferChangeCount: @changeCount - bufferChangeCount: @buffer.changeCount - } - return false unless tokenizedLine? 
return false if @buffer.isRowBlank(row) or tokenizedLine.isComment() @@ -236,21 +225,21 @@ class TokenizedBuffer extends Model nextRow = row + 1 return false if nextRow > @buffer.getLastRow() - (row is 0 or not @tokenizedLineForRow(previousRow).isComment()) and - @tokenizedLineForRow(row).isComment() and - @tokenizedLineForRow(nextRow).isComment() + (not @tokenizedLineForRow(previousRow)?.isComment()) and + @tokenizedLineForRow(row)?.isComment() and + @tokenizedLineForRow(nextRow)?.isComment() buildTokenizedLinesForRows: (startRow, endRow, startingStack, startingopenScopes) -> ruleStack = startingStack openScopes = startingopenScopes stopTokenizingAt = startRow + @chunkSize - tokenizedLines = for row in [startRow..endRow] + tokenizedLines = for row in [startRow..endRow] by 1 if (ruleStack or row is 0) and row < stopTokenizingAt tokenizedLine = @buildTokenizedLineForRow(row, ruleStack, openScopes) ruleStack = tokenizedLine.ruleStack openScopes = @scopesFromTags(openScopes, tokenizedLine.tags) else - tokenizedLine = @buildPlaceholderTokenizedLineForRow(row, openScopes) + tokenizedLine = null tokenizedLine if endRow >= stopTokenizingAt @@ -260,19 +249,7 @@ class TokenizedBuffer extends Model tokenizedLines buildPlaceholderTokenizedLinesForRows: (startRow, endRow) -> - @buildPlaceholderTokenizedLineForRow(row) for row in [startRow..endRow] by 1 - - buildPlaceholderTokenizedLineForRow: (row) -> - @buildPlaceholderTokenizedLineForRowWithText(row, @buffer.lineForRow(row)) - - buildPlaceholderTokenizedLineForRowWithText: (row, text) -> - if @grammar isnt NullGrammar - openScopes = [@grammar.startIdForScope(@grammar.scopeName)] - else - openScopes = [] - tags = [text.length] - lineEnding = @buffer.lineEndingForRow(row) - new TokenizedLine({openScopes, text, tags, lineEnding, @tokenIterator}) + null for row in [startRow..endRow] by 1 buildTokenizedLineForRow: (row, ruleStack, openScopes) -> @buildTokenizedLineForRowWithText(row, @buffer.lineForRow(row), ruleStack, openScopes) @@ -283,8 +260,7 @@ class TokenizedBuffer extends Model new TokenizedLine({openScopes, text, tags, ruleStack, lineEnding, @tokenIterator}) tokenizedLineForRow: (bufferRow) -> - if 0 <= bufferRow < @tokenizedLines.length - @tokenizedLines[bufferRow] ?= @buildPlaceholderTokenizedLineForRow(bufferRow) + @tokenizedLines[bufferRow] tokenizedLinesForRows: (startRow, endRow) -> for row in [startRow..endRow] by 1 @@ -366,16 +342,18 @@ class TokenizedBuffer extends Model scopeDescriptorForPosition: (position) -> {row, column} = @buffer.clipPosition(Point.fromObject(position)) - iterator = @tokenizedLineForRow(row).getTokenIterator() - while iterator.next() - if iterator.getBufferEnd() > column - scopes = iterator.getScopes() - break + if iterator = @tokenizedLineForRow(row)?.getTokenIterator() + while iterator.next() + if iterator.getBufferEnd() > column + scopes = iterator.getScopes() + break - # rebuild scope of last token if we iterated off the end - unless scopes? - scopes = iterator.getScopes() - scopes.push(iterator.getScopeEnds().reverse()...) + # rebuild scope of last token if we iterated off the end + unless scopes? + scopes = iterator.getScopes() + scopes.push(iterator.getScopeEnds().reverse()...) 
+ else + scopes = [] new ScopeDescriptor({scopes}) From 66510ae545d63949df4d123aec6bf25b6f6509d4 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Tue, 11 Oct 2016 12:48:17 +0200 Subject: [PATCH 02/11] Handle null grammars consistently, building placeholder lines on-demand Previously we were treating the local `NullGrammar` differently from `atom.grammars.nullGrammar`. These two grammars are conceptually the same, as the former was created to support creating editors without a grammar registry. To keep backwards-compatibility, we also build placeholder lines on-demand when calling `TokenizedBuffer.prototype.tokenizedLineForRow`. This ensures that packages relying on the internals of `TokenizedBuffer` to retrieve syntactic boundaries won't break when the null grammar is used or large file mode is on. --- spec/text-editor-registry-spec.js | 4 ++-- spec/tokenized-buffer-spec.coffee | 14 ++++++------- src/language-mode.coffee | 16 +++++++-------- src/null-grammar.js | 29 ++++++++++++++++++++++++++- src/tokenized-buffer-iterator.coffee | 17 +++++----------- src/tokenized-buffer.coffee | 30 +++++++++++++++++++--------- 6 files changed, 71 insertions(+), 39 deletions(-) diff --git a/spec/text-editor-registry-spec.js b/spec/text-editor-registry-spec.js index 86bb71a6f..51027e63c 100644 --- a/spec/text-editor-registry-spec.js +++ b/spec/text-editor-registry-spec.js @@ -198,13 +198,13 @@ describe('TextEditorRegistry', function () { registry.maintainConfig(editor2) await initialPackageActivation - expect(editor.getRootScopeDescriptor().getScopesArray()).toEqual(['text.plain']) + expect(editor.getRootScopeDescriptor().getScopesArray()).toEqual(['text.plain.null-grammar']) expect(editor2.getRootScopeDescriptor().getScopesArray()).toEqual(['source.js']) expect(editor.getEncoding()).toBe('utf8') expect(editor2.getEncoding()).toBe('utf8') - atom.config.set('core.fileEncoding', 'utf16le', {scopeSelector: '.text.plain'}) + atom.config.set('core.fileEncoding', 'utf16le', {scopeSelector: '.text.plain.null-grammar'}) atom.config.set('core.fileEncoding', 'utf16be', {scopeSelector: '.source.js'}) expect(editor.getEncoding()).toBe('utf16le') diff --git a/spec/tokenized-buffer-spec.coffee b/spec/tokenized-buffer-spec.coffee index 5a1eabe3d..cbbaba9d5 100644 --- a/spec/tokenized-buffer-spec.coffee +++ b/spec/tokenized-buffer-spec.coffee @@ -2,7 +2,7 @@ TokenizedBuffer = require '../src/tokenized-buffer' {Point} = TextBuffer = require 'text-buffer' _ = require 'underscore-plus' -fdescribe "TokenizedBuffer", -> +describe "TokenizedBuffer", -> [tokenizedBuffer, buffer] = [] beforeEach -> @@ -149,8 +149,8 @@ fdescribe "TokenizedBuffer", -> it "does not attempt to tokenize the lines in the change, and preserves the existing invalid row", -> expect(tokenizedBuffer.firstInvalidRow()).toBe 5 buffer.setTextInRange([[6, 0], [7, 0]], "\n\n\n") - expect(tokenizedBuffer.tokenizedLineForRow(6).ruleStack?).toBeFalsy() - expect(tokenizedBuffer.tokenizedLineForRow(7).ruleStack?).toBeFalsy() + expect(tokenizedBuffer.tokenizedLineForRow(6)).toBeFalsy() + expect(tokenizedBuffer.tokenizedLineForRow(7)).toBeFalsy() expect(tokenizedBuffer.firstInvalidRow()).toBe 5 describe "when the buffer is fully tokenized", -> @@ -252,7 +252,7 @@ fdescribe "TokenizedBuffer", -> buffer.insert([0, 0], commentBlock) expect(tokenizedBuffer.tokenizedLineForRow(0).ruleStack?).toBeTruthy() expect(tokenizedBuffer.tokenizedLineForRow(4).ruleStack?).toBeTruthy() - expect(tokenizedBuffer.tokenizedLineForRow(5).ruleStack?).toBeFalsy() + 
expect(tokenizedBuffer.tokenizedLineForRow(5)).toBeFalsy() advanceClock() expect(tokenizedBuffer.tokenizedLineForRow(5).ruleStack?).toBeTruthy() @@ -585,9 +585,9 @@ fdescribe "TokenizedBuffer", -> expect(tokenizeCallback.callCount).toBe 1 expect(atom.grammars.nullGrammar.tokenizeLine.callCount).toBe 0 - expect(tokenizedBuffer.tokenizedLineForRow(0)).toBe null - expect(tokenizedBuffer.tokenizedLineForRow(1)).toBe null - expect(tokenizedBuffer.tokenizedLineForRow(2)).toBe null + expect(tokenizedBuffer.tokenizedLineForRow(0)).toBeFalsy() + expect(tokenizedBuffer.tokenizedLineForRow(1)).toBeFalsy() + expect(tokenizedBuffer.tokenizedLineForRow(2)).toBeFalsy() describe "text decoration layer API", -> describe "iterator", -> diff --git a/src/language-mode.coffee b/src/language-mode.coffee index ad038d7db..20d54ae28 100644 --- a/src/language-mode.coffee +++ b/src/language-mode.coffee @@ -148,19 +148,19 @@ class LanguageMode rowRange rowRangeForCommentAtBufferRow: (bufferRow) -> - return unless @editor.tokenizedBuffer.tokenizedLineForRow(bufferRow).isComment() + return unless @editor.tokenizedBuffer.tokenizedLines[bufferRow]?.isComment() startRow = bufferRow endRow = bufferRow if bufferRow > 0 for currentRow in [bufferRow-1..0] by -1 - break unless @editor.tokenizedBuffer.tokenizedLineForRow(currentRow).isComment() + break unless @editor.tokenizedBuffer.tokenizedLines[currentRow]?.isComment() startRow = currentRow if bufferRow < @buffer.getLastRow() for currentRow in [bufferRow+1..@buffer.getLastRow()] by 1 - break unless @editor.tokenizedBuffer.tokenizedLineForRow(currentRow).isComment() + break unless @editor.tokenizedBuffer.tokenizedLines[currentRow]?.isComment() endRow = currentRow return [startRow, endRow] if startRow isnt endRow @@ -189,7 +189,7 @@ class LanguageMode # row is a comment. isLineCommentedAtBufferRow: (bufferRow) -> return false unless 0 <= bufferRow <= @editor.getLastBufferRow() - @editor.tokenizedBuffer.tokenizedLineForRow(bufferRow).isComment() + @editor.tokenizedBuffer.tokenizedLines[bufferRow]?.isComment() # Find a row range for a 'paragraph' around specified bufferRow. 
A paragraph # is a block of text bounded by and empty line or a block of text that is not @@ -246,10 +246,10 @@ class LanguageMode @suggestedIndentForTokenizedLineAtBufferRow(bufferRow, line, tokenizedLine, options) suggestedIndentForLineAtBufferRow: (bufferRow, line, options) -> - if @editor.largeFileMode or @editor.tokenizedBuffer.grammar is NullGrammar - tokenizedLine = @editor.tokenizedBuffer.buildPlaceholderTokenizedLineForRowWithText(bufferRow, line) - else - tokenizedLine = @editor.tokenizedBuffer.buildTokenizedLineForRowWithText(bufferRow, line) + tokenizedLine = @editor.tokenizedBuffer.buildTokenizedLineForRowWithText(bufferRow, line) + iterator = tokenizedLine.getTokenIterator() + iterator.next() + scopeDescriptor = new ScopeDescriptor(scopes: iterator.getScopes()) @suggestedIndentForTokenizedLineAtBufferRow(bufferRow, line, tokenizedLine, options) suggestedIndentForTokenizedLineAtBufferRow: (bufferRow, line, tokenizedLine, options) -> diff --git a/src/null-grammar.js b/src/null-grammar.js index 0ca3f83f1..01841346e 100644 --- a/src/null-grammar.js +++ b/src/null-grammar.js @@ -4,7 +4,34 @@ import {Disposable} from 'event-kit' export default Object.freeze({ name: 'Null Grammar', - scopeName: 'text.plain', + scopeName: 'text.plain.null-grammar', + scopeForId (id) { + if (id === -1 || id === -2) { + return this.scopeName + } else { + return null + } + }, + startIdForScope (scopeName) { + if (scopeName === this.scopeName) { + return -1 + } else { + return null + } + }, + endIdForScope (scopeName) { + if (scopeName === this.scopeName) { + return -2 + } else { + return null + } + }, + tokenizeLine (text) { + return { + tags: [this.startIdForScope(this.scopeName), text.length, this.endIdForScope(this.scopeName)], + ruleStack: null + } + }, onDidUpdate (callback) { return new Disposable(noop) } diff --git a/src/tokenized-buffer-iterator.coffee b/src/tokenized-buffer-iterator.coffee index 79217af5b..90e29fdfe 100644 --- a/src/tokenized-buffer-iterator.coffee +++ b/src/tokenized-buffer-iterator.coffee @@ -1,7 +1,5 @@ {Point} = require 'text-buffer' -EMPTY = Object.freeze([]) - module.exports = class TokenizedBufferIterator constructor: (@tokenizedBuffer) -> @@ -14,16 +12,11 @@ class TokenizedBufferIterator @closeTags = [] @tagIndex = null - if currentLine = @tokenizedBuffer.tokenizedLineForRow(position.row) - @currentTags = currentLine.tags - @currentLineOpenTags = currentLine.openScopes - @currentLineLength = currentLine.text.length - @containingTags = @currentLineOpenTags.map (id) => @tokenizedBuffer.grammar.scopeForId(id) - else - @currentTags = EMPTY - @currentLineOpenTags = EMPTY - @currentLineLength = @tokenizedBuffer.buffer.lineLengthForRow(position.row) - @containingTags = [] + currentLine = @tokenizedBuffer.tokenizedLineForRow(position.row) + @currentLineLength = currentLine.text.length + @currentLineOpenTags = currentLine.openScopes + @currentTags = currentLine.tags + @containingTags = @currentLineOpenTags.map (id) => @tokenizedBuffer.grammar.scopeForId(id) currentColumn = 0 diff --git a/src/tokenized-buffer.coffee b/src/tokenized-buffer.coffee index d34e9ce68..23b73dfb1 100644 --- a/src/tokenized-buffer.coffee +++ b/src/tokenized-buffer.coffee @@ -36,7 +36,6 @@ class TokenizedBuffer extends Model @tokenIterator = new TokenIterator(this) @disposables.add @buffer.registerTextDecorationLayer(this) - @rootScopeDescriptor = new ScopeDescriptor(scopes: ['text.plain']) @setGrammar(grammar ? 
NullGrammar) @@ -118,7 +117,8 @@ class TokenizedBuffer extends Model tokenizeNextChunk: -> # Short circuit null grammar which can just use the placeholder tokens - if (@grammar.name is 'Null Grammar') and @firstInvalidRow()? + if @grammar.name is 'Null Grammar' and @firstInvalidRow()? + @tokenizedLines = @buildPlaceholderTokenizedLinesForRows(0, @buffer.getLastRow()) @invalidRows = [] @markTokenizationComplete() return @@ -192,7 +192,7 @@ class TokenizedBuffer extends Model @updateInvalidRows(start, end, delta) previousEndStack = @stackForRow(end) # used in spill detection below - if @largeFileMode or @grammar is NullGrammar + if @largeFileMode or @grammar.name is 'Null Grammar' newTokenizedLines = @buildPlaceholderTokenizedLinesForRows(start, end + delta) else newTokenizedLines = @buildTokenizedLinesForRows(start, end + delta, @stackForRow(start - 1), @openScopesForRow(start)) @@ -234,12 +234,12 @@ class TokenizedBuffer extends Model openScopes = startingopenScopes stopTokenizingAt = startRow + @chunkSize tokenizedLines = for row in [startRow..endRow] by 1 - if (ruleStack or row is 0) and row < stopTokenizingAt + if row < stopTokenizingAt tokenizedLine = @buildTokenizedLineForRow(row, ruleStack, openScopes) ruleStack = tokenizedLine.ruleStack openScopes = @scopesFromTags(openScopes, tokenizedLine.tags) else - tokenizedLine = null + tokenizedLine = undefined tokenizedLine if endRow >= stopTokenizingAt @@ -249,7 +249,7 @@ class TokenizedBuffer extends Model tokenizedLines buildPlaceholderTokenizedLinesForRows: (startRow, endRow) -> - null for row in [startRow..endRow] by 1 + new Array(endRow - startRow + 1) buildTokenizedLineForRow: (row, ruleStack, openScopes) -> @buildTokenizedLineForRowWithText(row, @buffer.lineForRow(row), ruleStack, openScopes) @@ -260,7 +260,20 @@ class TokenizedBuffer extends Model new TokenizedLine({openScopes, text, tags, ruleStack, lineEnding, @tokenIterator}) tokenizedLineForRow: (bufferRow) -> - @tokenizedLines[bufferRow] + if 0 <= bufferRow <= @buffer.getLastRow() + if tokenizedLine = @tokenizedLines[bufferRow] + tokenizedLine + else + text = @buffer.lineForRow(bufferRow) + lineEnding = @buffer.lineEndingForRow(bufferRow) + tags = [ + @grammar.startIdForScope(@grammar.scopeName), + text.length, + @grammar.endIdForScope(@grammar.scopeName) + ] + @tokenizedLines[bufferRow] = new TokenizedLine({openScopes: [], text, tags, lineEnding, @tokenIterator}) + else + null tokenizedLinesForRows: (startRow, endRow) -> for row in [startRow..endRow] by 1 @@ -270,8 +283,7 @@ class TokenizedBuffer extends Model @tokenizedLines[bufferRow]?.ruleStack openScopesForRow: (bufferRow) -> - if bufferRow > 0 - precedingLine = @tokenizedLineForRow(bufferRow - 1) + if precedingLine = @tokenizedLineForRow(bufferRow - 1) @scopesFromTags(precedingLine.openScopes, precedingLine.tags) else [] From 00f4c7b282827345096077c9bb6eeb5a0997b1c2 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Wed, 12 Oct 2016 08:50:25 +0200 Subject: [PATCH 03/11] Use `TokenizedBuffer.prototype.tokenizedLineForRow` conservatively Since this method will now construct a placeholder line, we want to use it only where necessary to keep memory footprint to a minimum. 
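For reference, the on-demand fallback that this message and the previous patch describe amounts to the sketch below. It mirrors the `tokenizedLineForRow` hunk added in PATCH 02/11 (`TokenizedLine`, `startIdForScope`, and `endIdForScope` are the real names from this series); treat it as an illustration of the mechanism rather than the committed hunk itself:

    tokenizedLineForRow: (bufferRow) ->
      if 0 <= bufferRow <= @buffer.getLastRow()
        if tokenizedLine = @tokenizedLines[bufferRow]
          # Background tokenization already produced a real line: reuse it.
          tokenizedLine
        else
          # Otherwise synthesize a placeholder whose single tag spans the whole
          # row, opened and closed with the grammar's root scope ids, and cache
          # it so repeated lookups for the same row are cheap.
          text = @buffer.lineForRow(bufferRow)
          lineEnding = @buffer.lineEndingForRow(bufferRow)
          tags = [
            @grammar.startIdForScope(@grammar.scopeName)
            text.length
            @grammar.endIdForScope(@grammar.scopeName)
          ]
          @tokenizedLines[bufferRow] = new TokenizedLine({openScopes: [], text, tags, lineEnding, @tokenIterator})

Because each call may allocate and cache a placeholder, hot paths that only care about lines that have already been tokenized (for example `usesSoftTabs` and the comment checks in `language-mode.coffee`) index `@tokenizedLines` directly instead, which is exactly what the hunks below switch them to.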
--- spec/token-iterator-spec.coffee | 2 +- spec/tokenized-buffer-spec.coffee | 114 +++++++++++++++--------------- src/text-editor.coffee | 2 +- src/tokenized-buffer.coffee | 36 +++++----- 4 files changed, 79 insertions(+), 75 deletions(-) diff --git a/spec/token-iterator-spec.coffee b/spec/token-iterator-spec.coffee index f876d30d1..6ae01cd30 100644 --- a/spec/token-iterator-spec.coffee +++ b/spec/token-iterator-spec.coffee @@ -29,7 +29,7 @@ describe "TokenIterator", -> }) tokenizedBuffer.setGrammar(grammar) - tokenIterator = tokenizedBuffer.tokenizedLineForRow(1).getTokenIterator() + tokenIterator = tokenizedBuffer.tokenizedLines[1].getTokenIterator() tokenIterator.next() expect(tokenIterator.getBufferStart()).toBe 0 diff --git a/spec/tokenized-buffer-spec.coffee b/spec/tokenized-buffer-spec.coffee index cbbaba9d5..a21db2fe3 100644 --- a/spec/tokenized-buffer-spec.coffee +++ b/spec/tokenized-buffer-spec.coffee @@ -91,28 +91,28 @@ describe "TokenizedBuffer", -> describe "on construction", -> it "tokenizes lines chunk at a time in the background", -> - line0 = tokenizedBuffer.tokenizedLineForRow(0) + line0 = tokenizedBuffer.tokenizedLines[0] expect(line0).toBe(undefined) - line11 = tokenizedBuffer.tokenizedLineForRow(11) + line11 = tokenizedBuffer.tokenizedLines[11] expect(line11).toBe(undefined) # tokenize chunk 1 advanceClock() - expect(tokenizedBuffer.tokenizedLineForRow(0).ruleStack?).toBeTruthy() - expect(tokenizedBuffer.tokenizedLineForRow(4).ruleStack?).toBeTruthy() - expect(tokenizedBuffer.tokenizedLineForRow(5)).toBe(undefined) + expect(tokenizedBuffer.tokenizedLines[0].ruleStack?).toBeTruthy() + expect(tokenizedBuffer.tokenizedLines[4].ruleStack?).toBeTruthy() + expect(tokenizedBuffer.tokenizedLines[5]).toBe(undefined) # tokenize chunk 2 advanceClock() - expect(tokenizedBuffer.tokenizedLineForRow(5).ruleStack?).toBeTruthy() - expect(tokenizedBuffer.tokenizedLineForRow(9).ruleStack?).toBeTruthy() - expect(tokenizedBuffer.tokenizedLineForRow(10)).toBe(undefined) + expect(tokenizedBuffer.tokenizedLines[5].ruleStack?).toBeTruthy() + expect(tokenizedBuffer.tokenizedLines[9].ruleStack?).toBeTruthy() + expect(tokenizedBuffer.tokenizedLines[10]).toBe(undefined) # tokenize last chunk advanceClock() - expect(tokenizedBuffer.tokenizedLineForRow(10).ruleStack?).toBeTruthy() - expect(tokenizedBuffer.tokenizedLineForRow(12).ruleStack?).toBeTruthy() + expect(tokenizedBuffer.tokenizedLines[10].ruleStack?).toBeTruthy() + expect(tokenizedBuffer.tokenizedLines[12].ruleStack?).toBeTruthy() describe "when the buffer is partially tokenized", -> beforeEach -> @@ -149,8 +149,8 @@ describe "TokenizedBuffer", -> it "does not attempt to tokenize the lines in the change, and preserves the existing invalid row", -> expect(tokenizedBuffer.firstInvalidRow()).toBe 5 buffer.setTextInRange([[6, 0], [7, 0]], "\n\n\n") - expect(tokenizedBuffer.tokenizedLineForRow(6)).toBeFalsy() - expect(tokenizedBuffer.tokenizedLineForRow(7)).toBeFalsy() + expect(tokenizedBuffer.tokenizedLines[6]).toBeUndefined() + expect(tokenizedBuffer.tokenizedLines[7]).toBeUndefined() expect(tokenizedBuffer.firstInvalidRow()).toBe 5 describe "when the buffer is fully tokenized", -> @@ -162,101 +162,101 @@ describe "TokenizedBuffer", -> it "updates tokens to reflect the change", -> buffer.setTextInRange([[0, 0], [2, 0]], "foo()\n7\n") - expect(tokenizedBuffer.tokenizedLineForRow(0).tokens[1]).toEqual(value: '(', scopes: ['source.js', 'meta.function-call.js', 'meta.arguments.js', 'punctuation.definition.arguments.begin.bracket.round.js']) - 
expect(tokenizedBuffer.tokenizedLineForRow(1).tokens[0]).toEqual(value: '7', scopes: ['source.js', 'constant.numeric.decimal.js']) + expect(tokenizedBuffer.tokenizedLines[0].tokens[1]).toEqual(value: '(', scopes: ['source.js', 'meta.function-call.js', 'meta.arguments.js', 'punctuation.definition.arguments.begin.bracket.round.js']) + expect(tokenizedBuffer.tokenizedLines[1].tokens[0]).toEqual(value: '7', scopes: ['source.js', 'constant.numeric.decimal.js']) # line 2 is unchanged - expect(tokenizedBuffer.tokenizedLineForRow(2).tokens[1]).toEqual(value: 'if', scopes: ['source.js', 'keyword.control.js']) + expect(tokenizedBuffer.tokenizedLines[2].tokens[1]).toEqual(value: 'if', scopes: ['source.js', 'keyword.control.js']) describe "when the change invalidates the tokenization of subsequent lines", -> it "schedules the invalidated lines to be tokenized in the background", -> buffer.insert([5, 30], '/* */') buffer.insert([2, 0], '/*') - expect(tokenizedBuffer.tokenizedLineForRow(3).tokens[0].scopes).toEqual ['source.js'] + expect(tokenizedBuffer.tokenizedLines[3].tokens[0].scopes).toEqual ['source.js'] advanceClock() - expect(tokenizedBuffer.tokenizedLineForRow(3).tokens[0].scopes).toEqual ['source.js', 'comment.block.js'] - expect(tokenizedBuffer.tokenizedLineForRow(4).tokens[0].scopes).toEqual ['source.js', 'comment.block.js'] - expect(tokenizedBuffer.tokenizedLineForRow(5).tokens[0].scopes).toEqual ['source.js', 'comment.block.js'] + expect(tokenizedBuffer.tokenizedLines[3].tokens[0].scopes).toEqual ['source.js', 'comment.block.js'] + expect(tokenizedBuffer.tokenizedLines[4].tokens[0].scopes).toEqual ['source.js', 'comment.block.js'] + expect(tokenizedBuffer.tokenizedLines[5].tokens[0].scopes).toEqual ['source.js', 'comment.block.js'] it "resumes highlighting with the state of the previous line", -> buffer.insert([0, 0], '/*') buffer.insert([5, 0], '*/') buffer.insert([1, 0], 'var ') - expect(tokenizedBuffer.tokenizedLineForRow(1).tokens[0].scopes).toEqual ['source.js', 'comment.block.js'] + expect(tokenizedBuffer.tokenizedLines[1].tokens[0].scopes).toEqual ['source.js', 'comment.block.js'] describe "when lines are both updated and removed", -> it "updates tokens to reflect the change", -> buffer.setTextInRange([[1, 0], [3, 0]], "foo()") # previous line 0 remains - expect(tokenizedBuffer.tokenizedLineForRow(0).tokens[0]).toEqual(value: 'var', scopes: ['source.js', 'storage.type.var.js']) + expect(tokenizedBuffer.tokenizedLines[0].tokens[0]).toEqual(value: 'var', scopes: ['source.js', 'storage.type.var.js']) # previous line 3 should be combined with input to form line 1 - expect(tokenizedBuffer.tokenizedLineForRow(1).tokens[0]).toEqual(value: 'foo', scopes: ['source.js', 'meta.function-call.js', 'entity.name.function.js']) - expect(tokenizedBuffer.tokenizedLineForRow(1).tokens[6]).toEqual(value: '=', scopes: ['source.js', 'keyword.operator.assignment.js']) + expect(tokenizedBuffer.tokenizedLines[1].tokens[0]).toEqual(value: 'foo', scopes: ['source.js', 'meta.function-call.js', 'entity.name.function.js']) + expect(tokenizedBuffer.tokenizedLines[1].tokens[6]).toEqual(value: '=', scopes: ['source.js', 'keyword.operator.assignment.js']) # lines below deleted regions should be shifted upward - expect(tokenizedBuffer.tokenizedLineForRow(2).tokens[1]).toEqual(value: 'while', scopes: ['source.js', 'keyword.control.js']) - expect(tokenizedBuffer.tokenizedLineForRow(3).tokens[1]).toEqual(value: '=', scopes: ['source.js', 'keyword.operator.assignment.js']) - 
expect(tokenizedBuffer.tokenizedLineForRow(4).tokens[1]).toEqual(value: '<', scopes: ['source.js', 'keyword.operator.comparison.js']) + expect(tokenizedBuffer.tokenizedLines[2].tokens[1]).toEqual(value: 'while', scopes: ['source.js', 'keyword.control.js']) + expect(tokenizedBuffer.tokenizedLines[3].tokens[1]).toEqual(value: '=', scopes: ['source.js', 'keyword.operator.assignment.js']) + expect(tokenizedBuffer.tokenizedLines[4].tokens[1]).toEqual(value: '<', scopes: ['source.js', 'keyword.operator.comparison.js']) describe "when the change invalidates the tokenization of subsequent lines", -> it "schedules the invalidated lines to be tokenized in the background", -> buffer.insert([5, 30], '/* */') buffer.setTextInRange([[2, 0], [3, 0]], '/*') - expect(tokenizedBuffer.tokenizedLineForRow(2).tokens[0].scopes).toEqual ['source.js', 'comment.block.js', 'punctuation.definition.comment.js'] - expect(tokenizedBuffer.tokenizedLineForRow(3).tokens[0].scopes).toEqual ['source.js'] + expect(tokenizedBuffer.tokenizedLines[2].tokens[0].scopes).toEqual ['source.js', 'comment.block.js', 'punctuation.definition.comment.js'] + expect(tokenizedBuffer.tokenizedLines[3].tokens[0].scopes).toEqual ['source.js'] advanceClock() - expect(tokenizedBuffer.tokenizedLineForRow(3).tokens[0].scopes).toEqual ['source.js', 'comment.block.js'] - expect(tokenizedBuffer.tokenizedLineForRow(4).tokens[0].scopes).toEqual ['source.js', 'comment.block.js'] + expect(tokenizedBuffer.tokenizedLines[3].tokens[0].scopes).toEqual ['source.js', 'comment.block.js'] + expect(tokenizedBuffer.tokenizedLines[4].tokens[0].scopes).toEqual ['source.js', 'comment.block.js'] describe "when lines are both updated and inserted", -> it "updates tokens to reflect the change", -> buffer.setTextInRange([[1, 0], [2, 0]], "foo()\nbar()\nbaz()\nquux()") # previous line 0 remains - expect(tokenizedBuffer.tokenizedLineForRow(0).tokens[0]).toEqual( value: 'var', scopes: ['source.js', 'storage.type.var.js']) + expect(tokenizedBuffer.tokenizedLines[0].tokens[0]).toEqual( value: 'var', scopes: ['source.js', 'storage.type.var.js']) # 3 new lines inserted - expect(tokenizedBuffer.tokenizedLineForRow(1).tokens[0]).toEqual(value: 'foo', scopes: ['source.js', 'meta.function-call.js', 'entity.name.function.js']) - expect(tokenizedBuffer.tokenizedLineForRow(2).tokens[0]).toEqual(value: 'bar', scopes: ['source.js', 'meta.function-call.js', 'entity.name.function.js']) - expect(tokenizedBuffer.tokenizedLineForRow(3).tokens[0]).toEqual(value: 'baz', scopes: ['source.js', 'meta.function-call.js', 'entity.name.function.js']) + expect(tokenizedBuffer.tokenizedLines[1].tokens[0]).toEqual(value: 'foo', scopes: ['source.js', 'meta.function-call.js', 'entity.name.function.js']) + expect(tokenizedBuffer.tokenizedLines[2].tokens[0]).toEqual(value: 'bar', scopes: ['source.js', 'meta.function-call.js', 'entity.name.function.js']) + expect(tokenizedBuffer.tokenizedLines[3].tokens[0]).toEqual(value: 'baz', scopes: ['source.js', 'meta.function-call.js', 'entity.name.function.js']) # previous line 2 is joined with quux() on line 4 - expect(tokenizedBuffer.tokenizedLineForRow(4).tokens[0]).toEqual(value: 'quux', scopes: ['source.js', 'meta.function-call.js', 'entity.name.function.js']) - expect(tokenizedBuffer.tokenizedLineForRow(4).tokens[4]).toEqual(value: 'if', scopes: ['source.js', 'keyword.control.js']) + expect(tokenizedBuffer.tokenizedLines[4].tokens[0]).toEqual(value: 'quux', scopes: ['source.js', 'meta.function-call.js', 'entity.name.function.js']) + 
expect(tokenizedBuffer.tokenizedLines[4].tokens[4]).toEqual(value: 'if', scopes: ['source.js', 'keyword.control.js']) # previous line 3 is pushed down to become line 5 - expect(tokenizedBuffer.tokenizedLineForRow(5).tokens[3]).toEqual(value: '=', scopes: ['source.js', 'keyword.operator.assignment.js']) + expect(tokenizedBuffer.tokenizedLines[5].tokens[3]).toEqual(value: '=', scopes: ['source.js', 'keyword.operator.assignment.js']) describe "when the change invalidates the tokenization of subsequent lines", -> it "schedules the invalidated lines to be tokenized in the background", -> buffer.insert([5, 30], '/* */') buffer.insert([2, 0], '/*\nabcde\nabcder') - expect(tokenizedBuffer.tokenizedLineForRow(2).tokens[0].scopes).toEqual ['source.js', 'comment.block.js', 'punctuation.definition.comment.js'] - expect(tokenizedBuffer.tokenizedLineForRow(3).tokens[0].scopes).toEqual ['source.js', 'comment.block.js'] - expect(tokenizedBuffer.tokenizedLineForRow(4).tokens[0].scopes).toEqual ['source.js', 'comment.block.js'] - expect(tokenizedBuffer.tokenizedLineForRow(5).tokens[0].scopes).toEqual ['source.js'] + expect(tokenizedBuffer.tokenizedLines[2].tokens[0].scopes).toEqual ['source.js', 'comment.block.js', 'punctuation.definition.comment.js'] + expect(tokenizedBuffer.tokenizedLines[3].tokens[0].scopes).toEqual ['source.js', 'comment.block.js'] + expect(tokenizedBuffer.tokenizedLines[4].tokens[0].scopes).toEqual ['source.js', 'comment.block.js'] + expect(tokenizedBuffer.tokenizedLines[5].tokens[0].scopes).toEqual ['source.js'] advanceClock() # tokenize invalidated lines in background - expect(tokenizedBuffer.tokenizedLineForRow(5).tokens[0].scopes).toEqual ['source.js', 'comment.block.js'] - expect(tokenizedBuffer.tokenizedLineForRow(6).tokens[0].scopes).toEqual ['source.js', 'comment.block.js'] - expect(tokenizedBuffer.tokenizedLineForRow(7).tokens[0].scopes).toEqual ['source.js', 'comment.block.js'] - expect(tokenizedBuffer.tokenizedLineForRow(8).tokens[0].scopes).not.toBe ['source.js', 'comment.block.js'] + expect(tokenizedBuffer.tokenizedLines[5].tokens[0].scopes).toEqual ['source.js', 'comment.block.js'] + expect(tokenizedBuffer.tokenizedLines[6].tokens[0].scopes).toEqual ['source.js', 'comment.block.js'] + expect(tokenizedBuffer.tokenizedLines[7].tokens[0].scopes).toEqual ['source.js', 'comment.block.js'] + expect(tokenizedBuffer.tokenizedLines[8].tokens[0].scopes).not.toBe ['source.js', 'comment.block.js'] describe "when there is an insertion that is larger than the chunk size", -> it "tokenizes the initial chunk synchronously, then tokenizes the remaining lines in the background", -> commentBlock = _.multiplyString("// a comment\n", tokenizedBuffer.chunkSize + 2) buffer.insert([0, 0], commentBlock) - expect(tokenizedBuffer.tokenizedLineForRow(0).ruleStack?).toBeTruthy() - expect(tokenizedBuffer.tokenizedLineForRow(4).ruleStack?).toBeTruthy() - expect(tokenizedBuffer.tokenizedLineForRow(5)).toBeFalsy() + expect(tokenizedBuffer.tokenizedLines[0].ruleStack?).toBeTruthy() + expect(tokenizedBuffer.tokenizedLines[4].ruleStack?).toBeTruthy() + expect(tokenizedBuffer.tokenizedLines[5]).toBeUndefined() advanceClock() - expect(tokenizedBuffer.tokenizedLineForRow(5).ruleStack?).toBeTruthy() - expect(tokenizedBuffer.tokenizedLineForRow(6).ruleStack?).toBeTruthy() + expect(tokenizedBuffer.tokenizedLines[5].ruleStack?).toBeTruthy() + expect(tokenizedBuffer.tokenizedLines[6].ruleStack?).toBeTruthy() it "does not break out soft tabs across a scope boundary", -> waitsForPromise -> @@ -366,7 +366,7 @@ 
describe "TokenizedBuffer", -> tokenizedBuffer.setGrammar(atom.grammars.selectGrammar('test.erb')) fullyTokenize(tokenizedBuffer) - {tokens} = tokenizedBuffer.tokenizedLineForRow(0) + {tokens} = tokenizedBuffer.tokenizedLines[0] expect(tokens[0]).toEqual value: "
", scopes: ["text.html.ruby"] waitsForPromise -> @@ -374,7 +374,7 @@ describe "TokenizedBuffer", -> runs -> fullyTokenize(tokenizedBuffer) - {tokens} = tokenizedBuffer.tokenizedLineForRow(0) + {tokens} = tokenizedBuffer.tokenizedLines[0] expect(tokens[0]).toEqual value: '<', scopes: ["text.html.ruby", "meta.tag.block.any.html", "punctuation.definition.tag.begin.html"] describe ".tokenForPosition(position)", -> @@ -406,7 +406,7 @@ describe "TokenizedBuffer", -> describe "when the selector does not match the token at the position", -> it "returns a falsy value", -> - expect(tokenizedBuffer.bufferRangeForScopeAtPosition('.bogus', [0, 1])).toBeFalsy() + expect(tokenizedBuffer.bufferRangeForScopeAtPosition('.bogus', [0, 1])).toBeUndefined() describe "when the selector matches a single token at the position", -> it "returns the range covered by the token", -> @@ -466,7 +466,7 @@ describe "TokenizedBuffer", -> buffer.insert([12, 0], ' ') expect(tokenizedBuffer.indentLevelForRow(13)).toBe 2 - expect(tokenizedBuffer.tokenizedLineForRow(14)).not.toBeDefined() + expect(tokenizedBuffer.tokenizedLines[14]).not.toBeDefined() it "updates the indentLevel of empty lines surrounding a change that inserts lines", -> buffer.insert([7, 0], '\n\n') @@ -585,9 +585,9 @@ describe "TokenizedBuffer", -> expect(tokenizeCallback.callCount).toBe 1 expect(atom.grammars.nullGrammar.tokenizeLine.callCount).toBe 0 - expect(tokenizedBuffer.tokenizedLineForRow(0)).toBeFalsy() - expect(tokenizedBuffer.tokenizedLineForRow(1)).toBeFalsy() - expect(tokenizedBuffer.tokenizedLineForRow(2)).toBeFalsy() + expect(tokenizedBuffer.tokenizedLines[0]).toBeUndefined() + expect(tokenizedBuffer.tokenizedLines[1]).toBeUndefined() + expect(tokenizedBuffer.tokenizedLines[2]).toBeUndefined() describe "text decoration layer API", -> describe "iterator", -> diff --git a/src/text-editor.coffee b/src/text-editor.coffee index ca1f1a938..50b2e6f96 100644 --- a/src/text-editor.coffee +++ b/src/text-editor.coffee @@ -2868,7 +2868,7 @@ class TextEditor extends Model # whitespace. usesSoftTabs: -> for bufferRow in [0..@buffer.getLastRow()] - continue if @tokenizedBuffer.tokenizedLineForRow(bufferRow)?.isComment() + continue if @tokenizedBuffer.tokenizedLines[bufferRow]?.isComment() line = @buffer.lineForRow(bufferRow) return true if line[0] is ' ' diff --git a/src/tokenized-buffer.coffee b/src/tokenized-buffer.coffee index 23b73dfb1..ff2d72019 100644 --- a/src/tokenized-buffer.coffee +++ b/src/tokenized-buffer.coffee @@ -211,30 +211,34 @@ class TokenizedBuffer extends Model # Returns a {Boolean} indicating whether the given buffer row starts # a a foldable row range due to the code's indentation patterns. isFoldableCodeAtRow: (row) -> - tokenizedLine = @tokenizedLineForRow(row) - return false unless tokenizedLine? - - return false if @buffer.isRowBlank(row) or tokenizedLine.isComment() - nextRow = @buffer.nextNonBlankRow(row) - return false unless nextRow? - - @indentLevelForRow(nextRow) > @indentLevelForRow(row) + if 0 <= row <= @buffer.getLastRow() + nextRow = @buffer.nextNonBlankRow(row) + tokenizedLine = @tokenizedLines[row] + if @buffer.isRowBlank(row) or tokenizedLine?.isComment() or not nextRow? 
+ false + else + @indentLevelForRow(nextRow) > @indentLevelForRow(row) + else + false isFoldableCommentAtRow: (row) -> previousRow = row - 1 nextRow = row + 1 - return false if nextRow > @buffer.getLastRow() - - (not @tokenizedLineForRow(previousRow)?.isComment()) and - @tokenizedLineForRow(row)?.isComment() and - @tokenizedLineForRow(nextRow)?.isComment() + if nextRow > @buffer.getLastRow() + false + else + Boolean( + not (@tokenizedLines[previousRow]?.isComment()) and + @tokenizedLines[row]?.isComment() and + @tokenizedLines[nextRow]?.isComment() + ) buildTokenizedLinesForRows: (startRow, endRow, startingStack, startingopenScopes) -> ruleStack = startingStack openScopes = startingopenScopes stopTokenizingAt = startRow + @chunkSize tokenizedLines = for row in [startRow..endRow] by 1 - if row < stopTokenizingAt + if (not @firstInvalidRow()? or row < @firstInvalidRow()) and row < stopTokenizingAt tokenizedLine = @buildTokenizedLineForRow(row, ruleStack, openScopes) ruleStack = tokenizedLine.ruleStack openScopes = @scopesFromTags(openScopes, tokenizedLine.tags) @@ -283,7 +287,7 @@ class TokenizedBuffer extends Model @tokenizedLines[bufferRow]?.ruleStack openScopesForRow: (bufferRow) -> - if precedingLine = @tokenizedLineForRow(bufferRow - 1) + if precedingLine = @tokenizedLines[bufferRow - 1] @scopesFromTags(precedingLine.openScopes, precedingLine.tags) else [] @@ -438,7 +442,7 @@ class TokenizedBuffer extends Model logLines: (start=0, end=@buffer.getLastRow()) -> for row in [start..end] - line = @tokenizedLineForRow(row).text + line = @tokenizedLines[row].text console.log row, line, line.length return From d3882c165f2d54dbdb60ed036ddf2bd01f78f885 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Wed, 12 Oct 2016 12:04:54 +0200 Subject: [PATCH 04/11] :art: --- spec/tokenized-buffer-spec.coffee | 22 +++++++++++++--------- src/null-grammar.js | 4 ++-- src/tokenized-buffer.coffee | 19 ++++++++++--------- 3 files changed, 25 insertions(+), 20 deletions(-) diff --git a/spec/tokenized-buffer-spec.coffee b/spec/tokenized-buffer-spec.coffee index a21db2fe3..16fc4849c 100644 --- a/spec/tokenized-buffer-spec.coffee +++ b/spec/tokenized-buffer-spec.coffee @@ -1,3 +1,4 @@ +NullGrammar = require '../src/null-grammar' TokenizedBuffer = require '../src/tokenized-buffer' {Point} = TextBuffer = require 'text-buffer' _ = require 'underscore-plus' @@ -568,26 +569,29 @@ describe "TokenizedBuffer", -> expect(tokenizedBuffer.isFoldableAtRow(8)).toBe false describe "when the buffer is configured with the null grammar", -> - it "uses the placeholder tokens and does not actually tokenize using the grammar", -> - spyOn(atom.grammars.nullGrammar, 'tokenizeLine').andCallThrough() + it "does not actually tokenize using the grammar", -> + spyOn(NullGrammar, 'tokenizeLine').andCallThrough() buffer = atom.project.bufferForPathSync('sample.will-use-the-null-grammar') buffer.setText('a\nb\nc') - tokenizedBuffer = new TokenizedBuffer({ buffer, grammarRegistry: atom.grammars, packageManager: atom.packages, - assert: atom.assert, tabLength: 2, + assert: atom.assert, tabLength: 2 }) tokenizeCallback = jasmine.createSpy('onDidTokenize') tokenizedBuffer.onDidTokenize(tokenizeCallback) - fullyTokenize(tokenizedBuffer) - - expect(tokenizeCallback.callCount).toBe 1 - expect(atom.grammars.nullGrammar.tokenizeLine.callCount).toBe 0 - expect(tokenizedBuffer.tokenizedLines[0]).toBeUndefined() expect(tokenizedBuffer.tokenizedLines[1]).toBeUndefined() expect(tokenizedBuffer.tokenizedLines[2]).toBeUndefined() + 
expect(tokenizeCallback.callCount).toBe(0) + expect(NullGrammar.tokenizeLine).not.toHaveBeenCalled() + + fullyTokenize(tokenizedBuffer) + expect(tokenizedBuffer.tokenizedLines[0]).toBeUndefined() + expect(tokenizedBuffer.tokenizedLines[1]).toBeUndefined() + expect(tokenizedBuffer.tokenizedLines[2]).toBeUndefined() + expect(tokenizeCallback.callCount).toBe(0) + expect(NullGrammar.tokenizeLine).not.toHaveBeenCalled() describe "text decoration layer API", -> describe "iterator", -> diff --git a/src/null-grammar.js b/src/null-grammar.js index 01841346e..fe9c3889e 100644 --- a/src/null-grammar.js +++ b/src/null-grammar.js @@ -2,7 +2,7 @@ import {Disposable} from 'event-kit' -export default Object.freeze({ +export default { name: 'Null Grammar', scopeName: 'text.plain.null-grammar', scopeForId (id) { @@ -35,6 +35,6 @@ export default Object.freeze({ onDidUpdate (callback) { return new Disposable(noop) } -}) +} function noop () {} diff --git a/src/tokenized-buffer.coffee b/src/tokenized-buffer.coffee index ff2d72019..20e79d28a 100644 --- a/src/tokenized-buffer.coffee +++ b/src/tokenized-buffer.coffee @@ -94,11 +94,13 @@ class TokenizedBuffer extends Model false retokenizeLines: -> - lastRow = @buffer.getLastRow() @fullyTokenized = false - @tokenizedLines = new Array(lastRow + 1) + @tokenizedLines = new Array(@buffer.getLineCount()) @invalidRows = [] - @invalidateRow(0) + if @largeFileMode or @grammar.name is 'Null Grammar' + @markTokenizationComplete() + else + @invalidateRow(0) setVisible: (@visible) -> @tokenizeInBackground() if @visible @@ -167,11 +169,10 @@ class TokenizedBuffer extends Model return invalidateRow: (row) -> - return if @largeFileMode - - @invalidRows.push(row) - @invalidRows.sort (a, b) -> a - b - @tokenizeInBackground() + if @grammar.name isnt 'Null Grammar' and not @largeFileMode + @invalidRows.push(row) + @invalidRows.sort (a, b) -> a - b + @tokenizeInBackground() updateInvalidRows: (start, end, delta) -> @invalidRows = @invalidRows.map (row) -> @@ -238,7 +239,7 @@ class TokenizedBuffer extends Model openScopes = startingopenScopes stopTokenizingAt = startRow + @chunkSize tokenizedLines = for row in [startRow..endRow] by 1 - if (not @firstInvalidRow()? 
or row < @firstInvalidRow()) and row < stopTokenizingAt + if (ruleStack or row is 0) and row < stopTokenizingAt tokenizedLine = @buildTokenizedLineForRow(row, ruleStack, openScopes) ruleStack = tokenizedLine.ruleStack openScopes = @scopesFromTags(openScopes, tokenizedLine.tags) From e317d7d3259a7e2da3b9e4168ed868079c481fee Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Wed, 12 Oct 2016 12:10:24 +0200 Subject: [PATCH 05/11] Clean up tests --- spec/tokenized-buffer-spec.coffee | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/spec/tokenized-buffer-spec.coffee b/spec/tokenized-buffer-spec.coffee index 16fc4849c..e8470d1c1 100644 --- a/spec/tokenized-buffer-spec.coffee +++ b/spec/tokenized-buffer-spec.coffee @@ -93,22 +93,22 @@ describe "TokenizedBuffer", -> describe "on construction", -> it "tokenizes lines chunk at a time in the background", -> line0 = tokenizedBuffer.tokenizedLines[0] - expect(line0).toBe(undefined) + expect(line0).toBeUndefined() line11 = tokenizedBuffer.tokenizedLines[11] - expect(line11).toBe(undefined) + expect(line11).toBeUndefined() # tokenize chunk 1 advanceClock() expect(tokenizedBuffer.tokenizedLines[0].ruleStack?).toBeTruthy() expect(tokenizedBuffer.tokenizedLines[4].ruleStack?).toBeTruthy() - expect(tokenizedBuffer.tokenizedLines[5]).toBe(undefined) + expect(tokenizedBuffer.tokenizedLines[5]).toBeUndefined() # tokenize chunk 2 advanceClock() expect(tokenizedBuffer.tokenizedLines[5].ruleStack?).toBeTruthy() expect(tokenizedBuffer.tokenizedLines[9].ruleStack?).toBeTruthy() - expect(tokenizedBuffer.tokenizedLines[10]).toBe(undefined) + expect(tokenizedBuffer.tokenizedLines[10]).toBeUndefined() # tokenize last chunk advanceClock() From ea80483cbe6e97a668f720f42f9538267bc52d7f Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Wed, 12 Oct 2016 12:16:17 +0200 Subject: [PATCH 06/11] Delete `TokenizedBuffer.prototype.buildPlaceholderTokenizedLinesForRows` --- src/tokenized-buffer.coffee | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/src/tokenized-buffer.coffee b/src/tokenized-buffer.coffee index 20e79d28a..ba40af58a 100644 --- a/src/tokenized-buffer.coffee +++ b/src/tokenized-buffer.coffee @@ -120,7 +120,7 @@ class TokenizedBuffer extends Model tokenizeNextChunk: -> # Short circuit null grammar which can just use the placeholder tokens if @grammar.name is 'Null Grammar' and @firstInvalidRow()? 
- @tokenizedLines = @buildPlaceholderTokenizedLinesForRows(0, @buffer.getLastRow()) + @tokenizedLines = new Array(@buffer.getLineCount()) @invalidRows = [] @markTokenizationComplete() return @@ -194,7 +194,8 @@ class TokenizedBuffer extends Model @updateInvalidRows(start, end, delta) previousEndStack = @stackForRow(end) # used in spill detection below if @largeFileMode or @grammar.name is 'Null Grammar' - newTokenizedLines = @buildPlaceholderTokenizedLinesForRows(start, end + delta) + lineCount = ((end + delta) - start) + 1 + newTokenizedLines = new Array(lineCount) else newTokenizedLines = @buildTokenizedLinesForRows(start, end + delta, @stackForRow(start - 1), @openScopesForRow(start)) _.spliceWithArray(@tokenizedLines, start, end - start + 1, newTokenizedLines) @@ -253,9 +254,6 @@ class TokenizedBuffer extends Model tokenizedLines - buildPlaceholderTokenizedLinesForRows: (startRow, endRow) -> - new Array(endRow - startRow + 1) - buildTokenizedLineForRow: (row, ruleStack, openScopes) -> @buildTokenizedLineForRowWithText(row, @buffer.lineForRow(row), ruleStack, openScopes) From 633e68f4d5a7518b75b61a97e2ca449e779e584d Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Wed, 12 Oct 2016 12:18:58 +0200 Subject: [PATCH 07/11] Remove null guard in `scopeDescriptorForPosition` --- src/tokenized-buffer.coffee | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/src/tokenized-buffer.coffee b/src/tokenized-buffer.coffee index ba40af58a..6de370cd5 100644 --- a/src/tokenized-buffer.coffee +++ b/src/tokenized-buffer.coffee @@ -357,18 +357,16 @@ class TokenizedBuffer extends Model scopeDescriptorForPosition: (position) -> {row, column} = @buffer.clipPosition(Point.fromObject(position)) - if iterator = @tokenizedLineForRow(row)?.getTokenIterator() - while iterator.next() - if iterator.getBufferEnd() > column - scopes = iterator.getScopes() - break - - # rebuild scope of last token if we iterated off the end - unless scopes? + iterator = @tokenizedLineForRow(row).getTokenIterator() + while iterator.next() + if iterator.getBufferEnd() > column scopes = iterator.getScopes() - scopes.push(iterator.getScopeEnds().reverse()...) - else - scopes = [] + break + + # rebuild scope of last token if we iterated off the end + unless scopes? + scopes = iterator.getScopes() + scopes.push(iterator.getScopeEnds().reverse()...) 
new ScopeDescriptor({scopes}) From 2246072ac93b8e797d7f5bcc06faddfa1f3c41f5 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Wed, 12 Oct 2016 12:20:32 +0200 Subject: [PATCH 08/11] Restore line order in `TokenizedBufferIterator.prototype.seek` --- src/tokenized-buffer-iterator.coffee | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/tokenized-buffer-iterator.coffee b/src/tokenized-buffer-iterator.coffee index 90e29fdfe..23b72d5a9 100644 --- a/src/tokenized-buffer-iterator.coffee +++ b/src/tokenized-buffer-iterator.coffee @@ -13,11 +13,10 @@ class TokenizedBufferIterator @tagIndex = null currentLine = @tokenizedBuffer.tokenizedLineForRow(position.row) - @currentLineLength = currentLine.text.length - @currentLineOpenTags = currentLine.openScopes @currentTags = currentLine.tags + @currentLineOpenTags = currentLine.openScopes + @currentLineLength = currentLine.text.length @containingTags = @currentLineOpenTags.map (id) => @tokenizedBuffer.grammar.scopeForId(id) - currentColumn = 0 for tag, index in @currentTags From d393cba75de055f618db76e8df50afa915cb2f8d Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Wed, 12 Oct 2016 12:28:30 +0200 Subject: [PATCH 09/11] Simplify on-demand placeholder line creation and add test coverage --- spec/tokenized-buffer-spec.coffee | 36 +++++++++++++++++++++++++++++++ src/language-mode.coffee | 3 --- src/tokenized-buffer.coffee | 8 +------ 3 files changed, 37 insertions(+), 10 deletions(-) diff --git a/spec/tokenized-buffer-spec.coffee b/spec/tokenized-buffer-spec.coffee index e8470d1c1..f2b487e85 100644 --- a/spec/tokenized-buffer-spec.coffee +++ b/spec/tokenized-buffer-spec.coffee @@ -568,6 +568,42 @@ describe "TokenizedBuffer", -> expect(tokenizedBuffer.isFoldableAtRow(7)).toBe false expect(tokenizedBuffer.isFoldableAtRow(8)).toBe false + describe "::tokenizedLineForRow(row)", -> + it "returns the tokenized line for a row, or a placeholder line if it hasn't been tokenized yet", -> + buffer = atom.project.bufferForPathSync('sample.js') + grammar = atom.grammars.grammarForScopeName('source.js') + tokenizedBuffer = new TokenizedBuffer({buffer, grammar, tabLength: 2}) + line0 = buffer.lineForRow(0) + + jsScopeStartId = grammar.startIdForScope(grammar.scopeName) + jsScopeEndId = grammar.endIdForScope(grammar.scopeName) + startTokenizing(tokenizedBuffer) + expect(tokenizedBuffer.tokenizedLines[0]).toBeUndefined() + expect(tokenizedBuffer.tokenizedLineForRow(0).text).toBe(line0) + expect(tokenizedBuffer.tokenizedLineForRow(0).tags).toEqual([jsScopeStartId, line0.length, jsScopeEndId]) + advanceClock(1) + expect(tokenizedBuffer.tokenizedLines[0]).not.toBeUndefined() + expect(tokenizedBuffer.tokenizedLineForRow(0).text).toBe(line0) + expect(tokenizedBuffer.tokenizedLineForRow(0).tags).not.toEqual([jsScopeStartId, line0.length, jsScopeEndId]) + + nullScopeStartId = NullGrammar.startIdForScope(NullGrammar.scopeName) + nullScopeEndId = NullGrammar.endIdForScope(NullGrammar.scopeName) + tokenizedBuffer.setGrammar(NullGrammar) + startTokenizing(tokenizedBuffer) + expect(tokenizedBuffer.tokenizedLines[0]).toBeUndefined() + expect(tokenizedBuffer.tokenizedLineForRow(0).text).toBe(line0) + expect(tokenizedBuffer.tokenizedLineForRow(0).tags).toEqual([nullScopeStartId, line0.length, nullScopeEndId]) + advanceClock(1) + expect(tokenizedBuffer.tokenizedLineForRow(0).text).toBe(line0) + expect(tokenizedBuffer.tokenizedLineForRow(0).tags).toEqual([nullScopeStartId, line0.length, nullScopeEndId]) + + it "returns undefined if the requested row is 
outside the buffer range", ->
+        buffer = atom.project.bufferForPathSync('sample.js')
+        grammar = atom.grammars.grammarForScopeName('source.js')
+        tokenizedBuffer = new TokenizedBuffer({buffer, grammar, tabLength: 2})
+        fullyTokenize(tokenizedBuffer)
+        expect(tokenizedBuffer.tokenizedLineForRow(999)).toBeUndefined()
+
   describe "when the buffer is configured with the null grammar", ->
     it "does not actually tokenize using the grammar", ->
       spyOn(NullGrammar, 'tokenizeLine').andCallThrough()
diff --git a/src/language-mode.coffee b/src/language-mode.coffee
index 20d54ae28..bb9f339c4 100644
--- a/src/language-mode.coffee
+++ b/src/language-mode.coffee
@@ -247,9 +247,6 @@ class LanguageMode
 
   suggestedIndentForLineAtBufferRow: (bufferRow, line, options) ->
     tokenizedLine = @editor.tokenizedBuffer.buildTokenizedLineForRowWithText(bufferRow, line)
-    iterator = tokenizedLine.getTokenIterator()
-    iterator.next()
-    scopeDescriptor = new ScopeDescriptor(scopes: iterator.getScopes())
     @suggestedIndentForTokenizedLineAtBufferRow(bufferRow, line, tokenizedLine, options)
 
   suggestedIndentForTokenizedLineAtBufferRow: (bufferRow, line, tokenizedLine, options) ->
diff --git a/src/tokenized-buffer.coffee b/src/tokenized-buffer.coffee
index 6de370cd5..11a15c575 100644
--- a/src/tokenized-buffer.coffee
+++ b/src/tokenized-buffer.coffee
@@ -269,14 +269,8 @@ class TokenizedBuffer extends Model
       else
         text = @buffer.lineForRow(bufferRow)
         lineEnding = @buffer.lineEndingForRow(bufferRow)
-        tags = [
-          @grammar.startIdForScope(@grammar.scopeName),
-          text.length,
-          @grammar.endIdForScope(@grammar.scopeName)
-        ]
+        tags = [@grammar.startIdForScope(@grammar.scopeName), text.length, @grammar.endIdForScope(@grammar.scopeName)]
         @tokenizedLines[bufferRow] = new TokenizedLine({openScopes: [], text, tags, lineEnding, @tokenIterator})
-      else
-        null
 
   tokenizedLinesForRows: (startRow, endRow) ->
     for row in [startRow..endRow] by 1

From 503f31ea6cf9b9e3b7c0ac615d7255e58a8809d6 Mon Sep 17 00:00:00 2001
From: Antonio Scandurra
Date: Wed, 12 Oct 2016 13:11:34 +0200
Subject: [PATCH 10/11] Delete unnecessary dependencies in `TokenizedBuffer`
 specs

---
 spec/tokenized-buffer-spec.coffee | 94 ++++++-------------------------
 1 file changed, 16 insertions(+), 78 deletions(-)

diff --git a/spec/tokenized-buffer-spec.coffee b/spec/tokenized-buffer-spec.coffee
index f2b487e85..6558d42b4 100644
--- a/spec/tokenized-buffer-spec.coffee
+++ b/spec/tokenized-buffer-spec.coffee
@@ -33,15 +33,8 @@ describe "TokenizedBuffer", ->
         atom.packages.activatePackage('language-coffee-script')
 
       it "deserializes it searching among the buffers in the current project", ->
-        tokenizedBufferA = new TokenizedBuffer({
-          buffer, grammarRegistry: atom.grammars, packageManager: atom.packages,
-          assert: atom.assert, tabLength: 2,
-        })
-        tokenizedBufferB = TokenizedBuffer.deserialize(
-          JSON.parse(JSON.stringify(tokenizedBufferA.serialize())),
-          atom
-        )
-
+        tokenizedBufferA = new TokenizedBuffer({buffer, tabLength: 2})
+        tokenizedBufferB = TokenizedBuffer.deserialize(JSON.parse(JSON.stringify(tokenizedBufferA.serialize())), atom)
         expect(tokenizedBufferB.buffer).toBe(tokenizedBufferA.buffer)
 
     describe "when the underlying buffer has no path", ->
@@ -49,25 +42,14 @@ describe "TokenizedBuffer", ->
         buffer = atom.project.bufferForPathSync(null)
 
       it "deserializes it searching among the buffers in the current project", ->
-        tokenizedBufferA = new TokenizedBuffer({
-          buffer, grammarRegistry: atom.grammars, packageManager: atom.packages,
-          assert: atom.assert, tabLength: 2,
-        })
-        tokenizedBufferB = TokenizedBuffer.deserialize(
-          JSON.parse(JSON.stringify(tokenizedBufferA.serialize())),
-          atom
-        )
-
+        tokenizedBufferA = new TokenizedBuffer({buffer, tabLength: 2})
+        tokenizedBufferB = TokenizedBuffer.deserialize(JSON.parse(JSON.stringify(tokenizedBufferA.serialize())), atom)
         expect(tokenizedBufferB.buffer).toBe(tokenizedBufferA.buffer)
 
   describe "when the buffer is destroyed", ->
     beforeEach ->
       buffer = atom.project.bufferForPathSync('sample.js')
-      tokenizedBuffer = new TokenizedBuffer({
-        buffer, grammarRegistry: atom.grammars, packageManager: atom.packages,
-        assert: atom.assert, tabLength: 2,
-      })
-      tokenizedBuffer.setGrammar(atom.grammars.grammarForScopeName('source.js'))
+      tokenizedBuffer = new TokenizedBuffer({buffer, grammar: atom.grammars.grammarForScopeName('source.js'), tabLength: 2})
       startTokenizing(tokenizedBuffer)
 
     it "stops tokenization", ->
@@ -79,11 +61,7 @@ describe "TokenizedBuffer", ->
   describe "when the buffer contains soft-tabs", ->
     beforeEach ->
       buffer = atom.project.bufferForPathSync('sample.js')
-      tokenizedBuffer = new TokenizedBuffer({
-        buffer, grammarRegistry: atom.grammars, packageManager: atom.packages,
-        assert: atom.assert, tabLength: 2,
-      })
-      tokenizedBuffer.setGrammar(atom.grammars.grammarForScopeName('source.js'))
+      tokenizedBuffer = new TokenizedBuffer({buffer, grammar: atom.grammars.grammarForScopeName('source.js'), tabLength: 2})
       startTokenizing(tokenizedBuffer)
 
     afterEach ->
@@ -282,11 +260,7 @@ describe "TokenizedBuffer", ->
 
       runs ->
         buffer = atom.project.bufferForPathSync('sample-with-tabs.coffee')
-        tokenizedBuffer = new TokenizedBuffer({
-          buffer, grammarRegistry: atom.grammars, packageManager: atom.packages,
-          assert: atom.assert, tabLength: 2,
-        })
-        tokenizedBuffer.setGrammar(atom.grammars.grammarForScopeName('source.coffee'))
+        tokenizedBuffer = new TokenizedBuffer({buffer, grammar: atom.grammars.grammarForScopeName('source.coffee'), tabLength: 2})
         startTokenizing(tokenizedBuffer)
 
     afterEach ->
@@ -350,7 +324,6 @@ describe "TokenizedBuffer", ->
         expect(tokenizedHandler.callCount).toBe(1)
 
     it "retokenizes the buffer", ->
-
       waitsForPromise ->
         atom.packages.activatePackage('language-ruby-on-rails')
 
@@ -360,11 +333,7 @@ describe "TokenizedBuffer", ->
       runs ->
         buffer = atom.project.bufferForPathSync()
         buffer.setText "<%= User.find(2).full_name %>"
-        tokenizedBuffer = new TokenizedBuffer({
-          buffer, grammarRegistry: atom.grammars, packageManager: atom.packages,
-          assert: atom.assert, tabLength: 2,
-        })
-        tokenizedBuffer.setGrammar(atom.grammars.selectGrammar('test.erb'))
+        tokenizedBuffer = new TokenizedBuffer({buffer, grammar: atom.grammars.selectGrammar('test.erb'), tabLength: 2})
         fullyTokenize(tokenizedBuffer)
 
         {tokens} = tokenizedBuffer.tokenizedLines[0]
@@ -385,11 +354,7 @@ describe "TokenizedBuffer", ->
 
     it "returns the correct token (regression)", ->
       buffer = atom.project.bufferForPathSync('sample.js')
-      tokenizedBuffer = new TokenizedBuffer({
-        buffer, grammarRegistry: atom.grammars, packageManager: atom.packages,
-        assert: atom.assert, tabLength: 2,
-      })
-      tokenizedBuffer.setGrammar(atom.grammars.grammarForScopeName('source.js'))
+      tokenizedBuffer = new TokenizedBuffer({buffer, grammar: atom.grammars.grammarForScopeName('source.js'), tabLength: 2})
       fullyTokenize(tokenizedBuffer)
       expect(tokenizedBuffer.tokenForPosition([1, 0]).scopes).toEqual ["source.js"]
       expect(tokenizedBuffer.tokenForPosition([1, 1]).scopes).toEqual ["source.js"]
@@ -398,11 +363,7 @@ describe "TokenizedBuffer", ->
   describe ".bufferRangeForScopeAtPosition(selector, position)", ->
     beforeEach ->
       buffer = atom.project.bufferForPathSync('sample.js')
-      tokenizedBuffer = new TokenizedBuffer({
-        buffer, grammarRegistry: atom.grammars, packageManager: atom.packages,
-        assert: atom.assert, tabLength: 2,
-      })
-      tokenizedBuffer.setGrammar(atom.grammars.grammarForScopeName('source.js'))
+      tokenizedBuffer = new TokenizedBuffer({buffer, grammar: atom.grammars.grammarForScopeName('source.js'), tabLength: 2})
       fullyTokenize(tokenizedBuffer)
 
     describe "when the selector does not match the token at the position", ->
@@ -421,11 +382,7 @@ describe "TokenizedBuffer", ->
   describe ".indentLevelForRow(row)", ->
     beforeEach ->
       buffer = atom.project.bufferForPathSync('sample.js')
-      tokenizedBuffer = new TokenizedBuffer({
-        buffer, grammarRegistry: atom.grammars, packageManager: atom.packages,
-        assert: atom.assert, tabLength: 2,
-      })
-      tokenizedBuffer.setGrammar(atom.grammars.grammarForScopeName('source.js'))
+      tokenizedBuffer = new TokenizedBuffer({buffer, grammar: atom.grammars.grammarForScopeName('source.js'), tabLength: 2})
       fullyTokenize(tokenizedBuffer)
 
     describe "when the line is non-empty", ->
@@ -501,11 +458,7 @@ describe "TokenizedBuffer", ->
      buffer = atom.project.bufferForPathSync('sample.js')
       buffer.insert [10, 0], "  // multi-line\n  // comment\n  // block\n"
       buffer.insert [0, 0], "// multi-line\n// comment\n// block\n"
-      tokenizedBuffer = new TokenizedBuffer({
-        buffer, grammarRegistry: atom.grammars, packageManager: atom.packages,
-        assert: atom.assert, tabLength: 2,
-      })
-      tokenizedBuffer.setGrammar(atom.grammars.grammarForScopeName('source.js'))
+      tokenizedBuffer = new TokenizedBuffer({buffer, grammar: atom.grammars.grammarForScopeName('source.js'), tabLength: 2})
       fullyTokenize(tokenizedBuffer)
 
     it "includes the first line of multi-line comments", ->
@@ -609,10 +562,7 @@ describe "TokenizedBuffer", ->
       spyOn(NullGrammar, 'tokenizeLine').andCallThrough()
       buffer = atom.project.bufferForPathSync('sample.will-use-the-null-grammar')
       buffer.setText('a\nb\nc')
-      tokenizedBuffer = new TokenizedBuffer({
-        buffer, grammarRegistry: atom.grammars, packageManager: atom.packages,
-        assert: atom.assert, tabLength: 2
-      })
+      tokenizedBuffer = new TokenizedBuffer({buffer, tabLength: 2})
 
       tokenizeCallback = jasmine.createSpy('onDidTokenize')
       tokenizedBuffer.onDidTokenize(tokenizeCallback)
 
@@ -633,11 +583,7 @@ describe "TokenizedBuffer", ->
     describe "iterator", ->
       it "iterates over the syntactic scope boundaries", ->
         buffer = new TextBuffer(text: "var foo = 1 /*\nhello*/var bar = 2\n")
-        tokenizedBuffer = new TokenizedBuffer({
-          buffer, grammarRegistry: atom.grammars, packageManager: atom.packages,
-          assert: atom.assert, tabLength: 2,
-        })
-        tokenizedBuffer.setGrammar(atom.grammars.selectGrammar(".js"))
+        tokenizedBuffer = new TokenizedBuffer({buffer, grammar: atom.grammars.grammarForScopeName("source.js"), tabLength: 2})
         fullyTokenize(tokenizedBuffer)
 
         iterator = tokenizedBuffer.buildIterator()
@@ -689,11 +635,7 @@ describe "TokenizedBuffer", ->
 
       runs ->
         buffer = new TextBuffer(text: "# hello\n# world")
-        tokenizedBuffer = new TokenizedBuffer({
-          buffer, grammarRegistry: atom.grammars, packageManager: atom.packages,
-          assert: atom.assert, tabLength: 2,
-        })
-        tokenizedBuffer.setGrammar(atom.grammars.selectGrammar(".coffee"))
+        tokenizedBuffer = new TokenizedBuffer({buffer, grammar: atom.grammars.grammarForScopeName("source.coffee"), tabLength: 2})
         fullyTokenize(tokenizedBuffer)
 
         iterator = tokenizedBuffer.buildIterator()
@@ -722,11 +664,7 @@ describe "TokenizedBuffer", ->
         })
 
         buffer = new TextBuffer(text: 'start x\nend x\nx')
-        tokenizedBuffer = new TokenizedBuffer({
-          buffer, grammarRegistry: atom.grammars, packageManager: atom.packages,
-          assert: atom.assert, tabLength: 2,
-        })
-        tokenizedBuffer.setGrammar(grammar)
+        tokenizedBuffer = new TokenizedBuffer({buffer, grammar, tabLength: 2})
         fullyTokenize(tokenizedBuffer)
 
         iterator = tokenizedBuffer.buildIterator()

From 1f210adad1e4bd7bf9483181578c020b35e831d6 Mon Sep 17 00:00:00 2001
From: Antonio Scandurra
Date: Wed, 12 Oct 2016 18:41:56 +0200
Subject: [PATCH 11/11] Delete unnecessary conditionals in `tokenizeNextChunk`
 and `invalidateRow`

Previously, for the null grammar and for large file mode, we were
short-circuiting the tokenization of the next chunk and the invalidation
of rows. Those checks are unnecessary, because with the null grammar or
in large file mode there is never an additional chunk to process.

---
 src/tokenized-buffer.coffee | 31 ++++++++++++-------------------
 1 file changed, 12 insertions(+), 19 deletions(-)

diff --git a/src/tokenized-buffer.coffee b/src/tokenized-buffer.coffee
index 11a15c575..ce56e0388 100644
--- a/src/tokenized-buffer.coffee
+++ b/src/tokenized-buffer.coffee
@@ -103,7 +103,8 @@ class TokenizedBuffer extends Model
     @invalidateRow(0)
 
   setVisible: (@visible) ->
-    @tokenizeInBackground() if @visible
+    if @visible and @grammar.name isnt 'Null Grammar' and not @largeFileMode
+      @tokenizeInBackground()
 
   getTabLength: -> @tabLength
 
@@ -118,13 +119,6 @@ class TokenizedBuffer extends Model
       @tokenizeNextChunk() if @isAlive() and @buffer.isAlive()
 
   tokenizeNextChunk: ->
-    # Short circuit null grammar which can just use the placeholder tokens
-    if @grammar.name is 'Null Grammar' and @firstInvalidRow()?
-      @tokenizedLines = new Array(@buffer.getLineCount())
-      @invalidRows = []
-      @markTokenizationComplete()
-      return
-
     rowsRemaining = @chunkSize
 
     while @firstInvalidRow()? and rowsRemaining > 0
@@ -169,10 +163,9 @@ class TokenizedBuffer extends Model
     return
 
   invalidateRow: (row) ->
-    if @grammar.name isnt 'Null Grammar' and not @largeFileMode
-      @invalidRows.push(row)
-      @invalidRows.sort (a, b) -> a - b
-      @tokenizeInBackground()
+    @invalidRows.push(row)
+    @invalidRows.sort (a, b) -> a - b
+    @tokenizeInBackground()
 
   updateInvalidRows: (start, end, delta) ->
     @invalidRows = @invalidRows.map (row) ->
@@ -190,19 +183,19 @@ class TokenizedBuffer extends Model
     start = oldRange.start.row
     end = oldRange.end.row
     delta = newRange.end.row - oldRange.end.row
+    oldLineCount = oldRange.end.row - oldRange.start.row + 1
+    newLineCount = newRange.end.row - newRange.start.row + 1
 
     @updateInvalidRows(start, end, delta)
     previousEndStack = @stackForRow(end) # used in spill detection below
     if @largeFileMode or @grammar.name is 'Null Grammar'
-      lineCount = ((end + delta) - start) + 1
-      newTokenizedLines = new Array(lineCount)
+      _.spliceWithArray(@tokenizedLines, start, oldLineCount, new Array(newLineCount))
     else
       newTokenizedLines = @buildTokenizedLinesForRows(start, end + delta, @stackForRow(start - 1), @openScopesForRow(start))
-    _.spliceWithArray(@tokenizedLines, start, end - start + 1, newTokenizedLines)
-
-    newEndStack = @stackForRow(end + delta)
-    if newEndStack and not _.isEqual(newEndStack, previousEndStack)
-      @invalidateRow(end + delta + 1)
+      _.spliceWithArray(@tokenizedLines, start, oldLineCount, newTokenizedLines)
+      newEndStack = @stackForRow(end + delta)
+      if newEndStack and not _.isEqual(newEndStack, previousEndStack)
+        @invalidateRow(end + delta + 1)
 
   isFoldableAtRow: (row) ->
     if @largeFileMode
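
Note on the simplified spec setup in PATCH 10: the tests now hand the
grammar to `TokenizedBuffer` at construction time instead of threading
`grammarRegistry`, `packageManager`, and `assert` through every example
and calling `setGrammar` afterwards. A minimal sketch of the resulting
pattern, assuming the Atom spec environment and the suite's
`fullyTokenize` helper:

    # Construct against a concrete grammar; no registry, package manager,
    # or assert function is required, and there is no separate setGrammar
    # step before tokenization starts.
    buffer = atom.project.bufferForPathSync('sample.js')
    grammar = atom.grammars.grammarForScopeName('source.js')
    tokenizedBuffer = new TokenizedBuffer({buffer, grammar, tabLength: 2})
    fullyTokenize(tokenizedBuffer)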
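
The placeholder branch of `bufferDidChange` in PATCH 11 splices
`oldLineCount` rows out and `newLineCount` empty slots in. This is
equivalent to the deleted `lineCount = ((end + delta) - start) + 1`
arithmetic, since `delta = newRange.end.row - oldRange.end.row` implies
`newLineCount = oldLineCount + delta`; the new form simply names both
quantities. A standalone sketch of the splice (the helper name and the
plain-object ranges are hypothetical; the real method operates on
`@tokenizedLines` with `Range` instances):

    _ = require 'underscore-plus'

    # Replace the tokenized lines touched by an edit with one undefined
    # placeholder per row of the new range.
    splicePlaceholderLines = (tokenizedLines, oldRange, newRange) ->
      start = oldRange.start.row
      oldLineCount = oldRange.end.row - oldRange.start.row + 1
      newLineCount = newRange.end.row - newRange.start.row + 1
      _.spliceWithArray(tokenizedLines, start, oldLineCount, new Array(newLineCount))
      tokenizedLines

    # Replacing rows 2..3 with a single row in a five-line layer:
    lines = ['l0', 'l1', 'l2', 'l3', 'l4']
    splicePlaceholderLines(lines,
      {start: {row: 2}, end: {row: 3}},
      {start: {row: 2}, end: {row: 2}})
    # => ['l0', 'l1', undefined, 'l4']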
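
The restructured `else` branch also confines spill detection to the
real-grammar path: per the diff, `invalidateRow` now always records the
row and requests background work, while the null-grammar/large-file
guard lives in `setVisible`. After the edited rows are retokenized, the
rule stack at the new end of the region is compared against the stack
captured before the edit, and the following row is invalidated when they
differ, so a state change such as an unclosed block comment propagates
down the buffer. The check in isolation (a hypothetical free function
over the methods shown in the diff):

    _ = require 'underscore-plus'

    # previousEndStack is @stackForRow(end), captured before the edit.
    detectSpill = (tokenizedBuffer, end, delta, previousEndStack) ->
      newEndStack = tokenizedBuffer.stackForRow(end + delta)
      if newEndStack and not _.isEqual(newEndStack, previousEndStack)
        # The tokenization state leaving the edited region changed, so
        # the next row's cached tokens are stale and must be rebuilt.
        tokenizedBuffer.invalidateRow(end + delta + 1)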