🔥 💚 Fix TokenizedBuffer specs

Antonio Scandurra
2016-04-05 12:19:45 +02:00
parent 80b956e996
commit 544b75c7b0
3 changed files with 43 additions and 352 deletions

View File

@@ -134,13 +134,10 @@ describe "TokenizedBuffer", ->
describe "on construction", ->
it "initially creates un-tokenized screen lines, then tokenizes lines chunk at a time in the background", ->
line0 = tokenizedBuffer.tokenizedLineForRow(0)
expect(line0.tokens.length).toBe 1
expect(line0.tokens[0]).toEqual(value: line0.text, scopes: ['source.js'])
expect(line0.tokens).toEqual([value: line0.text, scopes: ['source.js']])
line11 = tokenizedBuffer.tokenizedLineForRow(11)
expect(line11.tokens.length).toBe 2
expect(line11.tokens[0]).toEqual(value: " ", scopes: ['source.js'], isAtomic: true)
expect(line11.tokens[1]).toEqual(value: "return sort(Array.apply(this, arguments));", scopes: ['source.js'])
expect(line11.tokens).toEqual([value: " return sort(Array.apply(this, arguments));", scopes: ['source.js']])
# background tokenization has not begun
expect(tokenizedBuffer.tokenizedLineForRow(0).ruleStack).toBeUndefined()
@@ -236,7 +233,7 @@ describe "TokenizedBuffer", ->
expect(tokenizedBuffer.tokenizedLineForRow(0).tokens[1]).toEqual(value: '(', scopes: ['source.js', 'meta.function-call.js', 'meta.arguments.js', 'punctuation.definition.arguments.begin.bracket.round.js'])
expect(tokenizedBuffer.tokenizedLineForRow(1).tokens[0]).toEqual(value: '7', scopes: ['source.js', 'constant.numeric.decimal.js'])
# line 2 is unchanged
expect(tokenizedBuffer.tokenizedLineForRow(2).tokens[2]).toEqual(value: 'if', scopes: ['source.js', 'keyword.control.js'])
expect(tokenizedBuffer.tokenizedLineForRow(2).tokens[1]).toEqual(value: 'if', scopes: ['source.js', 'keyword.control.js'])
expect(changeHandler).toHaveBeenCalled()
[event] = changeHandler.argsForCall[0]
@@ -283,9 +280,9 @@ describe "TokenizedBuffer", ->
expect(tokenizedBuffer.tokenizedLineForRow(1).tokens[6]).toEqual(value: '=', scopes: ['source.js', 'keyword.operator.assignment.js'])
# lines below deleted regions should be shifted upward
expect(tokenizedBuffer.tokenizedLineForRow(2).tokens[2]).toEqual(value: 'while', scopes: ['source.js', 'keyword.control.js'])
expect(tokenizedBuffer.tokenizedLineForRow(3).tokens[4]).toEqual(value: '=', scopes: ['source.js', 'keyword.operator.assignment.js'])
expect(tokenizedBuffer.tokenizedLineForRow(4).tokens[4]).toEqual(value: '<', scopes: ['source.js', 'keyword.operator.comparison.js'])
expect(tokenizedBuffer.tokenizedLineForRow(2).tokens[1]).toEqual(value: 'while', scopes: ['source.js', 'keyword.control.js'])
expect(tokenizedBuffer.tokenizedLineForRow(3).tokens[1]).toEqual(value: '=', scopes: ['source.js', 'keyword.operator.assignment.js'])
expect(tokenizedBuffer.tokenizedLineForRow(4).tokens[1]).toEqual(value: '<', scopes: ['source.js', 'keyword.operator.comparison.js'])
expect(changeHandler).toHaveBeenCalled()
[event] = changeHandler.argsForCall[0]
@@ -331,7 +328,7 @@ describe "TokenizedBuffer", ->
expect(tokenizedBuffer.tokenizedLineForRow(4).tokens[4]).toEqual(value: 'if', scopes: ['source.js', 'keyword.control.js'])
# previous line 3 is pushed down to become line 5
expect(tokenizedBuffer.tokenizedLineForRow(5).tokens[4]).toEqual(value: '=', scopes: ['source.js', 'keyword.operator.assignment.js'])
expect(tokenizedBuffer.tokenizedLineForRow(5).tokens[3]).toEqual(value: '=', scopes: ['source.js', 'keyword.operator.assignment.js'])
expect(changeHandler).toHaveBeenCalled()
[event] = changeHandler.argsForCall[0]
@@ -377,32 +374,6 @@ describe "TokenizedBuffer", ->
expect(tokenizedBuffer.tokenizedLineForRow(5).ruleStack?).toBeTruthy()
expect(tokenizedBuffer.tokenizedLineForRow(6).ruleStack?).toBeTruthy()
it "tokenizes leading whitespace based on the new tab length", ->
expect(tokenizedBuffer.tokenizedLineForRow(5).tokens[0].isAtomic).toBeTruthy()
expect(tokenizedBuffer.tokenizedLineForRow(5).tokens[0].value).toBe " "
expect(tokenizedBuffer.tokenizedLineForRow(5).tokens[1].isAtomic).toBeTruthy()
expect(tokenizedBuffer.tokenizedLineForRow(5).tokens[1].value).toBe " "
tokenizedBuffer.setTabLength(4)
fullyTokenize(tokenizedBuffer)
expect(tokenizedBuffer.tokenizedLineForRow(5).tokens[0].isAtomic).toBeTruthy()
expect(tokenizedBuffer.tokenizedLineForRow(5).tokens[0].value).toBe " "
expect(tokenizedBuffer.tokenizedLineForRow(5).tokens[1].isAtomic).toBeFalsy()
expect(tokenizedBuffer.tokenizedLineForRow(5).tokens[1].value).toBe " current "
it "does not tokenize whitespaces followed by combining characters as leading whitespace", ->
buffer.setText(" \u030b")
fullyTokenize(tokenizedBuffer)
{tokens} = tokenizedBuffer.tokenizedLineForRow(0)
expect(tokens[0].value).toBe " "
expect(tokens[0].hasLeadingWhitespace()).toBe true
expect(tokens[1].value).toBe " "
expect(tokens[1].hasLeadingWhitespace()).toBe true
expect(tokens[2].value).toBe " \u030b"
expect(tokens[2].hasLeadingWhitespace()).toBe false
it "does not break out soft tabs across a scope boundary", ->
waitsForPromise ->
atom.packages.activatePackage('language-gfm')
@@ -439,133 +410,6 @@ describe "TokenizedBuffer", ->
beforeEach ->
fullyTokenize(tokenizedBuffer)
it "renders each tab as its own atomic token with a value of size tabLength", ->
tabAsSpaces = _.multiplyString(' ', tokenizedBuffer.getTabLength())
screenLine0 = tokenizedBuffer.tokenizedLineForRow(0)
expect(screenLine0.text).toBe "# Econ 101#{tabAsSpaces}"
{tokens} = screenLine0
expect(tokens.length).toBe 4
expect(tokens[0].value).toBe "#"
expect(tokens[1].value).toBe " Econ 101"
expect(tokens[2].value).toBe tabAsSpaces
expect(tokens[2].scopes).toEqual tokens[1].scopes
expect(tokens[2].isAtomic).toBeTruthy()
expect(tokens[3].value).toBe ""
expect(tokenizedBuffer.tokenizedLineForRow(2).text).toBe "#{tabAsSpaces} buy()#{tabAsSpaces}while supply > demand"
it "aligns the hard tabs to the correct tab stop column", ->
buffer.setText """
1\t2 \t3\t4
12\t3 \t4\t5
123\t4 \t5\t6
"""
tokenizedBuffer.setTabLength(4)
fullyTokenize(tokenizedBuffer)
expect(tokenizedBuffer.tokenizedLineForRow(0).text).toBe "1 2 3 4"
expect(tokenizedBuffer.tokenizedLineForRow(0).tokens[1].bufferDelta).toBe 1
expect(tokenizedBuffer.tokenizedLineForRow(0).tokens[1].screenDelta).toBe 3
expect(tokenizedBuffer.tokenizedLineForRow(1).text).toBe "12 3 4 5"
expect(tokenizedBuffer.tokenizedLineForRow(1).tokens[1].bufferDelta).toBe 1
expect(tokenizedBuffer.tokenizedLineForRow(1).tokens[1].screenDelta).toBe 2
expect(tokenizedBuffer.tokenizedLineForRow(2).text).toBe "123 4 5 6"
expect(tokenizedBuffer.tokenizedLineForRow(2).tokens[1].bufferDelta).toBe 1
expect(tokenizedBuffer.tokenizedLineForRow(2).tokens[1].screenDelta).toBe 1
tokenizedBuffer.setTabLength(3)
fullyTokenize(tokenizedBuffer)
expect(tokenizedBuffer.tokenizedLineForRow(0).text).toBe "1 2 3 4"
expect(tokenizedBuffer.tokenizedLineForRow(0).tokens[1].bufferDelta).toBe 1
expect(tokenizedBuffer.tokenizedLineForRow(0).tokens[1].screenDelta).toBe 2
expect(tokenizedBuffer.tokenizedLineForRow(1).text).toBe "12 3 4 5"
expect(tokenizedBuffer.tokenizedLineForRow(1).tokens[1].bufferDelta).toBe 1
expect(tokenizedBuffer.tokenizedLineForRow(1).tokens[1].screenDelta).toBe 1
expect(tokenizedBuffer.tokenizedLineForRow(2).text).toBe "123 4 5 6"
expect(tokenizedBuffer.tokenizedLineForRow(2).tokens[1].bufferDelta).toBe 1
expect(tokenizedBuffer.tokenizedLineForRow(2).tokens[1].screenDelta).toBe 3
tokenizedBuffer.setTabLength(2)
fullyTokenize(tokenizedBuffer)
expect(tokenizedBuffer.tokenizedLineForRow(0).text).toBe "1 2 3 4"
expect(tokenizedBuffer.tokenizedLineForRow(0).tokens[1].bufferDelta).toBe 1
expect(tokenizedBuffer.tokenizedLineForRow(0).tokens[1].screenDelta).toBe 1
expect(tokenizedBuffer.tokenizedLineForRow(1).text).toBe "12 3 4 5"
expect(tokenizedBuffer.tokenizedLineForRow(1).tokens[1].bufferDelta).toBe 1
expect(tokenizedBuffer.tokenizedLineForRow(1).tokens[1].screenDelta).toBe 2
expect(tokenizedBuffer.tokenizedLineForRow(2).text).toBe "123 4 5 6"
expect(tokenizedBuffer.tokenizedLineForRow(2).tokens[1].bufferDelta).toBe 1
expect(tokenizedBuffer.tokenizedLineForRow(2).tokens[1].screenDelta).toBe 1
tokenizedBuffer.setTabLength(1)
fullyTokenize(tokenizedBuffer)
expect(tokenizedBuffer.tokenizedLineForRow(0).text).toBe "1 2 3 4"
expect(tokenizedBuffer.tokenizedLineForRow(0).tokens[1].bufferDelta).toBe 1
expect(tokenizedBuffer.tokenizedLineForRow(0).tokens[1].screenDelta).toBe 1
expect(tokenizedBuffer.tokenizedLineForRow(1).text).toBe "12 3 4 5"
expect(tokenizedBuffer.tokenizedLineForRow(1).tokens[1].bufferDelta).toBe 1
expect(tokenizedBuffer.tokenizedLineForRow(1).tokens[1].screenDelta).toBe 1
expect(tokenizedBuffer.tokenizedLineForRow(2).text).toBe "123 4 5 6"
expect(tokenizedBuffer.tokenizedLineForRow(2).tokens[1].bufferDelta).toBe 1
expect(tokenizedBuffer.tokenizedLineForRow(2).tokens[1].screenDelta).toBe 1
describe "when the buffer contains UTF-8 surrogate pairs", ->
beforeEach ->
waitsForPromise ->
atom.packages.activatePackage('language-javascript')
runs ->
buffer = atom.project.bufferForPathSync 'sample-with-pairs.js'
buffer.setText """
'abc\uD835\uDF97def'
//\uD835\uDF97xyz
"""
tokenizedBuffer = new TokenizedBuffer({
buffer, config: atom.config, grammarRegistry: atom.grammars, packageManager: atom.packages, assert: atom.assert
})
fullyTokenize(tokenizedBuffer)
afterEach ->
tokenizedBuffer.destroy()
buffer.release()
it "renders each UTF-8 surrogate pair as its own atomic token", ->
screenLine0 = tokenizedBuffer.tokenizedLineForRow(0)
expect(screenLine0.text).toBe "'abc\uD835\uDF97def'"
{tokens} = screenLine0
expect(tokens.length).toBe 5
expect(tokens[0].value).toBe "'"
expect(tokens[1].value).toBe "abc"
expect(tokens[2].value).toBe "\uD835\uDF97"
expect(tokens[2].isAtomic).toBeTruthy()
expect(tokens[3].value).toBe "def"
expect(tokens[4].value).toBe "'"
screenLine1 = tokenizedBuffer.tokenizedLineForRow(1)
expect(screenLine1.text).toBe "//\uD835\uDF97xyz"
{tokens} = screenLine1
expect(tokens.length).toBe 4
expect(tokens[0].value).toBe '//'
expect(tokens[1].value).toBe '\uD835\uDF97'
expect(tokens[1].isAtomic).toBeTruthy()
expect(tokens[2].value).toBe 'xyz'
expect(tokens[3].value).toBe ''
describe "when the grammar is tokenized", ->
it "emits the `tokenized` event", ->
editor = null
@@ -683,132 +527,7 @@ describe "TokenizedBuffer", ->
it "returns the range covered by all contigous tokens (within a single line)", ->
expect(tokenizedBuffer.bufferRangeForScopeAtPosition('.function', [1, 18])).toEqual [[1, 6], [1, 28]]
describe "when the editor.tabLength config value changes", ->
it "updates the tab length of the tokenized lines", ->
buffer = atom.project.bufferForPathSync('sample.js')
buffer.setText('\ttest')
tokenizedBuffer = new TokenizedBuffer({
buffer, config: atom.config, grammarRegistry: atom.grammars, packageManager: atom.packages, assert: atom.assert
})
fullyTokenize(tokenizedBuffer)
expect(tokenizedBuffer.tokenForPosition([0, 0]).value).toBe '  '
atom.config.set('editor.tabLength', 6)
expect(tokenizedBuffer.tokenForPosition([0, 0]).value).toBe '      '
it "does not allow the tab length to be less than 1", ->
buffer = atom.project.bufferForPathSync('sample.js')
buffer.setText('\ttest')
tokenizedBuffer = new TokenizedBuffer({
buffer, config: atom.config, grammarRegistry: atom.grammars, packageManager: atom.packages, assert: atom.assert
})
fullyTokenize(tokenizedBuffer)
expect(tokenizedBuffer.tokenForPosition([0, 0]).value).toBe '  '
atom.config.set('editor.tabLength', 1)
expect(tokenizedBuffer.tokenForPosition([0, 0]).value).toBe ' '
atom.config.set('editor.tabLength', 0)
expect(tokenizedBuffer.tokenForPosition([0, 0]).value).toBe ' '
describe "when the invisibles value changes", ->
beforeEach ->
it "updates the tokens with the appropriate invisible characters", ->
buffer = new TextBuffer(text: " \t a line with tabs\tand \tspaces \t ")
tokenizedBuffer = new TokenizedBuffer({
buffer, config: atom.config, grammarRegistry: atom.grammars, packageManager: atom.packages, assert: atom.assert
})
fullyTokenize(tokenizedBuffer)
atom.config.set("editor.showInvisibles", true)
atom.config.set("editor.invisibles", space: 'S', tab: 'T')
fullyTokenize(tokenizedBuffer)
expect(tokenizedBuffer.tokenizedLineForRow(0).text).toBe "SST Sa line with tabsTand T spacesSTS"
# Also needs to work for copies
expect(tokenizedBuffer.tokenizedLineForRow(0).copy().text).toBe "SST Sa line with tabsTand T spacesSTS"
it "assigns endOfLineInvisibles to tokenized lines", ->
buffer = new TextBuffer(text: "a line that ends in a carriage-return-line-feed \r\na line that ends in just a line-feed\na line with no ending")
tokenizedBuffer = new TokenizedBuffer({
buffer, config: atom.config, grammarRegistry: atom.grammars, packageManager: atom.packages, assert: atom.assert
})
atom.config.set('editor.showInvisibles', true)
atom.config.set("editor.invisibles", cr: 'R', eol: 'N')
fullyTokenize(tokenizedBuffer)
expect(tokenizedBuffer.tokenizedLineForRow(0).endOfLineInvisibles).toEqual ['R', 'N']
expect(tokenizedBuffer.tokenizedLineForRow(1).endOfLineInvisibles).toEqual ['N']
# Lines ending in soft wraps get no invisibles
[left, right] = tokenizedBuffer.tokenizedLineForRow(0).softWrapAt(20)
expect(left.endOfLineInvisibles).toBe null
expect(right.endOfLineInvisibles).toEqual ['R', 'N']
atom.config.set("editor.invisibles", cr: 'R', eol: false)
expect(tokenizedBuffer.tokenizedLineForRow(0).endOfLineInvisibles).toEqual ['R']
expect(tokenizedBuffer.tokenizedLineForRow(1).endOfLineInvisibles).toEqual []
describe "leading and trailing whitespace", ->
beforeEach ->
buffer = atom.project.bufferForPathSync('sample.js')
tokenizedBuffer = new TokenizedBuffer({
buffer, config: atom.config, grammarRegistry: atom.grammars, packageManager: atom.packages, assert: atom.assert
})
fullyTokenize(tokenizedBuffer)
it "assigns ::firstNonWhitespaceIndex on tokens that have leading whitespace", ->
expect(tokenizedBuffer.tokenizedLineForRow(0).tokens[0].firstNonWhitespaceIndex).toBe null
expect(tokenizedBuffer.tokenizedLineForRow(1).tokens[0].firstNonWhitespaceIndex).toBe 2
expect(tokenizedBuffer.tokenizedLineForRow(1).tokens[1].firstNonWhitespaceIndex).toBe null
expect(tokenizedBuffer.tokenizedLineForRow(2).tokens[0].firstNonWhitespaceIndex).toBe 2
expect(tokenizedBuffer.tokenizedLineForRow(2).tokens[1].firstNonWhitespaceIndex).toBe 2
expect(tokenizedBuffer.tokenizedLineForRow(2).tokens[2].firstNonWhitespaceIndex).toBe null
# The 4th token *has* leading whitespace, but isn't entirely whitespace
buffer.insert([5, 0], ' ')
expect(tokenizedBuffer.tokenizedLineForRow(5).tokens[3].firstNonWhitespaceIndex).toBe 1
expect(tokenizedBuffer.tokenizedLineForRow(5).tokens[4].firstNonWhitespaceIndex).toBe null
# Lines that are *only* whitespace are not considered to have leading whitespace
buffer.insert([10, 0], ' ')
expect(tokenizedBuffer.tokenizedLineForRow(10).tokens[0].firstNonWhitespaceIndex).toBe null
it "assigns ::firstTrailingWhitespaceIndex on tokens that have trailing whitespace", ->
buffer.insert([0, Infinity], ' ')
expect(tokenizedBuffer.tokenizedLineForRow(0).tokens[11].firstTrailingWhitespaceIndex).toBe null
expect(tokenizedBuffer.tokenizedLineForRow(0).tokens[12].firstTrailingWhitespaceIndex).toBe 0
# The last token *has* trailing whitespace, but isn't entirely whitespace
buffer.setTextInRange([[2, 39], [2, 40]], ' ')
expect(tokenizedBuffer.tokenizedLineForRow(2).tokens[14].firstTrailingWhitespaceIndex).toBe null
expect(tokenizedBuffer.tokenizedLineForRow(2).tokens[15].firstTrailingWhitespaceIndex).toBe 6
# Lines that are *only* whitespace are considered to have trailing whitespace
buffer.insert([10, 0], ' ')
expect(tokenizedBuffer.tokenizedLineForRow(10).tokens[0].firstTrailingWhitespaceIndex).toBe 0
it "only marks trailing whitespace on the last segment of a soft-wrapped line", ->
buffer.insert([0, Infinity], ' ')
tokenizedLine = tokenizedBuffer.tokenizedLineForRow(0)
[segment1, segment2] = tokenizedLine.softWrapAt(16)
expect(segment1.tokens[5].value).toBe ' '
expect(segment1.tokens[5].firstTrailingWhitespaceIndex).toBe null
expect(segment2.tokens[6].value).toBe ' '
expect(segment2.tokens[6].firstTrailingWhitespaceIndex).toBe 0
it "sets leading and trailing whitespace correctly on a line with invisible characters that is copied", ->
buffer.setText(" \t a line with tabs\tand \tspaces \t ")
atom.config.set("editor.showInvisibles", true)
atom.config.set("editor.invisibles", space: 'S', tab: 'T')
fullyTokenize(tokenizedBuffer)
line = tokenizedBuffer.tokenizedLineForRow(0).copy()
expect(line.tokens[0].firstNonWhitespaceIndex).toBe 2
expect(line.tokens[line.tokens.length - 1].firstTrailingWhitespaceIndex).toBe 0
describe ".indentLevel on tokenized lines", ->
describe ".indentLevelForRow(row)", ->
beforeEach ->
buffer = atom.project.bufferForPathSync('sample.js')
tokenizedBuffer = new TokenizedBuffer({
@@ -818,43 +537,43 @@ describe "TokenizedBuffer", ->
describe "when the line is non-empty", ->
it "has an indent level based on the leading whitespace on the line", ->
expect(tokenizedBuffer.tokenizedLineForRow(0).indentLevel).toBe 0
expect(tokenizedBuffer.tokenizedLineForRow(1).indentLevel).toBe 1
expect(tokenizedBuffer.tokenizedLineForRow(2).indentLevel).toBe 2
expect(tokenizedBuffer.indentLevelForRow(0)).toBe 0
expect(tokenizedBuffer.indentLevelForRow(1)).toBe 1
expect(tokenizedBuffer.indentLevelForRow(2)).toBe 2
buffer.insert([2, 0], ' ')
expect(tokenizedBuffer.tokenizedLineForRow(2).indentLevel).toBe 2.5
expect(tokenizedBuffer.indentLevelForRow(2)).toBe 2.5
describe "when the line is empty", ->
it "assumes the indentation level of the first non-empty line below or above if one exists", ->
buffer.insert([12, 0], '    ')
buffer.insert([12, Infinity], '\n\n')
expect(tokenizedBuffer.tokenizedLineForRow(13).indentLevel).toBe 2
expect(tokenizedBuffer.tokenizedLineForRow(14).indentLevel).toBe 2
expect(tokenizedBuffer.indentLevelForRow(13)).toBe 2
expect(tokenizedBuffer.indentLevelForRow(14)).toBe 2
buffer.insert([1, Infinity], '\n\n')
expect(tokenizedBuffer.tokenizedLineForRow(2).indentLevel).toBe 2
expect(tokenizedBuffer.tokenizedLineForRow(3).indentLevel).toBe 2
expect(tokenizedBuffer.indentLevelForRow(2)).toBe 2
expect(tokenizedBuffer.indentLevelForRow(3)).toBe 2
buffer.setText('\n\n\n')
expect(tokenizedBuffer.tokenizedLineForRow(1).indentLevel).toBe 0
expect(tokenizedBuffer.indentLevelForRow(1)).toBe 0
describe "when the changed lines are surrounded by whitespace-only lines", ->
it "updates the indentLevel of empty lines that precede the change", ->
expect(tokenizedBuffer.tokenizedLineForRow(12).indentLevel).toBe 0
expect(tokenizedBuffer.indentLevelForRow(12)).toBe 0
buffer.insert([12, 0], '\n')
buffer.insert([13, 0], '  ')
expect(tokenizedBuffer.tokenizedLineForRow(12).indentLevel).toBe 1
expect(tokenizedBuffer.indentLevelForRow(12)).toBe 1
it "updates empty line indent guides when the empty line is the last line", ->
buffer.insert([12, 2], '\n')
# The newline and the tab need to be in two different operations to surface the bug
buffer.insert([12, 0], '  ')
expect(tokenizedBuffer.tokenizedLineForRow(13).indentLevel).toBe 1
expect(tokenizedBuffer.indentLevelForRow(13)).toBe 1
buffer.insert([12, 0], '  ')
expect(tokenizedBuffer.tokenizedLineForRow(13).indentLevel).toBe 2
expect(tokenizedBuffer.indentLevelForRow(13)).toBe 2
expect(tokenizedBuffer.tokenizedLineForRow(14)).not.toBeDefined()
it "updates the indentLevel of empty lines surrounding a change that inserts lines", ->
@@ -862,24 +581,24 @@ describe "TokenizedBuffer", ->
buffer.insert([7, 0], '\n\n')
buffer.insert([5, 0], '\n\n')
expect(tokenizedBuffer.tokenizedLineForRow(5).indentLevel).toBe 3
expect(tokenizedBuffer.tokenizedLineForRow(6).indentLevel).toBe 3
expect(tokenizedBuffer.tokenizedLineForRow(9).indentLevel).toBe 3
expect(tokenizedBuffer.tokenizedLineForRow(10).indentLevel).toBe 3
expect(tokenizedBuffer.tokenizedLineForRow(11).indentLevel).toBe 2
expect(tokenizedBuffer.indentLevelForRow(5)).toBe 3
expect(tokenizedBuffer.indentLevelForRow(6)).toBe 3
expect(tokenizedBuffer.indentLevelForRow(9)).toBe 3
expect(tokenizedBuffer.indentLevelForRow(10)).toBe 3
expect(tokenizedBuffer.indentLevelForRow(11)).toBe 2
tokenizedBuffer.onDidChange changeHandler = jasmine.createSpy('changeHandler')
buffer.setTextInRange([[7, 0], [8, 65]], '        one\n        two\n        three\n        four')
delete changeHandler.argsForCall[0][0].bufferChange
expect(changeHandler).toHaveBeenCalledWith(start: 5, end: 10, delta: 2)
expect(changeHandler).toHaveBeenCalledWith(start: 7, end: 8, delta: 2)
expect(tokenizedBuffer.tokenizedLineForRow(5).indentLevel).toBe 4
expect(tokenizedBuffer.tokenizedLineForRow(6).indentLevel).toBe 4
expect(tokenizedBuffer.tokenizedLineForRow(11).indentLevel).toBe 4
expect(tokenizedBuffer.tokenizedLineForRow(12).indentLevel).toBe 4
expect(tokenizedBuffer.tokenizedLineForRow(13).indentLevel).toBe 2
expect(tokenizedBuffer.indentLevelForRow(5)).toBe 4
expect(tokenizedBuffer.indentLevelForRow(6)).toBe 4
expect(tokenizedBuffer.indentLevelForRow(11)).toBe 4
expect(tokenizedBuffer.indentLevelForRow(12)).toBe 4
expect(tokenizedBuffer.indentLevelForRow(13)).toBe 2
it "updates the indentLevel of empty lines surrounding a change that removes lines", ->
# create some new lines
@@ -891,14 +610,14 @@ describe "TokenizedBuffer", ->
buffer.setTextInRange([[7, 0], [8, 65]], '    ok')
delete changeHandler.argsForCall[0][0].bufferChange
expect(changeHandler).toHaveBeenCalledWith(start: 5, end: 10, delta: -1)
expect(changeHandler).toHaveBeenCalledWith(start: 7, end: 8, delta: -1)
expect(tokenizedBuffer.tokenizedLineForRow(5).indentLevel).toBe 2
expect(tokenizedBuffer.tokenizedLineForRow(6).indentLevel).toBe 2
expect(tokenizedBuffer.tokenizedLineForRow(7).indentLevel).toBe 2 # new text
expect(tokenizedBuffer.tokenizedLineForRow(8).indentLevel).toBe 2
expect(tokenizedBuffer.tokenizedLineForRow(9).indentLevel).toBe 2
expect(tokenizedBuffer.tokenizedLineForRow(10).indentLevel).toBe 2 # }
expect(tokenizedBuffer.indentLevelForRow(5)).toBe 2
expect(tokenizedBuffer.indentLevelForRow(6)).toBe 2
expect(tokenizedBuffer.indentLevelForRow(7)).toBe 2 # new text
expect(tokenizedBuffer.indentLevelForRow(8)).toBe 2
expect(tokenizedBuffer.indentLevelForRow(9)).toBe 2
expect(tokenizedBuffer.indentLevelForRow(10)).toBe 2 # }
describe "::isFoldableAtRow(row)", ->
changes = null

View File

@@ -7,22 +7,10 @@ WhitespaceRegex = /\S/
module.exports =
class Token
value: null
hasPairedCharacter: false
scopes: null
isAtomic: null
isHardTab: null
firstNonWhitespaceIndex: null
firstTrailingWhitespaceIndex: null
hasInvisibleCharacters: false
constructor: (properties) ->
{@value, @scopes, @isAtomic, @isHardTab, @bufferDelta} = properties
{@hasInvisibleCharacters, @hasPairedCharacter, @isSoftWrapIndentation} = properties
@firstNonWhitespaceIndex = properties.firstNonWhitespaceIndex ? null
@firstTrailingWhitespaceIndex = properties.firstTrailingWhitespaceIndex ? null
@screenDelta = @value.length
@bufferDelta ?= @screenDelta
{@value, @scopes} = properties
isEqual: (other) ->
# TODO: scopes is deprecated. This is here for the sake of lang package tests
@@ -31,17 +19,8 @@ class Token
isBracket: ->
/^meta\.brace\b/.test(_.last(@scopes))
isOnlyWhitespace: ->
not WhitespaceRegex.test(@value)
matchesScopeSelector: (selector) ->
targetClasses = selector.replace(StartDotRegex, '').split('.')
_.any @scopes, (scope) ->
scopeClasses = scope.split('.')
_.isSubset(targetClasses, scopeClasses)
hasLeadingWhitespace: ->
@firstNonWhitespaceIndex? and @firstNonWhitespaceIndex > 0
hasTrailingWhitespace: ->
@firstTrailingWhitespaceIndex? and @firstTrailingWhitespaceIndex < @value.length

View File

@@ -7,10 +7,6 @@ idCounter = 1
module.exports =
class TokenizedLine
endOfLineInvisibles: null
lineIsWhitespaceOnly: false
firstNonWhitespaceIndex: 0
constructor: (properties) ->
@id = idCounter++
@@ -36,10 +32,10 @@ class TokenizedLine
@tokens[@tokenIndexAtBufferColumn(bufferColumn)]
tokenIndexAtBufferColumn: (bufferColumn) ->
delta = 0
column = 0
for token, index in @tokens
delta += token.bufferDelta
return index if delta > bufferColumn
column += token.value.length
return index if column > bufferColumn
index - 1
tokenStartColumnForBufferColumn: (bufferColumn) ->
@@ -65,9 +61,6 @@ class TokenizedLine
break
@isCommentLine
isOnlyWhitespace: ->
@lineIsWhitespaceOnly
tokenAtIndex: (index) ->
@tokens[index]