mirror of https://github.com/atom/atom.git
Get TokenizedBuffer tests passing
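
This commit threads the editor's config through TokenizedBuffer instead of a standalone tabLength, updates the spec to construct language modes that way, and routes tokenization events through the editor's own emitter. As an illustrative sketch (not part of the diff; every name below is taken from the hunks that follow), the construction pattern the updated spec exercises is:

    // config comes from the spec's beforeEach: config = atom.config
    // tabLength is no longer a constructor parameter; settings are read via config
    tokenizedBuffer = new TokenizedBuffer({
      buffer,                                                   // the TextBuffer to tokenize
      config,                                                   // replaces the old tabLength: 2 option
      grammar: atom.grammars.grammarForScopeName('source.js')   // unchanged
    })
    buffer.setLanguageMode(tokenizedBuffer)                     // register it as the buffer's language mode
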
diff --git a/spec/tokenized-buffer-spec.js b/spec/tokenized-buffer-spec.js
@@ -8,9 +8,10 @@ const {it, fit, ffit, fffit, beforeEach, afterEach} = require('./async-spec-helpers')
 const {ScopedSettingsDelegate} = require('../src/text-editor-registry')

 describe('TokenizedBuffer', () => {
-  let tokenizedBuffer, buffer
+  let tokenizedBuffer, buffer, config

   beforeEach(async () => {
+    config = atom.config
     // enable async tokenization
     TokenizedBuffer.prototype.chunkSize = 5
     jasmine.unspy(TokenizedBuffer.prototype, 'tokenizeInBackground')
@@ -61,36 +62,11 @@ describe('TokenizedBuffer', () => {
     })
   })

-  describe('serialization', () => {
-    describe('when the underlying buffer has a path', () => {
-      beforeEach(async () => {
-        buffer = atom.project.bufferForPathSync('sample.js')
-        await atom.packages.activatePackage('language-coffee-script')
-      })
-
-      it('deserializes it searching among the buffers in the current project', () => {
-        const tokenizedBufferA = new TokenizedBuffer({buffer, tabLength: 2})
-        const tokenizedBufferB = TokenizedBuffer.deserialize(JSON.parse(JSON.stringify(tokenizedBufferA.serialize())), atom)
-        expect(tokenizedBufferB.buffer).toBe(tokenizedBufferA.buffer)
-      })
-    })
-
-    describe('when the underlying buffer has no path', () => {
-      beforeEach(() => buffer = atom.project.bufferForPathSync(null))
-
-      it('deserializes it searching among the buffers in the current project', () => {
-        const tokenizedBufferA = new TokenizedBuffer({buffer, tabLength: 2})
-        const tokenizedBufferB = TokenizedBuffer.deserialize(JSON.parse(JSON.stringify(tokenizedBufferA.serialize())), atom)
-        expect(tokenizedBufferB.buffer).toBe(tokenizedBufferA.buffer)
-      })
-    })
-  })
-
   describe('tokenizing', () => {
     describe('when the buffer is destroyed', () => {
       beforeEach(() => {
         buffer = atom.project.bufferForPathSync('sample.js')
-        tokenizedBuffer = new TokenizedBuffer({buffer, grammar: atom.grammars.grammarForScopeName('source.js'), tabLength: 2})
+        tokenizedBuffer = new TokenizedBuffer({buffer, config, grammar: atom.grammars.grammarForScopeName('source.js')})
         startTokenizing(tokenizedBuffer)
       })

@@ -105,7 +81,8 @@ describe('TokenizedBuffer', () => {
     describe('when the buffer contains soft-tabs', () => {
       beforeEach(() => {
         buffer = atom.project.bufferForPathSync('sample.js')
-        tokenizedBuffer = new TokenizedBuffer({buffer, grammar: atom.grammars.grammarForScopeName('source.js'), tabLength: 2})
+        tokenizedBuffer = new TokenizedBuffer({buffer, config, grammar: atom.grammars.grammarForScopeName('source.js')})
+        buffer.setLanguageMode(tokenizedBuffer)
         startTokenizing(tokenizedBuffer)
       })

@@ -299,7 +276,7 @@ describe('TokenizedBuffer', () => {
       })
     })

-    describe('when there is an insertion that is larger than the chunk size', () =>
+    describe('when there is an insertion that is larger than the chunk size', () => {
       it('tokenizes the initial chunk synchronously, then tokenizes the remaining lines in the background', () => {
         const commentBlock = _.multiplyString('// a comment\n', tokenizedBuffer.chunkSize + 2)
         buffer.insert([0, 0], commentBlock)
@@ -311,22 +288,6 @@ describe('TokenizedBuffer', () => {
         expect(tokenizedBuffer.tokenizedLines[5].ruleStack != null).toBeTruthy()
         expect(tokenizedBuffer.tokenizedLines[6].ruleStack != null).toBeTruthy()
       })
-    )
-
-    it('does not break out soft tabs across a scope boundary', async () => {
-      await atom.packages.activatePackage('language-gfm')
-
-      tokenizedBuffer.setTabLength(4)
-      tokenizedBuffer.setGrammar(atom.grammars.selectGrammar('.md'))
-      buffer.setText('    <![]()\n    ')
-      fullyTokenize(tokenizedBuffer)
-
-      let length = 0
-      for (let tag of tokenizedBuffer.tokenizedLines[1].tags) {
-        if (tag > 0) length += tag
-      }
-
-      expect(length).toBe(4)
     })
   })
 })
@@ -336,7 +297,7 @@ describe('TokenizedBuffer', () => {
       atom.packages.activatePackage('language-coffee-script')

       buffer = atom.project.bufferForPathSync('sample-with-tabs.coffee')
-      tokenizedBuffer = new TokenizedBuffer({buffer, grammar: atom.grammars.grammarForScopeName('source.coffee'), tabLength: 2})
+      tokenizedBuffer = new TokenizedBuffer({buffer, config, grammar: atom.grammars.grammarForScopeName('source.coffee')})
       startTokenizing(tokenizedBuffer)
     })

@@ -356,7 +317,7 @@ describe('TokenizedBuffer', () => {

       const tokenizedHandler = jasmine.createSpy('tokenized handler')
       editor.tokenizedBuffer.onDidTokenize(tokenizedHandler)
-      fullyTokenize(editor.tokenizedBuffer)
+      fullyTokenize(editor.getBuffer().getLanguageMode())
       expect(tokenizedHandler.callCount).toBe(1)
     })

@@ -374,16 +335,16 @@ describe('TokenizedBuffer', () => {

     describe('when the grammar is updated because a grammar it includes is activated', async () => {
       it('re-emits the `tokenized` event', async () => {
-        const editor = await atom.workspace.open('coffee.coffee')
+        let tokenizationCount = 0

-        const tokenizedHandler = jasmine.createSpy('tokenized handler')
-        editor.tokenizedBuffer.onDidTokenize(tokenizedHandler)
-        fullyTokenize(editor.tokenizedBuffer)
-        tokenizedHandler.reset()
+        const editor = await atom.workspace.open('coffee.coffee')
+        editor.onDidTokenize(() => { tokenizationCount++ })
+        fullyTokenize(editor.getBuffer().getLanguageMode())
+        tokenizationCount = 0

         await atom.packages.activatePackage('language-coffee-script')
-        fullyTokenize(editor.tokenizedBuffer)
-        expect(tokenizedHandler.callCount).toBe(1)
+        fullyTokenize(editor.getBuffer().getLanguageMode())
+        expect(tokenizationCount).toBe(1)
       })

       it('retokenizes the buffer', async () => {
@@ -393,7 +354,7 @@ describe('TokenizedBuffer', () => {
       buffer = atom.project.bufferForPathSync()
       buffer.setText("<div class='name'><%= User.find(2).full_name %></div>")

-      tokenizedBuffer = new TokenizedBuffer({buffer, grammar: atom.grammars.selectGrammar('test.erb'), tabLength: 2})
+      tokenizedBuffer = new TokenizedBuffer({buffer, config, grammar: atom.grammars.selectGrammar('test.erb')})
       fullyTokenize(tokenizedBuffer)
       expect(tokenizedBuffer.tokenizedLines[0].tokens[0]).toEqual({
         value: "<div class='name'>",
@@ -414,7 +375,7 @@ describe('TokenizedBuffer', () => {
       spyOn(NullGrammar, 'tokenizeLine').andCallThrough()
       buffer = atom.project.bufferForPathSync('sample.will-use-the-null-grammar')
       buffer.setText('a\nb\nc')
-      tokenizedBuffer = new TokenizedBuffer({buffer, tabLength: 2})
+      tokenizedBuffer = new TokenizedBuffer({buffer, config})
       const tokenizeCallback = jasmine.createSpy('onDidTokenize')
       tokenizedBuffer.onDidTokenize(tokenizeCallback)

@@ -442,7 +403,7 @@ describe('TokenizedBuffer', () => {

     it('returns the correct token (regression)', () => {
       buffer = atom.project.bufferForPathSync('sample.js')
-      tokenizedBuffer = new TokenizedBuffer({buffer, grammar: atom.grammars.grammarForScopeName('source.js'), tabLength: 2})
+      tokenizedBuffer = new TokenizedBuffer({buffer, config, grammar: atom.grammars.grammarForScopeName('source.js')})
       fullyTokenize(tokenizedBuffer)
       expect(tokenizedBuffer.tokenForPosition([1, 0]).scopes).toEqual(['source.js'])
       expect(tokenizedBuffer.tokenForPosition([1, 1]).scopes).toEqual(['source.js'])
@@ -453,7 +414,7 @@ describe('TokenizedBuffer', () => {
   describe('.bufferRangeForScopeAtPosition(selector, position)', () => {
     beforeEach(() => {
       buffer = atom.project.bufferForPathSync('sample.js')
-      tokenizedBuffer = new TokenizedBuffer({buffer, grammar: atom.grammars.grammarForScopeName('source.js'), tabLength: 2})
+      tokenizedBuffer = new TokenizedBuffer({buffer, config, grammar: atom.grammars.grammarForScopeName('source.js')})
       fullyTokenize(tokenizedBuffer)
     })

@@ -479,7 +440,7 @@ describe('TokenizedBuffer', () => {
     it("returns the tokenized line for a row, or a placeholder line if it hasn't been tokenized yet", () => {
       buffer = atom.project.bufferForPathSync('sample.js')
       const grammar = atom.grammars.grammarForScopeName('source.js')
-      tokenizedBuffer = new TokenizedBuffer({buffer, grammar, tabLength: 2})
+      tokenizedBuffer = new TokenizedBuffer({buffer, config, grammar})
       const line0 = buffer.lineForRow(0)

       const jsScopeStartId = grammar.startIdForScope(grammar.scopeName)
@@ -492,23 +453,12 @@ describe('TokenizedBuffer', () => {
       expect(tokenizedBuffer.tokenizedLines[0]).not.toBeUndefined()
       expect(tokenizedBuffer.tokenizedLineForRow(0).text).toBe(line0)
       expect(tokenizedBuffer.tokenizedLineForRow(0).tags).not.toEqual([jsScopeStartId, line0.length, jsScopeEndId])
-
-      const nullScopeStartId = NullGrammar.startIdForScope(NullGrammar.scopeName)
-      const nullScopeEndId = NullGrammar.endIdForScope(NullGrammar.scopeName)
-      tokenizedBuffer.setGrammar(NullGrammar)
-      startTokenizing(tokenizedBuffer)
-      expect(tokenizedBuffer.tokenizedLines[0]).toBeUndefined()
-      expect(tokenizedBuffer.tokenizedLineForRow(0).text).toBe(line0)
-      expect(tokenizedBuffer.tokenizedLineForRow(0).tags).toEqual([nullScopeStartId, line0.length, nullScopeEndId])
-      advanceClock(1)
-      expect(tokenizedBuffer.tokenizedLineForRow(0).text).toBe(line0)
-      expect(tokenizedBuffer.tokenizedLineForRow(0).tags).toEqual([nullScopeStartId, line0.length, nullScopeEndId])
     })

     it('returns undefined if the requested row is outside the buffer range', () => {
       buffer = atom.project.bufferForPathSync('sample.js')
       const grammar = atom.grammars.grammarForScopeName('source.js')
-      tokenizedBuffer = new TokenizedBuffer({buffer, grammar, tabLength: 2})
+      tokenizedBuffer = new TokenizedBuffer({buffer, config, grammar})
       fullyTokenize(tokenizedBuffer)
       expect(tokenizedBuffer.tokenizedLineForRow(999)).toBeUndefined()
     })
@@ -518,10 +468,10 @@ describe('TokenizedBuffer', () => {
   describe('iterator', () => {
     it('iterates over the syntactic scope boundaries', () => {
       buffer = new TextBuffer({text: 'var foo = 1 /*\nhello*/var bar = 2\n'})
-      tokenizedBuffer = new TokenizedBuffer({buffer, grammar: atom.grammars.grammarForScopeName('source.js'), tabLength: 2})
+      tokenizedBuffer = new TokenizedBuffer({buffer, config, grammar: atom.grammars.grammarForScopeName('source.js')})
       fullyTokenize(tokenizedBuffer)

-      const iterator = tokenizedBuffer.buildIterator()
+      const iterator = tokenizedBuffer.buildHighlightIterator()
       iterator.seek(Point(0, 0))

       const expectedBoundaries = [
@@ -583,10 +533,10 @@ describe('TokenizedBuffer', () => {
       await atom.packages.activatePackage('language-coffee-script')

       buffer = new TextBuffer({text: '# hello\n# world'})
-      tokenizedBuffer = new TokenizedBuffer({buffer, grammar: atom.grammars.grammarForScopeName('source.coffee'), tabLength: 2})
+      tokenizedBuffer = new TokenizedBuffer({buffer, config, grammar: atom.grammars.grammarForScopeName('source.coffee')})
       fullyTokenize(tokenizedBuffer)

-      const iterator = tokenizedBuffer.buildIterator()
+      const iterator = tokenizedBuffer.buildHighlightIterator()
       iterator.seek(Point(0, 0))
       iterator.moveToSuccessor()
       iterator.moveToSuccessor()
@@ -613,10 +563,10 @@ describe('TokenizedBuffer', () => {
       })

       buffer = new TextBuffer({text: 'start x\nend x\nx'})
-      tokenizedBuffer = new TokenizedBuffer({buffer, grammar, tabLength: 2})
+      tokenizedBuffer = new TokenizedBuffer({buffer, config, grammar})
       fullyTokenize(tokenizedBuffer)

-      const iterator = tokenizedBuffer.buildIterator()
+      const iterator = tokenizedBuffer.buildHighlightIterator()
       iterator.seek(Point(1, 0))

       expect(iterator.getPosition()).toEqual([1, 0])
@@ -676,7 +626,8 @@ describe('TokenizedBuffer', () => {
       buffer = atom.project.bufferForPathSync('sample.js')
       buffer.insert([10, 0], '  // multi-line\n  // comment\n  // block\n')
       buffer.insert([0, 0], '// multi-line\n// comment\n// block\n')
-      tokenizedBuffer = new TokenizedBuffer({buffer, grammar: atom.grammars.grammarForScopeName('source.js'), tabLength: 2})
+      tokenizedBuffer = new TokenizedBuffer({buffer, config, grammar: atom.grammars.grammarForScopeName('source.js')})
+      buffer.setLanguageMode(tokenizedBuffer)
       fullyTokenize(tokenizedBuffer)
     })

@@ -763,7 +714,7 @@ describe('TokenizedBuffer', () => {
         }
       `)

-      tokenizedBuffer = new TokenizedBuffer({buffer})
+      tokenizedBuffer = new TokenizedBuffer({buffer, config})

      expect(simulateFold(tokenizedBuffer.getFoldableRangesAtIndentLevel(0, 2))).toBe(dedent `
        if (a) {⋯
@@ -825,7 +776,7 @@ describe('TokenizedBuffer', () => {
         }
       `)

-      tokenizedBuffer = new TokenizedBuffer({buffer})
+      tokenizedBuffer = new TokenizedBuffer({buffer, config})

       expect(tokenizedBuffer.getFoldableRanges(2).map(r => r.toString())).toEqual([
         ...tokenizedBuffer.getFoldableRangesAtIndentLevel(0, 2),
@@ -855,7 +806,7 @@ describe('TokenizedBuffer', () => {
         }
       `)

-      tokenizedBuffer = new TokenizedBuffer({buffer})
+      tokenizedBuffer = new TokenizedBuffer({buffer, config})

       expect(tokenizedBuffer.getFoldableRangeContainingPoint(Point(0, 5), 2)).toBeNull()

@@ -900,10 +851,10 @@ describe('TokenizedBuffer', () => {
       buffer = editor.buffer
       tokenizedBuffer = editor.tokenizedBuffer

-      expect(tokenizedBuffer.getFoldableRangeContainingPoint(Point(0, Infinity))).toEqual([[0, Infinity], [20, Infinity]])
-      expect(tokenizedBuffer.getFoldableRangeContainingPoint(Point(1, Infinity))).toEqual([[1, Infinity], [17, Infinity]])
-      expect(tokenizedBuffer.getFoldableRangeContainingPoint(Point(2, Infinity))).toEqual([[1, Infinity], [17, Infinity]])
-      expect(tokenizedBuffer.getFoldableRangeContainingPoint(Point(19, Infinity))).toEqual([[19, Infinity], [20, Infinity]])
+      expect(tokenizedBuffer.getFoldableRangeContainingPoint(Point(0, Infinity), 2)).toEqual([[0, Infinity], [20, Infinity]])
+      expect(tokenizedBuffer.getFoldableRangeContainingPoint(Point(1, Infinity), 2)).toEqual([[1, Infinity], [17, Infinity]])
+      expect(tokenizedBuffer.getFoldableRangeContainingPoint(Point(2, Infinity), 2)).toEqual([[1, Infinity], [17, Infinity]])
+      expect(tokenizedBuffer.getFoldableRangeContainingPoint(Point(19, Infinity), 2)).toEqual([[19, Infinity], [20, Infinity]])
     })

     it('works for javascript', async () => {
@@ -912,10 +863,10 @@ describe('TokenizedBuffer', () => {
       buffer = editor.buffer
       tokenizedBuffer = editor.tokenizedBuffer

-      expect(editor.tokenizedBuffer.getFoldableRangeContainingPoint(Point(0, Infinity))).toEqual([[0, Infinity], [12, Infinity]])
-      expect(editor.tokenizedBuffer.getFoldableRangeContainingPoint(Point(1, Infinity))).toEqual([[1, Infinity], [9, Infinity]])
-      expect(editor.tokenizedBuffer.getFoldableRangeContainingPoint(Point(2, Infinity))).toEqual([[1, Infinity], [9, Infinity]])
-      expect(editor.tokenizedBuffer.getFoldableRangeContainingPoint(Point(4, Infinity))).toEqual([[4, Infinity], [7, Infinity]])
+      expect(editor.tokenizedBuffer.getFoldableRangeContainingPoint(Point(0, Infinity), 2)).toEqual([[0, Infinity], [12, Infinity]])
+      expect(editor.tokenizedBuffer.getFoldableRangeContainingPoint(Point(1, Infinity), 2)).toEqual([[1, Infinity], [9, Infinity]])
+      expect(editor.tokenizedBuffer.getFoldableRangeContainingPoint(Point(2, Infinity), 2)).toEqual([[1, Infinity], [9, Infinity]])
+      expect(editor.tokenizedBuffer.getFoldableRangeContainingPoint(Point(4, Infinity), 2)).toEqual([[4, Infinity], [7, Infinity]])
     })
   })

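The src/text-editor.js hunks below change how onDidTokenize is wired: instead of delegating to whatever language mode happens to be current, the editor subscribes to the mode's onDidTokenize (when the mode provides one) and re-emits 'did-tokenize' on its own emitter, renewing the subscription in handleLanguageModeChange. A minimal consumer-side sketch (hypothetical usage, not from the diff) of the unchanged public contract:

    // Subscribers keep working across language-mode swaps because the editor,
    // not the mode, now owns the event. onDidTokenize still returns a Disposable.
    const subscription = editor.onDidTokenize(() => {
      console.log('async tokenization finished')  // hypothetical handler
    })
    subscription.dispose()
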
diff --git a/src/text-editor.js b/src/text-editor.js
@@ -176,7 +176,10 @@ class TextEditor {
     })

     const languageMode = this.buffer.getLanguageMode()
     if (languageMode && languageMode.setTabLength) languageMode.setTabLength(tabLength)
-
+    this.languageModeSubscription = languageMode.onDidTokenize && languageMode.onDidTokenize(() => {
+      this.emitter.emit('did-tokenize')
+    })
+    if (this.languageModeSubscription) this.disposables.add(this.languageModeSubscription)
     if (params.displayLayer) {
       this.displayLayer = params.displayLayer
@@ -333,7 +336,6 @@ class TextEditor {

       case 'tabLength':
         if (value > 0 && value !== this.displayLayer.tabLength) {
-          this.buffer.getLanguageMode().setTabLength(value)
           displayLayerParams.tabLength = value
         }
         break
@@ -3559,12 +3561,7 @@ class TextEditor {

   // Experimental: Get a notification when async tokenization is completed.
   onDidTokenize (callback) {
-    const languageMode = this.buffer.getLanguageMode()
-    if (languageMode.onDidTokenize) {
-      return this.buffer.getLanguageMode().onDidTokenize(callback)
-    } else {
-      return new Disposable(() => {})
-    }
+    return this.emitter.on('did-tokenize', callback)
   }

   /*
@@ -4121,7 +4118,16 @@ class TextEditor {

   handleLanguageModeChange () {
     this.unfoldAll()
-    this.emitter.emit('did-change-grammar', this.buffer.getLanguageMode().grammar)
+    if (this.languageModeSubscription) {
+      this.languageModeSubscription.dispose()
+      this.disposables.remove(this.languageModeSubscription)
+    }
+    const languageMode = this.buffer.getLanguageMode()
+    this.languageModeSubscription = languageMode.onDidTokenize && languageMode.onDidTokenize(() => {
+      this.emitter.emit('did-tokenize')
+    })
+    if (this.languageModeSubscription) this.disposables.add(this.languageModeSubscription)
+    this.emitter.emit('did-change-grammar', languageMode.grammar)
   }

   /*
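The src/tokenized-buffer.js hunks below remove the stored tabLength field and its get/setTabLength accessors; callers that need a tab length now pass it explicitly per call, which is why the fold assertions in the spec hunks above gained a trailing argument. Call shapes copied from the spec:

    // tab length is an explicit argument rather than instance state
    tokenizedBuffer.getFoldableRangesAtIndentLevel(0, 2)
    tokenizedBuffer.getFoldableRangeContainingPoint(Point(0, Infinity), 2)
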
diff --git a/src/tokenized-buffer.js b/src/tokenized-buffer.js
@@ -26,7 +26,6 @@ class TokenizedBuffer {
     this.visible = false
     this.id = params.id != null ? params.id : nextId++
     this.buffer = params.buffer
-    this.tabLength = params.tabLength
     this.largeFileMode = params.largeFileMode
     this.config = params.config
     this.largeFileMode = params.largeFileMode != null
@@ -263,12 +262,6 @@ class TokenizedBuffer {
     }
   }

-  getTabLength () { return this.tabLength }
-
-  setTabLength (tabLength) {
-    this.tabLength = tabLength
-  }
-
   tokenizeInBackground () {
     if (!this.visible || this.pendingChunk || !this.alive) return
