diff --git a/spec/text-editor-registry-spec.js b/spec/text-editor-registry-spec.js
index 8ecb40c5b..b4c64db10 100644
--- a/spec/text-editor-registry-spec.js
+++ b/spec/text-editor-registry-spec.js
@@ -75,7 +75,7 @@ describe('TextEditorRegistry', function () {
     })

     it('updates the editor\'s grammar when a more appropriate grammar is added for its path', async function () {
-      expect(editor.getGrammar()).toBe(null)
+      expect(editor.getGrammar().name).toBe('Null Grammar')

       editor.getBuffer().setPath('test.js')
       registry.maintainGrammar(editor)
@@ -473,13 +473,13 @@ describe('TextEditorRegistry', function () {
       const editor2Copy = TextEditor.deserialize(editor2.serialize(), atom)
       const registryCopy = TextEditorRegistry.deserialize(registry.serialize(), atom)

-      expect(editorCopy.getGrammar()).toBe(null)
-      expect(editor2Copy.getGrammar()).toBe(null)
+      expect(editorCopy.getGrammar().name).toBe('Null Grammar')
+      expect(editor2Copy.getGrammar().name).toBe('Null Grammar')

       registryCopy.maintainGrammar(editorCopy)
       registryCopy.maintainGrammar(editor2Copy)
       expect(editorCopy.getGrammar().name).toBe('C')
-      expect(editor2Copy.getGrammar()).toBe(null)
+      expect(editor2Copy.getGrammar().name).toBe('Null Grammar')

       await atom.packages.activatePackage('language-javascript')
       expect(editorCopy.getGrammar().name).toBe('C')
diff --git a/src/null-grammar.js b/src/null-grammar.js
new file mode 100644
index 000000000..33f7a5dc4
--- /dev/null
+++ b/src/null-grammar.js
@@ -0,0 +1,4 @@
+module.exports = Object.freeze({
+  name: 'Null Grammar',
+  scopeName: 'text.plain'
+})
diff --git a/src/tokenized-buffer.coffee b/src/tokenized-buffer.coffee
index 90666b783..aa5789016 100644
--- a/src/tokenized-buffer.coffee
+++ b/src/tokenized-buffer.coffee
@@ -8,10 +8,11 @@ TokenIterator = require './token-iterator'
 Token = require './token'
 ScopeDescriptor = require './scope-descriptor'
 TokenizedBufferIterator = require './tokenized-buffer-iterator'
+NullGrammar = require './null-grammar'

 module.exports =
 class TokenizedBuffer extends Model
-  grammar: null
+  grammar: NullGrammar
   buffer: null
   tabLength: null
   tokenizedLines: null
@@ -127,7 +128,7 @@ class TokenizedBuffer extends Model

   tokenizeNextChunk: ->
     # Short circuit null grammar which can just use the placeholder tokens
-    if (not @grammar? or @grammar.name is 'Null grammar') and @firstInvalidRow()?
+    if (@grammar.name is 'Null Grammar') and @firstInvalidRow()?
       @invalidRows = []
       @markTokenizationComplete()
       return
@@ -203,7 +204,7 @@ class TokenizedBuffer extends Model

     @updateInvalidRows(start, end, delta)
     previousEndStack = @stackForRow(end) # used in spill detection below
-    if @largeFileMode or not @grammar?
+    if @largeFileMode or @grammar is NullGrammar
       newTokenizedLines = @buildPlaceholderTokenizedLinesForRows(start, end + delta)
     else
       newTokenizedLines = @buildTokenizedLinesForRows(start, end + delta, @stackForRow(start - 1), @openScopesForRow(start))
@@ -279,7 +280,7 @@ class TokenizedBuffer extends Model
       @buildPlaceholderTokenizedLineForRow(row) for row in [startRow..endRow] by 1

   buildPlaceholderTokenizedLineForRow: (row) ->
-    if @grammar?
+    if @grammar isnt NullGrammar
       openScopes = [@grammar.startIdForScope(@grammar.scopeName)]
     else
       openScopes = []