diff --git a/spec/token-iterator-spec.coffee b/spec/token-iterator-spec.coffee
deleted file mode 100644
index 6ae01cd30..000000000
--- a/spec/token-iterator-spec.coffee
+++ /dev/null
@@ -1,37 +0,0 @@
-TextBuffer = require 'text-buffer'
-TokenizedBuffer = require '../src/tokenized-buffer'
-
-describe "TokenIterator", ->
-  it "correctly terminates scopes at the beginning of the line (regression)", ->
-    grammar = atom.grammars.createGrammar('test', {
-      'scopeName': 'text.broken'
-      'name': 'Broken grammar'
-      'patterns': [
-        {
-          'begin': 'start'
-          'end': '(?=end)'
-          'name': 'blue.broken'
-        }
-        {
-          'match': '.'
-          'name': 'yellow.broken'
-        }
-      ]
-    })
-
-    buffer = new TextBuffer(text: """
-      start x
-      end x
-      x
-    """)
-    tokenizedBuffer = new TokenizedBuffer({
-      buffer, config: atom.config, grammarRegistry: atom.grammars, packageManager: atom.packages, assert: atom.assert
-    })
-    tokenizedBuffer.setGrammar(grammar)
-
-    tokenIterator = tokenizedBuffer.tokenizedLines[1].getTokenIterator()
-    tokenIterator.next()
-
-    expect(tokenIterator.getBufferStart()).toBe 0
-    expect(tokenIterator.getScopeEnds()).toEqual []
-    expect(tokenIterator.getScopeStarts()).toEqual ['text.broken', 'yellow.broken']
diff --git a/spec/token-iterator-spec.js b/spec/token-iterator-spec.js
new file mode 100644
index 000000000..f6d43395c
--- /dev/null
+++ b/spec/token-iterator-spec.js
@@ -0,0 +1,43 @@
+const TextBuffer = require('text-buffer')
+const TokenizedBuffer = require('../src/tokenized-buffer')
+
+describe('TokenIterator', () =>
+  it('correctly terminates scopes at the beginning of the line (regression)', () => {
+    const grammar = atom.grammars.createGrammar('test', {
+      'scopeName': 'text.broken',
+      'name': 'Broken grammar',
+      'patterns': [
+        {
+          'begin': 'start',
+          'end': '(?=end)',
+          'name': 'blue.broken'
+        },
+        {
+          'match': '.',
+          'name': 'yellow.broken'
+        }
+      ]
+    })
+
+    const buffer = new TextBuffer({text: `\
+start x
+end x
+x\
+`})
+    const tokenizedBuffer = new TokenizedBuffer({
+      buffer,
+      config: atom.config,
+      grammarRegistry: atom.grammars,
+      packageManager: atom.packages,
+      assert: atom.assert
+    })
+    tokenizedBuffer.setGrammar(grammar)
+
+    const tokenIterator = tokenizedBuffer.tokenizedLines[1].getTokenIterator()
+    tokenIterator.next()
+
+    expect(tokenIterator.getBufferStart()).toBe(0)
+    expect(tokenIterator.getScopeEnds()).toEqual([])
+    expect(tokenIterator.getScopeStarts()).toEqual(['text.broken', 'yellow.broken'])
+  })
+)