Merge remote-tracking branch 'origin/master' into cj-update-user-keymap
@@ -11,6 +11,10 @@ packageJson = require './package.json'
 # TODO Remove once all repositories are public
 process.env.ATOM_ACCESS_TOKEN ?= '362295be4c5258d3f7b967bbabae662a455ca2a7'

+# Shim harmony collections in case grunt was invoked without harmony
+# collections enabled
+_.extend(global, require('harmony-collections')) unless global.WeakMap?
+
 module.exports = (grunt) ->
   if not grunt.option('verbose')
     grunt.log.writeln = (args...) -> grunt.log
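The shim only touches the global object when a native WeakMap is missing, so grunt started under a harmony-enabled V8 is left alone. A minimal standalone sketch of the same guard (assuming, as the Gruntfile's own line implies, that the harmony-collections package exports the collection constructors):

    _ = require 'underscore-plus'

    # Patch in pure-JS collections only when V8 lacks native ones.
    _.extend(global, require('harmony-collections')) unless global.WeakMap?

    cache = new WeakMap()
    key = {}
    cache.set(key, 'value')
    console.log cache.get(key)  # => 'value'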
@@ -25,7 +25,7 @@
     "coffeestack": "0.6.0",
     "diff": "git://github.com/benogle/jsdiff.git",
     "emissary": "0.19.0",
-    "first-mate": "0.5.0",
+    "first-mate": "0.10.0",
     "fs-plus": "0.13.0",
     "fuzzaldrin": "0.1.0",
     "git-utils": "0.29.0",
@@ -71,7 +71,8 @@
     "unzip": "~0.1.9",
     "rcedit": "~0.1.2",
     "rimraf": "~2.2.2",
-    "github-releases": "~0.2.0"
+    "github-releases": "~0.2.0",
+    "harmony-collections": "~0.3.8"
   },
   "packageDependencies": {
     "atom-dark-syntax": "0.10.0",
@@ -110,7 +111,7 @@
     "release-notes": "0.15.0",
     "settings-view": "0.52.0",
     "snippets": "0.17.0",
-    "spell-check": "0.17.0",
+    "spell-check": "0.18.0",
     "status-bar": "0.27.0",
     "styleguide": "0.19.0",
     "symbols-view": "0.27.0",
@@ -1,4 +1,4 @@
-#!/usr/bin/env node
+#!/usr/bin/env node --harmony_collections
 var safeExec = require('./utils/child-process-wrapper.js').safeExec;
 var fs = require('fs');
 var path = require('path');

@@ -1,4 +1,4 @@
-#!/usr/bin/env node
+#!/usr/bin/env node --harmony_collections
 var cp = require('./utils/child-process-wrapper.js');
 var path = require('path');

@@ -1,4 +1,4 @@
-#!/usr/bin/env node
+#!/usr/bin/env node --harmony_collections
 var cp = require('./utils/child-process-wrapper.js');
 var fs = require('fs');
 var path = require('path');

@@ -1,4 +1,4 @@
-#!/usr/bin/env node
+#!/usr/bin/env node --harmony_collections
 var cp = require('./utils/child-process-wrapper.js');
 var path = require('path');
 var os = require('os');

@@ -1,4 +1,4 @@
-#!/usr/bin/env node
+#!/usr/bin/env node --harmony_collections
 var safeExec = require('./utils/child-process-wrapper.js').safeExec;
 var path = require('path');
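All five build scripts are updated identically: the extra shebang argument asks V8 for its experimental ES6 collections, matching the new harmony-collections fallback in the Gruntfile and package.json. A hedged sketch of the detection both approaches rely on (constructor names assumed to match the npm package's exports):

    # True when node was started with --harmony_collections (or is ES6+).
    hasNative = global.WeakMap? and global.Map? and global.Set?

    # Otherwise fall back to the pure-JS implementations.
    {WeakMap, Map, Set} = require 'harmony-collections' unless hasNative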
@@ -2698,3 +2698,55 @@ describe "Editor", ->
       expect(editor.getCursorBufferPosition()).toEqual [0, 2]
       editor.moveCursorLeft()
       expect(editor.getCursorBufferPosition()).toEqual [0, 0]
+
+  describe "when the editor's grammar has an injection selector", ->
+    beforeEach ->
+      atom.packages.activatePackage('language-text', sync: true)
+      atom.packages.activatePackage('language-javascript', sync: true)
+
+    it "includes the grammar's patterns when the selector matches the current scope in other grammars", ->
+      atom.packages.activatePackage('language-hyperlink', sync: true)
+      grammar = atom.syntax.selectGrammar("text.js")
+      {tokens} = grammar.tokenizeLine("var i; // http://github.com")
+
+      expect(tokens[0].value).toBe "var"
+      expect(tokens[0].scopes).toEqual ["source.js", "storage.modifier.js"]
+
+      expect(tokens[6].value).toBe "http://github.com"
+      expect(tokens[6].scopes).toEqual ["source.js", "comment.line.double-slash.js", "markup.underline.link.http.hyperlink"]
+
+    describe "when the grammar is added", ->
+      it "retokenizes existing buffers that contain tokens that match the injection selector", ->
+        editor = atom.project.openSync('sample.js')
+        editor.setText("// http://github.com")
+
+        {tokens} = editor.lineForScreenRow(0)
+        expect(tokens[1].value).toBe " http://github.com"
+        expect(tokens[1].scopes).toEqual ["source.js", "comment.line.double-slash.js"]
+
+        atom.packages.activatePackage('language-hyperlink', sync: true)
+
+        {tokens} = editor.lineForScreenRow(0)
+        expect(tokens[2].value).toBe "http://github.com"
+        expect(tokens[2].scopes).toEqual ["source.js", "comment.line.double-slash.js", "markup.underline.link.http.hyperlink"]
+
+    describe "when the grammar is updated", ->
+      it "retokenizes existing buffers that contain tokens that match the injection selector", ->
+        editor = atom.project.openSync('sample.js')
+        editor.setText("// SELECT * FROM OCTOCATS")
+
+        {tokens} = editor.lineForScreenRow(0)
+        expect(tokens[1].value).toBe " SELECT * FROM OCTOCATS"
+        expect(tokens[1].scopes).toEqual ["source.js", "comment.line.double-slash.js"]
+
+        atom.packages.activatePackage('package-with-injection-selector', sync: true)
+
+        {tokens} = editor.lineForScreenRow(0)
+        expect(tokens[1].value).toBe " SELECT * FROM OCTOCATS"
+        expect(tokens[1].scopes).toEqual ["source.js", "comment.line.double-slash.js"]
+
+        atom.packages.activatePackage('language-sql', sync: true)
+
+        {tokens} = editor.lineForScreenRow(0)
+        expect(tokens[2].value).toBe "SELECT"
+        expect(tokens[2].scopes).toEqual ["source.js", "comment.line.double-slash.js", "keyword.other.DML.sql"]
spec/fixtures/packages/package-with-injection-selector/grammars/grammar.cson (vendored, new file)
@@ -0,0 +1,4 @@
+'name': 'test'
+'scopeName': 'source.test'
+'injectionSelector': 'comment'
+'patterns': [{'include': 'source.sql'}]
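This four-line fixture is a complete injection grammar: it declares no fileTypes, so it is never selected for a file on its own; instead its patterns are applied inside any token whose scope matches the injectionSelector ('comment'), whatever grammar produced that token. The deleted spec further down registers the same grammar programmatically; condensed:

    atom.syntax.addGrammar(new TextMateGrammar(
      name: 'test'
      scopeName: 'source.test'
      repository: {}
      injectionSelector: 'comment'
      patterns: [{include: 'source.sql'}]
    ))

Once added, a JavaScript comment containing SQL keeps its comment.line scopes and additionally picks up source.sql scopes such as keyword.other.DML.sql, as the specs above assert.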
@@ -403,6 +403,7 @@ describe "Project", ->
           range: [[2, 6], [2, 11]]

     it "works on evil filenames", ->
+      platform.generateEvilFiles()
       atom.project.setPath(path.join(__dirname, 'fixtures', 'evil-files'))
       paths = []
       matches = []
@@ -1,8 +1,6 @@
 path = require 'path'
 fs = require 'fs-plus'
-
-{_} = require 'atom'

 ## Platform specific helpers
 module.exports =
   # Public: Returns true if being run from within Windows
@@ -18,20 +16,20 @@ module.exports =
     fs.removeSync(evilFilesPath) if fs.existsSync(evilFilesPath)
     fs.mkdirSync(evilFilesPath)

-    if (@isWindows())
+    if @isWindows()
       filenames = [
-        "a_file_with_utf8.txt",
-        "file with spaces.txt",
+        "a_file_with_utf8.txt"
+        "file with spaces.txt"
         "utfa\u0306.md"
       ]
     else
       filenames = [
-        "a_file_with_utf8.txt",
-        "file with spaces.txt",
-        "goddam\nnewlines",
-        "quote\".txt",
+        "a_file_with_utf8.txt"
+        "file with spaces.txt"
+        "goddam\nnewlines"
+        "quote\".txt"
         "utfa\u0306.md"
       ]

     for filename in filenames
-      fd = fs.writeFileSync(path.join(evilFilesPath, filename), 'evil file!', flag: 'w')
+      fs.writeFileSync(path.join(evilFilesPath, filename), 'evil file!', flag: 'w')
@@ -13,11 +13,8 @@ Editor = require '../src/editor'
 EditorView = require '../src/editor-view'
 TokenizedBuffer = require '../src/tokenized-buffer'
 pathwatcher = require 'pathwatcher'
-platform = require './spec-helper-platform'
 clipboard = require 'clipboard'

-platform.generateEvilFiles()
-
 atom.themes.loadBaseStylesheets()
 atom.themes.requireStylesheet '../static/jasmine'

@@ -1,7 +1,6 @@
 {fs} = require 'atom'
 path = require 'path'
 temp = require 'temp'
-TextMateGrammar = require '../src/text-mate-grammar'

 describe "the `syntax` global", ->
   beforeEach ->
@@ -62,20 +61,23 @@ describe "the `syntax` global", ->

   describe "when multiple grammars have matching fileTypes", ->
     it "selects the grammar with the longest fileType match", ->
-      grammar1 = new TextMateGrammar
+      grammarPath1 = temp.path(suffix: '.json')
+      fs.writeFileSync grammarPath1, JSON.stringify(
         name: 'test1'
         scopeName: 'source1'
-        fileTypes: ['test', 'more.test']
+        fileTypes: ['test']
+      )
+      grammar1 = atom.syntax.loadGrammarSync(grammarPath1)
+      expect(atom.syntax.selectGrammar('more.test', '')).toBe grammar1

-      grammar2 = new TextMateGrammar
+      grammarPath2 = temp.path(suffix: '.json')
+      fs.writeFileSync grammarPath2, JSON.stringify(
         name: 'test2'
         scopeName: 'source2'
-        fileTypes: ['test']
-
-      atom.syntax.addGrammar(grammar1)
-      atom.syntax.addGrammar(grammar2)
-
-      expect(atom.syntax.selectGrammar('more.test', '')).toBe grammar1
+        fileTypes: ['test', 'more.test']
+      )
+      grammar2 = atom.syntax.loadGrammarSync(grammarPath2)
+      expect(atom.syntax.selectGrammar('more.test', '')).toBe grammar2

   describe "when there is no file path", ->
     it "does not throw an exception (regression)", ->
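With TextMateGrammar no longer exported from src, the rewritten spec builds grammars on disk and loads them through the registry rather than constructing them directly. The pattern, condensed (temp and fs are required at the top of this spec):

    grammarPath = temp.path(suffix: '.json')
    fs.writeFileSync grammarPath, JSON.stringify(
      name: 'example'
      scopeName: 'source.example'
      fileTypes: ['example']
    )
    grammar = atom.syntax.loadGrammarSync(grammarPath)

The assertions also move: each selectGrammar expectation now runs right after its grammar loads, exercising the longest-fileType-match rule first with only grammar1 present and again once grammar2's 'more.test' wins.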
@@ -1,704 +0,0 @@
TextMateGrammar = require '../src/text-mate-grammar'
TextMatePackage = require '../src/text-mate-package'
{_, fs} = require 'atom'

describe "TextMateGrammar", ->
  grammar = null

  beforeEach ->
    atom.packages.activatePackage('language-text', sync: true)
    atom.packages.activatePackage('language-javascript', sync: true)
    atom.packages.activatePackage('language-coffee-script', sync: true)
    atom.packages.activatePackage('language-ruby', sync: true)
    atom.packages.activatePackage('language-html', sync: true)
    atom.packages.activatePackage('language-php', sync: true)
    atom.packages.activatePackage('language-python', sync: true)
    grammar = atom.syntax.selectGrammar("hello.coffee")

  describe "@loadSync(path)", ->
    it "loads grammars from plists", ->
      grammar = TextMateGrammar.loadSync(require.resolve('./fixtures/sample.plist'))
      expect(grammar.scopeName).toBe "text.plain"
      {tokens} = grammar.tokenizeLine("this text is so plain. i love it.")
      expect(tokens[0]).toEqual value: "this text is so plain. i love it.", scopes: ["text.plain", "meta.paragraph.text"]

    it "loads grammars from cson files", ->
      grammar = TextMateGrammar.loadSync(require.resolve('./fixtures/packages/package-with-grammars/grammars/alot.cson'))
      expect(grammar.scopeName).toBe "source.alot"
      {tokens} = grammar.tokenizeLine("this is alot of code")
      expect(tokens[1]).toEqual value: "alot", scopes: ["source.alot", "keyword.alot"]

  describe ".tokenizeLine(line, ruleStack)", ->
    describe "when the entire line matches a single pattern with no capture groups", ->
      it "returns a single token with the correct scope", ->
        {tokens} = grammar.tokenizeLine("return")

        expect(tokens.length).toBe 1
        [token] = tokens
        expect(token.scopes).toEqual ['source.coffee', 'keyword.control.coffee']

    describe "when the entire line matches a single pattern with capture groups", ->
      it "returns a single token with the correct scope", ->
        {tokens} = grammar.tokenizeLine("new foo.bar.Baz")

        expect(tokens.length).toBe 3
        [newOperator, whitespace, className] = tokens
        expect(newOperator).toEqual value: 'new', scopes: ['source.coffee', 'meta.class.instance.constructor', 'keyword.operator.new.coffee']
        expect(whitespace).toEqual value: ' ', scopes: ['source.coffee', 'meta.class.instance.constructor']
        expect(className).toEqual value: 'foo.bar.Baz', scopes: ['source.coffee', 'meta.class.instance.constructor', 'entity.name.type.instance.coffee']

    describe "when the line doesn't match any patterns", ->
      it "returns the entire line as a single simple token with the grammar's scope", ->
        textGrammar = atom.syntax.selectGrammar('foo.txt')
        {tokens} = textGrammar.tokenizeLine("abc def")
        expect(tokens.length).toBe 1

    describe "when the line matches multiple patterns", ->
      it "returns multiple tokens, filling in regions that don't match patterns with tokens in the grammar's global scope", ->
        {tokens} = grammar.tokenizeLine(" return new foo.bar.Baz ")

        expect(tokens.length).toBe 7

        expect(tokens[0]).toEqual value: ' ', scopes: ['source.coffee']
        expect(tokens[1]).toEqual value: 'return', scopes: ['source.coffee', 'keyword.control.coffee']
        expect(tokens[2]).toEqual value: ' ', scopes: ['source.coffee']
        expect(tokens[3]).toEqual value: 'new', scopes: ['source.coffee', 'meta.class.instance.constructor', 'keyword.operator.new.coffee']
        expect(tokens[4]).toEqual value: ' ', scopes: ['source.coffee', 'meta.class.instance.constructor']
        expect(tokens[5]).toEqual value: 'foo.bar.Baz', scopes: ['source.coffee', 'meta.class.instance.constructor', 'entity.name.type.instance.coffee']
        expect(tokens[6]).toEqual value: ' ', scopes: ['source.coffee']

    describe "when the line matches a pattern with optional capture groups", ->
      it "only returns tokens for capture groups that matched", ->
        {tokens} = grammar.tokenizeLine("class Quicksort")
        expect(tokens.length).toBe 3
        expect(tokens[0].value).toBe "class"
        expect(tokens[1].value).toBe " "
        expect(tokens[2].value).toBe "Quicksort"

    describe "when the line matches a rule with nested capture groups and lookahead capture groups beyond the scope of the overall match", ->
      it "creates distinct tokens for nested captures and does not return tokens beyond the scope of the overall capture", ->
        {tokens} = grammar.tokenizeLine(" destroy: ->")
        expect(tokens.length).toBe 6
        expect(tokens[0]).toEqual(value: ' ', scopes: ["source.coffee"])
        expect(tokens[1]).toEqual(value: 'destro', scopes: ["source.coffee", "meta.function.coffee", "entity.name.function.coffee"])
        # this dangling 'y' with a duplicated scope looks wrong, but textmate yields the same behavior. probably a quirk in the coffee grammar.
        expect(tokens[2]).toEqual(value: 'y', scopes: ["source.coffee", "meta.function.coffee", "entity.name.function.coffee", "entity.name.function.coffee"])
        expect(tokens[3]).toEqual(value: ':', scopes: ["source.coffee", "keyword.operator.coffee"])
        expect(tokens[4]).toEqual(value: ' ', scopes: ["source.coffee"])
        expect(tokens[5]).toEqual(value: '->', scopes: ["source.coffee", "storage.type.function.coffee"])

    describe "when the line matches a pattern that includes a rule", ->
      it "returns tokens based on the included rule", ->
        {tokens} = grammar.tokenizeLine("7777777")
        expect(tokens.length).toBe 1
        expect(tokens[0]).toEqual value: '7777777', scopes: ['source.coffee', 'constant.numeric.coffee']
describe "when the line is an interpolated string", ->
|
||||
it "returns the correct tokens", ->
|
||||
{tokens} = grammar.tokenizeLine('"the value is #{@x} my friend"')
|
||||
|
||||
expect(tokens[0]).toEqual value: '"', scopes: ["source.coffee","string.quoted.double.coffee","punctuation.definition.string.begin.coffee"]
|
||||
expect(tokens[1]).toEqual value: "the value is ", scopes: ["source.coffee","string.quoted.double.coffee"]
|
||||
expect(tokens[2]).toEqual value: '#{', scopes: ["source.coffee","string.quoted.double.coffee","source.coffee.embedded.source","punctuation.section.embedded.coffee"]
|
||||
expect(tokens[3]).toEqual value: "@x", scopes: ["source.coffee","string.quoted.double.coffee","source.coffee.embedded.source","variable.other.readwrite.instance.coffee"]
|
||||
expect(tokens[4]).toEqual value: "}", scopes: ["source.coffee","string.quoted.double.coffee","source.coffee.embedded.source","punctuation.section.embedded.coffee"]
|
||||
expect(tokens[5]).toEqual value: " my friend", scopes: ["source.coffee","string.quoted.double.coffee"]
|
||||
expect(tokens[6]).toEqual value: '"', scopes: ["source.coffee","string.quoted.double.coffee","punctuation.definition.string.end.coffee"]
|
||||
|
||||
describe "when the line has an interpolated string inside an interpolated string", ->
|
||||
it "returns the correct tokens", ->
|
||||
{tokens} = grammar.tokenizeLine('"#{"#{@x}"}"')
|
||||
|
||||
expect(tokens[0]).toEqual value: '"', scopes: ["source.coffee","string.quoted.double.coffee","punctuation.definition.string.begin.coffee"]
|
||||
expect(tokens[1]).toEqual value: '#{', scopes: ["source.coffee","string.quoted.double.coffee","source.coffee.embedded.source","punctuation.section.embedded.coffee"]
|
||||
expect(tokens[2]).toEqual value: '"', scopes: ["source.coffee","string.quoted.double.coffee","source.coffee.embedded.source","string.quoted.double.coffee","punctuation.definition.string.begin.coffee"]
|
||||
expect(tokens[3]).toEqual value: '#{', scopes: ["source.coffee","string.quoted.double.coffee","source.coffee.embedded.source","string.quoted.double.coffee","source.coffee.embedded.source","punctuation.section.embedded.coffee"]
|
||||
expect(tokens[4]).toEqual value: '@x', scopes: ["source.coffee","string.quoted.double.coffee","source.coffee.embedded.source","string.quoted.double.coffee","source.coffee.embedded.source","variable.other.readwrite.instance.coffee"]
|
||||
expect(tokens[5]).toEqual value: '}', scopes: ["source.coffee","string.quoted.double.coffee","source.coffee.embedded.source","string.quoted.double.coffee","source.coffee.embedded.source","punctuation.section.embedded.coffee"]
|
||||
expect(tokens[6]).toEqual value: '"', scopes: ["source.coffee","string.quoted.double.coffee","source.coffee.embedded.source","string.quoted.double.coffee","punctuation.definition.string.end.coffee"]
|
||||
expect(tokens[7]).toEqual value: '}', scopes: ["source.coffee","string.quoted.double.coffee","source.coffee.embedded.source","punctuation.section.embedded.coffee"]
|
||||
expect(tokens[8]).toEqual value: '"', scopes: ["source.coffee","string.quoted.double.coffee","punctuation.definition.string.end.coffee"]
|
||||
|
||||
describe "when the line is empty", ->
|
||||
it "returns a single token which has the global scope", ->
|
||||
{tokens} = grammar.tokenizeLine('')
|
||||
expect(tokens[0]).toEqual value: '', scopes: ["source.coffee"]
|
||||
|
||||
describe "when the line matches no patterns", ->
|
||||
it "does not infinitely loop", ->
|
||||
grammar = atom.syntax.selectGrammar("sample.txt")
|
||||
{tokens} = grammar.tokenizeLine('hoo')
|
||||
expect(tokens.length).toBe 1
|
||||
expect(tokens[0]).toEqual value: 'hoo', scopes: ["text.plain", "meta.paragraph.text"]
|
||||
|
||||
describe "when the line matches a pattern with a 'contentName'", ->
|
||||
it "creates tokens using the content of contentName as the token name", ->
|
||||
grammar = atom.syntax.selectGrammar("sample.txt")
|
||||
{tokens} = grammar.tokenizeLine('ok, cool')
|
||||
expect(tokens[0]).toEqual value: 'ok, cool', scopes: ["text.plain", "meta.paragraph.text"]
|
||||
|
||||
describe "when the line matches a pattern with no `name` or `contentName`", ->
|
||||
it "creates tokens without adding a new scope", ->
|
||||
grammar = atom.syntax.selectGrammar('foo.rb')
|
||||
{tokens} = grammar.tokenizeLine('%w|oh \\look|')
|
||||
expect(tokens.length).toBe 5
|
||||
expect(tokens[0]).toEqual value: '%w|', scopes: ["source.ruby", "string.quoted.other.literal.lower.ruby", "punctuation.definition.string.begin.ruby"]
|
||||
expect(tokens[1]).toEqual value: 'oh ', scopes: ["source.ruby", "string.quoted.other.literal.lower.ruby"]
|
||||
expect(tokens[2]).toEqual value: '\\l', scopes: ["source.ruby", "string.quoted.other.literal.lower.ruby"]
|
||||
expect(tokens[3]).toEqual value: 'ook', scopes: ["source.ruby", "string.quoted.other.literal.lower.ruby"]
|
||||
|
||||
describe "when the line matches a begin/end pattern", ->
|
||||
it "returns tokens based on the beginCaptures, endCaptures and the child scope", ->
|
||||
{tokens} = grammar.tokenizeLine("'''single-quoted heredoc'''")
|
||||
|
||||
expect(tokens.length).toBe 3
|
||||
|
||||
expect(tokens[0]).toEqual value: "'''", scopes: ['source.coffee', 'string.quoted.heredoc.coffee', 'punctuation.definition.string.begin.coffee']
|
||||
expect(tokens[1]).toEqual value: "single-quoted heredoc", scopes: ['source.coffee', 'string.quoted.heredoc.coffee']
|
||||
expect(tokens[2]).toEqual value: "'''", scopes: ['source.coffee', 'string.quoted.heredoc.coffee', 'punctuation.definition.string.end.coffee']
|
||||
|
||||
describe "when the pattern spans multiple lines", ->
|
||||
it "uses the ruleStack returned by the first line to parse the second line", ->
|
||||
{tokens: firstTokens, ruleStack} = grammar.tokenizeLine("'''single-quoted")
|
||||
{tokens: secondTokens, ruleStack} = grammar.tokenizeLine("heredoc'''", ruleStack)
|
||||
|
||||
expect(firstTokens.length).toBe 2
|
||||
expect(secondTokens.length).toBe 2
|
||||
|
||||
expect(firstTokens[0]).toEqual value: "'''", scopes: ['source.coffee', 'string.quoted.heredoc.coffee', 'punctuation.definition.string.begin.coffee']
|
||||
expect(firstTokens[1]).toEqual value: "single-quoted", scopes: ['source.coffee', 'string.quoted.heredoc.coffee']
|
||||
|
||||
expect(secondTokens[0]).toEqual value: "heredoc", scopes: ['source.coffee', 'string.quoted.heredoc.coffee']
|
||||
expect(secondTokens[1]).toEqual value: "'''", scopes: ['source.coffee', 'string.quoted.heredoc.coffee', 'punctuation.definition.string.end.coffee']
|
||||
|
||||
describe "when the pattern contains sub-patterns", ->
|
||||
it "returns tokens within the begin/end scope based on the sub-patterns", ->
|
||||
{tokens} = grammar.tokenizeLine('"""heredoc with character escape \\t"""')
|
||||
|
||||
expect(tokens.length).toBe 4
|
||||
|
||||
expect(tokens[0]).toEqual value: '"""', scopes: ['source.coffee', 'string.quoted.double.heredoc.coffee', 'punctuation.definition.string.begin.coffee']
|
||||
expect(tokens[1]).toEqual value: "heredoc with character escape ", scopes: ['source.coffee', 'string.quoted.double.heredoc.coffee']
|
||||
expect(tokens[2]).toEqual value: "\\t", scopes: ['source.coffee', 'string.quoted.double.heredoc.coffee', 'constant.character.escape.coffee']
|
||||
expect(tokens[3]).toEqual value: '"""', scopes: ['source.coffee', 'string.quoted.double.heredoc.coffee', 'punctuation.definition.string.end.coffee']
|
||||
|
||||
describe "when the end pattern contains a back reference", ->
|
||||
it "constructs the end rule based on its back-references to captures in the begin rule", ->
|
||||
grammar = atom.syntax.selectGrammar('foo.rb')
|
||||
{tokens} = grammar.tokenizeLine('%w|oh|,')
|
||||
expect(tokens.length).toBe 4
|
||||
expect(tokens[0]).toEqual value: '%w|', scopes: ["source.ruby", "string.quoted.other.literal.lower.ruby", "punctuation.definition.string.begin.ruby"]
|
||||
expect(tokens[1]).toEqual value: 'oh', scopes: ["source.ruby", "string.quoted.other.literal.lower.ruby"]
|
||||
expect(tokens[2]).toEqual value: '|', scopes: ["source.ruby", "string.quoted.other.literal.lower.ruby", "punctuation.definition.string.end.ruby"]
|
||||
expect(tokens[3]).toEqual value: ',', scopes: ["source.ruby", "punctuation.separator.object.ruby"]
|
||||
|
||||
it "allows the rule containing that end pattern to be pushed to the stack multiple times", ->
|
||||
grammar = atom.syntax.selectGrammar('foo.rb')
|
||||
{tokens} = grammar.tokenizeLine('%Q+matz had some #{%Q-crazy ideas-} for ruby syntax+ # damn.')
|
||||
expect(tokens[0]).toEqual value: '%Q+', scopes: ["source.ruby","string.quoted.other.literal.upper.ruby","punctuation.definition.string.begin.ruby"]
|
||||
expect(tokens[1]).toEqual value: 'matz had some ', scopes: ["source.ruby","string.quoted.other.literal.upper.ruby"]
|
||||
expect(tokens[2]).toEqual value: '#{', scopes: ["source.ruby","string.quoted.other.literal.upper.ruby","meta.embedded.line.ruby","punctuation.section.embedded.begin.ruby"]
|
||||
expect(tokens[3]).toEqual value: '%Q-', scopes: ["source.ruby","string.quoted.other.literal.upper.ruby","meta.embedded.line.ruby","string.quoted.other.literal.upper.ruby","punctuation.definition.string.begin.ruby"]
|
||||
expect(tokens[4]).toEqual value: 'crazy ideas', scopes: ["source.ruby","string.quoted.other.literal.upper.ruby","meta.embedded.line.ruby","string.quoted.other.literal.upper.ruby"]
|
||||
expect(tokens[5]).toEqual value: '-', scopes: ["source.ruby","string.quoted.other.literal.upper.ruby","meta.embedded.line.ruby","string.quoted.other.literal.upper.ruby","punctuation.definition.string.end.ruby"]
|
||||
expect(tokens[6]).toEqual value: '}', scopes: ["source.ruby","string.quoted.other.literal.upper.ruby","meta.embedded.line.ruby","punctuation.section.embedded.end.ruby", "source.ruby"]
|
||||
expect(tokens[7]).toEqual value: ' for ruby syntax', scopes: ["source.ruby","string.quoted.other.literal.upper.ruby"]
|
||||
expect(tokens[8]).toEqual value: '+', scopes: ["source.ruby","string.quoted.other.literal.upper.ruby","punctuation.definition.string.end.ruby"]
|
||||
expect(tokens[9]).toEqual value: ' ', scopes: ["source.ruby"]
|
||||
expect(tokens[10]).toEqual value: '#', scopes: ["source.ruby","comment.line.number-sign.ruby","punctuation.definition.comment.ruby"]
|
||||
expect(tokens[11]).toEqual value: ' damn.', scopes: ["source.ruby","comment.line.number-sign.ruby"]
|
||||
|
||||
describe "when the pattern includes rules from another grammar", ->
|
||||
describe "when a grammar matching the desired scope is available", ->
|
||||
it "parses tokens inside the begin/end patterns based on the included grammar's rules", ->
|
||||
atom.packages.activatePackage('language-html', sync: true)
|
||||
atom.packages.activatePackage('language-ruby-on-rails', sync: true)
|
||||
|
||||
grammar = atom.syntax.grammarForScopeName('text.html.ruby')
|
||||
{tokens} = grammar.tokenizeLine("<div class='name'><%= User.find(2).full_name %></div>")
|
||||
|
||||
expect(tokens[0]).toEqual value: '<', scopes: ["text.html.ruby","meta.tag.block.any.html","punctuation.definition.tag.begin.html"]
|
||||
expect(tokens[1]).toEqual value: 'div', scopes: ["text.html.ruby","meta.tag.block.any.html","entity.name.tag.block.any.html"]
|
||||
expect(tokens[2]).toEqual value: ' ', scopes: ["text.html.ruby","meta.tag.block.any.html"]
|
||||
expect(tokens[3]).toEqual value: 'class', scopes: ["text.html.ruby","meta.tag.block.any.html", "entity.other.attribute-name.html"]
|
||||
expect(tokens[4]).toEqual value: '=', scopes: ["text.html.ruby","meta.tag.block.any.html"]
|
||||
expect(tokens[5]).toEqual value: '\'', scopes: ["text.html.ruby","meta.tag.block.any.html","string.quoted.single.html","punctuation.definition.string.begin.html"]
|
||||
expect(tokens[6]).toEqual value: 'name', scopes: ["text.html.ruby","meta.tag.block.any.html","string.quoted.single.html"]
|
||||
expect(tokens[7]).toEqual value: '\'', scopes: ["text.html.ruby","meta.tag.block.any.html","string.quoted.single.html","punctuation.definition.string.end.html"]
|
||||
expect(tokens[8]).toEqual value: '>', scopes: ["text.html.ruby","meta.tag.block.any.html","punctuation.definition.tag.end.html"]
|
||||
expect(tokens[9]).toEqual value: '<%=', scopes: ["text.html.ruby","source.ruby.rails.embedded.html","punctuation.section.embedded.ruby"]
|
||||
expect(tokens[10]).toEqual value: ' ', scopes: ["text.html.ruby","source.ruby.rails.embedded.html"]
|
||||
expect(tokens[11]).toEqual value: 'User', scopes: ["text.html.ruby","source.ruby.rails.embedded.html","support.class.ruby"]
|
||||
expect(tokens[12]).toEqual value: '.', scopes: ["text.html.ruby","source.ruby.rails.embedded.html","punctuation.separator.method.ruby"]
|
||||
expect(tokens[13]).toEqual value: 'find', scopes: ["text.html.ruby","source.ruby.rails.embedded.html"]
|
||||
expect(tokens[14]).toEqual value: '(', scopes: ["text.html.ruby","source.ruby.rails.embedded.html","punctuation.section.function.ruby"]
|
||||
expect(tokens[15]).toEqual value: '2', scopes: ["text.html.ruby","source.ruby.rails.embedded.html","constant.numeric.ruby"]
|
||||
expect(tokens[16]).toEqual value: ')', scopes: ["text.html.ruby","source.ruby.rails.embedded.html","punctuation.section.function.ruby"]
|
||||
expect(tokens[17]).toEqual value: '.', scopes: ["text.html.ruby","source.ruby.rails.embedded.html","punctuation.separator.method.ruby"]
|
||||
expect(tokens[18]).toEqual value: 'full_name ', scopes: ["text.html.ruby","source.ruby.rails.embedded.html"]
|
||||
expect(tokens[19]).toEqual value: '%>', scopes: ["text.html.ruby","source.ruby.rails.embedded.html","punctuation.section.embedded.ruby"]
|
||||
expect(tokens[20]).toEqual value: '</', scopes: ["text.html.ruby","meta.tag.block.any.html","punctuation.definition.tag.begin.html"]
|
||||
expect(tokens[21]).toEqual value: 'div', scopes: ["text.html.ruby","meta.tag.block.any.html","entity.name.tag.block.any.html"]
|
||||
expect(tokens[22]).toEqual value: '>', scopes: ["text.html.ruby","meta.tag.block.any.html","punctuation.definition.tag.end.html"]
|
||||
|
||||
it "updates the grammar if the included grammar is updated later", ->
|
||||
atom.packages.activatePackage('language-html', sync: true)
|
||||
atom.packages.activatePackage('language-ruby-on-rails', sync: true)
|
||||
|
||||
grammar = atom.syntax.selectGrammar('foo.html.erb')
|
||||
grammarUpdatedHandler = jasmine.createSpy("grammarUpdatedHandler")
|
||||
grammar.on 'grammar-updated', grammarUpdatedHandler
|
||||
|
||||
{tokens} = grammar.tokenizeLine("<div class='name'><% <<-SQL select * from users;")
|
||||
expect(tokens[12].value).toBe " select * from users;"
|
||||
|
||||
atom.packages.activatePackage('language-sql', sync: true)
|
||||
expect(grammarUpdatedHandler).toHaveBeenCalled()
|
||||
{tokens} = grammar.tokenizeLine("<div class='name'><% <<-SQL select * from users;")
|
||||
expect(tokens[12].value).toBe " "
|
||||
expect(tokens[13].value).toBe "select"
|
||||
|
||||
describe "when a grammar matching the desired scope is unavailable", ->
|
||||
it "updates the grammar if a matching grammar is added later", ->
|
||||
atom.packages.deactivatePackage('language-html')
|
||||
atom.packages.activatePackage('language-ruby-on-rails', sync: true)
|
||||
|
||||
grammar = atom.syntax.grammarForScopeName('text.html.ruby')
|
||||
{tokens} = grammar.tokenizeLine("<div class='name'><%= User.find(2).full_name %></div>")
|
||||
expect(tokens[0]).toEqual value: "<div class='name'>", scopes: ["text.html.ruby"]
|
||||
expect(tokens[1]).toEqual value: '<%=', scopes: ["text.html.ruby","source.ruby.rails.embedded.html","punctuation.section.embedded.ruby"]
|
||||
expect(tokens[2]).toEqual value: ' ', scopes: ["text.html.ruby","source.ruby.rails.embedded.html"]
|
||||
expect(tokens[3]).toEqual value: 'User', scopes: ["text.html.ruby","source.ruby.rails.embedded.html","support.class.ruby"]
|
||||
|
||||
atom.packages.activatePackage('language-html', sync: true)
|
||||
{tokens} = grammar.tokenizeLine("<div class='name'><%= User.find(2).full_name %></div>")
|
||||
expect(tokens[0]).toEqual value: '<', scopes: ["text.html.ruby","meta.tag.block.any.html","punctuation.definition.tag.begin.html"]
|
||||
expect(tokens[1]).toEqual value: 'div', scopes: ["text.html.ruby","meta.tag.block.any.html","entity.name.tag.block.any.html"]
|
||||
expect(tokens[2]).toEqual value: ' ', scopes: ["text.html.ruby","meta.tag.block.any.html"]
|
||||
expect(tokens[3]).toEqual value: 'class', scopes: ["text.html.ruby","meta.tag.block.any.html", "entity.other.attribute-name.html"]
|
||||
expect(tokens[4]).toEqual value: '=', scopes: ["text.html.ruby","meta.tag.block.any.html"]
|
||||
expect(tokens[5]).toEqual value: '\'', scopes: ["text.html.ruby","meta.tag.block.any.html","string.quoted.single.html","punctuation.definition.string.begin.html"]
|
||||
expect(tokens[6]).toEqual value: 'name', scopes: ["text.html.ruby","meta.tag.block.any.html","string.quoted.single.html"]
|
||||
expect(tokens[7]).toEqual value: '\'', scopes: ["text.html.ruby","meta.tag.block.any.html","string.quoted.single.html","punctuation.definition.string.end.html"]
|
||||
expect(tokens[8]).toEqual value: '>', scopes: ["text.html.ruby","meta.tag.block.any.html","punctuation.definition.tag.end.html"]
|
||||
expect(tokens[9]).toEqual value: '<%=', scopes: ["text.html.ruby","source.ruby.rails.embedded.html","punctuation.section.embedded.ruby"]
|
||||
expect(tokens[10]).toEqual value: ' ', scopes: ["text.html.ruby","source.ruby.rails.embedded.html"]
|
||||
|
||||
it "can parse a grammar with newline characters in its regular expressions (regression)", ->
|
||||
grammar = new TextMateGrammar
|
||||
name: "test"
|
||||
scopeName: "source.imaginaryLanguage"
|
||||
repository: {}
|
||||
patterns: [
|
||||
{
|
||||
name: "comment-body"
|
||||
begin: "//"
|
||||
end: "\\n"
|
||||
beginCaptures:
|
||||
"0": { name: "comment-start" }
|
||||
}
|
||||
]
|
||||
|
||||
{tokens, ruleStack} = grammar.tokenizeLine("// a singleLineComment")
|
||||
expect(ruleStack.length).toBe 1
|
||||
expect(ruleStack[0].scopeName).toBe "source.imaginaryLanguage"
|
||||
|
||||
expect(tokens.length).toBe 2
|
||||
expect(tokens[0].value).toBe "//"
|
||||
expect(tokens[1].value).toBe " a singleLineComment"
|
||||
|
||||
it "does not loop infinitely (regression)", ->
|
||||
grammar = atom.syntax.selectGrammar("hello.js")
|
||||
{tokens, ruleStack} = grammar.tokenizeLine("// line comment")
|
||||
{tokens, ruleStack} = grammar.tokenizeLine(" // second line comment with a single leading space", ruleStack)
|
||||
|
||||
describe "when inside a C block", ->
|
||||
beforeEach ->
|
||||
atom.packages.activatePackage('language-c', sync: true)
|
||||
|
||||
it "correctly parses a method. (regression)", ->
|
||||
grammar = atom.syntax.selectGrammar("hello.c")
|
||||
{tokens, ruleStack} = grammar.tokenizeLine("if(1){m()}")
|
||||
expect(tokens[5]).toEqual value: "m", scopes: ["source.c", "meta.block.c", "meta.function-call.c", "support.function.any-method.c"]
|
||||
|
||||
it "correctly parses nested blocks. (regression)", ->
|
||||
grammar = atom.syntax.selectGrammar("hello.c")
|
||||
{tokens, ruleStack} = grammar.tokenizeLine("if(1){if(1){m()}}")
|
||||
expect(tokens[5]).toEqual value: "if", scopes: ["source.c", "meta.block.c", "keyword.control.c"]
|
||||
expect(tokens[10]).toEqual value: "m", scopes: ["source.c", "meta.block.c", "meta.block.c", "meta.function-call.c", "support.function.any-method.c"]
|
||||
|
||||
describe "when the grammar can infinitely loop over a line", ->
|
||||
it "aborts tokenization", ->
|
||||
spyOn(console, 'error')
|
||||
atom.packages.activatePackage("package-with-infinite-loop-grammar")
|
||||
grammar = atom.syntax.selectGrammar("something.package-with-infinite-loop-grammar")
|
||||
{tokens} = grammar.tokenizeLine("abc")
|
||||
expect(tokens[0].value).toBe "a"
|
||||
expect(tokens[1].value).toBe "bc"
|
||||
expect(console.error).toHaveBeenCalled()
|
||||
|
||||
describe "when a grammar has a pattern that has back references in the match value", ->
|
||||
it "does not special handle the back references and instead allows oniguruma to resolve them", ->
|
||||
atom.packages.activatePackage('language-sass', sync: true)
|
||||
grammar = atom.syntax.selectGrammar("style.scss")
|
||||
{tokens} = grammar.tokenizeLine("@mixin x() { -moz-selector: whatever; }")
|
||||
expect(tokens[9]).toEqual value: "-moz-selector", scopes: ["source.css.scss", "meta.property-list.scss", "meta.property-name.scss"]
|
||||
|
||||
describe "when a line has more tokens than `maxTokensPerLine`", ->
|
||||
it "creates a final token with the remaining text and resets the ruleStack to match the begining of the line", ->
|
||||
grammar = atom.syntax.selectGrammar("hello.js")
|
||||
spyOn(grammar, 'getMaxTokensPerLine').andCallFake -> 5
|
||||
originalRuleStack = [grammar.initialRule, grammar.initialRule, grammar.initialRule]
|
||||
{tokens, ruleStack} = grammar.tokenizeLine("one(two(three(four(five(_param_)))))", originalRuleStack)
|
||||
expect(tokens.length).toBe 5
|
||||
expect(tokens[4].value).toBe "three(four(five(_param_)))))"
|
||||
expect(ruleStack).toEqual originalRuleStack
|
||||
|
||||
describe "when a grammar has a capture with patterns", ->
|
||||
it "matches the patterns and includes the scope specified as the pattern's match name", ->
|
||||
grammar = atom.syntax.selectGrammar("hello.php")
|
||||
{tokens} = grammar.tokenizeLine("<?php public final function meth() {} ?>")
|
||||
|
||||
expect(tokens[2].value).toBe "public"
|
||||
expect(tokens[2].scopes).toEqual ["text.html.php", "meta.embedded.line.php", "source.php", "meta.function.php", "storage.modifier.php"]
|
||||
|
||||
expect(tokens[3].value).toBe " "
|
||||
expect(tokens[3].scopes).toEqual ["text.html.php", "meta.embedded.line.php", "source.php", "meta.function.php"]
|
||||
|
||||
expect(tokens[4].value).toBe "final"
|
||||
expect(tokens[4].scopes).toEqual ["text.html.php", "meta.embedded.line.php", "source.php", "meta.function.php", "storage.modifier.php"]
|
||||
|
||||
expect(tokens[5].value).toBe " "
|
||||
expect(tokens[5].scopes).toEqual ["text.html.php", "meta.embedded.line.php", "source.php", "meta.function.php"]
|
||||
|
||||
expect(tokens[6].value).toBe "function"
|
||||
expect(tokens[6].scopes).toEqual ["text.html.php", "meta.embedded.line.php", "source.php", "meta.function.php", "storage.type.function.php"]
|
||||
|
||||
it "ignores child captures of a capture with patterns", ->
|
||||
grammar = new TextMateGrammar
|
||||
name: "test"
|
||||
scopeName: "source"
|
||||
repository: {}
|
||||
patterns: [
|
||||
{
|
||||
name: "text"
|
||||
match: "(a(b))"
|
||||
captures:
|
||||
"1":
|
||||
patterns: [
|
||||
{
|
||||
match: "ab"
|
||||
name: "a"
|
||||
}
|
||||
]
|
||||
"2":
|
||||
name: "b"
|
||||
}
|
||||
]
|
||||
{tokens} = grammar.tokenizeLine("ab")
|
||||
|
||||
expect(tokens[0].value).toBe "ab"
|
||||
expect(tokens[0].scopes).toEqual ["source", "text", "a"]
|
||||
|
||||
describe "when the grammar has injections", ->
|
||||
it "correctly includes the injected patterns when tokenizing", ->
|
||||
grammar = atom.syntax.selectGrammar("hello.php")
|
||||
{tokens} = grammar.tokenizeLine("<div><?php function hello() {} ?></div>")
|
||||
|
||||
expect(tokens[3].value).toBe "<?php"
|
||||
expect(tokens[3].scopes).toEqual ["text.html.php", "meta.embedded.line.php", "punctuation.section.embedded.begin.php"]
|
||||
|
||||
expect(tokens[5].value).toBe "function"
|
||||
expect(tokens[5].scopes).toEqual ["text.html.php", "meta.embedded.line.php", "source.php", "meta.function.php", "storage.type.function.php"]
|
||||
|
||||
expect(tokens[7].value).toBe "hello"
|
||||
expect(tokens[7].scopes).toEqual ["text.html.php", "meta.embedded.line.php", "source.php", "meta.function.php", "entity.name.function.php"]
|
||||
|
||||
expect(tokens[14].value).toBe "?"
|
||||
expect(tokens[14].scopes).toEqual ["text.html.php", "meta.embedded.line.php", "source.php", "punctuation.section.embedded.end.php", "source.php"]
|
||||
|
||||
expect(tokens[15].value).toBe ">"
|
||||
expect(tokens[15].scopes).toEqual ["text.html.php", "meta.embedded.line.php", "punctuation.section.embedded.end.php"]
|
||||
|
||||
expect(tokens[16].value).toBe "</"
|
||||
expect(tokens[16].scopes).toEqual ["text.html.php", "meta.tag.block.any.html", "punctuation.definition.tag.begin.html"]
|
||||
|
||||
expect(tokens[17].value).toBe "div"
|
||||
expect(tokens[17].scopes).toEqual ["text.html.php", "meta.tag.block.any.html", "entity.name.tag.block.any.html"]
|
||||
|
||||
describe "when the grammar's pattern name has a group number in it", ->
|
||||
it "replaces the group number with the matched captured text", ->
|
||||
atom.packages.activatePackage('language-hyperlink', sync: true)
|
||||
grammar = atom.syntax.grammarForScopeName("text.hyperlink")
|
||||
{tokens} = grammar.tokenizeLine("https://github.com")
|
||||
expect(tokens[0].scopes).toEqual ["text.hyperlink", "markup.underline.link.https.hyperlink"]
|
||||
|
||||
describe "when the grammar has an injection selector", ->
|
||||
it "includes the grammar's patterns when the selector matches the current scope in other grammars", ->
|
||||
atom.packages.activatePackage('language-hyperlink', sync: true)
|
||||
grammar = atom.syntax.selectGrammar("text.js")
|
||||
{tokens} = grammar.tokenizeLine("var i; // http://github.com")
|
||||
|
||||
expect(tokens[0].value).toBe "var"
|
||||
expect(tokens[0].scopes).toEqual ["source.js", "storage.modifier.js"]
|
||||
|
||||
expect(tokens[6].value).toBe "http://github.com"
|
||||
expect(tokens[6].scopes).toEqual ["source.js", "comment.line.double-slash.js", "markup.underline.link.http.hyperlink"]
|
||||
|
||||
describe "when the grammar is added", ->
|
||||
it "retokenizes existing buffers that contain tokens that match the injection selector", ->
|
||||
editor = atom.project.openSync('sample.js')
|
||||
editor.setText("// http://github.com")
|
||||
|
||||
{tokens} = editor.lineForScreenRow(0)
|
||||
expect(tokens[1].value).toBe " http://github.com"
|
||||
expect(tokens[1].scopes).toEqual ["source.js", "comment.line.double-slash.js"]
|
||||
|
||||
atom.packages.activatePackage('language-hyperlink', sync: true)
|
||||
|
||||
{tokens} = editor.lineForScreenRow(0)
|
||||
expect(tokens[2].value).toBe "http://github.com"
|
||||
expect(tokens[2].scopes).toEqual ["source.js", "comment.line.double-slash.js", "markup.underline.link.http.hyperlink"]
|
||||
|
||||
describe "when the grammar is updated", ->
|
||||
it "retokenizes existing buffers that contain tokens that match the injection selector", ->
|
||||
editor = atom.project.openSync('sample.js')
|
||||
editor.setText("// SELECT * FROM OCTOCATS")
|
||||
|
||||
{tokens} = editor.lineForScreenRow(0)
|
||||
expect(tokens[1].value).toBe " SELECT * FROM OCTOCATS"
|
||||
expect(tokens[1].scopes).toEqual ["source.js", "comment.line.double-slash.js"]
|
||||
|
||||
atom.syntax.addGrammar(new TextMateGrammar(
|
||||
name: "test"
|
||||
scopeName: "source.test"
|
||||
repository: {}
|
||||
injectionSelector: "comment"
|
||||
patterns: [ { include: "source.sql" } ]
|
||||
))
|
||||
|
||||
{tokens} = editor.lineForScreenRow(0)
|
||||
expect(tokens[1].value).toBe " SELECT * FROM OCTOCATS"
|
||||
expect(tokens[1].scopes).toEqual ["source.js", "comment.line.double-slash.js"]
|
||||
|
||||
atom.packages.activatePackage('language-sql', sync: true)
|
||||
|
||||
{tokens} = editor.lineForScreenRow(0)
|
||||
expect(tokens[2].value).toBe "SELECT"
|
||||
expect(tokens[2].scopes).toEqual ["source.js", "comment.line.double-slash.js", "keyword.other.DML.sql"]
|
||||
|
||||
describe "when the position doesn't advance and rule includes $self and matches itself", ->
|
||||
it "tokenizes the entire line using the rule", ->
|
||||
grammar = new TextMateGrammar
|
||||
name: "test"
|
||||
scopeName: "source"
|
||||
repository: {}
|
||||
patterns: [
|
||||
{
|
||||
name: "text"
|
||||
begin: "(?=forever)"
|
||||
end: "whatevs"
|
||||
patterns: [
|
||||
include: "$self"
|
||||
]
|
||||
}
|
||||
]
|
||||
|
||||
{tokens} = grammar.tokenizeLine("forever and ever")
|
||||
|
||||
expect(tokens.length).toBe 1
|
||||
expect(tokens[0].value).toBe "forever and ever"
|
||||
expect(tokens[0].scopes).toEqual ["source", "text"]
|
||||
|
||||
describe "${capture:/command} style pattern names", ->
|
||||
lines = null
|
||||
|
||||
beforeEach ->
|
||||
atom.packages.activatePackage('language-todo', sync: true)
|
||||
grammar = atom.syntax.selectGrammar('main.rb')
|
||||
lines = grammar.tokenizeLines "# TODO be nicer"
|
||||
|
||||
it "replaces the number with the capture group and translates the text", ->
|
||||
tokens = lines[0]
|
||||
expect(tokens[2].value).toEqual "TODO"
|
||||
expect(tokens[2].scopes).toEqual ["source.ruby", "comment.line.number-sign.ruby", "storage.type.class.todo"]
|
||||
|
||||
describe "language-specific integration tests", ->
|
||||
lines = null
|
||||
|
||||
describe "Git commit messages", ->
|
||||
beforeEach ->
|
||||
atom.packages.activatePackage('language-git', sync: true)
|
||||
grammar = atom.syntax.selectGrammar('COMMIT_EDITMSG')
|
||||
lines = grammar.tokenizeLines """
|
||||
longggggggggggggggggggggggggggggggggggggggggggggggg
|
||||
# Please enter the commit message for your changes. Lines starting
|
||||
"""
|
||||
|
||||
it "correctly parses a long line", ->
|
||||
tokens = lines[0]
|
||||
expect(tokens[0].value).toBe "longggggggggggggggggggggggggggggggggggggggggggggggg"
|
||||
expect(tokens[0].scopes).toEqual ["text.git-commit", "meta.scope.message.git-commit", "invalid.deprecated.line-too-long.git-commit"]
|
||||
|
||||
it "correctly parses the number sign of the first comment line", ->
|
||||
tokens = lines[1]
|
||||
expect(tokens[0].value).toBe "#"
|
||||
expect(tokens[0].scopes).toEqual ["text.git-commit", "meta.scope.metadata.git-commit", "comment.line.number-sign.git-commit", "punctuation.definition.comment.git-commit"]
|
||||
|
||||
describe "C++", ->
|
||||
beforeEach ->
|
||||
atom.packages.activatePackage('language-c', sync: true)
|
||||
grammar = atom.syntax.selectGrammar('includes.cc')
|
||||
lines = grammar.tokenizeLines """
|
||||
#include "a.h"
|
||||
#include "b.h"
|
||||
"""
|
||||
|
||||
it "correctly parses the first include line", ->
|
||||
tokens = lines[0]
|
||||
expect(tokens[0].value).toBe "#"
|
||||
expect(tokens[0].scopes).toEqual ["source.c++", "meta.preprocessor.c.include"]
|
||||
expect(tokens[1].value).toBe 'include'
|
||||
expect(tokens[1].scopes).toEqual ["source.c++", "meta.preprocessor.c.include", "keyword.control.import.include.c"]
|
||||
|
||||
it "correctly parses the second include line", ->
|
||||
tokens = lines[1]
|
||||
expect(tokens[0].value).toBe "#"
|
||||
expect(tokens[0].scopes).toEqual ["source.c++", "meta.preprocessor.c.include"]
|
||||
expect(tokens[1].value).toBe 'include'
|
||||
expect(tokens[1].scopes).toEqual ["source.c++", "meta.preprocessor.c.include", "keyword.control.import.include.c"]
|
||||
|
||||
describe "Ruby", ->
|
||||
beforeEach ->
|
||||
grammar = atom.syntax.selectGrammar('hello.rb')
|
||||
lines = grammar.tokenizeLines """
|
||||
a = {
|
||||
"b" => "c",
|
||||
}
|
||||
"""
|
||||
|
||||
it "doesn't loop infinitely (regression)", ->
|
||||
expect(_.pluck(lines[0], 'value').join('')).toBe 'a = {'
|
||||
expect(_.pluck(lines[1], 'value').join('')).toBe ' "b" => "c",'
|
||||
expect(_.pluck(lines[2], 'value').join('')).toBe '}'
|
||||
expect(_.pluck(lines[3], 'value').join('')).toBe ''
|
||||
|
||||
describe "Objective-C", ->
|
||||
beforeEach ->
|
||||
atom.packages.activatePackage('language-c', sync: true)
|
||||
atom.packages.activatePackage('language-objective-c', sync: true)
|
||||
grammar = atom.syntax.selectGrammar('function.mm')
|
||||
lines = grammar.tokenizeLines """
|
||||
void test() {
|
||||
NSString *a = @"a\\nb";
|
||||
}
|
||||
"""
|
||||
|
||||
it "correctly parses variable type when it is a built-in Cocoa class", ->
|
||||
tokens = lines[1]
|
||||
expect(tokens[0].value).toBe "NSString"
|
||||
expect(tokens[0].scopes).toEqual ["source.objc++", "meta.function.c", "meta.block.c", "support.class.cocoa"]
|
||||
|
||||
it "correctly parses the semicolon at the end of the line", ->
|
||||
tokens = lines[1]
|
||||
lastToken = _.last(tokens)
|
||||
expect(lastToken.value).toBe ";"
|
||||
expect(lastToken.scopes).toEqual ["source.objc++", "meta.function.c", "meta.block.c"]
|
||||
|
||||
it "correctly parses the string characters before the escaped character", ->
|
||||
tokens = lines[1]
|
||||
expect(tokens[2].value).toBe '@"'
|
||||
expect(tokens[2].scopes).toEqual ["source.objc++", "meta.function.c", "meta.block.c", "string.quoted.double.objc", "punctuation.definition.string.begin.objc"]
|
||||
|
||||
describe "Java", ->
|
||||
beforeEach ->
|
||||
atom.packages.activatePackage('language-java', sync: true)
|
||||
grammar = atom.syntax.selectGrammar('Function.java')
|
||||
|
||||
it "correctly parses single line comments", ->
|
||||
lines = grammar.tokenizeLines """
|
||||
public void test() {
|
||||
//comment
|
||||
}
|
||||
"""
|
||||
|
||||
tokens = lines[1]
|
||||
expect(tokens[0].scopes).toEqual ["source.java", "comment.line.double-slash.java", "punctuation.definition.comment.java"]
|
||||
expect(tokens[0].value).toEqual '//'
|
||||
expect(tokens[1].scopes).toEqual ["source.java", "comment.line.double-slash.java"]
|
||||
expect(tokens[1].value).toEqual 'comment'
|
||||
|
||||
it "correctly parses nested method calls", ->
|
||||
tokens = grammar.tokenizeLines('a(b(new Object[0]));')[0]
|
||||
lastToken = _.last(tokens)
|
||||
expect(lastToken.scopes).toEqual ['source.java', 'punctuation.terminator.java']
|
||||
expect(lastToken.value).toEqual ';'
|
||||
|
||||
describe "HTML (Ruby - ERB)", ->
|
||||
it "correctly parses strings inside tags", ->
|
||||
grammar = atom.syntax.selectGrammar('page.erb')
|
||||
lines = grammar.tokenizeLines '<% page_title "My Page" %>'
|
||||
tokens = lines[0]
|
||||
|
||||
expect(tokens[2].value).toEqual '"'
|
||||
expect(tokens[2].scopes).toEqual ["text.html.erb", "meta.embedded.line.erb", "string.quoted.double.ruby", "punctuation.definition.string.begin.ruby"]
|
||||
expect(tokens[3].value).toEqual 'My Page'
|
||||
expect(tokens[3].scopes).toEqual ["text.html.erb", "meta.embedded.line.erb", "string.quoted.double.ruby"]
|
||||
expect(tokens[4].value).toEqual '"'
|
||||
expect(tokens[4].scopes).toEqual ["text.html.erb", "meta.embedded.line.erb", "string.quoted.double.ruby", "punctuation.definition.string.end.ruby"]
|
||||
|
||||
it "does not loop infinitely on <%>", ->
|
||||
atom.packages.activatePackage('language-html', sync: true)
|
||||
atom.packages.activatePackage('language-ruby-on-rails', sync: true)
|
||||
|
||||
grammar = atom.syntax.selectGrammar('foo.html.erb')
|
||||
[tokens] = grammar.tokenizeLines '<%>'
|
||||
expect(tokens.length).toBe 1
|
||||
expect(tokens[0].value).toEqual '<%>'
|
||||
expect(tokens[0].scopes).toEqual ["text.html.erb"]
|
||||
|
||||
describe "Unicode support", ->
|
||||
describe "Surrogate pair characters", ->
|
||||
beforeEach ->
|
||||
grammar = atom.syntax.selectGrammar('main.js')
|
||||
lines = grammar.tokenizeLines "'\uD835\uDF97'"
|
||||
|
||||
it "correctly parses JavaScript strings containing surrogate pair characters", ->
|
||||
tokens = lines[0]
|
||||
expect(tokens.length).toBe 3
|
||||
expect(tokens[0].value).toBe "'"
|
||||
expect(tokens[1].value).toBe "\uD835\uDF97"
|
||||
expect(tokens[2].value).toBe "'"
|
||||
|
||||
describe "when the line contains unicode characters", ->
|
||||
it "correctly parses tokens starting after them", ->
|
||||
atom.packages.activatePackage('language-json', sync: true)
|
||||
grammar = atom.syntax.selectGrammar('package.json')
|
||||
{tokens} = grammar.tokenizeLine '{"\u2026": 1}'
|
||||
|
||||
expect(tokens.length).toBe 8
|
||||
expect(tokens[6].value).toBe '1'
|
||||
expect(tokens[6].scopes).toEqual ["source.json", "meta.structure.dictionary.json", "meta.structure.dictionary.value.json", "constant.numeric.json"]
|
||||
|
||||
describe "python", ->
|
||||
it "parses import blocks correctly", ->
|
||||
grammar = atom.syntax.selectGrammar("file.py")
|
||||
lines = grammar.tokenizeLines "import a\nimport b"
|
||||
|
||||
line1 = lines[0]
|
||||
expect(line1.length).toBe 3
|
||||
expect(line1[0].value).toEqual "import"
|
||||
expect(line1[0].scopes).toEqual ["source.python", "keyword.control.import.python"]
|
||||
expect(line1[1].value).toEqual " "
|
||||
expect(line1[1].scopes).toEqual ["source.python"]
|
||||
expect(line1[2].value).toEqual "a"
|
||||
expect(line1[2].scopes).toEqual ["source.python"]
|
||||
|
||||
line2 = lines[1]
|
||||
expect(line2.length).toBe 3
|
||||
expect(line2[0].value).toEqual "import"
|
||||
expect(line2[0].scopes).toEqual ["source.python", "keyword.control.import.python"]
|
||||
expect(line2[1].value).toEqual " "
|
||||
expect(line2[1].scopes).toEqual ["source.python"]
|
||||
expect(line2[2].value).toEqual "b"
|
||||
expect(line2[2].scopes).toEqual ["source.python"]
|
||||
@@ -1,4 +1,3 @@
-TextMateGrammar = require './text-mate-grammar'
 Package = require './package'
 fs = require 'fs-plus'
 path = require 'path'
@@ -105,7 +104,7 @@ class AtomPackage extends Package
     atom.keymap.add(keymapPath, map) for [keymapPath, map] in @keymaps
     atom.contextMenu.add(menuPath, map['context-menu']) for [menuPath, map] in @menus
     atom.menu.add(map.menu) for [menuPath, map] in @menus when map.menu
-    atom.syntax.addGrammar(grammar) for grammar in @grammars
+    grammar.activate() for grammar in @grammars
     for [scopedPropertiesPath, selector, properties] in @scopedProperties
       atom.syntax.addProperties(scopedPropertiesPath, selector, properties)

@@ -152,7 +151,7 @@ class AtomPackage extends Package
     @grammars = []
     grammarsDirPath = path.join(@path, 'grammars')
     for grammarPath in fs.listSync(grammarsDirPath, ['.json', '.cson'])
-      @grammars.push(TextMateGrammar.loadSync(grammarPath))
+      @grammars.push(atom.syntax.readGrammarSync(grammarPath))

   loadScopedProperties: ->
     @scopedProperties = []
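The load/registration split is the point of these two hunks: readGrammarSync parses a grammar file without registering it, activateResources registers via grammar.activate(), and deactivateResources (next hunk) undoes it with grammar.deactivate(). A hedged lifecycle sketch using only the calls visible in this diff:

    grammar = atom.syntax.readGrammarSync('/path/to/grammars/example.cson')
    grammar.activate()    # registers with the syntax registry
    grammar.deactivate()  # removes it when the package unloads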
@@ -180,7 +179,7 @@ class AtomPackage extends Package
     @configActivated = false

   deactivateResources: ->
-    atom.syntax.removeGrammar(grammar) for grammar in @grammars
+    grammar.deactivate() for grammar in @grammars
     atom.syntax.removeProperties(scopedPropertiesPath) for [scopedPropertiesPath] in @scopedProperties
     atom.keymap.remove(keymapPath) for [keymapPath] in @keymaps
     atom.themes.removeStylesheet(stylesheetPath) for [stylesheetPath] in @stylesheets
@@ -358,7 +358,7 @@ class DisplayBuffer extends Model

   # Get the grammar for this buffer.
   #
-  # Returns the current {TextMateGrammar} or the {NullGrammar}.
+  # Returns the current {Grammar} or the {NullGrammar}.
   getGrammar: ->
     @tokenizedBuffer.grammar

@@ -289,9 +289,6 @@ class LanguageMode
     if desiredIndentLevel >= 0 and desiredIndentLevel < currentIndentLevel
       @editor.setIndentationForBufferRow(bufferRow, desiredIndentLevel)

-  tokenizeLine: (line, stack, firstLine) ->
-    {tokens, stack} = @grammar.tokenizeLine(line, stack, firstLine)
-
   getRegexForProperty: (scopes, property) ->
     if pattern = atom.syntax.getProperty(scopes, property)
       new OnigRegExp(pattern)
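The removed tokenizeLine was a one-line pass-through to the grammar and is presumably no longer called; consumers can invoke the grammar directly, as the specs above do:

    {tokens, ruleStack} = grammar.tokenizeLine(line, ruleStack)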
@@ -1,23 +0,0 @@
Token = require './token'
{Emitter} = require 'emissary'

### Internal ###
module.exports =
class NullGrammar
  Emitter.includeInto(this)

  name: 'Null Grammar'
  scopeName: 'text.plain.null-grammar'

  getScore: -> 0

  tokenizeLine: (line) ->
    {tokens: [new Token(value: line, scopes: ['null-grammar.text.plain'])]}

  tokenizeLines: (text) ->
    lines = text.split('\n')
    for line, i in lines
      {tokens} = @tokenizeLine(line)
      tokens

  grammarUpdated: -> # noop
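NullGrammar is a textbook null object: it scores zero for every file and tokenizes each line as bare plain text, so callers never branch on a missing grammar. The deleted Syntax constructor below seeded @grammars with it, which is what kept selectGrammar a one-liner:

    selectGrammar: (filePath, fileContents) ->
      # @nullGrammar (score 0) is always present, so _.max always
      # returns a usable grammar even when nothing matches.
      _.max @grammars, (grammar) -> grammar.getScore(filePath, fileContents)

Now that Syntax extends first-mate's GrammarRegistry, the registry supplies the equivalent fallback and this local copy can go.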
@@ -1,15 +1,15 @@
 _ = require 'underscore-plus'
 {specificity} = require 'clear-cut'
+{Subscriber} = require 'emissary'
+{GrammarRegistry, ScopeSelector} = require 'first-mate'

 {$, $$} = require './space-pen-extensions'
-{Emitter} = require 'emissary'
-NullGrammar = require './null-grammar'
-TextMateScopeSelector = require('first-mate').ScopeSelector
-
-### Internal ###
 Token = require './token'

+### Public ###
 module.exports =
-class Syntax
-  Emitter.includeInto(this)
+class Syntax extends GrammarRegistry
+  Subscriber.includeInto(this)

   atom.deserializers.add(this)
@@ -19,53 +19,15 @@ class Syntax
|
||||
syntax
|
||||
|
||||
constructor: ->
|
||||
@nullGrammar = new NullGrammar
|
||||
@grammars = [@nullGrammar]
|
||||
@grammarsByScopeName = {}
|
||||
@injectionGrammars = []
|
||||
@grammarOverridesByPath = {}
|
||||
super
|
||||
|
||||
@scopedPropertiesIndex = 0
|
||||
@scopedProperties = []
|
||||
|
||||
  serialize: ->
    { deserializer: @constructor.name, @grammarOverridesByPath }
    {deserializer: @constructor.name, @grammarOverridesByPath}

  addGrammar: (grammar) ->
    previousGrammars = new Array(@grammars...)
    @grammars.push(grammar)
    @grammarsByScopeName[grammar.scopeName] = grammar
    @injectionGrammars.push(grammar) if grammar.injectionSelector?
    @grammarUpdated(grammar.scopeName)
    @emit 'grammar-added', grammar

  removeGrammar: (grammar) ->
    _.remove(@grammars, grammar)
    delete @grammarsByScopeName[grammar.scopeName]
    _.remove(@injectionGrammars, grammar)
    @grammarUpdated(grammar.scopeName)

  grammarUpdated: (scopeName) ->
    for grammar in @grammars when grammar.scopeName isnt scopeName
      @emit 'grammar-updated', grammar if grammar.grammarUpdated(scopeName)

  setGrammarOverrideForPath: (path, scopeName) ->
    @grammarOverridesByPath[path] = scopeName

  clearGrammarOverrideForPath: (path) ->
    delete @grammarOverridesByPath[path]

  clearGrammarOverrides: ->
    @grammarOverridesByPath = {}

  selectGrammar: (filePath, fileContents) ->
    grammar = _.max @grammars, (grammar) -> grammar.getScore(filePath, fileContents)
    grammar

  grammarOverrideForPath: (path) ->
    @grammarOverridesByPath[path]

  grammarForScopeName: (scopeName) ->
    @grammarsByScopeName[scopeName]
  createToken: (value, scopes) -> new Token({value, scopes})

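The one-line createToken addition is the seam between first-mate and Atom: the registry is assumed to call this hook whenever a grammar materializes a token, letting Syntax substitute Atom's richer Token class for first-mate's plain objects. A hedged check of the effect:

# Sketch: tokens produced via the registry should be Atom Token instances.
grammar = atom.syntax.grammarForScopeName('source.js')
{tokens} = grammar.tokenizeLine('var a = 1;')
console.log tokens[0] instanceof Token # expected true, via the createToken hook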
  addProperties: (args...) ->
    name = args.shift() if args.length > 2
@@ -132,4 +94,4 @@ class Syntax
    element[0]

  cssSelectorFromScopeSelector: (scopeSelector) ->
    new TextMateScopeSelector(scopeSelector).toCssSelector()
    new ScopeSelector(scopeSelector).toCssSelector()

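first-mate's ScopeSelector is a drop-in replacement for the old TextMateScopeSelector alias, and toCssSelector is what lets scoped properties and stylesheets match DOM nodes. Expected shape of the conversion (exact output may vary by first-mate version):

{ScopeSelector} = require 'first-mate'
selector = new ScopeSelector('source.js string.quoted')
console.log selector.toCssSelector() # roughly '.source.js .string.quoted'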
@@ -1,522 +0,0 @@
_ = require 'underscore-plus'
fs = require 'fs-plus'
Token = require './token'
{OnigRegExp, OnigScanner} = require 'oniguruma'
path = require 'path'
{Emitter} = require 'emissary'
{ScopeSelector} = require 'first-mate'

pathSplitRegex = new RegExp("[#{_.escapeRegExp(path.sep)}.]")

### Internal ###

module.exports =
class TextMateGrammar
  Emitter.includeInto(this)

  @load: (grammarPath, done) ->
    fs.readObject grammarPath, (error, object) ->
      if error?
        done(error)
      else
        done(null, new TextMateGrammar(object))

  @loadSync: (grammarPath) ->
    new TextMateGrammar(fs.readObjectSync(grammarPath))

  name: null
  rawPatterns: null
  rawRepository: null
  fileTypes: null
  scopeName: null
  repository: null
  initialRule: null
  firstLineRegex: null
  includedGrammarScopes: null
  maxTokensPerLine: 100

  constructor: ({ @name, @fileTypes, @scopeName, injections, injectionSelector, patterns, repository, @foldingStopMarker, firstLineMatch}) ->
    @rawPatterns = patterns
    @rawRepository = repository
    @injections = new Injections(this, injections)

    if injectionSelector?
      @injectionSelector = new ScopeSelector(injectionSelector)

    @firstLineRegex = new OnigRegExp(firstLineMatch) if firstLineMatch
    @fileTypes ?= []
    @includedGrammarScopes = []

  clearRules: ->
    @initialRule = null
    @repository = null

  getInitialRule: ->
    @initialRule ?= new Rule(this, {@scopeName, patterns: @rawPatterns})

  getRepository: ->
    @repository ?= do =>
      repository = {}
      for name, data of @rawRepository
        data = {patterns: [data], tempName: name} if data.begin? or data.match?
        repository[name] = new Rule(this, data)
      repository

  addIncludedGrammarScope: (scope) ->
    @includedGrammarScopes.push(scope) unless _.include(@includedGrammarScopes, scope)

  grammarUpdated: (scopeName) ->
    return false unless _.include(@includedGrammarScopes, scopeName)
    @clearRules()
    atom.syntax.grammarUpdated(@scopeName)
    @emit 'grammar-updated'
    true

  getScore: (filePath, contents) ->
    contents = fs.readFileSync(filePath, 'utf8') if not contents? and fs.isFileSync(filePath)

    if atom.syntax.grammarOverrideForPath(filePath) is @scopeName
      2 + (filePath?.length ? 0)
    else if @matchesContents(contents)
      1 + (filePath?.length ? 0)
    else
      @getPathScore(filePath)

  matchesContents: (contents) ->
    return false unless contents? and @firstLineRegex?

    escaped = false
    numberOfNewlinesInRegex = 0
    for character in @firstLineRegex.source
      switch character
        when '\\'
          escaped = !escaped
        when 'n'
          numberOfNewlinesInRegex++ if escaped
          escaped = false
        else
          escaped = false
    lines = contents.split('\n')
    @firstLineRegex.test(lines[0..numberOfNewlinesInRegex].join('\n'))

  getPathScore: (filePath) ->
    return -1 unless filePath?

    pathComponents = filePath.split(pathSplitRegex)
    pathScore = -1
    @fileTypes.forEach (fileType) ->
      fileTypeComponents = fileType.split(pathSplitRegex)
      pathSuffix = pathComponents[-fileTypeComponents.length..-1]
      if _.isEqual(pathSuffix, fileTypeComponents)
        pathScore = Math.max(pathScore, fileType.length)

    pathScore

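Read together, getScore and getPathScore define a three-tier ranking: an explicit per-path override (score 2 + path length) beats a first-line match (1 + path length), which beats the longest matching fileTypes suffix. A worked sketch of the suffix tie-break:

# Sketch: why 'config.coffee.erb' prefers a grammar listing 'coffee.erb'
# over one listing only 'erb'; getPathScore returns the fileType's length:
#   fileType 'erb'        -> pathScore 3
#   fileType 'coffee.erb' -> pathScore 10
grammar = atom.syntax.selectGrammar('config.coffee.erb')
# _.max over getScore picks the 'coffee.erb' grammar, all else being equal.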
  tokenizeLine: (line, ruleStack=[@getInitialRule()], firstLine=false) ->
    originalRuleStack = ruleStack
    ruleStack = new Array(ruleStack...) # clone ruleStack
    tokens = []
    position = 0

    loop
      scopes = scopesFromStack(ruleStack)
      previousRuleStackLength = ruleStack.length
      previousPosition = position

      if tokens.length >= (@getMaxTokensPerLine() - 1)
        token = new Token(value: line[position..], scopes: scopes)
        tokens.push token
        ruleStack = originalRuleStack
        break

      break if position == line.length + 1 # include trailing newline position

      if match = _.last(ruleStack).getNextTokens(ruleStack, line, position, firstLine)
        { nextTokens, tokensStartPosition, tokensEndPosition } = match
        if position < tokensStartPosition # unmatched text before next tokens
          tokens.push(new Token(
            value: line[position...tokensStartPosition]
            scopes: scopes
          ))

        tokens.push(nextTokens...)
        position = tokensEndPosition
        break if position is line.length and nextTokens.length is 0 and ruleStack.length is previousRuleStackLength

      else # push filler token for unmatched text at end of line
        if position < line.length or line.length == 0
          tokens.push(new Token(
            value: line[position...line.length]
            scopes: scopes
          ))
        break

      if position == previousPosition
        if ruleStack.length == previousRuleStackLength
          console.error("Popping rule because it loops at column #{position} of line '#{line}'", _.clone(ruleStack))
          ruleStack.pop()
        else if ruleStack.length > previousRuleStackLength # Stack size increased with zero length match
          [penultimateRule, lastRule] = ruleStack[-2..]

          # Same exact rule was pushed but position wasn't advanced
          if lastRule? and lastRule == penultimateRule
            popStack = true

          # Rule with same scope name as previous rule was pushed but position wasn't advanced
          if lastRule?.scopeName? and penultimateRule.scopeName == lastRule.scopeName
            popStack = true

          if popStack
            ruleStack.pop()
            tokens.push(new Token(
              value: line[position...line.length]
              scopes: scopes
            ))
            break

    ruleStack.forEach (rule) -> rule.clearAnchorPosition()
    { tokens, ruleStack }

  tokenizeLines: (text) ->
    lines = text.split('\n')
    ruleStack = null
    for line, i in lines
      { tokens, ruleStack } = @tokenizeLine(line, ruleStack, i is 0)
      tokens

  getMaxTokensPerLine: ->
    @maxTokensPerLine

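The contract worth calling out: tokenizeLine returns the ruleStack alongside the tokens, and callers must thread the previous line's stack into the next call or multi-line constructs (block comments, heredocs) fall apart, exactly as tokenizeLines does above. A usage sketch, where grammar stands in for any loaded grammar:

# Sketch: carry the rule stack across a line boundary.
{tokens, ruleStack} = grammar.tokenizeLine('/* start of a comment', null, true)
# The second line stays scoped as a comment only because ruleStack is reused:
{tokens} = grammar.tokenizeLine('still inside */', ruleStack)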
class Injections
  @injections: null

  constructor: (grammar, injections={}) ->
    @injections = []
    @scanners = {}
    for selector, values of injections
      continue unless values?.patterns?.length > 0
      patterns = []
      anchored = false
      for regex in values.patterns
        pattern = new Pattern(grammar, regex)
        anchored = true if pattern.anchored
        patterns.push(pattern.getIncludedPatterns(grammar, patterns)...)
      @injections.push
        anchored: anchored
        selector: new ScopeSelector(selector)
        patterns: patterns

  getScanner: (injection, firstLine, position, anchorPosition) ->
    return injection.scanner if injection.scanner?

    regexes = _.map injection.patterns, (pattern) ->
      pattern.getRegex(firstLine, position, anchorPosition)
    scanner = new OnigScanner(regexes)
    scanner.patterns = injection.patterns
    scanner.anchored = injection.anchored
    injection.scanner = scanner unless scanner.anchored
    scanner

  getScanners: (ruleStack, firstLine, position, anchorPosition) ->
    scanners = []
    scopes = scopesFromStack(ruleStack)
    for injection in @injections
      if injection.selector.matches(scopes)
        scanner = @getScanner(injection, firstLine, position, anchorPosition)
        scanners.push(scanner)
    scanners

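Injections let a grammar contribute patterns inside another grammar's scopes: each entry pairs a ScopeSelector with extra patterns, and getScanners only activates the ones whose selector matches the current scope stack. A sketch of the raw injections data this class consumes; the shape follows the TextMate grammar format, and the example rule is illustrative:

# Sketch: highlight TODO markers, but only inside block comments.
injections =
  'comment.block':            # selector gating the injection
    patterns: [
      {match: 'TODO|FIXME', name: 'storage.type.class.todo'}
    ]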
class Rule
  grammar: null
  scopeName: null
  patterns: null
  scannersByBaseGrammarName: null
  createEndPattern: null
  anchorPosition: -1

  constructor: (@grammar, {@scopeName, patterns, @endPattern}) ->
    patterns ?= []
    @patterns = patterns.map (pattern) => new Pattern(grammar, pattern)
    @patterns.unshift(@endPattern) if @endPattern and !@endPattern.hasBackReferences
    @scannersByBaseGrammarName = {}

  getIncludedPatterns: (baseGrammar, included=[]) ->
    return [] if _.include(included, this)

    included = included.concat([this])
    allPatterns = []
    for pattern in @patterns
      allPatterns.push(pattern.getIncludedPatterns(baseGrammar, included)...)
    allPatterns

  clearAnchorPosition: -> @anchorPosition = -1

  createScanner: (patterns, firstLine, position) ->
    anchored = false
    regexes = _.map patterns, (pattern) =>
      anchored = true if pattern.anchored
      pattern.getRegex(firstLine, position, @anchorPosition)

    scanner = new OnigScanner(regexes)
    scanner.patterns = patterns
    scanner.anchored = anchored
    scanner

  getScanner: (baseGrammar, position, firstLine) ->
    return scanner if scanner = @scannersByBaseGrammarName[baseGrammar.name]

    patterns = @getIncludedPatterns(baseGrammar)
    scanner = @createScanner(patterns, firstLine, position)
    @scannersByBaseGrammarName[baseGrammar.name] = scanner unless scanner.anchored
    scanner

  scanInjections: (ruleStack, line, position, firstLine) ->
    baseGrammar = ruleStack[0].grammar
    if injections = baseGrammar.injections
      # Argument order fixed to match getScanners(ruleStack, firstLine, position, anchorPosition)
      scanners = injections.getScanners(ruleStack, firstLine, position, @anchorPosition)
      for scanner in scanners
        result = scanner.findNextMatch(line, position)
        return result if result?

  normalizeCaptureIndices: (line, captureIndices) ->
    lineLength = line.length
    captureIndices.forEach (capture) ->
      capture.end = Math.min(capture.end, lineLength)
      capture.start = Math.min(capture.start, lineLength)

  findNextMatch: (ruleStack, line, position, firstLine) ->
    lineWithNewline = "#{line}\n"
    baseGrammar = ruleStack[0].grammar
    results = []

    scanner = @getScanner(baseGrammar, position, firstLine)
    if result = scanner.findNextMatch(lineWithNewline, position)
      results.push(result)

    if result = @scanInjections(ruleStack, lineWithNewline, position, firstLine)
      results.push(result)

    scopes = scopesFromStack(ruleStack)
    for injectionGrammar in _.without(atom.syntax.injectionGrammars, @grammar, baseGrammar)
      if injectionGrammar.injectionSelector.matches(scopes)
        scanner = injectionGrammar.getInitialRule().getScanner(injectionGrammar, position, firstLine)
        if result = scanner.findNextMatch(lineWithNewline, position)
          results.push(result)

    if results.length > 0
      _.min results, (result) =>
        @normalizeCaptureIndices(line, result.captureIndices)
        result.captureIndices[0].start

  getNextTokens: (ruleStack, line, position, firstLine) ->
    result = @findNextMatch(ruleStack, line, position, firstLine)
    return null unless result?
    { index, captureIndices, scanner } = result
    firstCapture = captureIndices[0]
    nextTokens = scanner.patterns[index].handleMatch(ruleStack, line, captureIndices)
    { nextTokens, tokensStartPosition: firstCapture.start, tokensEndPosition: firstCapture.end }

  getRuleToPush: (line, beginPatternCaptureIndices) ->
    if @endPattern.hasBackReferences
      rule = new Rule(@grammar, {@scopeName})
      rule.endPattern = @endPattern.resolveBackReferences(line, beginPatternCaptureIndices)
      rule.patterns = [rule.endPattern, @patterns...]
      rule
    else
      this

class Pattern
  grammar: null
  pushRule: null
  popRule: false
  scopeName: null
  captures: null
  backReferences: null
  anchored: false

  constructor: (@grammar, { name, contentName, @include, match, begin, end, captures, beginCaptures, endCaptures, patterns, @popRule, @hasBackReferences}) ->
    @scopeName = name ? contentName # TODO: We need special treatment of contentName
    if match
      if (end or @popRule) and @hasBackReferences ?= /\\\d+/.test(match)
        @match = match
      else
        @regexSource = match
      @captures = captures
    else if begin
      @regexSource = begin
      @captures = beginCaptures ? captures
      endPattern = new Pattern(@grammar, { match: end, captures: endCaptures ? captures, popRule: true})
      @pushRule = new Rule(@grammar, { @scopeName, patterns, endPattern })

    if @captures?
      for group, capture of @captures
        if capture.patterns?.length > 0 and not capture.rule
          capture.scopeName = @scopeName
          capture.rule = new Rule(@grammar, capture)

    @anchored = @hasAnchor()

  getRegex: (firstLine, position, anchorPosition) ->
    if @anchored
      @replaceAnchor(firstLine, position, anchorPosition)
    else
      @regexSource

  hasAnchor: ->
    return false unless @regexSource
    escape = false
    for character in @regexSource.split('')
      return true if escape and 'AGz'.indexOf(character) isnt -1
      escape = not escape and character is '\\'
    false

  replaceAnchor: (firstLine, offset, anchor) ->
    escaped = []
    placeholder = '\uFFFF'
    escape = false
    for character in @regexSource.split('')
      if escape
        switch character
          when 'A'
            if firstLine
              escaped.push("\\#{character}")
            else
              escaped.push(placeholder)
          when 'G'
            if offset is anchor
              escaped.push("\\#{character}")
            else
              escaped.push(placeholder)
          when 'z' then escaped.push('$(?!\n)(?<!\n)')
          else escaped.push("\\#{character}")
        escape = false
      else if character is '\\'
        escape = true
      else
        escaped.push(character)

    escaped.join('')

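replaceAnchor exists because Oniguruma's \A, \G and \z anchors are position-sensitive, but each buffer line is scanned as an isolated string: anchors that cannot possibly match at the current position are rewritten to the \uFFFF placeholder, which matches nothing. A worked sketch, where grammar stands in for any TextMateGrammar instance:

# Sketch: a '\A#' pattern should only fire on the buffer's first line.
pattern = new Pattern(grammar, {match: '\\A#', name: 'comment.line.number-sign'})
console.log pattern.anchored               # true: hasAnchor() saw the escaped A
console.log pattern.getRegex(true, 0, -1)  # prints \A# (anchor kept on line 0)
console.log pattern.getRegex(false, 5, -1) # prints the \uFFFF placeholder plus #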
  resolveBackReferences: (line, beginCaptureIndices) ->
    beginCaptures = []

    for {start, end} in beginCaptureIndices
      beginCaptures.push line[start...end]

    resolvedMatch = @match.replace /\\\d+/g, (match) ->
      index = parseInt(match[1..])
      _.escapeRegExp(beginCaptures[index] ? "\\#{index}")

    new Pattern(@grammar, { hasBackReferences: false, match: resolvedMatch, @captures, @popRule })

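Back-references in an end pattern cannot be compiled ahead of time, which is why getRuleToPush in Rule above builds a fresh rule per match and this method splices the captured text in, regex-escaped so it matches literally. A worked sketch:

# Sketch: a heredoc-style rule with begin '<<(\w+)' and end '\1'. After the
# begin pattern matches '<<EOF', the end pattern must match literal 'EOF'.
line = '<<EOF'
beginCaptureIndices = [{start: 0, end: 5}, {start: 2, end: 5}] # full match, group 1
# resolveBackReferences(line, beginCaptureIndices) rewrites '\\1' to
# _.escapeRegExp('EOF') and returns a new, back-reference-free Pattern.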
  ruleForInclude: (baseGrammar, name) ->
    if name[0] == "#"
      @grammar.getRepository()[name[1..]]
    else if name == "$self"
      @grammar.getInitialRule()
    else if name == "$base"
      baseGrammar.getInitialRule()
    else
      @grammar.addIncludedGrammarScope(name)
      atom.syntax.grammarForScopeName(name)?.getInitialRule()

  getIncludedPatterns: (baseGrammar, included) ->
    if @include
      rule = @ruleForInclude(baseGrammar, @include)
      rule?.getIncludedPatterns(baseGrammar, included) ? []
    else
      [this]

  resolveScopeName: (line, captureIndices) ->
    resolvedScopeName = @scopeName.replace /\${(\d+):\/(downcase|upcase)}/, (match, index, command) ->
      capture = captureIndices[parseInt(index)]
      if capture?
        replacement = line.substring(capture.start, capture.end)
        switch command
          when 'downcase' then replacement.toLowerCase()
          when 'upcase' then replacement.toUpperCase()
          else replacement
      else
        match

    resolvedScopeName.replace /\$(\d+)/, (match, index) ->
      capture = captureIndices[parseInt(index)]
      if capture?
        line.substring(capture.start, capture.end)
      else
        match

  handleMatch: (stack, line, captureIndices) ->
    scopes = scopesFromStack(stack)
    if @scopeName and not @popRule
      scopes.push(@resolveScopeName(line, captureIndices))

    if @captures
      tokens = @getTokensForCaptureIndices(line, _.clone(captureIndices), scopes, stack)
    else
      {start, end} = captureIndices[0]
      zeroLengthMatch = end == start
      if zeroLengthMatch
        tokens = []
      else
        tokens = [new Token(value: line[start...end], scopes: scopes)]
    if @pushRule
      ruleToPush = @pushRule.getRuleToPush(line, captureIndices)
      ruleToPush.anchorPosition = captureIndices[0].end
      stack.push(ruleToPush)
    else if @popRule
      stack.pop()

    tokens

  getTokensForCaptureRule: (rule, line, captureStart, captureEnd, scopes, stack) ->
    captureText = line.substring(captureStart, captureEnd)
    {tokens} = rule.grammar.tokenizeLine(captureText, [stack..., rule])
    tokens

  getTokensForCaptureIndices: (line, captureIndices, scopes, stack) ->
    parentCapture = captureIndices.shift()

    tokens = []
    if scope = @captures[parentCapture.index]?.name
      scopes = scopes.concat(scope)

    if captureRule = @captures[parentCapture.index]?.rule
      captureTokens = @getTokensForCaptureRule(captureRule, line, parentCapture.start, parentCapture.end, scopes, stack)
      tokens.push(captureTokens...)
      # Consume child captures
      while captureIndices.length and captureIndices[0].start < parentCapture.end
        captureIndices.shift()
    else
      previousChildCaptureEnd = parentCapture.start
      while captureIndices.length and captureIndices[0].start < parentCapture.end
        childCapture = captureIndices[0]

        emptyCapture = childCapture.end - childCapture.start == 0
        captureHasNoScope = not @captures[childCapture.index]
        if emptyCapture or captureHasNoScope
          captureIndices.shift()
          continue

        if childCapture.start > previousChildCaptureEnd
          tokens.push(new Token(
            value: line[previousChildCaptureEnd...childCapture.start]
            scopes: scopes
          ))

        captureTokens = @getTokensForCaptureIndices(line, captureIndices, scopes, stack)
        tokens.push(captureTokens...)
        previousChildCaptureEnd = childCapture.end

      if parentCapture.end > previousChildCaptureEnd
        tokens.push(new Token(
          value: line[previousChildCaptureEnd...parentCapture.end]
          scopes: scopes
        ))

    tokens

### Internal ###

scopesFromStack = (stack) ->
  _.compact(_.pluck(stack, "scopeName"))
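scopesFromStack is the bridge from the rule stack to the scope arrays carried by every Token: each pushed rule contributes its scopeName and unnamed rules are compacted away. A tiny sketch:

stack = [
  {scopeName: 'source.js'}
  {scopeName: 'string.quoted.double.js'}
  {scopeName: null} # unnamed rules contribute no scope
]
# scopesFromStack(stack) yields ['source.js', 'string.quoted.double.js']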
@@ -2,7 +2,6 @@ Package = require './package'
path = require 'path'
_ = require 'underscore-plus'
fs = require 'fs-plus'
TextMateGrammar = require './text-mate-grammar'
async = require 'async'

### Internal ###
@@ -41,14 +40,14 @@ class TextMatePackage extends Package

  activate: ->
    @measure 'activateTime', =>
      atom.syntax.addGrammar(grammar) for grammar in @grammars
      grammar.activate() for grammar in @grammars
      for { selector, properties } in @scopedProperties
        atom.syntax.addProperties(@path, selector, properties)

  activateConfig: -> # noop

  deactivate: ->
    atom.syntax.removeGrammar(grammar) for grammar in @grammars
    grammar.deactivate() for grammar in @grammars
    atom.syntax.removeProperties(@path)

  legalGrammarExtensions: ['plist', 'tmLanguage', 'tmlanguage', 'json', 'cson']
@@ -66,18 +65,20 @@ class TextMatePackage extends Package
      done()

  loadGrammarAtPath: (grammarPath, done) =>
    TextMateGrammar.load grammarPath, (err, grammar) =>
      return console.log("Error loading grammar at path '#{grammarPath}':", err.stack ? err) if err
      @addGrammar(grammar)
      done()
    atom.syntax.readGrammar grammarPath, (error, grammar) =>
      if error?
        console.log("Error loading grammar at path '#{grammarPath}':", error.stack ? error)
      else
        @addGrammar(grammar)
      done?()

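The substance of this hunk: grammar parsing moves from the local TextMateGrammar class to atom.syntax (first-mate's GrammarRegistry), which owns reading and caching. The error-first callback shape mirrors the code above; the path below is illustrative:

# Sketch: read one grammar through the registry without registering it.
atom.syntax.readGrammar '/path/to/grammar.cson', (error, grammar) ->
  if error?
    console.log "Grammar failed to load:", error.stack ? error
  else
    atom.syntax.addGrammar(grammar)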
  loadGrammarsSync: ->
    for grammarPath in fs.listSync(@getSyntaxesPath(), @legalGrammarExtensions)
      @addGrammar(TextMateGrammar.loadSync(grammarPath))
      @addGrammar(atom.syntax.readGrammarSync(grammarPath))

  addGrammar: (grammar) ->
    @grammars.push(grammar)
    atom.syntax.addGrammar(grammar) if @isActive()
    grammar.activate() if @isActive()

  getGrammars: -> @grammars
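Note the lifecycle split the reworked addGrammar creates: loading parses and caches grammars on the package, while grammar.activate() (the per-grammar hook this diff switches to) waits for package activation. A hedged sketch, where pack stands in for a loaded TextMatePackage:

# Sketch: activation tracks the package lifecycle.
pack.loadGrammarsSync()   # grammars parsed and cached, nothing active yet
pack.activate()           # grammar.activate() runs for each cached grammar
pack.deactivate()         # grammar.deactivate() unhooks them again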