From 871b7406cd457ade45667d240faf43543e00c9da Mon Sep 17 00:00:00 2001 From: Ben Ogle Date: Thu, 19 Dec 2013 15:41:29 -0800 Subject: [PATCH 01/39] Terminate the old search if another is run. --- src/project.coffee | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/src/project.coffee b/src/project.coffee index a5eb712db..16ddb91d7 100644 --- a/src/project.coffee +++ b/src/project.coffee @@ -275,14 +275,19 @@ class Project extends Model excludeVcsIgnores: atom.config.get('core.excludeVcsIgnoredPaths') exclusions: atom.config.get('core.ignoredNames') - task = Task.once require.resolve('./scan-handler'), @getPath(), regex.source, searchOptions, -> + if @scanTask? + console.log 'terminating!' + @scanTask.terminate() + + @scanTask = Task.once require.resolve('./scan-handler'), @getPath(), regex.source, searchOptions, => + @scanTask = null deferred.resolve() - task.on 'scan:result-found', (result) => + @scanTask.on 'scan:result-found', (result) => iterator(result) unless @isPathModified(result.filePath) if _.isFunction(options.onPathsSearched) - task.on 'scan:paths-searched', (numberOfPathsSearched) -> + @scanTask.on 'scan:paths-searched', (numberOfPathsSearched) -> options.onPathsSearched(numberOfPathsSearched) for buffer in @buffers.getValues() when buffer.isModified() From ffbd15eb98e73630834a67efbdc4647b3d6aff2c Mon Sep 17 00:00:00 2001 From: Ben Ogle Date: Thu, 19 Dec 2013 16:12:29 -0800 Subject: [PATCH 02/39] Upgrade scandal@0.9.0 --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index dd91d532c..20ea6851a 100644 --- a/package.json +++ b/package.json @@ -40,7 +40,7 @@ "pathwatcher": "0.11.0", "pegjs": "0.7.0", "q": "0.9.7", - "scandal": "0.8.0", + "scandal": "0.9.0", "season": "0.14.0", "semver": "1.1.4", "space-pen": "2.0.2", From 8763a49dc6afe7b5e32a4e293eb8d2dc9260828f Mon Sep 17 00:00:00 2001 From: Ben Ogle Date: Thu, 19 Dec 2013 16:39:01 -0800 Subject: [PATCH 03/39] Remove log line --- src/project.coffee | 1 - 1 file changed, 1 deletion(-) diff --git a/src/project.coffee b/src/project.coffee index 16ddb91d7..6357ee449 100644 --- a/src/project.coffee +++ b/src/project.coffee @@ -276,7 +276,6 @@ class Project extends Model exclusions: atom.config.get('core.ignoredNames') if @scanTask? - console.log 'terminating!' @scanTask.terminate() @scanTask = Task.once require.resolve('./scan-handler'), @getPath(), regex.source, searchOptions, => From 5fdb3196a3f54541b10c6117af72ea6630cbb27b Mon Sep 17 00:00:00 2001 From: Ben Ogle Date: Thu, 19 Dec 2013 17:08:18 -0800 Subject: [PATCH 04/39] Add cancelScan() --- src/project.coffee | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/src/project.coffee b/src/project.coffee index 6357ee449..86e1393d3 100644 --- a/src/project.coffee +++ b/src/project.coffee @@ -275,8 +275,7 @@ class Project extends Model excludeVcsIgnores: atom.config.get('core.excludeVcsIgnoredPaths') exclusions: atom.config.get('core.ignoredNames') - if @scanTask? - @scanTask.terminate() + @cancelScan() @scanTask = Task.once require.resolve('./scan-handler'), @getPath(), regex.source, searchOptions, => @scanTask = null @@ -297,6 +296,12 @@ class Project extends Model deferred.promise + # Public: Cancels the current scan task if there is one running. + cancelScan: -> + if @scanTask? + @scanTask.terminate() + @scanTask = null + # Public: Performs a replace across all the specified files in the project. 
# # * regex: A RegExp to search with From 11ec93992443c0d5b72cd04cc5936c547c0ebd08 Mon Sep 17 00:00:00 2001 From: Ben Ogle Date: Fri, 20 Dec 2013 15:03:39 -0800 Subject: [PATCH 05/39] Add a cancel() function to the promise --- src/project.coffee | 21 ++++++++------------- 1 file changed, 8 insertions(+), 13 deletions(-) diff --git a/src/project.coffee b/src/project.coffee index 86e1393d3..81a330c0e 100644 --- a/src/project.coffee +++ b/src/project.coffee @@ -275,17 +275,14 @@ class Project extends Model excludeVcsIgnores: atom.config.get('core.excludeVcsIgnoredPaths') exclusions: atom.config.get('core.ignoredNames') - @cancelScan() - - @scanTask = Task.once require.resolve('./scan-handler'), @getPath(), regex.source, searchOptions, => - @scanTask = null + task = Task.once require.resolve('./scan-handler'), @getPath(), regex.source, searchOptions, => deferred.resolve() - @scanTask.on 'scan:result-found', (result) => + task.on 'scan:result-found', (result) => iterator(result) unless @isPathModified(result.filePath) if _.isFunction(options.onPathsSearched) - @scanTask.on 'scan:paths-searched', (numberOfPathsSearched) -> + task.on 'scan:paths-searched', (numberOfPathsSearched) -> options.onPathsSearched(numberOfPathsSearched) for buffer in @buffers.getValues() when buffer.isModified() @@ -294,13 +291,11 @@ class Project extends Model buffer.scan regex, (match) -> matches.push match iterator {filePath, matches} if matches.length > 0 - deferred.promise - - # Public: Cancels the current scan task if there is one running. - cancelScan: -> - if @scanTask? - @scanTask.terminate() - @scanTask = null + promise = deferred.promise + promise.cancel = -> + task.terminate() + deferred.reject('cancelled') + promise # Public: Performs a replace across all the specified files in the project. 
# From 458d3b3d3cca031ad7b950766811df326fc09713 Mon Sep 17 00:00:00 2001 From: Ben Ogle Date: Fri, 20 Dec 2013 15:04:17 -0800 Subject: [PATCH 06/39] Remove fat arrow --- src/project.coffee | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/project.coffee b/src/project.coffee index 81a330c0e..49effe0bb 100644 --- a/src/project.coffee +++ b/src/project.coffee @@ -275,7 +275,7 @@ class Project extends Model excludeVcsIgnores: atom.config.get('core.excludeVcsIgnoredPaths') exclusions: atom.config.get('core.ignoredNames') - task = Task.once require.resolve('./scan-handler'), @getPath(), regex.source, searchOptions, => + task = Task.once require.resolve('./scan-handler'), @getPath(), regex.source, searchOptions, -> deferred.resolve() task.on 'scan:result-found', (result) => From f5ca836e49e37ac4bcbb6118576febaba2188e1c Mon Sep 17 00:00:00 2001 From: Ben Ogle Date: Sat, 21 Dec 2013 09:00:08 -0800 Subject: [PATCH 07/39] Add background-tips package Fixes #1217 --- package.json | 1 + 1 file changed, 1 insertion(+) diff --git a/package.json b/package.json index 20ea6851a..545f9ec00 100644 --- a/package.json +++ b/package.json @@ -85,6 +85,7 @@ "autocomplete": "0.19.0", "autoflow": "0.11.0", "autosave": "0.10.0", + "background-tips": "0.1.0", "bookmarks": "0.15.0", "bracket-matcher": "0.15.0", "command-logger": "0.8.0", From f59080ec74d1061898a9c6a900225f19d92ab42d Mon Sep 17 00:00:00 2001 From: Kevin Sawicki Date: Tue, 24 Dec 2013 09:14:16 -0800 Subject: [PATCH 08/39] Upgrade to language-gfm@0.11.0 --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 545f9ec00..41e637599 100644 --- a/package.json +++ b/package.json @@ -127,7 +127,7 @@ "language-clojure": "0.1.0", "language-coffee-script": "0.4.0", "language-css": "0.2.0", - "language-gfm": "0.10.0", + "language-gfm": "0.11.0", "language-git": "0.3.0", "language-go": "0.2.0", "language-html": "0.2.0", From 470ce7bd22a1989cc8ec36f7ccb637b6a57e9468 Mon Sep 17 00:00:00 2001 From: Jason Rudolph Date: Fri, 27 Dec 2013 08:24:46 -0500 Subject: [PATCH 09/39] Use default OS X keyboard shortcut to hide other apps --- keymaps/darwin.cson | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/keymaps/darwin.cson b/keymaps/darwin.cson index d19ae9ac3..ae2c5a8a9 100644 --- a/keymaps/darwin.cson +++ b/keymaps/darwin.cson @@ -2,7 +2,7 @@ # Apple specific 'cmd-q': 'application:quit' 'cmd-h': 'application:hide' - 'cmd-H': 'application:hide-other-applications' + 'cmd-alt-h': 'application:hide-other-applications' 'cmd-m': 'application:minimize' 'alt-cmd-ctrl-m': 'application:zoom' From 8e970b64b8aecad0052e8e252c28deff58495580 Mon Sep 17 00:00:00 2001 From: probablycorey Date: Mon, 30 Dec 2013 10:45:01 -0800 Subject: [PATCH 10/39] Change window:reload keybinding to `ctrl-alt-cmd-l` Fixes #1157 --- keymaps/darwin.cson | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/keymaps/darwin.cson b/keymaps/darwin.cson index d19ae9ac3..f5aa53e8f 100644 --- a/keymaps/darwin.cson +++ b/keymaps/darwin.cson @@ -26,7 +26,7 @@ 'down': 'core:move-down' 'left': 'core:move-left' 'right': 'core:move-right' - 'ctrl-alt-cmd-r': 'window:reload' + 'ctrl-alt-cmd-l': 'window:reload' 'alt-cmd-i': 'window:toggle-dev-tools' 'cmd-alt-ctrl-p': 'window:run-package-specs' From bac76784e04ac3bb4060d9ed1ed45bca125b8603 Mon Sep 17 00:00:00 2001 From: probablycorey Date: Mon, 30 Dec 2013 13:54:04 -0800 Subject: [PATCH 11/39] cmd-left moves the cursor to column 0 on lines only containing whitespace 
Fixes #1344 --- spec/editor-spec.coffee | 7 +++++++ src/cursor.coffee | 4 +--- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/spec/editor-spec.coffee b/spec/editor-spec.coffee index 48116684d..69c6e2b04 100644 --- a/spec/editor-spec.coffee +++ b/spec/editor-spec.coffee @@ -432,6 +432,13 @@ describe "Editor", -> expect(cursor1.getBufferPosition()).toEqual [0,0] expect(cursor2.getBufferPosition()).toEqual [1,0] + it "moves to the beginning of the line if it only contains whitespace ", -> + editor.setText("first\n \nthird") + editor.setCursorScreenPosition [1,2] + editor.moveCursorToFirstCharacterOfLine() + cursor = editor.getCursor() + expect(cursor.getBufferPosition()).toEqual [1,0] + describe ".moveCursorToBeginningOfWord()", -> it "moves the cursor to the beginning of the word", -> editor.setCursorBufferPosition [0, 8] diff --git a/src/cursor.coffee b/src/cursor.coffee index 46ba11619..b334b19ff 100644 --- a/src/cursor.coffee +++ b/src/cursor.coffee @@ -261,9 +261,7 @@ class Cursor screenline = @editor.lineForScreenRow(row) goalColumn = screenline.text.search(/\S/) - return if goalColumn == -1 - - goalColumn = 0 if goalColumn == column + goalColumn = 0 if goalColumn == column or goalColumn == -1 @setScreenPosition([row, goalColumn]) # Public: Moves the cursor to the beginning of the buffer line, skipping all From 97aed1f680544b5ab1ab2e0e4c9f80dd2773518f Mon Sep 17 00:00:00 2001 From: probablycorey Date: Mon, 30 Dec 2013 14:00:38 -0800 Subject: [PATCH 12/39] ctrl-a moves the cursor to the beginning of the line on OS X Keybinding now matches default OS behavior --- keymaps/darwin.cson | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/keymaps/darwin.cson b/keymaps/darwin.cson index f5aa53e8f..f0fd71bac 100644 --- a/keymaps/darwin.cson +++ b/keymaps/darwin.cson @@ -99,7 +99,7 @@ 'cmd-shift-right': 'editor:select-to-end-of-line' 'alt-backspace': 'editor:backspace-to-beginning-of-word' 'alt-delete': 'editor:delete-to-end-of-word' - 'ctrl-a': 'editor:move-to-first-character-of-line' + 'ctrl-a': 'editor:move-to-beginning-of-line' 'ctrl-e': 'editor:move-to-end-of-line' 'ctrl-k': 'editor:cut-to-end-of-line' From c37b8840078d3e14b69286adf278b2e70572abbd Mon Sep 17 00:00:00 2001 From: probablycorey Date: Mon, 30 Dec 2013 17:53:21 -0800 Subject: [PATCH 13/39] Add moveCursorToEnd/BeginningOfBufferLine Fixes #1123 --- keymaps/base.cson | 2 +- keymaps/darwin.cson | 2 +- spec/editor-spec.coffee | 30 ++++++++++++++++++++++++------ src/cursor.coffee | 14 +++++++++++--- src/editor-view.coffee | 2 ++ src/editor.coffee | 8 ++++++++ 6 files changed, 47 insertions(+), 11 deletions(-) diff --git a/keymaps/base.cson b/keymaps/base.cson index 689ad1732..980a42969 100644 --- a/keymaps/base.cson +++ b/keymaps/base.cson @@ -5,7 +5,7 @@ 'alt-shift-left': 'editor:select-to-beginning-of-word' 'alt-shift-right': 'editor:select-to-end-of-word' 'home': 'editor:move-to-first-character-of-line' - 'end': 'editor:move-to-end-of-line' + 'end': 'editor:move-to-end-of-screen-line' 'shift-home': 'editor:select-to-first-character-of-line' 'shift-end': 'editor:select-to-end-of-line' diff --git a/keymaps/darwin.cson b/keymaps/darwin.cson index f0fd71bac..d5b1f4680 100644 --- a/keymaps/darwin.cson +++ b/keymaps/darwin.cson @@ -94,7 +94,7 @@ 'ctrl-A': 'editor:select-to-first-character-of-line' 'ctrl-E': 'editor:select-to-end-of-line' 'cmd-left': 'editor:move-to-first-character-of-line' - 'cmd-right': 'editor:move-to-end-of-line' + 'cmd-right': 'editor:move-to-end-of-screen-line' 'cmd-shift-left': 
'editor:select-to-first-character-of-line' 'cmd-shift-right': 'editor:select-to-end-of-line' 'alt-backspace': 'editor:backspace-to-beginning-of-word' diff --git a/spec/editor-spec.coffee b/spec/editor-spec.coffee index 69c6e2b04..a770214aa 100644 --- a/spec/editor-spec.coffee +++ b/spec/editor-spec.coffee @@ -361,13 +361,13 @@ describe "Editor", -> expect(editor.getCursors().length).toBe 1 expect(editor.getCursorBufferPosition()).toEqual [12,2] - describe ".moveCursorToBeginningOfLine()", -> + describe ".moveCursorToBeginningOfScreenLine()", -> describe "when soft wrap is on", -> it "moves cursor to the beginning of the screen line", -> editor.setSoftWrap(true) editor.setEditorWidthInChars(10) editor.setCursorScreenPosition([1, 2]) - editor.moveCursorToBeginningOfLine() + editor.moveCursorToBeginningOfScreenLine() cursor = editor.getCursor() expect(cursor.getScreenPosition()).toEqual [1, 0] @@ -375,19 +375,19 @@ describe "Editor", -> it "moves cursor to the beginning of then line", -> editor.setCursorScreenPosition [0,5] editor.addCursorAtScreenPosition [1,7] - editor.moveCursorToBeginningOfLine() + editor.moveCursorToBeginningOfScreenLine() expect(editor.getCursors().length).toBe 2 [cursor1, cursor2] = editor.getCursors() expect(cursor1.getBufferPosition()).toEqual [0,0] expect(cursor2.getBufferPosition()).toEqual [1,0] - describe ".moveCursorToEndOfLine()", -> + describe ".moveCursorToEndOfScreenLine()", -> describe "when soft wrap is on", -> it "moves cursor to the beginning of the screen line", -> editor.setSoftWrap(true) editor.setEditorWidthInChars(10) editor.setCursorScreenPosition([1, 2]) - editor.moveCursorToEndOfLine() + editor.moveCursorToEndOfScreenLine() cursor = editor.getCursor() expect(cursor.getScreenPosition()).toEqual [1, 9] @@ -395,12 +395,30 @@ describe "Editor", -> it "moves cursor to the end of line", -> editor.setCursorScreenPosition [0,0] editor.addCursorAtScreenPosition [1,0] - editor.moveCursorToEndOfLine() + editor.moveCursorToEndOfScreenLine() expect(editor.getCursors().length).toBe 2 [cursor1, cursor2] = editor.getCursors() expect(cursor1.getBufferPosition()).toEqual [0,29] expect(cursor2.getBufferPosition()).toEqual [1,30] + describe ".moveCursorToBeginningOfLine()", -> + it "moves cursor to the beginning of the buffer line", -> + editor.setSoftWrap(true) + editor.setEditorWidthInChars(10) + editor.setCursorScreenPosition([1, 2]) + editor.moveCursorToBeginningOfLine() + cursor = editor.getCursor() + expect(cursor.getScreenPosition()).toEqual [0, 0] + + describe ".moveCursorToEndOfLine()", -> + it "moves cursor to the end of the buffer line", -> + editor.setSoftWrap(true) + editor.setEditorWidthInChars(10) + editor.setCursorScreenPosition([0, 2]) + editor.moveCursorToEndOfLine() + cursor = editor.getCursor() + expect(cursor.getScreenPosition()).toEqual [3, 4] + describe ".moveCursorToFirstCharacterOfLine()", -> describe "when soft wrap is on", -> it "moves to the first character of the current screen line or the beginning of the screen line if it's already on the first character", -> diff --git a/src/cursor.coffee b/src/cursor.coffee index b334b19ff..718d260b1 100644 --- a/src/cursor.coffee +++ b/src/cursor.coffee @@ -250,10 +250,14 @@ class Cursor moveToBottom: -> @setBufferPosition(@editor.getEofBufferPosition()) - # Public: Moves the cursor to the beginning of the screen line. - moveToBeginningOfLine: -> + # Public: Moves the cursor to the beginning of the line. 
+ moveToBeginningOfScreenLine: -> @setScreenPosition([@getScreenRow(), 0]) + # Public: Moves the cursor to the beginning of the buffer line. + moveToBeginningOfLine: -> + @setBufferPosition([@getBufferRow(), 0]) + # Public: Moves the cursor to the beginning of the first character in the # line. moveToFirstCharacterOfLine: -> @@ -275,9 +279,13 @@ class Cursor @setBufferPosition(endOfLeadingWhitespace) if endOfLeadingWhitespace.isGreaterThan(position) + # Public: Moves the cursor to the end of the line. + moveToEndOfScreenLine: -> + @setScreenPosition([@getScreenRow(), Infinity]) + # Public: Moves the cursor to the end of the buffer line. moveToEndOfLine: -> - @setScreenPosition([@getScreenRow(), Infinity]) + @setBufferPosition([@getBufferRow(), Infinity]) # Public: Moves the cursor to the beginning of the word. moveToBeginningOfWord: -> diff --git a/src/editor-view.coffee b/src/editor-view.coffee index 67a5d3ce7..85f7025e3 100644 --- a/src/editor-view.coffee +++ b/src/editor-view.coffee @@ -139,7 +139,9 @@ class EditorView extends View 'editor:delete-to-end-of-word': @deleteToEndOfWord 'editor:delete-line': @deleteLine 'editor:cut-to-end-of-line': @cutToEndOfLine + 'editor:move-to-beginning-of-screen-line': => @editor.moveCursorToBeginningOfScreenLine() 'editor:move-to-beginning-of-line': @moveCursorToBeginningOfLine + 'editor:move-to-end-of-screen-line': => @editor.moveCursorToEndOfScreenLine() 'editor:move-to-end-of-line': @moveCursorToEndOfLine 'editor:move-to-first-character-of-line': @moveCursorToFirstCharacterOfLine 'editor:move-to-beginning-of-word': @moveCursorToBeginningOfWord diff --git a/src/editor.coffee b/src/editor.coffee index f0ec6dcf5..a5c23e2c2 100644 --- a/src/editor.coffee +++ b/src/editor.coffee @@ -1120,6 +1120,10 @@ class Editor extends Model @moveCursors (cursor) -> cursor.moveToBottom() # Public: Moves every local cursor to the beginning of the line. + moveCursorToBeginningOfScreenLine: -> + @moveCursors (cursor) -> cursor.moveToBeginningOfScreenLine() + + # Public: Moves every local cursor to the beginning of the buffer line. moveCursorToBeginningOfLine: -> @moveCursors (cursor) -> cursor.moveToBeginningOfLine() @@ -1128,6 +1132,10 @@ class Editor extends Model @moveCursors (cursor) -> cursor.moveToFirstCharacterOfLine() # Public: Moves every local cursor to the end of the line. + moveCursorToEndOfScreenLine: -> + @moveCursors (cursor) -> cursor.moveToEndOfScreenLine() + + # Public: Moves every local cursor to the end of the buffer line. moveCursorToEndOfLine: -> @moveCursors (cursor) -> cursor.moveToEndOfLine() From 8d2e1b7e4356d1ab21e8d2af07cd50dee5cdc281 Mon Sep 17 00:00:00 2001 From: probablycorey Date: Tue, 31 Dec 2013 08:38:27 -0800 Subject: [PATCH 14/39] Selection::selectToEndOfLine acts on screen lines. --- src/selection.coffee | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/selection.coffee b/src/selection.coffee index 984d29849..cf4e54725 100644 --- a/src/selection.coffee +++ b/src/selection.coffee @@ -216,7 +216,7 @@ class Selection # Public: Selects all the text from the current cursor position to the end of # the line. selectToEndOfLine: -> - @modifySelection => @cursor.moveToEndOfLine() + @modifySelection => @cursor.moveToEndOfScreenLine() # Public: Selects all the text from the current cursor position to the # beginning of the word. 
From 30a175230a6b63859df21c87c66c71cdc23636b7 Mon Sep 17 00:00:00 2001 From: probablycorey Date: Tue, 31 Dec 2013 11:11:10 -0800 Subject: [PATCH 15/39] Update feedback package --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 41e637599..5b1b98edb 100644 --- a/package.json +++ b/package.json @@ -93,7 +93,7 @@ "dev-live-reload": "0.20.0", "editor-stats": "0.9.0", "exception-reporting": "0.9.0", - "feedback": "0.20.0", + "feedback": "0.21.0", "find-and-replace": "0.63.0", "fuzzy-finder": "0.28.0", "gists": "0.13.0", From 686ebf87596b552d856604825e1debf878d70226 Mon Sep 17 00:00:00 2001 From: Kevin Sawicki Date: Tue, 31 Dec 2013 11:58:08 -0800 Subject: [PATCH 16/39] Upgrade to spell-check@0.18.0 for spec description tweaks --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 5b1b98edb..946b67b57 100644 --- a/package.json +++ b/package.json @@ -110,7 +110,7 @@ "release-notes": "0.15.0", "settings-view": "0.52.0", "snippets": "0.17.0", - "spell-check": "0.17.0", + "spell-check": "0.18.0", "status-bar": "0.27.0", "styleguide": "0.19.0", "symbols-view": "0.27.0", From 902406c5728924bb4df17cbefdb2ccb15d223fc1 Mon Sep 17 00:00:00 2001 From: Kevin Sawicki Date: Tue, 31 Dec 2013 14:40:45 -0800 Subject: [PATCH 17/39] Enable harmony collections in all scripts --- script/bootstrap | 2 +- script/build | 2 +- script/cibuild | 2 +- script/clean | 2 +- script/test | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/script/bootstrap b/script/bootstrap index b666fd8d8..87c18f88b 100755 --- a/script/bootstrap +++ b/script/bootstrap @@ -1,4 +1,4 @@ -#!/usr/bin/env node +#!/usr/bin/env node --harmony_collections var safeExec = require('./utils/child-process-wrapper.js').safeExec; var fs = require('fs'); var path = require('path'); diff --git a/script/build b/script/build index bb65d7607..b2022c6ae 100755 --- a/script/build +++ b/script/build @@ -1,4 +1,4 @@ -#!/usr/bin/env node +#!/usr/bin/env node --harmony_collections var cp = require('./utils/child-process-wrapper.js'); var path = require('path'); diff --git a/script/cibuild b/script/cibuild index f79fe7f40..8999f0af9 100755 --- a/script/cibuild +++ b/script/cibuild @@ -1,4 +1,4 @@ -#!/usr/bin/env node +#!/usr/bin/env node --harmony_collections var cp = require('./utils/child-process-wrapper.js'); var fs = require('fs'); var path = require('path'); diff --git a/script/clean b/script/clean index 859efb00c..1c6590ae3 100755 --- a/script/clean +++ b/script/clean @@ -1,4 +1,4 @@ -#!/usr/bin/env node +#!/usr/bin/env node --harmony_collections var cp = require('./utils/child-process-wrapper.js'); var path = require('path'); var os = require('os'); diff --git a/script/test b/script/test index 2c3b37147..cb32a470b 100755 --- a/script/test +++ b/script/test @@ -1,4 +1,4 @@ -#!/usr/bin/env node +#!/usr/bin/env node --harmony_collections var safeExec = require('./utils/child-process-wrapper.js').safeExec; var path = require('path'); From 22a7c25104c4b1020c5871fbb41a57a82fded3c2 Mon Sep 17 00:00:00 2001 From: Kevin Sawicki Date: Tue, 31 Dec 2013 15:04:04 -0800 Subject: [PATCH 18/39] :lipstick: Remove unneeded requires, variables, commas, and parens --- spec/spec-helper-platform.coffee | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/spec/spec-helper-platform.coffee b/spec/spec-helper-platform.coffee index d7d7de10b..90c5e7a5d 100644 --- a/spec/spec-helper-platform.coffee +++ 
b/spec/spec-helper-platform.coffee @@ -1,8 +1,6 @@ path = require 'path' fs = require 'fs-plus' -{_} = require 'atom' - ## Platform specific helpers module.exports = # Public: Returns true if being run from within Windows @@ -18,20 +16,20 @@ module.exports = fs.removeSync(evilFilesPath) if fs.existsSync(evilFilesPath) fs.mkdirSync(evilFilesPath) - if (@isWindows()) + if @isWindows() filenames = [ - "a_file_with_utf8.txt", - "file with spaces.txt", + "a_file_with_utf8.txt" + "file with spaces.txt" "utfa\u0306.md" ] else filenames = [ - "a_file_with_utf8.txt", - "file with spaces.txt", - "goddam\nnewlines", - "quote\".txt", + "a_file_with_utf8.txt" + "file with spaces.txt" + "goddam\nnewlines" + "quote\".txt" "utfa\u0306.md" ] for filename in filenames - fd = fs.writeFileSync(path.join(evilFilesPath, filename), 'evil file!', flag: 'w') + fs.writeFileSync(path.join(evilFilesPath, filename), 'evil file!', flag: 'w') From 33c135350081e0572a96ea4de3614162f9efe9c4 Mon Sep 17 00:00:00 2001 From: Kevin Sawicki Date: Tue, 31 Dec 2013 15:11:29 -0800 Subject: [PATCH 19/39] Only generate evil files when needed --- spec/project-spec.coffee | 1 + spec/spec-helper.coffee | 3 --- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/spec/project-spec.coffee b/spec/project-spec.coffee index 5f199b575..36de21747 100644 --- a/spec/project-spec.coffee +++ b/spec/project-spec.coffee @@ -403,6 +403,7 @@ describe "Project", -> range: [[2, 6], [2, 11]] it "works on evil filenames", -> + platform.generateEvilFiles() atom.project.setPath(path.join(__dirname, 'fixtures', 'evil-files')) paths = [] matches = [] diff --git a/spec/spec-helper.coffee b/spec/spec-helper.coffee index 618f81503..59766b88d 100644 --- a/spec/spec-helper.coffee +++ b/spec/spec-helper.coffee @@ -13,11 +13,8 @@ Editor = require '../src/editor' EditorView = require '../src/editor-view' TokenizedBuffer = require '../src/tokenized-buffer' pathwatcher = require 'pathwatcher' -platform = require './spec-helper-platform' clipboard = require 'clipboard' -platform.generateEvilFiles() - atom.themes.loadBaseStylesheets() atom.themes.requireStylesheet '../static/jasmine' From a78613b7e5acbd92af390682245edce0d971baba Mon Sep 17 00:00:00 2001 From: Kevin Sawicki Date: Tue, 31 Dec 2013 09:13:06 -0800 Subject: [PATCH 20/39] Upgrade to first-mate@0.6.0 --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 946b67b57..6ab168562 100644 --- a/package.json +++ b/package.json @@ -25,7 +25,7 @@ "coffeestack": "0.6.0", "diff": "git://github.com/benogle/jsdiff.git", "emissary": "0.19.0", - "first-mate": "0.5.0", + "first-mate": "0.6.0", "fs-plus": "0.13.0", "fuzzaldrin": "0.1.0", "git-utils": "0.29.0", From a59c01c6bedb64fe4064385215ed681f30d71f25 Mon Sep 17 00:00:00 2001 From: Kevin Sawicki Date: Tue, 31 Dec 2013 09:43:05 -0800 Subject: [PATCH 21/39] Move editor-specific grammar specs to editor-spec --- spec/editor-spec.coffee | 58 ++++++++++++++++++++++++++++++ spec/text-mate-grammar-spec.coffee | 54 ---------------------------- 2 files changed, 58 insertions(+), 54 deletions(-) diff --git a/spec/editor-spec.coffee b/spec/editor-spec.coffee index a770214aa..d05896226 100644 --- a/spec/editor-spec.coffee +++ b/spec/editor-spec.coffee @@ -1,4 +1,5 @@ clipboard = require 'clipboard' +TextMateGrammar = require '../src/text-mate-grammar' describe "Editor", -> [buffer, editor, lineLengths] = [] @@ -2698,3 +2699,60 @@ describe "Editor", -> expect(editor.getCursorBufferPosition()).toEqual [0, 2] 
editor.moveCursorLeft() expect(editor.getCursorBufferPosition()).toEqual [0, 0] + + describe "when the editor's grammar has an injection selector", -> + beforeEach -> + atom.packages.activatePackage('language-javascript', sync: true) + + it "includes the grammar's patterns when the selector matches the current scope in other grammars", -> + atom.packages.activatePackage('language-hyperlink', sync: true) + grammar = atom.syntax.selectGrammar("text.js") + {tokens} = grammar.tokenizeLine("var i; // http://github.com") + + expect(tokens[0].value).toBe "var" + expect(tokens[0].scopes).toEqual ["source.js", "storage.modifier.js"] + + expect(tokens[6].value).toBe "http://github.com" + expect(tokens[6].scopes).toEqual ["source.js", "comment.line.double-slash.js", "markup.underline.link.http.hyperlink"] + + describe "when the grammar is added", -> + it "retokenizes existing buffers that contain tokens that match the injection selector", -> + editor = atom.project.openSync('sample.js') + editor.setText("// http://github.com") + + {tokens} = editor.lineForScreenRow(0) + expect(tokens[1].value).toBe " http://github.com" + expect(tokens[1].scopes).toEqual ["source.js", "comment.line.double-slash.js"] + + atom.packages.activatePackage('language-hyperlink', sync: true) + + {tokens} = editor.lineForScreenRow(0) + expect(tokens[2].value).toBe "http://github.com" + expect(tokens[2].scopes).toEqual ["source.js", "comment.line.double-slash.js", "markup.underline.link.http.hyperlink"] + + describe "when the grammar is updated", -> + it "retokenizes existing buffers that contain tokens that match the injection selector", -> + editor = atom.project.openSync('sample.js') + editor.setText("// SELECT * FROM OCTOCATS") + + {tokens} = editor.lineForScreenRow(0) + expect(tokens[1].value).toBe " SELECT * FROM OCTOCATS" + expect(tokens[1].scopes).toEqual ["source.js", "comment.line.double-slash.js"] + + atom.syntax.addGrammar(new TextMateGrammar( + name: "test" + scopeName: "source.test" + repository: {} + injectionSelector: "comment" + patterns: [ { include: "source.sql" } ] + )) + + {tokens} = editor.lineForScreenRow(0) + expect(tokens[1].value).toBe " SELECT * FROM OCTOCATS" + expect(tokens[1].scopes).toEqual ["source.js", "comment.line.double-slash.js"] + + atom.packages.activatePackage('language-sql', sync: true) + + {tokens} = editor.lineForScreenRow(0) + expect(tokens[2].value).toBe "SELECT" + expect(tokens[2].scopes).toEqual ["source.js", "comment.line.double-slash.js", "keyword.other.DML.sql"] diff --git a/spec/text-mate-grammar-spec.coffee b/spec/text-mate-grammar-spec.coffee index f4984be8e..011450815 100644 --- a/spec/text-mate-grammar-spec.coffee +++ b/spec/text-mate-grammar-spec.coffee @@ -433,60 +433,6 @@ describe "TextMateGrammar", -> {tokens} = grammar.tokenizeLine("https://github.com") expect(tokens[0].scopes).toEqual ["text.hyperlink", "markup.underline.link.https.hyperlink"] - describe "when the grammar has an injection selector", -> - it "includes the grammar's patterns when the selector matches the current scope in other grammars", -> - atom.packages.activatePackage('language-hyperlink', sync: true) - grammar = atom.syntax.selectGrammar("text.js") - {tokens} = grammar.tokenizeLine("var i; // http://github.com") - - expect(tokens[0].value).toBe "var" - expect(tokens[0].scopes).toEqual ["source.js", "storage.modifier.js"] - - expect(tokens[6].value).toBe "http://github.com" - expect(tokens[6].scopes).toEqual ["source.js", "comment.line.double-slash.js", "markup.underline.link.http.hyperlink"] - - 
describe "when the grammar is added", -> - it "retokenizes existing buffers that contain tokens that match the injection selector", -> - editor = atom.project.openSync('sample.js') - editor.setText("// http://github.com") - - {tokens} = editor.lineForScreenRow(0) - expect(tokens[1].value).toBe " http://github.com" - expect(tokens[1].scopes).toEqual ["source.js", "comment.line.double-slash.js"] - - atom.packages.activatePackage('language-hyperlink', sync: true) - - {tokens} = editor.lineForScreenRow(0) - expect(tokens[2].value).toBe "http://github.com" - expect(tokens[2].scopes).toEqual ["source.js", "comment.line.double-slash.js", "markup.underline.link.http.hyperlink"] - - describe "when the grammar is updated", -> - it "retokenizes existing buffers that contain tokens that match the injection selector", -> - editor = atom.project.openSync('sample.js') - editor.setText("// SELECT * FROM OCTOCATS") - - {tokens} = editor.lineForScreenRow(0) - expect(tokens[1].value).toBe " SELECT * FROM OCTOCATS" - expect(tokens[1].scopes).toEqual ["source.js", "comment.line.double-slash.js"] - - atom.syntax.addGrammar(new TextMateGrammar( - name: "test" - scopeName: "source.test" - repository: {} - injectionSelector: "comment" - patterns: [ { include: "source.sql" } ] - )) - - {tokens} = editor.lineForScreenRow(0) - expect(tokens[1].value).toBe " SELECT * FROM OCTOCATS" - expect(tokens[1].scopes).toEqual ["source.js", "comment.line.double-slash.js"] - - atom.packages.activatePackage('language-sql', sync: true) - - {tokens} = editor.lineForScreenRow(0) - expect(tokens[2].value).toBe "SELECT" - expect(tokens[2].scopes).toEqual ["source.js", "comment.line.double-slash.js", "keyword.other.DML.sql"] - describe "when the position doesn't advance and rule includes $self and matches itself", -> it "tokenizes the entire line using the rule", -> grammar = new TextMateGrammar From 6f5d85edb924741eb00e83f155a04c46007501e3 Mon Sep 17 00:00:00 2001 From: Kevin Sawicki Date: Tue, 31 Dec 2013 10:32:24 -0800 Subject: [PATCH 22/39] Use grammar registry from first-mate --- package.json | 2 +- spec/editor-spec.coffee | 10 +--- .../grammars/grammar.cson | 4 ++ src/atom-package.coffee | 3 +- src/display-buffer.coffee | 2 +- src/syntax.coffee | 53 ++++++++----------- src/text-mate-package.coffee | 10 ++-- src/tokenized-line.coffee | 2 + 8 files changed, 38 insertions(+), 48 deletions(-) create mode 100644 spec/fixtures/packages/package-with-injection-selector/grammars/grammar.cson diff --git a/package.json b/package.json index 6ab168562..70791dc4d 100644 --- a/package.json +++ b/package.json @@ -25,7 +25,7 @@ "coffeestack": "0.6.0", "diff": "git://github.com/benogle/jsdiff.git", "emissary": "0.19.0", - "first-mate": "0.6.0", + "first-mate": "0.7.0", "fs-plus": "0.13.0", "fuzzaldrin": "0.1.0", "git-utils": "0.29.0", diff --git a/spec/editor-spec.coffee b/spec/editor-spec.coffee index d05896226..0a93eb91b 100644 --- a/spec/editor-spec.coffee +++ b/spec/editor-spec.coffee @@ -1,5 +1,4 @@ clipboard = require 'clipboard' -TextMateGrammar = require '../src/text-mate-grammar' describe "Editor", -> [buffer, editor, lineLengths] = [] @@ -2702,6 +2701,7 @@ describe "Editor", -> describe "when the editor's grammar has an injection selector", -> beforeEach -> + atom.packages.activatePackage('language-text', sync: true) atom.packages.activatePackage('language-javascript', sync: true) it "includes the grammar's patterns when the selector matches the current scope in other grammars", -> @@ -2739,13 +2739,7 @@ describe "Editor", -> 
expect(tokens[1].value).toBe " SELECT * FROM OCTOCATS" expect(tokens[1].scopes).toEqual ["source.js", "comment.line.double-slash.js"] - atom.syntax.addGrammar(new TextMateGrammar( - name: "test" - scopeName: "source.test" - repository: {} - injectionSelector: "comment" - patterns: [ { include: "source.sql" } ] - )) + atom.packages.activatePackage('package-with-injection-selector', sync: true) {tokens} = editor.lineForScreenRow(0) expect(tokens[1].value).toBe " SELECT * FROM OCTOCATS" diff --git a/spec/fixtures/packages/package-with-injection-selector/grammars/grammar.cson b/spec/fixtures/packages/package-with-injection-selector/grammars/grammar.cson new file mode 100644 index 000000000..3cee5379e --- /dev/null +++ b/spec/fixtures/packages/package-with-injection-selector/grammars/grammar.cson @@ -0,0 +1,4 @@ +'name': 'test' +'scopeName': 'source.test' +'injectionSelector': 'comment' +'patterns': [{'include': 'source.sql'}] diff --git a/src/atom-package.coffee b/src/atom-package.coffee index 06f85f5e3..204364d15 100644 --- a/src/atom-package.coffee +++ b/src/atom-package.coffee @@ -1,4 +1,3 @@ -TextMateGrammar = require './text-mate-grammar' Package = require './package' fs = require 'fs-plus' path = require 'path' @@ -152,7 +151,7 @@ class AtomPackage extends Package @grammars = [] grammarsDirPath = path.join(@path, 'grammars') for grammarPath in fs.listSync(grammarsDirPath, ['.json', '.cson']) - @grammars.push(TextMateGrammar.loadSync(grammarPath)) + @grammars.push(atom.syntax.registry.loadGrammarSync(grammarPath)) loadScopedProperties: -> @scopedProperties = [] diff --git a/src/display-buffer.coffee b/src/display-buffer.coffee index 3e7d64383..2dca4dea6 100644 --- a/src/display-buffer.coffee +++ b/src/display-buffer.coffee @@ -358,7 +358,7 @@ class DisplayBuffer extends Model # Get the grammar for this buffer. # - # Returns the current {TextMateGrammar} or the {NullGrammar}. + # Returns the current {Grammar} or the {NullGrammar}. 
getGrammar: -> @tokenizedBuffer.grammar diff --git a/src/syntax.coffee b/src/syntax.coffee index 12a5ab6e9..9dd21fb38 100644 --- a/src/syntax.coffee +++ b/src/syntax.coffee @@ -1,71 +1,64 @@ _ = require 'underscore-plus' {specificity} = require 'clear-cut' {$, $$} = require './space-pen-extensions' -{Emitter} = require 'emissary' -NullGrammar = require './null-grammar' -TextMateScopeSelector = require('first-mate').ScopeSelector +{Emitter, Subscriber} = require 'emissary' + +FirstMate = require 'first-mate' +TextMateScopeSelector = FirstMate.ScopeSelector +TextMateGrammarRegistry = FirstMate.GrammarRegistry ### Internal ### module.exports = class Syntax Emitter.includeInto(this) + Subscriber.includeInto(this) atom.deserializers.add(this) @deserialize: ({grammarOverridesByPath}) -> syntax = new Syntax() - syntax.grammarOverridesByPath = grammarOverridesByPath + syntax.registry.grammarOverridesByPath = grammarOverridesByPath syntax constructor: -> - @nullGrammar = new NullGrammar - @grammars = [@nullGrammar] - @grammarsByScopeName = {} - @injectionGrammars = [] - @grammarOverridesByPath = {} + @registry = new TextMateGrammarRegistry() + @subscribe @registry, 'grammar-added', (grammar) => + @emit 'grammar-added', grammar + @subscribe @registry, 'grammar-updated', (grammar) => + @emit 'grammar-updated', grammar + + @nullGrammar = @registry.nullGrammar @scopedPropertiesIndex = 0 @scopedProperties = [] serialize: -> - { deserializer: @constructor.name, @grammarOverridesByPath } + deserializer: @constructor.name + grammarOverridesByPath: @registry.grammarOverridesByPath addGrammar: (grammar) -> - previousGrammars = new Array(@grammars...) - @grammars.push(grammar) - @grammarsByScopeName[grammar.scopeName] = grammar - @injectionGrammars.push(grammar) if grammar.injectionSelector? - @grammarUpdated(grammar.scopeName) - @emit 'grammar-added', grammar + @registry.addGrammar(grammar) removeGrammar: (grammar) -> - _.remove(@grammars, grammar) - delete @grammarsByScopeName[grammar.scopeName] - _.remove(@injectionGrammars, grammar) - @grammarUpdated(grammar.scopeName) - - grammarUpdated: (scopeName) -> - for grammar in @grammars when grammar.scopeName isnt scopeName - @emit 'grammar-updated', grammar if grammar.grammarUpdated(scopeName) + @registry.removeGrammar(grammar) setGrammarOverrideForPath: (path, scopeName) -> - @grammarOverridesByPath[path] = scopeName + @registry.setGrammarOverrideForPath(path, scopeName) clearGrammarOverrideForPath: (path) -> - delete @grammarOverridesByPath[path] + @registry.clearGrammarOverrideForPath(path) clearGrammarOverrides: -> - @grammarOverridesByPath = {} + @registry.clearGrammarOverrides() selectGrammar: (filePath, fileContents) -> - grammar = _.max @grammars, (grammar) -> grammar.getScore(filePath, fileContents) - grammar + @registry.selectGrammar(filePath, fileContents) grammarOverrideForPath: (path) -> @grammarOverridesByPath[path] grammarForScopeName: (scopeName) -> - @grammarsByScopeName[scopeName] + @registry.grammarForScopeName(scopeName) addProperties: (args...) 
-> name = args.shift() if args.length > 2 diff --git a/src/text-mate-package.coffee b/src/text-mate-package.coffee index 036d47577..e8be61f5c 100644 --- a/src/text-mate-package.coffee +++ b/src/text-mate-package.coffee @@ -2,7 +2,6 @@ Package = require './package' path = require 'path' _ = require 'underscore-plus' fs = require 'fs-plus' -TextMateGrammar = require './text-mate-grammar' async = require 'async' ### Internal ### @@ -41,14 +40,14 @@ class TextMatePackage extends Package activate: -> @measure 'activateTime', => - atom.syntax.addGrammar(grammar) for grammar in @grammars + grammar.activate() for grammar in @grammars for { selector, properties } in @scopedProperties atom.syntax.addProperties(@path, selector, properties) activateConfig: -> # noop deactivate: -> - atom.syntax.removeGrammar(grammar) for grammar in @grammars + grammar.deactivate() for grammar in @grammars atom.syntax.removeProperties(@path) legalGrammarExtensions: ['plist', 'tmLanguage', 'tmlanguage', 'json', 'cson'] @@ -66,18 +65,17 @@ class TextMatePackage extends Package done() loadGrammarAtPath: (grammarPath, done) => - TextMateGrammar.load grammarPath, (err, grammar) => + atom.syntax.registry.readGrammar (error, grammar) => return console.log("Error loading grammar at path '#{grammarPath}':", err.stack ? err) if err @addGrammar(grammar) done() loadGrammarsSync: -> for grammarPath in fs.listSync(@getSyntaxesPath(), @legalGrammarExtensions) - @addGrammar(TextMateGrammar.loadSync(grammarPath)) + @addGrammar(atom.syntax.registry.readGrammarSync(grammarPath)) addGrammar: (grammar) -> @grammars.push(grammar) - atom.syntax.addGrammar(grammar) if @isActive() getGrammars: -> @grammars diff --git a/src/tokenized-line.coffee b/src/tokenized-line.coffee index 826fdfd5c..26d24fb0b 100644 --- a/src/tokenized-line.coffee +++ b/src/tokenized-line.coffee @@ -1,10 +1,12 @@ _ = require 'underscore-plus' +Token = require './token' ### Internal ### module.exports = class TokenizedLine constructor: ({tokens, @lineEnding, @ruleStack, @startBufferColumn, @fold, tabLength}) -> + tokens = (new Token(token) for token in tokens) @tokens = @breakOutAtomicTokens(tokens, tabLength) @startBufferColumn ?= 0 @text = _.pluck(@tokens, 'value').join('') From 2681dcc63cd51b850855d91704c73d84334c55f8 Mon Sep 17 00:00:00 2001 From: Kevin Sawicki Date: Tue, 31 Dec 2013 10:33:46 -0800 Subject: [PATCH 23/39] Remove TextMateGrammar class now in first-mate --- spec/text-mate-grammar-spec.coffee | 650 ----------------------------- src/text-mate-grammar.coffee | 522 ----------------------- 2 files changed, 1172 deletions(-) delete mode 100644 spec/text-mate-grammar-spec.coffee delete mode 100644 src/text-mate-grammar.coffee diff --git a/spec/text-mate-grammar-spec.coffee b/spec/text-mate-grammar-spec.coffee deleted file mode 100644 index 011450815..000000000 --- a/spec/text-mate-grammar-spec.coffee +++ /dev/null @@ -1,650 +0,0 @@ -TextMateGrammar = require '../src/text-mate-grammar' -TextMatePackage = require '../src/text-mate-package' -{_, fs} = require 'atom' - -describe "TextMateGrammar", -> - grammar = null - - beforeEach -> - atom.packages.activatePackage('language-text', sync: true) - atom.packages.activatePackage('language-javascript', sync: true) - atom.packages.activatePackage('language-coffee-script', sync: true) - atom.packages.activatePackage('language-ruby', sync: true) - atom.packages.activatePackage('language-html', sync: true) - atom.packages.activatePackage('language-php', sync: true) - atom.packages.activatePackage('language-python', sync: 
true) - grammar = atom.syntax.selectGrammar("hello.coffee") - - describe "@loadSync(path)", -> - it "loads grammars from plists", -> - grammar = TextMateGrammar.loadSync(require.resolve('./fixtures/sample.plist')) - expect(grammar.scopeName).toBe "text.plain" - {tokens} = grammar.tokenizeLine("this text is so plain. i love it.") - expect(tokens[0]).toEqual value: "this text is so plain. i love it.", scopes: ["text.plain", "meta.paragraph.text"] - - it "loads grammars from cson files", -> - grammar = TextMateGrammar.loadSync(require.resolve('./fixtures/packages/package-with-grammars/grammars/alot.cson')) - expect(grammar.scopeName).toBe "source.alot" - {tokens} = grammar.tokenizeLine("this is alot of code") - expect(tokens[1]).toEqual value: "alot", scopes: ["source.alot", "keyword.alot"] - - describe ".tokenizeLine(line, ruleStack)", -> - describe "when the entire line matches a single pattern with no capture groups", -> - it "returns a single token with the correct scope", -> - {tokens} = grammar.tokenizeLine("return") - - expect(tokens.length).toBe 1 - [token] = tokens - expect(token.scopes).toEqual ['source.coffee', 'keyword.control.coffee'] - - describe "when the entire line matches a single pattern with capture groups", -> - it "returns a single token with the correct scope", -> - {tokens} = grammar.tokenizeLine("new foo.bar.Baz") - - expect(tokens.length).toBe 3 - [newOperator, whitespace, className] = tokens - expect(newOperator).toEqual value: 'new', scopes: ['source.coffee', 'meta.class.instance.constructor', 'keyword.operator.new.coffee'] - expect(whitespace).toEqual value: ' ', scopes: ['source.coffee', 'meta.class.instance.constructor'] - expect(className).toEqual value: 'foo.bar.Baz', scopes: ['source.coffee', 'meta.class.instance.constructor', 'entity.name.type.instance.coffee'] - - describe "when the line doesn't match any patterns", -> - it "returns the entire line as a single simple token with the grammar's scope", -> - textGrammar = atom.syntax.selectGrammar('foo.txt') - {tokens} = textGrammar.tokenizeLine("abc def") - expect(tokens.length).toBe 1 - - describe "when the line matches multiple patterns", -> - it "returns multiple tokens, filling in regions that don't match patterns with tokens in the grammar's global scope", -> - {tokens} = grammar.tokenizeLine(" return new foo.bar.Baz ") - - expect(tokens.length).toBe 7 - - expect(tokens[0]).toEqual value: ' ', scopes: ['source.coffee'] - expect(tokens[1]).toEqual value: 'return', scopes: ['source.coffee', 'keyword.control.coffee'] - expect(tokens[2]).toEqual value: ' ', scopes: ['source.coffee'] - expect(tokens[3]).toEqual value: 'new', scopes: ['source.coffee', 'meta.class.instance.constructor', 'keyword.operator.new.coffee'] - expect(tokens[4]).toEqual value: ' ', scopes: ['source.coffee', 'meta.class.instance.constructor'] - expect(tokens[5]).toEqual value: 'foo.bar.Baz', scopes: ['source.coffee', 'meta.class.instance.constructor', 'entity.name.type.instance.coffee'] - expect(tokens[6]).toEqual value: ' ', scopes: ['source.coffee'] - - describe "when the line matches a pattern with optional capture groups", -> - it "only returns tokens for capture groups that matched", -> - {tokens} = grammar.tokenizeLine("class Quicksort") - expect(tokens.length).toBe 3 - expect(tokens[0].value).toBe "class" - expect(tokens[1].value).toBe " " - expect(tokens[2].value).toBe "Quicksort" - - describe "when the line matches a rule with nested capture groups and lookahead capture groups beyond the scope of the overall match", -> - it 
"creates distinct tokens for nested captures and does not return tokens beyond the scope of the overall capture", -> - {tokens} = grammar.tokenizeLine(" destroy: ->") - expect(tokens.length).toBe 6 - expect(tokens[0]).toEqual(value: ' ', scopes: ["source.coffee"]) - expect(tokens[1]).toEqual(value: 'destro', scopes: ["source.coffee", "meta.function.coffee", "entity.name.function.coffee"]) - # this dangling 'y' with a duplicated scope looks wrong, but textmate yields the same behavior. probably a quirk in the coffee grammar. - expect(tokens[2]).toEqual(value: 'y', scopes: ["source.coffee", "meta.function.coffee", "entity.name.function.coffee", "entity.name.function.coffee"]) - expect(tokens[3]).toEqual(value: ':', scopes: ["source.coffee", "keyword.operator.coffee"]) - expect(tokens[4]).toEqual(value: ' ', scopes: ["source.coffee"]) - expect(tokens[5]).toEqual(value: '->', scopes: ["source.coffee", "storage.type.function.coffee"]) - - describe "when the line matches a pattern that includes a rule", -> - it "returns tokens based on the included rule", -> - {tokens} = grammar.tokenizeLine("7777777") - expect(tokens.length).toBe 1 - expect(tokens[0]).toEqual value: '7777777', scopes: ['source.coffee', 'constant.numeric.coffee'] - - describe "when the line is an interpolated string", -> - it "returns the correct tokens", -> - {tokens} = grammar.tokenizeLine('"the value is #{@x} my friend"') - - expect(tokens[0]).toEqual value: '"', scopes: ["source.coffee","string.quoted.double.coffee","punctuation.definition.string.begin.coffee"] - expect(tokens[1]).toEqual value: "the value is ", scopes: ["source.coffee","string.quoted.double.coffee"] - expect(tokens[2]).toEqual value: '#{', scopes: ["source.coffee","string.quoted.double.coffee","source.coffee.embedded.source","punctuation.section.embedded.coffee"] - expect(tokens[3]).toEqual value: "@x", scopes: ["source.coffee","string.quoted.double.coffee","source.coffee.embedded.source","variable.other.readwrite.instance.coffee"] - expect(tokens[4]).toEqual value: "}", scopes: ["source.coffee","string.quoted.double.coffee","source.coffee.embedded.source","punctuation.section.embedded.coffee"] - expect(tokens[5]).toEqual value: " my friend", scopes: ["source.coffee","string.quoted.double.coffee"] - expect(tokens[6]).toEqual value: '"', scopes: ["source.coffee","string.quoted.double.coffee","punctuation.definition.string.end.coffee"] - - describe "when the line has an interpolated string inside an interpolated string", -> - it "returns the correct tokens", -> - {tokens} = grammar.tokenizeLine('"#{"#{@x}"}"') - - expect(tokens[0]).toEqual value: '"', scopes: ["source.coffee","string.quoted.double.coffee","punctuation.definition.string.begin.coffee"] - expect(tokens[1]).toEqual value: '#{', scopes: ["source.coffee","string.quoted.double.coffee","source.coffee.embedded.source","punctuation.section.embedded.coffee"] - expect(tokens[2]).toEqual value: '"', scopes: ["source.coffee","string.quoted.double.coffee","source.coffee.embedded.source","string.quoted.double.coffee","punctuation.definition.string.begin.coffee"] - expect(tokens[3]).toEqual value: '#{', scopes: ["source.coffee","string.quoted.double.coffee","source.coffee.embedded.source","string.quoted.double.coffee","source.coffee.embedded.source","punctuation.section.embedded.coffee"] - expect(tokens[4]).toEqual value: '@x', scopes: 
["source.coffee","string.quoted.double.coffee","source.coffee.embedded.source","string.quoted.double.coffee","source.coffee.embedded.source","variable.other.readwrite.instance.coffee"] - expect(tokens[5]).toEqual value: '}', scopes: ["source.coffee","string.quoted.double.coffee","source.coffee.embedded.source","string.quoted.double.coffee","source.coffee.embedded.source","punctuation.section.embedded.coffee"] - expect(tokens[6]).toEqual value: '"', scopes: ["source.coffee","string.quoted.double.coffee","source.coffee.embedded.source","string.quoted.double.coffee","punctuation.definition.string.end.coffee"] - expect(tokens[7]).toEqual value: '}', scopes: ["source.coffee","string.quoted.double.coffee","source.coffee.embedded.source","punctuation.section.embedded.coffee"] - expect(tokens[8]).toEqual value: '"', scopes: ["source.coffee","string.quoted.double.coffee","punctuation.definition.string.end.coffee"] - - describe "when the line is empty", -> - it "returns a single token which has the global scope", -> - {tokens} = grammar.tokenizeLine('') - expect(tokens[0]).toEqual value: '', scopes: ["source.coffee"] - - describe "when the line matches no patterns", -> - it "does not infinitely loop", -> - grammar = atom.syntax.selectGrammar("sample.txt") - {tokens} = grammar.tokenizeLine('hoo') - expect(tokens.length).toBe 1 - expect(tokens[0]).toEqual value: 'hoo', scopes: ["text.plain", "meta.paragraph.text"] - - describe "when the line matches a pattern with a 'contentName'", -> - it "creates tokens using the content of contentName as the token name", -> - grammar = atom.syntax.selectGrammar("sample.txt") - {tokens} = grammar.tokenizeLine('ok, cool') - expect(tokens[0]).toEqual value: 'ok, cool', scopes: ["text.plain", "meta.paragraph.text"] - - describe "when the line matches a pattern with no `name` or `contentName`", -> - it "creates tokens without adding a new scope", -> - grammar = atom.syntax.selectGrammar('foo.rb') - {tokens} = grammar.tokenizeLine('%w|oh \\look|') - expect(tokens.length).toBe 5 - expect(tokens[0]).toEqual value: '%w|', scopes: ["source.ruby", "string.quoted.other.literal.lower.ruby", "punctuation.definition.string.begin.ruby"] - expect(tokens[1]).toEqual value: 'oh ', scopes: ["source.ruby", "string.quoted.other.literal.lower.ruby"] - expect(tokens[2]).toEqual value: '\\l', scopes: ["source.ruby", "string.quoted.other.literal.lower.ruby"] - expect(tokens[3]).toEqual value: 'ook', scopes: ["source.ruby", "string.quoted.other.literal.lower.ruby"] - - describe "when the line matches a begin/end pattern", -> - it "returns tokens based on the beginCaptures, endCaptures and the child scope", -> - {tokens} = grammar.tokenizeLine("'''single-quoted heredoc'''") - - expect(tokens.length).toBe 3 - - expect(tokens[0]).toEqual value: "'''", scopes: ['source.coffee', 'string.quoted.heredoc.coffee', 'punctuation.definition.string.begin.coffee'] - expect(tokens[1]).toEqual value: "single-quoted heredoc", scopes: ['source.coffee', 'string.quoted.heredoc.coffee'] - expect(tokens[2]).toEqual value: "'''", scopes: ['source.coffee', 'string.quoted.heredoc.coffee', 'punctuation.definition.string.end.coffee'] - - describe "when the pattern spans multiple lines", -> - it "uses the ruleStack returned by the first line to parse the second line", -> - {tokens: firstTokens, ruleStack} = grammar.tokenizeLine("'''single-quoted") - {tokens: secondTokens, ruleStack} = grammar.tokenizeLine("heredoc'''", ruleStack) - - expect(firstTokens.length).toBe 2 - expect(secondTokens.length).toBe 2 - - 
expect(firstTokens[0]).toEqual value: "'''", scopes: ['source.coffee', 'string.quoted.heredoc.coffee', 'punctuation.definition.string.begin.coffee'] - expect(firstTokens[1]).toEqual value: "single-quoted", scopes: ['source.coffee', 'string.quoted.heredoc.coffee'] - - expect(secondTokens[0]).toEqual value: "heredoc", scopes: ['source.coffee', 'string.quoted.heredoc.coffee'] - expect(secondTokens[1]).toEqual value: "'''", scopes: ['source.coffee', 'string.quoted.heredoc.coffee', 'punctuation.definition.string.end.coffee'] - - describe "when the pattern contains sub-patterns", -> - it "returns tokens within the begin/end scope based on the sub-patterns", -> - {tokens} = grammar.tokenizeLine('"""heredoc with character escape \\t"""') - - expect(tokens.length).toBe 4 - - expect(tokens[0]).toEqual value: '"""', scopes: ['source.coffee', 'string.quoted.double.heredoc.coffee', 'punctuation.definition.string.begin.coffee'] - expect(tokens[1]).toEqual value: "heredoc with character escape ", scopes: ['source.coffee', 'string.quoted.double.heredoc.coffee'] - expect(tokens[2]).toEqual value: "\\t", scopes: ['source.coffee', 'string.quoted.double.heredoc.coffee', 'constant.character.escape.coffee'] - expect(tokens[3]).toEqual value: '"""', scopes: ['source.coffee', 'string.quoted.double.heredoc.coffee', 'punctuation.definition.string.end.coffee'] - - describe "when the end pattern contains a back reference", -> - it "constructs the end rule based on its back-references to captures in the begin rule", -> - grammar = atom.syntax.selectGrammar('foo.rb') - {tokens} = grammar.tokenizeLine('%w|oh|,') - expect(tokens.length).toBe 4 - expect(tokens[0]).toEqual value: '%w|', scopes: ["source.ruby", "string.quoted.other.literal.lower.ruby", "punctuation.definition.string.begin.ruby"] - expect(tokens[1]).toEqual value: 'oh', scopes: ["source.ruby", "string.quoted.other.literal.lower.ruby"] - expect(tokens[2]).toEqual value: '|', scopes: ["source.ruby", "string.quoted.other.literal.lower.ruby", "punctuation.definition.string.end.ruby"] - expect(tokens[3]).toEqual value: ',', scopes: ["source.ruby", "punctuation.separator.object.ruby"] - - it "allows the rule containing that end pattern to be pushed to the stack multiple times", -> - grammar = atom.syntax.selectGrammar('foo.rb') - {tokens} = grammar.tokenizeLine('%Q+matz had some #{%Q-crazy ideas-} for ruby syntax+ # damn.') - expect(tokens[0]).toEqual value: '%Q+', scopes: ["source.ruby","string.quoted.other.literal.upper.ruby","punctuation.definition.string.begin.ruby"] - expect(tokens[1]).toEqual value: 'matz had some ', scopes: ["source.ruby","string.quoted.other.literal.upper.ruby"] - expect(tokens[2]).toEqual value: '#{', scopes: ["source.ruby","string.quoted.other.literal.upper.ruby","meta.embedded.line.ruby","punctuation.section.embedded.begin.ruby"] - expect(tokens[3]).toEqual value: '%Q-', scopes: ["source.ruby","string.quoted.other.literal.upper.ruby","meta.embedded.line.ruby","string.quoted.other.literal.upper.ruby","punctuation.definition.string.begin.ruby"] - expect(tokens[4]).toEqual value: 'crazy ideas', scopes: ["source.ruby","string.quoted.other.literal.upper.ruby","meta.embedded.line.ruby","string.quoted.other.literal.upper.ruby"] - expect(tokens[5]).toEqual value: '-', scopes: ["source.ruby","string.quoted.other.literal.upper.ruby","meta.embedded.line.ruby","string.quoted.other.literal.upper.ruby","punctuation.definition.string.end.ruby"] - expect(tokens[6]).toEqual value: '}', scopes: 
["source.ruby","string.quoted.other.literal.upper.ruby","meta.embedded.line.ruby","punctuation.section.embedded.end.ruby", "source.ruby"] - expect(tokens[7]).toEqual value: ' for ruby syntax', scopes: ["source.ruby","string.quoted.other.literal.upper.ruby"] - expect(tokens[8]).toEqual value: '+', scopes: ["source.ruby","string.quoted.other.literal.upper.ruby","punctuation.definition.string.end.ruby"] - expect(tokens[9]).toEqual value: ' ', scopes: ["source.ruby"] - expect(tokens[10]).toEqual value: '#', scopes: ["source.ruby","comment.line.number-sign.ruby","punctuation.definition.comment.ruby"] - expect(tokens[11]).toEqual value: ' damn.', scopes: ["source.ruby","comment.line.number-sign.ruby"] - - describe "when the pattern includes rules from another grammar", -> - describe "when a grammar matching the desired scope is available", -> - it "parses tokens inside the begin/end patterns based on the included grammar's rules", -> - atom.packages.activatePackage('language-html', sync: true) - atom.packages.activatePackage('language-ruby-on-rails', sync: true) - - grammar = atom.syntax.grammarForScopeName('text.html.ruby') - {tokens} = grammar.tokenizeLine("
<div class='name'><%= User.find(2).full_name %></div>
") - - expect(tokens[0]).toEqual value: '<', scopes: ["text.html.ruby","meta.tag.block.any.html","punctuation.definition.tag.begin.html"] - expect(tokens[1]).toEqual value: 'div', scopes: ["text.html.ruby","meta.tag.block.any.html","entity.name.tag.block.any.html"] - expect(tokens[2]).toEqual value: ' ', scopes: ["text.html.ruby","meta.tag.block.any.html"] - expect(tokens[3]).toEqual value: 'class', scopes: ["text.html.ruby","meta.tag.block.any.html", "entity.other.attribute-name.html"] - expect(tokens[4]).toEqual value: '=', scopes: ["text.html.ruby","meta.tag.block.any.html"] - expect(tokens[5]).toEqual value: '\'', scopes: ["text.html.ruby","meta.tag.block.any.html","string.quoted.single.html","punctuation.definition.string.begin.html"] - expect(tokens[6]).toEqual value: 'name', scopes: ["text.html.ruby","meta.tag.block.any.html","string.quoted.single.html"] - expect(tokens[7]).toEqual value: '\'', scopes: ["text.html.ruby","meta.tag.block.any.html","string.quoted.single.html","punctuation.definition.string.end.html"] - expect(tokens[8]).toEqual value: '>', scopes: ["text.html.ruby","meta.tag.block.any.html","punctuation.definition.tag.end.html"] - expect(tokens[9]).toEqual value: '<%=', scopes: ["text.html.ruby","source.ruby.rails.embedded.html","punctuation.section.embedded.ruby"] - expect(tokens[10]).toEqual value: ' ', scopes: ["text.html.ruby","source.ruby.rails.embedded.html"] - expect(tokens[11]).toEqual value: 'User', scopes: ["text.html.ruby","source.ruby.rails.embedded.html","support.class.ruby"] - expect(tokens[12]).toEqual value: '.', scopes: ["text.html.ruby","source.ruby.rails.embedded.html","punctuation.separator.method.ruby"] - expect(tokens[13]).toEqual value: 'find', scopes: ["text.html.ruby","source.ruby.rails.embedded.html"] - expect(tokens[14]).toEqual value: '(', scopes: ["text.html.ruby","source.ruby.rails.embedded.html","punctuation.section.function.ruby"] - expect(tokens[15]).toEqual value: '2', scopes: ["text.html.ruby","source.ruby.rails.embedded.html","constant.numeric.ruby"] - expect(tokens[16]).toEqual value: ')', scopes: ["text.html.ruby","source.ruby.rails.embedded.html","punctuation.section.function.ruby"] - expect(tokens[17]).toEqual value: '.', scopes: ["text.html.ruby","source.ruby.rails.embedded.html","punctuation.separator.method.ruby"] - expect(tokens[18]).toEqual value: 'full_name ', scopes: ["text.html.ruby","source.ruby.rails.embedded.html"] - expect(tokens[19]).toEqual value: '%>', scopes: ["text.html.ruby","source.ruby.rails.embedded.html","punctuation.section.embedded.ruby"] - expect(tokens[20]).toEqual value: '', scopes: ["text.html.ruby","meta.tag.block.any.html","punctuation.definition.tag.end.html"] - - it "updates the grammar if the included grammar is updated later", -> - atom.packages.activatePackage('language-html', sync: true) - atom.packages.activatePackage('language-ruby-on-rails', sync: true) - - grammar = atom.syntax.selectGrammar('foo.html.erb') - grammarUpdatedHandler = jasmine.createSpy("grammarUpdatedHandler") - grammar.on 'grammar-updated', grammarUpdatedHandler - - {tokens} = grammar.tokenizeLine("
<div class='name'><% <<-SQL select * from users;") -          expect(tokens[12].value).toBe " select * from users;" -  -          atom.packages.activatePackage('language-sql', sync: true) -          expect(grammarUpdatedHandler).toHaveBeenCalled() -          {tokens} = grammar.tokenizeLine("
<div class='name'><% <<-SQL select * from users;") -          expect(tokens[12].value).toBe " " -          expect(tokens[13].value).toBe "select" - -      describe "when a grammar matching the desired scope is unavailable", -> -        it "updates the grammar if a matching grammar is added later", -> -          atom.packages.deactivatePackage('language-html') -          atom.packages.activatePackage('language-ruby-on-rails', sync: true) - -          grammar = atom.syntax.grammarForScopeName('text.html.ruby') -          {tokens} = grammar.tokenizeLine("
<div class='name'><%= User.find(2).full_name %></div>
") - expect(tokens[0]).toEqual value: "
", scopes: ["text.html.ruby"] - expect(tokens[1]).toEqual value: '<%=', scopes: ["text.html.ruby","source.ruby.rails.embedded.html","punctuation.section.embedded.ruby"] - expect(tokens[2]).toEqual value: ' ', scopes: ["text.html.ruby","source.ruby.rails.embedded.html"] - expect(tokens[3]).toEqual value: 'User', scopes: ["text.html.ruby","source.ruby.rails.embedded.html","support.class.ruby"] - - atom.packages.activatePackage('language-html', sync: true) - {tokens} = grammar.tokenizeLine("
<div class='name'><%= User.find(2).full_name %></div>
") - expect(tokens[0]).toEqual value: '<', scopes: ["text.html.ruby","meta.tag.block.any.html","punctuation.definition.tag.begin.html"] - expect(tokens[1]).toEqual value: 'div', scopes: ["text.html.ruby","meta.tag.block.any.html","entity.name.tag.block.any.html"] - expect(tokens[2]).toEqual value: ' ', scopes: ["text.html.ruby","meta.tag.block.any.html"] - expect(tokens[3]).toEqual value: 'class', scopes: ["text.html.ruby","meta.tag.block.any.html", "entity.other.attribute-name.html"] - expect(tokens[4]).toEqual value: '=', scopes: ["text.html.ruby","meta.tag.block.any.html"] - expect(tokens[5]).toEqual value: '\'', scopes: ["text.html.ruby","meta.tag.block.any.html","string.quoted.single.html","punctuation.definition.string.begin.html"] - expect(tokens[6]).toEqual value: 'name', scopes: ["text.html.ruby","meta.tag.block.any.html","string.quoted.single.html"] - expect(tokens[7]).toEqual value: '\'', scopes: ["text.html.ruby","meta.tag.block.any.html","string.quoted.single.html","punctuation.definition.string.end.html"] - expect(tokens[8]).toEqual value: '>', scopes: ["text.html.ruby","meta.tag.block.any.html","punctuation.definition.tag.end.html"] - expect(tokens[9]).toEqual value: '<%=', scopes: ["text.html.ruby","source.ruby.rails.embedded.html","punctuation.section.embedded.ruby"] - expect(tokens[10]).toEqual value: ' ', scopes: ["text.html.ruby","source.ruby.rails.embedded.html"] - - it "can parse a grammar with newline characters in its regular expressions (regression)", -> - grammar = new TextMateGrammar - name: "test" - scopeName: "source.imaginaryLanguage" - repository: {} - patterns: [ - { - name: "comment-body" - begin: "//" - end: "\\n" - beginCaptures: - "0": { name: "comment-start" } - } - ] - - {tokens, ruleStack} = grammar.tokenizeLine("// a singleLineComment") - expect(ruleStack.length).toBe 1 - expect(ruleStack[0].scopeName).toBe "source.imaginaryLanguage" - - expect(tokens.length).toBe 2 - expect(tokens[0].value).toBe "//" - expect(tokens[1].value).toBe " a singleLineComment" - - it "does not loop infinitely (regression)", -> - grammar = atom.syntax.selectGrammar("hello.js") - {tokens, ruleStack} = grammar.tokenizeLine("// line comment") - {tokens, ruleStack} = grammar.tokenizeLine(" // second line comment with a single leading space", ruleStack) - - describe "when inside a C block", -> - beforeEach -> - atom.packages.activatePackage('language-c', sync: true) - - it "correctly parses a method. (regression)", -> - grammar = atom.syntax.selectGrammar("hello.c") - {tokens, ruleStack} = grammar.tokenizeLine("if(1){m()}") - expect(tokens[5]).toEqual value: "m", scopes: ["source.c", "meta.block.c", "meta.function-call.c", "support.function.any-method.c"] - - it "correctly parses nested blocks. 
(regression)", -> - grammar = atom.syntax.selectGrammar("hello.c") - {tokens, ruleStack} = grammar.tokenizeLine("if(1){if(1){m()}}") - expect(tokens[5]).toEqual value: "if", scopes: ["source.c", "meta.block.c", "keyword.control.c"] - expect(tokens[10]).toEqual value: "m", scopes: ["source.c", "meta.block.c", "meta.block.c", "meta.function-call.c", "support.function.any-method.c"] - - describe "when the grammar can infinitely loop over a line", -> - it "aborts tokenization", -> - spyOn(console, 'error') - atom.packages.activatePackage("package-with-infinite-loop-grammar") - grammar = atom.syntax.selectGrammar("something.package-with-infinite-loop-grammar") - {tokens} = grammar.tokenizeLine("abc") - expect(tokens[0].value).toBe "a" - expect(tokens[1].value).toBe "bc" - expect(console.error).toHaveBeenCalled() - - describe "when a grammar has a pattern that has back references in the match value", -> - it "does not special handle the back references and instead allows oniguruma to resolve them", -> - atom.packages.activatePackage('language-sass', sync: true) - grammar = atom.syntax.selectGrammar("style.scss") - {tokens} = grammar.tokenizeLine("@mixin x() { -moz-selector: whatever; }") - expect(tokens[9]).toEqual value: "-moz-selector", scopes: ["source.css.scss", "meta.property-list.scss", "meta.property-name.scss"] - - describe "when a line has more tokens than `maxTokensPerLine`", -> - it "creates a final token with the remaining text and resets the ruleStack to match the begining of the line", -> - grammar = atom.syntax.selectGrammar("hello.js") - spyOn(grammar, 'getMaxTokensPerLine').andCallFake -> 5 - originalRuleStack = [grammar.initialRule, grammar.initialRule, grammar.initialRule] - {tokens, ruleStack} = grammar.tokenizeLine("one(two(three(four(five(_param_)))))", originalRuleStack) - expect(tokens.length).toBe 5 - expect(tokens[4].value).toBe "three(four(five(_param_)))))" - expect(ruleStack).toEqual originalRuleStack - - describe "when a grammar has a capture with patterns", -> - it "matches the patterns and includes the scope specified as the pattern's match name", -> - grammar = atom.syntax.selectGrammar("hello.php") - {tokens} = grammar.tokenizeLine("") - - expect(tokens[2].value).toBe "public" - expect(tokens[2].scopes).toEqual ["text.html.php", "meta.embedded.line.php", "source.php", "meta.function.php", "storage.modifier.php"] - - expect(tokens[3].value).toBe " " - expect(tokens[3].scopes).toEqual ["text.html.php", "meta.embedded.line.php", "source.php", "meta.function.php"] - - expect(tokens[4].value).toBe "final" - expect(tokens[4].scopes).toEqual ["text.html.php", "meta.embedded.line.php", "source.php", "meta.function.php", "storage.modifier.php"] - - expect(tokens[5].value).toBe " " - expect(tokens[5].scopes).toEqual ["text.html.php", "meta.embedded.line.php", "source.php", "meta.function.php"] - - expect(tokens[6].value).toBe "function" - expect(tokens[6].scopes).toEqual ["text.html.php", "meta.embedded.line.php", "source.php", "meta.function.php", "storage.type.function.php"] - - it "ignores child captures of a capture with patterns", -> - grammar = new TextMateGrammar - name: "test" - scopeName: "source" - repository: {} - patterns: [ - { - name: "text" - match: "(a(b))" - captures: - "1": - patterns: [ - { - match: "ab" - name: "a" - } - ] - "2": - name: "b" - } - ] - {tokens} = grammar.tokenizeLine("ab") - - expect(tokens[0].value).toBe "ab" - expect(tokens[0].scopes).toEqual ["source", "text", "a"] - - describe "when the grammar has injections", -> - it "correctly 
includes the injected patterns when tokenizing", -> - grammar = atom.syntax.selectGrammar("hello.php") - {tokens} = grammar.tokenizeLine("
") - - expect(tokens[3].value).toBe "" - expect(tokens[15].scopes).toEqual ["text.html.php", "meta.embedded.line.php", "punctuation.section.embedded.end.php"] - - expect(tokens[16].value).toBe " - it "replaces the group number with the matched captured text", -> - atom.packages.activatePackage('language-hyperlink', sync: true) - grammar = atom.syntax.grammarForScopeName("text.hyperlink") - {tokens} = grammar.tokenizeLine("https://github.com") - expect(tokens[0].scopes).toEqual ["text.hyperlink", "markup.underline.link.https.hyperlink"] - - describe "when the position doesn't advance and rule includes $self and matches itself", -> - it "tokenizes the entire line using the rule", -> - grammar = new TextMateGrammar - name: "test" - scopeName: "source" - repository: {} - patterns: [ - { - name: "text" - begin: "(?=forever)" - end: "whatevs" - patterns: [ - include: "$self" - ] - } - ] - - {tokens} = grammar.tokenizeLine("forever and ever") - - expect(tokens.length).toBe 1 - expect(tokens[0].value).toBe "forever and ever" - expect(tokens[0].scopes).toEqual ["source", "text"] - - describe "${capture:/command} style pattern names", -> - lines = null - - beforeEach -> - atom.packages.activatePackage('language-todo', sync: true) - grammar = atom.syntax.selectGrammar('main.rb') - lines = grammar.tokenizeLines "# TODO be nicer" - - it "replaces the number with the capture group and translates the text", -> - tokens = lines[0] - expect(tokens[2].value).toEqual "TODO" - expect(tokens[2].scopes).toEqual ["source.ruby", "comment.line.number-sign.ruby", "storage.type.class.todo"] - - describe "language-specific integration tests", -> - lines = null - - describe "Git commit messages", -> - beforeEach -> - atom.packages.activatePackage('language-git', sync: true) - grammar = atom.syntax.selectGrammar('COMMIT_EDITMSG') - lines = grammar.tokenizeLines """ - longggggggggggggggggggggggggggggggggggggggggggggggg - # Please enter the commit message for your changes. 
Lines starting - """ - - it "correctly parses a long line", -> - tokens = lines[0] - expect(tokens[0].value).toBe "longggggggggggggggggggggggggggggggggggggggggggggggg" - expect(tokens[0].scopes).toEqual ["text.git-commit", "meta.scope.message.git-commit", "invalid.deprecated.line-too-long.git-commit"] - - it "correctly parses the number sign of the first comment line", -> - tokens = lines[1] - expect(tokens[0].value).toBe "#" - expect(tokens[0].scopes).toEqual ["text.git-commit", "meta.scope.metadata.git-commit", "comment.line.number-sign.git-commit", "punctuation.definition.comment.git-commit"] - - describe "C++", -> - beforeEach -> - atom.packages.activatePackage('language-c', sync: true) - grammar = atom.syntax.selectGrammar('includes.cc') - lines = grammar.tokenizeLines """ - #include "a.h" - #include "b.h" - """ - - it "correctly parses the first include line", -> - tokens = lines[0] - expect(tokens[0].value).toBe "#" - expect(tokens[0].scopes).toEqual ["source.c++", "meta.preprocessor.c.include"] - expect(tokens[1].value).toBe 'include' - expect(tokens[1].scopes).toEqual ["source.c++", "meta.preprocessor.c.include", "keyword.control.import.include.c"] - - it "correctly parses the second include line", -> - tokens = lines[1] - expect(tokens[0].value).toBe "#" - expect(tokens[0].scopes).toEqual ["source.c++", "meta.preprocessor.c.include"] - expect(tokens[1].value).toBe 'include' - expect(tokens[1].scopes).toEqual ["source.c++", "meta.preprocessor.c.include", "keyword.control.import.include.c"] - - describe "Ruby", -> - beforeEach -> - grammar = atom.syntax.selectGrammar('hello.rb') - lines = grammar.tokenizeLines """ - a = { - "b" => "c", - } - """ - - it "doesn't loop infinitely (regression)", -> - expect(_.pluck(lines[0], 'value').join('')).toBe 'a = {' - expect(_.pluck(lines[1], 'value').join('')).toBe ' "b" => "c",' - expect(_.pluck(lines[2], 'value').join('')).toBe '}' - expect(_.pluck(lines[3], 'value').join('')).toBe '' - - describe "Objective-C", -> - beforeEach -> - atom.packages.activatePackage('language-c', sync: true) - atom.packages.activatePackage('language-objective-c', sync: true) - grammar = atom.syntax.selectGrammar('function.mm') - lines = grammar.tokenizeLines """ - void test() { - NSString *a = @"a\\nb"; - } - """ - - it "correctly parses variable type when it is a built-in Cocoa class", -> - tokens = lines[1] - expect(tokens[0].value).toBe "NSString" - expect(tokens[0].scopes).toEqual ["source.objc++", "meta.function.c", "meta.block.c", "support.class.cocoa"] - - it "correctly parses the semicolon at the end of the line", -> - tokens = lines[1] - lastToken = _.last(tokens) - expect(lastToken.value).toBe ";" - expect(lastToken.scopes).toEqual ["source.objc++", "meta.function.c", "meta.block.c"] - - it "correctly parses the string characters before the escaped character", -> - tokens = lines[1] - expect(tokens[2].value).toBe '@"' - expect(tokens[2].scopes).toEqual ["source.objc++", "meta.function.c", "meta.block.c", "string.quoted.double.objc", "punctuation.definition.string.begin.objc"] - - describe "Java", -> - beforeEach -> - atom.packages.activatePackage('language-java', sync: true) - grammar = atom.syntax.selectGrammar('Function.java') - - it "correctly parses single line comments", -> - lines = grammar.tokenizeLines """ - public void test() { - //comment - } - """ - - tokens = lines[1] - expect(tokens[0].scopes).toEqual ["source.java", "comment.line.double-slash.java", "punctuation.definition.comment.java"] - expect(tokens[0].value).toEqual '//' - 
expect(tokens[1].scopes).toEqual ["source.java", "comment.line.double-slash.java"] - expect(tokens[1].value).toEqual 'comment' - - it "correctly parses nested method calls", -> - tokens = grammar.tokenizeLines('a(b(new Object[0]));')[0] - lastToken = _.last(tokens) - expect(lastToken.scopes).toEqual ['source.java', 'punctuation.terminator.java'] - expect(lastToken.value).toEqual ';' - - describe "HTML (Ruby - ERB)", -> - it "correctly parses strings inside tags", -> - grammar = atom.syntax.selectGrammar('page.erb') - lines = grammar.tokenizeLines '<% page_title "My Page" %>' - tokens = lines[0] - - expect(tokens[2].value).toEqual '"' - expect(tokens[2].scopes).toEqual ["text.html.erb", "meta.embedded.line.erb", "string.quoted.double.ruby", "punctuation.definition.string.begin.ruby"] - expect(tokens[3].value).toEqual 'My Page' - expect(tokens[3].scopes).toEqual ["text.html.erb", "meta.embedded.line.erb", "string.quoted.double.ruby"] - expect(tokens[4].value).toEqual '"' - expect(tokens[4].scopes).toEqual ["text.html.erb", "meta.embedded.line.erb", "string.quoted.double.ruby", "punctuation.definition.string.end.ruby"] - - it "does not loop infinitely on <%>", -> - atom.packages.activatePackage('language-html', sync: true) - atom.packages.activatePackage('language-ruby-on-rails', sync: true) - - grammar = atom.syntax.selectGrammar('foo.html.erb') - [tokens] = grammar.tokenizeLines '<%>' - expect(tokens.length).toBe 1 - expect(tokens[0].value).toEqual '<%>' - expect(tokens[0].scopes).toEqual ["text.html.erb"] - - describe "Unicode support", -> - describe "Surrogate pair characters", -> - beforeEach -> - grammar = atom.syntax.selectGrammar('main.js') - lines = grammar.tokenizeLines "'\uD835\uDF97'" - - it "correctly parses JavaScript strings containing surrogate pair characters", -> - tokens = lines[0] - expect(tokens.length).toBe 3 - expect(tokens[0].value).toBe "'" - expect(tokens[1].value).toBe "\uD835\uDF97" - expect(tokens[2].value).toBe "'" - - describe "when the line contains unicode characters", -> - it "correctly parses tokens starting after them", -> - atom.packages.activatePackage('language-json', sync: true) - grammar = atom.syntax.selectGrammar('package.json') - {tokens} = grammar.tokenizeLine '{"\u2026": 1}' - - expect(tokens.length).toBe 8 - expect(tokens[6].value).toBe '1' - expect(tokens[6].scopes).toEqual ["source.json", "meta.structure.dictionary.json", "meta.structure.dictionary.value.json", "constant.numeric.json"] - - describe "python", -> - it "parses import blocks correctly", -> - grammar = atom.syntax.selectGrammar("file.py") - lines = grammar.tokenizeLines "import a\nimport b" - - line1 = lines[0] - expect(line1.length).toBe 3 - expect(line1[0].value).toEqual "import" - expect(line1[0].scopes).toEqual ["source.python", "keyword.control.import.python"] - expect(line1[1].value).toEqual " " - expect(line1[1].scopes).toEqual ["source.python"] - expect(line1[2].value).toEqual "a" - expect(line1[2].scopes).toEqual ["source.python"] - - line2 = lines[1] - expect(line2.length).toBe 3 - expect(line2[0].value).toEqual "import" - expect(line2[0].scopes).toEqual ["source.python", "keyword.control.import.python"] - expect(line2[1].value).toEqual " " - expect(line2[1].scopes).toEqual ["source.python"] - expect(line2[2].value).toEqual "b" - expect(line2[2].scopes).toEqual ["source.python"] diff --git a/src/text-mate-grammar.coffee b/src/text-mate-grammar.coffee deleted file mode 100644 index 09c756817..000000000 --- a/src/text-mate-grammar.coffee +++ /dev/null @@ -1,522 +0,0 @@ -_ = 
require 'underscore-plus' -fs = require 'fs-plus' -Token = require './token' -{OnigRegExp, OnigScanner} = require 'oniguruma' -path = require 'path' -{Emitter} = require 'emissary' -{ScopeSelector} = require 'first-mate' - -pathSplitRegex = new RegExp("[#{_.escapeRegExp(path.sep)}.]") - -### Internal ### - -module.exports = -class TextMateGrammar - Emitter.includeInto(this) - - @load: (grammarPath, done) -> - fs.readObject grammarPath, (error, object) -> - if error? - done(error) - else - done(null, new TextMateGrammar(object)) - - @loadSync: (grammarPath) -> - new TextMateGrammar(fs.readObjectSync(grammarPath)) - - name: null - rawPatterns: null - rawRepository: null - fileTypes: null - scopeName: null - repository: null - initialRule: null - firstLineRegex: null - includedGrammarScopes: null - maxTokensPerLine: 100 - - constructor: ({ @name, @fileTypes, @scopeName, injections, injectionSelector, patterns, repository, @foldingStopMarker, firstLineMatch}) -> - @rawPatterns = patterns - @rawRepository = repository - @injections = new Injections(this, injections) - - if injectionSelector? - @injectionSelector = new ScopeSelector(injectionSelector) - - @firstLineRegex = new OnigRegExp(firstLineMatch) if firstLineMatch - @fileTypes ?= [] - @includedGrammarScopes = [] - - clearRules: -> - @initialRule = null - @repository = null - - getInitialRule: -> - @initialRule ?= new Rule(this, {@scopeName, patterns: @rawPatterns}) - - getRepository: -> - @repository ?= do => - repository = {} - for name, data of @rawRepository - data = {patterns: [data], tempName: name} if data.begin? or data.match? - repository[name] = new Rule(this, data) - repository - - addIncludedGrammarScope: (scope) -> - @includedGrammarScopes.push(scope) unless _.include(@includedGrammarScopes, scope) - - grammarUpdated: (scopeName) -> - return false unless _.include(@includedGrammarScopes, scopeName) - @clearRules() - atom.syntax.grammarUpdated(@scopeName) - @emit 'grammar-updated' - true - - getScore: (filePath, contents) -> - contents = fs.readFileSync(filePath, 'utf8') if not contents? and fs.isFileSync(filePath) - - if atom.syntax.grammarOverrideForPath(filePath) is @scopeName - 2 + (filePath?.length ? 0) - else if @matchesContents(contents) - 1 + (filePath?.length ? 0) - else - @getPathScore(filePath) - - matchesContents: (contents) -> - return false unless contents? and @firstLineRegex? - - escaped = false - numberOfNewlinesInRegex = 0 - for character in @firstLineRegex.source - switch character - when '\\' - escaped = !escaped - when 'n' - numberOfNewlinesInRegex++ if escaped - escaped = false - else - escaped = false - lines = contents.split('\n') - @firstLineRegex.test(lines[0..numberOfNewlinesInRegex].join('\n')) - - getPathScore: (filePath) -> - return -1 unless filePath? - - pathComponents = filePath.split(pathSplitRegex) - pathScore = -1 - @fileTypes.forEach (fileType) -> - fileTypeComponents = fileType.split(pathSplitRegex) - pathSuffix = pathComponents[-fileTypeComponents.length..-1] - if _.isEqual(pathSuffix, fileTypeComponents) - pathScore = Math.max(pathScore, fileType.length) - - pathScore - - tokenizeLine: (line, ruleStack=[@getInitialRule()], firstLine=false) -> - originalRuleStack = ruleStack - ruleStack = new Array(ruleStack...) 
# clone ruleStack - tokens = [] - position = 0 - - loop - scopes = scopesFromStack(ruleStack) - previousRuleStackLength = ruleStack.length - previousPosition = position - - if tokens.length >= (@getMaxTokensPerLine() - 1) - token = new Token(value: line[position..], scopes: scopes) - tokens.push token - ruleStack = originalRuleStack - break - - break if position == line.length + 1 # include trailing newline position - - if match = _.last(ruleStack).getNextTokens(ruleStack, line, position, firstLine) - { nextTokens, tokensStartPosition, tokensEndPosition } = match - if position < tokensStartPosition # unmatched text before next tokens - tokens.push(new Token( - value: line[position...tokensStartPosition] - scopes: scopes - )) - - tokens.push(nextTokens...) - position = tokensEndPosition - break if position is line.length and nextTokens.length is 0 and ruleStack.length is previousRuleStackLength - - else # push filler token for unmatched text at end of line - if position < line.length or line.length == 0 - tokens.push(new Token( - value: line[position...line.length] - scopes: scopes - )) - break - - if position == previousPosition - if ruleStack.length == previousRuleStackLength - console.error("Popping rule because it loops at column #{position} of line '#{line}'", _.clone(ruleStack)) - ruleStack.pop() - else if ruleStack.length > previousRuleStackLength # Stack size increased with zero length match - [penultimateRule, lastRule] = ruleStack[-2..] - - # Same exact rule was pushed but position wasn't advanced - if lastRule? and lastRule == penultimateRule - popStack = true - - # Rule with same scope name as previous rule was pushed but position wasn't advanced - if lastRule?.scopeName? and penultimateRule.scopeName == lastRule.scopeName - popStack = true - - if popStack - ruleStack.pop() - tokens.push(new Token( - value: line[position...line.length] - scopes: scopes - )) - break - - ruleStack.forEach (rule) -> rule.clearAnchorPosition() - { tokens, ruleStack } - - tokenizeLines: (text) -> - lines = text.split('\n') - ruleStack = null - for line, i in lines - { tokens, ruleStack } = @tokenizeLine(line, ruleStack, i is 0) - tokens - - getMaxTokensPerLine: -> - @maxTokensPerLine - -class Injections - @injections: null - - constructor: (grammar, injections={}) -> - @injections = [] - @scanners = {} - for selector, values of injections - continue unless values?.patterns?.length > 0 - patterns = [] - anchored = false - for regex in values.patterns - pattern = new Pattern(grammar, regex) - anchored = true if pattern.anchored - patterns.push(pattern.getIncludedPatterns(grammar, patterns)...) - @injections.push - anchored: anchored - selector: new ScopeSelector(selector) - patterns: patterns - - getScanner: (injection, firstLine, position, anchorPosition) -> - return injection.scanner if injection.scanner? 
- - regexes = _.map injection.patterns, (pattern) -> - pattern.getRegex(firstLine, position, anchorPosition) - scanner = new OnigScanner(regexes) - scanner.patterns = injection.patterns - scanner.anchored = injection.anchored - injection.scanner = scanner unless scanner.anchored - scanner - - getScanners: (ruleStack, firstLine, position, anchorPosition) -> - scanners = [] - scopes = scopesFromStack(ruleStack) - for injection in @injections - if injection.selector.matches(scopes) - scanner = @getScanner(injection, firstLine, position, anchorPosition) - scanners.push(scanner) - scanners - -class Rule - grammar: null - scopeName: null - patterns: null - scannersByBaseGrammarName: null - createEndPattern: null - anchorPosition: -1 - - constructor: (@grammar, {@scopeName, patterns, @endPattern}) -> - patterns ?= [] - @patterns = patterns.map (pattern) => new Pattern(grammar, pattern) - @patterns.unshift(@endPattern) if @endPattern and !@endPattern.hasBackReferences - @scannersByBaseGrammarName = {} - - getIncludedPatterns: (baseGrammar, included=[]) -> - return [] if _.include(included, this) - - included = included.concat([this]) - allPatterns = [] - for pattern in @patterns - allPatterns.push(pattern.getIncludedPatterns(baseGrammar, included)...) - allPatterns - - clearAnchorPosition: -> @anchorPosition = -1 - - createScanner: (patterns, firstLine, position) -> - anchored = false - regexes = _.map patterns, (pattern) => - anchored = true if pattern.anchored - pattern.getRegex(firstLine, position, @anchorPosition) - - scanner = new OnigScanner(regexes) - scanner.patterns = patterns - scanner.anchored = anchored - scanner - - getScanner: (baseGrammar, position, firstLine) -> - return scanner if scanner = @scannersByBaseGrammarName[baseGrammar.name] - - patterns = @getIncludedPatterns(baseGrammar) - scanner = @createScanner(patterns, firstLine, position) - @scannersByBaseGrammarName[baseGrammar.name] = scanner unless scanner.anchored - scanner - - scanInjections: (ruleStack, line, position, firstLine) -> - baseGrammar = ruleStack[0].grammar - if injections = baseGrammar.injections - scanners = injections.getScanners(ruleStack, position, firstLine, @anchorPosition) - for scanner in scanners - result = scanner.findNextMatch(line, position) - return result if result? 
- - normalizeCaptureIndices: (line, captureIndices) -> - lineLength = line.length - captureIndices.forEach (capture) -> - capture.end = Math.min(capture.end, lineLength) - capture.start = Math.min(capture.start, lineLength) - - findNextMatch: (ruleStack, line, position, firstLine) -> - lineWithNewline = "#{line}\n" - baseGrammar = ruleStack[0].grammar - results = [] - - scanner = @getScanner(baseGrammar, position, firstLine) - if result = scanner.findNextMatch(lineWithNewline, position) - results.push(result) - - if result = @scanInjections(ruleStack, lineWithNewline, position, firstLine) - results.push(result) - - scopes = scopesFromStack(ruleStack) - for injectionGrammar in _.without(atom.syntax.injectionGrammars, @grammar, baseGrammar) - if injectionGrammar.injectionSelector.matches(scopes) - scanner = injectionGrammar.getInitialRule().getScanner(injectionGrammar, position, firstLine) - if result = scanner.findNextMatch(lineWithNewline, position) - results.push(result) - - if results.length > 0 - _.min results, (result) => - @normalizeCaptureIndices(line, result.captureIndices) - result.captureIndices[0].start - - getNextTokens: (ruleStack, line, position, firstLine) -> - result = @findNextMatch(ruleStack, line, position, firstLine) - return null unless result? - { index, captureIndices, scanner } = result - firstCapture = captureIndices[0] - nextTokens = scanner.patterns[index].handleMatch(ruleStack, line, captureIndices) - { nextTokens, tokensStartPosition: firstCapture.start, tokensEndPosition: firstCapture.end } - - getRuleToPush: (line, beginPatternCaptureIndices) -> - if @endPattern.hasBackReferences - rule = new Rule(@grammar, {@scopeName}) - rule.endPattern = @endPattern.resolveBackReferences(line, beginPatternCaptureIndices) - rule.patterns = [rule.endPattern, @patterns...] - rule - else - this - -class Pattern - grammar: null - pushRule: null - popRule: false - scopeName: null - captures: null - backReferences: null - anchored: false - - constructor: (@grammar, { name, contentName, @include, match, begin, end, captures, beginCaptures, endCaptures, patterns, @popRule, @hasBackReferences}) -> - @scopeName = name ? contentName # TODO: We need special treatment of contentName - if match - if (end or @popRule) and @hasBackReferences ?= /\\\d+/.test(match) - @match = match - else - @regexSource = match - @captures = captures - else if begin - @regexSource = begin - @captures = beginCaptures ? captures - endPattern = new Pattern(@grammar, { match: end, captures: endCaptures ? captures, popRule: true}) - @pushRule = new Rule(@grammar, { @scopeName, patterns, endPattern }) - - if @captures? 
- for group, capture of @captures - if capture.patterns?.length > 0 and not capture.rule - capture.scopeName = @scopeName - capture.rule = new Rule(@grammar, capture) - - @anchored = @hasAnchor() - - getRegex: (firstLine, position, anchorPosition) -> - if @anchored - @replaceAnchor(firstLine, position, anchorPosition) - else - @regexSource - - hasAnchor: -> - return false unless @regexSource - escape = false - for character in @regexSource.split('') - return true if escape and 'AGz'.indexOf(character) isnt -1 - escape = not escape and character is '\\' - false - - replaceAnchor: (firstLine, offset, anchor) -> - escaped = [] - placeholder = '\uFFFF' - escape = false - for character in @regexSource.split('') - if escape - switch character - when 'A' - if firstLine - escaped.push("\\#{character}") - else - escaped.push(placeholder) - when 'G' - if offset is anchor - escaped.push("\\#{character}") - else - escaped.push(placeholder) - when 'z' then escaped.push('$(?!\n)(? - beginCaptures = [] - - for {start, end} in beginCaptureIndices - beginCaptures.push line[start...end] - - resolvedMatch = @match.replace /\\\d+/g, (match) -> - index = parseInt(match[1..]) - _.escapeRegExp(beginCaptures[index] ? "\\#{index}") - - new Pattern(@grammar, { hasBackReferences: false, match: resolvedMatch, @captures, @popRule }) - - ruleForInclude: (baseGrammar, name) -> - if name[0] == "#" - @grammar.getRepository()[name[1..]] - else if name == "$self" - @grammar.getInitialRule() - else if name == "$base" - baseGrammar.getInitialRule() - else - @grammar.addIncludedGrammarScope(name) - atom.syntax.grammarForScopeName(name)?.getInitialRule() - - getIncludedPatterns: (baseGrammar, included) -> - if @include - rule = @ruleForInclude(baseGrammar, @include) - rule?.getIncludedPatterns(baseGrammar, included) ? [] - else - [this] - - resolveScopeName: (line, captureIndices) -> - resolvedScopeName = @scopeName.replace /\${(\d+):\/(downcase|upcase)}/, (match, index, command) -> - capture = captureIndices[parseInt(index)] - if capture? - replacement = line.substring(capture.start, capture.end) - switch command - when 'downcase' then replacement.toLowerCase() - when 'upcase' then replacement.toUpperCase() - else replacement - else - match - - resolvedScopeName.replace /\$(\d+)/, (match, index) -> - capture = captureIndices[parseInt(index)] - if capture? 
- line.substring(capture.start, capture.end) - else - match - - handleMatch: (stack, line, captureIndices) -> - scopes = scopesFromStack(stack) - if @scopeName and not @popRule - scopes.push(@resolveScopeName(line, captureIndices)) - - if @captures - tokens = @getTokensForCaptureIndices(line, _.clone(captureIndices), scopes, stack) - else - {start, end} = captureIndices[0] - zeroLengthMatch = end == start - if zeroLengthMatch - tokens = [] - else - tokens = [new Token(value: line[start...end], scopes: scopes)] - if @pushRule - ruleToPush = @pushRule.getRuleToPush(line, captureIndices) - ruleToPush.anchorPosition = captureIndices[0].end - stack.push(ruleToPush) - else if @popRule - stack.pop() - - tokens - - getTokensForCaptureRule: (rule, line, captureStart, captureEnd, scopes, stack) -> - captureText = line.substring(captureStart, captureEnd) - {tokens} = rule.grammar.tokenizeLine(captureText, [stack..., rule]) - tokens - - getTokensForCaptureIndices: (line, captureIndices, scopes, stack) -> - parentCapture = captureIndices.shift() - - tokens = [] - if scope = @captures[parentCapture.index]?.name - scopes = scopes.concat(scope) - - if captureRule = @captures[parentCapture.index]?.rule - captureTokens = @getTokensForCaptureRule(captureRule, line, parentCapture.start, parentCapture.end, scopes, stack) - tokens.push(captureTokens...) - # Consume child captures - while captureIndices.length and captureIndices[0].start < parentCapture.end - captureIndices.shift() - else - previousChildCaptureEnd = parentCapture.start - while captureIndices.length and captureIndices[0].start < parentCapture.end - childCapture = captureIndices[0] - - emptyCapture = childCapture.end - childCapture.start == 0 - captureHasNoScope = not @captures[childCapture.index] - if emptyCapture or captureHasNoScope - captureIndices.shift() - continue - - if childCapture.start > previousChildCaptureEnd - tokens.push(new Token( - value: line[previousChildCaptureEnd...childCapture.start] - scopes: scopes - )) - - captureTokens = @getTokensForCaptureIndices(line, captureIndices, scopes, stack) - tokens.push(captureTokens...) 
- previousChildCaptureEnd = childCapture.end - - if parentCapture.end > previousChildCaptureEnd - tokens.push(new Token( - value: line[previousChildCaptureEnd...parentCapture.end] - scopes: scopes - )) - - tokens - -### Internal ### - -scopesFromStack = (stack) -> - _.compact(_.pluck(stack, "scopeName")) From 19212f99ee5550cb8eb6e8fcff16206f88664232 Mon Sep 17 00:00:00 2001 From: Kevin Sawicki Date: Tue, 31 Dec 2013 10:50:35 -0800 Subject: [PATCH 24/39] Remove TextMateGrammar use in specs --- spec/syntax-spec.coffee | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/spec/syntax-spec.coffee b/spec/syntax-spec.coffee index 9e4795d6d..b2d0e8eac 100644 --- a/spec/syntax-spec.coffee +++ b/spec/syntax-spec.coffee @@ -1,7 +1,6 @@ {fs} = require 'atom' path = require 'path' temp = require 'temp' -TextMateGrammar = require '../src/text-mate-grammar' describe "the `syntax` global", -> beforeEach -> @@ -62,20 +61,23 @@ describe "the `syntax` global", -> describe "when multiple grammars have matching fileTypes", -> it "selects the grammar with the longest fileType match", -> - grammar1 = new TextMateGrammar + grammarPath1 = temp.path(suffix: '.json') + fs.writeFileSync grammarPath1, JSON.stringify( name: 'test1' scopeName: 'source1' - fileTypes: ['test', 'more.test'] + fileTypes: ['test'] + ) + grammar1 = atom.syntax.registry.loadGrammarSync(grammarPath1) + expect(atom.syntax.selectGrammar('more.test', '')).toBe grammar1 - grammar2 = new TextMateGrammar + grammarPath2 = temp.path(suffix: '.json') + fs.writeFileSync grammarPath2, JSON.stringify( name: 'test2' scopeName: 'source2' - fileTypes: ['test'] - - atom.syntax.addGrammar(grammar1) - atom.syntax.addGrammar(grammar2) - - expect(atom.syntax.selectGrammar('more.test', '')).toBe grammar1 + fileTypes: ['test', 'more.test'] + ) + grammar2 = atom.syntax.registry.loadGrammarSync(grammarPath2) + expect(atom.syntax.selectGrammar('more.test', '')).toBe grammar2 describe "when there is no file path", -> it "does not throw an exception (regression)", -> From c1fc09e5102ee6b3cc9aae57fd3565339eb80f4f Mon Sep 17 00:00:00 2001 From: Kevin Sawicki Date: Tue, 31 Dec 2013 10:52:30 -0800 Subject: [PATCH 25/39] Use grammars from registry --- spec/syntax-spec.coffee | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spec/syntax-spec.coffee b/spec/syntax-spec.coffee index b2d0e8eac..66fbc9824 100644 --- a/spec/syntax-spec.coffee +++ b/spec/syntax-spec.coffee @@ -15,7 +15,7 @@ describe "the `syntax` global", -> expect(atom.syntax.selectGrammar(filePath).name).not.toBe 'Ruby' atom.syntax.setGrammarOverrideForPath(filePath, 'source.ruby') syntax2 = atom.deserializers.deserialize(atom.syntax.serialize()) - syntax2.addGrammar(grammar) for grammar in atom.syntax.grammars when grammar isnt atom.syntax.nullGrammar + syntax2.addGrammar(grammar) for grammar in atom.syntax.registry.grammars when grammar isnt atom.syntax.registry.nullGrammar expect(syntax2.selectGrammar(filePath).name).toBe 'Ruby' describe ".selectGrammar(filePath)", -> From e1aec57ffe3ffb48e8c3cdda4e582f7545b24262 Mon Sep 17 00:00:00 2001 From: Kevin Sawicki Date: Tue, 31 Dec 2013 10:57:51 -0800 Subject: [PATCH 26/39] Create tokens in TokenizedBuffer --- src/tokenized-buffer.coffee | 1 + src/tokenized-line.coffee | 2 -- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/src/tokenized-buffer.coffee b/src/tokenized-buffer.coffee index ac3cf01b9..e747d3062 100644 --- a/src/tokenized-buffer.coffee +++ b/src/tokenized-buffer.coffee @@ -203,6 +203,7 @@ 
class TokenizedBuffer extends Model lineEnding = @buffer.lineEndingForRow(row) tabLength = @getTabLength() { tokens, ruleStack } = @grammar.tokenizeLine(line, ruleStack, row is 0) + tokens = (new Token(token) for token in tokens) new TokenizedLine({tokens, ruleStack, tabLength, lineEnding}) # FIXME: benogle says: These are actually buffer rows as all buffer rows are diff --git a/src/tokenized-line.coffee b/src/tokenized-line.coffee index 26d24fb0b..826fdfd5c 100644 --- a/src/tokenized-line.coffee +++ b/src/tokenized-line.coffee @@ -1,12 +1,10 @@ _ = require 'underscore-plus' -Token = require './token' ### Internal ### module.exports = class TokenizedLine constructor: ({tokens, @lineEnding, @ruleStack, @startBufferColumn, @fold, tabLength}) -> - tokens = (new Token(token) for token in tokens) @tokens = @breakOutAtomicTokens(tokens, tabLength) @startBufferColumn ?= 0 @text = _.pluck(@tokens, 'value').join('') From ccc6eed3da5f98a1c49d946e5ff79dd5c05212c4 Mon Sep 17 00:00:00 2001 From: Kevin Sawicki Date: Tue, 31 Dec 2013 10:58:15 -0800 Subject: [PATCH 27/39] Remove unused method --- src/language-mode.coffee | 3 --- 1 file changed, 3 deletions(-) diff --git a/src/language-mode.coffee b/src/language-mode.coffee index 28ee1dc70..2da82e8dc 100644 --- a/src/language-mode.coffee +++ b/src/language-mode.coffee @@ -289,9 +289,6 @@ class LanguageMode if desiredIndentLevel >= 0 and desiredIndentLevel < currentIndentLevel @editor.setIndentationForBufferRow(bufferRow, desiredIndentLevel) - tokenizeLine: (line, stack, firstLine) -> - {tokens, stack} = @grammar.tokenizeLine(line, stack, firstLine) - getRegexForProperty: (scopes, property) -> if pattern = atom.syntax.getProperty(scopes, property) new OnigRegExp(pattern) From c99c2af6aec2f11b3d1b78196b8fda072dfb0098 Mon Sep 17 00:00:00 2001 From: Kevin Sawicki Date: Tue, 31 Dec 2013 10:58:40 -0800 Subject: [PATCH 28/39] Remove NullGrammar class now in first-mate --- src/null-grammar.coffee | 23 ----------------------- 1 file changed, 23 deletions(-) delete mode 100644 src/null-grammar.coffee diff --git a/src/null-grammar.coffee b/src/null-grammar.coffee deleted file mode 100644 index 9fa7b5c6e..000000000 --- a/src/null-grammar.coffee +++ /dev/null @@ -1,23 +0,0 @@ -Token = require './token' -{Emitter} = require 'emissary' - -### Internal ### -module.exports = -class NullGrammar - Emitter.includeInto(this) - - name: 'Null Grammar' - scopeName: 'text.plain.null-grammar' - - getScore: -> 0 - - tokenizeLine: (line) -> - { tokens: [new Token(value: line, scopes: ['null-grammar.text.plain'])] } - - tokenizeLines: (text) -> - lines = text.split('\n') - for line, i in lines - {tokens} = @tokenizeLine(line) - tokens - - grammarUpdated: -> # noop From 826d536c09cf83583ad8b3f85a7f2320e7d32565 Mon Sep 17 00:00:00 2001 From: Kevin Sawicki Date: Tue, 31 Dec 2013 11:08:08 -0800 Subject: [PATCH 29/39] Add atom.syntax.grammars shim --- src/syntax.coffee | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/syntax.coffee b/src/syntax.coffee index 9dd21fb38..7f0c3cb5c 100644 --- a/src/syntax.coffee +++ b/src/syntax.coffee @@ -28,6 +28,9 @@ class Syntax @subscribe @registry, 'grammar-updated', (grammar) => @emit 'grammar-updated', grammar + #TODO Remove once packages have been updated + @__defineGetter__ 'grammars', -> @registry.grammars + @nullGrammar = @registry.nullGrammar @scopedPropertiesIndex = 0 @scopedProperties = [] From 88c9275bff1b50dbe0c82fc3b32396da6140524f Mon Sep 17 00:00:00 2001 From: Kevin Sawicki Date: Tue, 31 Dec 2013 11:08:33 -0800 Subject: [PATCH 30/39] 
Activate grammar when already active --- src/text-mate-package.coffee | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/src/text-mate-package.coffee b/src/text-mate-package.coffee index e8be61f5c..3bccbde94 100644 --- a/src/text-mate-package.coffee +++ b/src/text-mate-package.coffee @@ -65,10 +65,13 @@ class TextMatePackage extends Package done() loadGrammarAtPath: (grammarPath, done) => - atom.syntax.registry.readGrammar (error, grammar) => - return console.log("Error loading grammar at path '#{grammarPath}':", err.stack ? err) if err - @addGrammar(grammar) - done() + atom.syntax.registry.readGrammar grammarPath, (error, grammar) => + console.log error, grammar + if error? + console.log("Error loading grammar at path '#{grammarPath}':", error.stack ? error) + else + @addGrammar(grammar) + done?() loadGrammarsSync: -> for grammarPath in fs.listSync(@getSyntaxesPath(), @legalGrammarExtensions) @@ -76,6 +79,7 @@ class TextMatePackage extends Package addGrammar: (grammar) -> @grammars.push(grammar) + grammar.activate() if @isActive() getGrammars: -> @grammars From 3ff702581ac8ace67078a8c525e605b835b5dc20 Mon Sep 17 00:00:00 2001 From: Kevin Sawicki Date: Tue, 31 Dec 2013 11:10:02 -0800 Subject: [PATCH 31/39] Group shim lines --- src/syntax.coffee | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/syntax.coffee b/src/syntax.coffee index 7f0c3cb5c..e935629d6 100644 --- a/src/syntax.coffee +++ b/src/syntax.coffee @@ -23,15 +23,15 @@ class Syntax constructor: -> @registry = new TextMateGrammarRegistry() + + #TODO Remove once packages have been updated @subscribe @registry, 'grammar-added', (grammar) => @emit 'grammar-added', grammar @subscribe @registry, 'grammar-updated', (grammar) => @emit 'grammar-updated', grammar - - #TODO Remove once packages have been updated @__defineGetter__ 'grammars', -> @registry.grammars - @nullGrammar = @registry.nullGrammar + @scopedPropertiesIndex = 0 @scopedProperties = [] From e8edc83e392ae9a77eade1ed9cef4e8402a82787 Mon Sep 17 00:00:00 2001 From: Kevin Sawicki Date: Tue, 31 Dec 2013 11:18:06 -0800 Subject: [PATCH 32/39] :lipstick: Sort requires --- src/syntax.coffee | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/syntax.coffee b/src/syntax.coffee index e935629d6..1e1908a4a 100644 --- a/src/syntax.coffee +++ b/src/syntax.coffee @@ -1,14 +1,13 @@ _ = require 'underscore-plus' {specificity} = require 'clear-cut' -{$, $$} = require './space-pen-extensions' {Emitter, Subscriber} = require 'emissary' - FirstMate = require 'first-mate' TextMateScopeSelector = FirstMate.ScopeSelector TextMateGrammarRegistry = FirstMate.GrammarRegistry -### Internal ### +{$, $$} = require './space-pen-extensions' +### Internal ### module.exports = class Syntax Emitter.includeInto(this) From a95fdce85f45b93d5d4e2b3eb9987ce2ee44cb61 Mon Sep 17 00:00:00 2001 From: Kevin Sawicki Date: Tue, 31 Dec 2013 11:18:53 -0800 Subject: [PATCH 33/39] :memo: Mark Syntax class as public --- src/syntax.coffee | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/syntax.coffee b/src/syntax.coffee index 1e1908a4a..b394dfa40 100644 --- a/src/syntax.coffee +++ b/src/syntax.coffee @@ -7,7 +7,7 @@ TextMateGrammarRegistry = FirstMate.GrammarRegistry {$, $$} = require './space-pen-extensions' -### Internal ### +### Public ### module.exports = class Syntax Emitter.includeInto(this) From fa9aa3691befec1d4c2be7824b697260d57b261f Mon Sep 17 00:00:00 2001 From: Kevin Sawicki Date: Tue, 31 Dec 2013 11:36:26 -0800 Subject: 
[PATCH 34/39] Extend GrammarRegistry in Syntax --- spec/syntax-spec.coffee | 6 ++--- src/atom-package.coffee | 2 +- src/syntax.coffee | 44 ++++-------------------------------- src/text-mate-package.coffee | 5 ++-- 4 files changed, 11 insertions(+), 46 deletions(-) diff --git a/spec/syntax-spec.coffee b/spec/syntax-spec.coffee index 66fbc9824..92373c53d 100644 --- a/spec/syntax-spec.coffee +++ b/spec/syntax-spec.coffee @@ -15,7 +15,7 @@ describe "the `syntax` global", -> expect(atom.syntax.selectGrammar(filePath).name).not.toBe 'Ruby' atom.syntax.setGrammarOverrideForPath(filePath, 'source.ruby') syntax2 = atom.deserializers.deserialize(atom.syntax.serialize()) - syntax2.addGrammar(grammar) for grammar in atom.syntax.registry.grammars when grammar isnt atom.syntax.registry.nullGrammar + syntax2.addGrammar(grammar) for grammar in atom.syntax.grammars when grammar isnt atom.syntax.nullGrammar expect(syntax2.selectGrammar(filePath).name).toBe 'Ruby' describe ".selectGrammar(filePath)", -> @@ -67,7 +67,7 @@ describe "the `syntax` global", -> scopeName: 'source1' fileTypes: ['test'] ) - grammar1 = atom.syntax.registry.loadGrammarSync(grammarPath1) + grammar1 = atom.syntax.loadGrammarSync(grammarPath1) expect(atom.syntax.selectGrammar('more.test', '')).toBe grammar1 grammarPath2 = temp.path(suffix: '.json') @@ -76,7 +76,7 @@ describe "the `syntax` global", -> scopeName: 'source2' fileTypes: ['test', 'more.test'] ) - grammar2 = atom.syntax.registry.loadGrammarSync(grammarPath2) + grammar2 = atom.syntax.loadGrammarSync(grammarPath2) expect(atom.syntax.selectGrammar('more.test', '')).toBe grammar2 describe "when there is no file path", -> diff --git a/src/atom-package.coffee b/src/atom-package.coffee index 204364d15..4349cde9c 100644 --- a/src/atom-package.coffee +++ b/src/atom-package.coffee @@ -151,7 +151,7 @@ class AtomPackage extends Package @grammars = [] grammarsDirPath = path.join(@path, 'grammars') for grammarPath in fs.listSync(grammarsDirPath, ['.json', '.cson']) - @grammars.push(atom.syntax.registry.loadGrammarSync(grammarPath)) + @grammars.push(atom.syntax.loadGrammarSync(grammarPath)) loadScopedProperties: -> @scopedProperties = [] diff --git a/src/syntax.coffee b/src/syntax.coffee index b394dfa40..85c4d7b02 100644 --- a/src/syntax.coffee +++ b/src/syntax.coffee @@ -1,6 +1,6 @@ _ = require 'underscore-plus' {specificity} = require 'clear-cut' -{Emitter, Subscriber} = require 'emissary' +{Subscriber} = require 'emissary' FirstMate = require 'first-mate' TextMateScopeSelector = FirstMate.ScopeSelector TextMateGrammarRegistry = FirstMate.GrammarRegistry @@ -9,58 +9,24 @@ TextMateGrammarRegistry = FirstMate.GrammarRegistry ### Public ### module.exports = -class Syntax - Emitter.includeInto(this) +class Syntax extends TextMateGrammarRegistry Subscriber.includeInto(this) atom.deserializers.add(this) @deserialize: ({grammarOverridesByPath}) -> syntax = new Syntax() - syntax.registry.grammarOverridesByPath = grammarOverridesByPath + syntax.grammarOverridesByPath = grammarOverridesByPath syntax constructor: -> - @registry = new TextMateGrammarRegistry() - - #TODO Remove once packages have been updated - @subscribe @registry, 'grammar-added', (grammar) => - @emit 'grammar-added', grammar - @subscribe @registry, 'grammar-updated', (grammar) => - @emit 'grammar-updated', grammar - @__defineGetter__ 'grammars', -> @registry.grammars - @nullGrammar = @registry.nullGrammar + super @scopedPropertiesIndex = 0 @scopedProperties = [] serialize: -> - deserializer: @constructor.name - 
grammarOverridesByPath: @registry.grammarOverridesByPath - - addGrammar: (grammar) -> - @registry.addGrammar(grammar) - - removeGrammar: (grammar) -> - @registry.removeGrammar(grammar) - - setGrammarOverrideForPath: (path, scopeName) -> - @registry.setGrammarOverrideForPath(path, scopeName) - - clearGrammarOverrideForPath: (path) -> - @registry.clearGrammarOverrideForPath(path) - - clearGrammarOverrides: -> - @registry.clearGrammarOverrides() - - selectGrammar: (filePath, fileContents) -> - @registry.selectGrammar(filePath, fileContents) - - grammarOverrideForPath: (path) -> - @grammarOverridesByPath[path] - - grammarForScopeName: (scopeName) -> - @registry.grammarForScopeName(scopeName) + {deserializer: @constructor.name, @grammarOverridesByPath} addProperties: (args...) -> name = args.shift() if args.length > 2 diff --git a/src/text-mate-package.coffee b/src/text-mate-package.coffee index 3bccbde94..3daf8d353 100644 --- a/src/text-mate-package.coffee +++ b/src/text-mate-package.coffee @@ -65,8 +65,7 @@ class TextMatePackage extends Package done() loadGrammarAtPath: (grammarPath, done) => - atom.syntax.registry.readGrammar grammarPath, (error, grammar) => - console.log error, grammar + atom.syntax.readGrammar grammarPath, (error, grammar) => if error? console.log("Error loading grammar at path '#{grammarPath}':", error.stack ? error) else @@ -75,7 +74,7 @@ class TextMatePackage extends Package loadGrammarsSync: -> for grammarPath in fs.listSync(@getSyntaxesPath(), @legalGrammarExtensions) - @addGrammar(atom.syntax.registry.readGrammarSync(grammarPath)) + @addGrammar(atom.syntax.readGrammarSync(grammarPath)) addGrammar: (grammar) -> @grammars.push(grammar) From 959401f5a7e044af36583bc41b7454c90f3ed38a Mon Sep 17 00:00:00 2001 From: Kevin Sawicki Date: Tue, 31 Dec 2013 11:37:09 -0800 Subject: [PATCH 35/39] Drop TextMate prefix from class names --- src/syntax.coffee | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/src/syntax.coffee b/src/syntax.coffee index 85c4d7b02..554c02ba7 100644 --- a/src/syntax.coffee +++ b/src/syntax.coffee @@ -1,15 +1,13 @@ _ = require 'underscore-plus' {specificity} = require 'clear-cut' {Subscriber} = require 'emissary' -FirstMate = require 'first-mate' -TextMateScopeSelector = FirstMate.ScopeSelector -TextMateGrammarRegistry = FirstMate.GrammarRegistry +{GrammarRegistry, ScopeSelector} = require 'first-mate' {$, $$} = require './space-pen-extensions' ### Public ### module.exports = -class Syntax extends TextMateGrammarRegistry +class Syntax extends GrammarRegistry Subscriber.includeInto(this) atom.deserializers.add(this) @@ -93,4 +91,4 @@ class Syntax extends TextMateGrammarRegistry element[0] cssSelectorFromScopeSelector: (scopeSelector) -> - new TextMateScopeSelector(scopeSelector).toCssSelector() + new ScopeSelector(scopeSelector).toCssSelector() From 882d7666893e63ec4c1c37e16c544d790ea4b26f Mon Sep 17 00:00:00 2001 From: Kevin Sawicki Date: Tue, 31 Dec 2013 12:48:08 -0800 Subject: [PATCH 36/39] Activate/deactivate grammars directly --- src/atom-package.coffee | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/atom-package.coffee b/src/atom-package.coffee index 4349cde9c..aa62359e9 100644 --- a/src/atom-package.coffee +++ b/src/atom-package.coffee @@ -104,7 +104,7 @@ class AtomPackage extends Package atom.keymap.add(keymapPath, map) for [keymapPath, map] in @keymaps atom.contextMenu.add(menuPath, map['context-menu']) for [menuPath, map] in @menus atom.menu.add(map.menu) for [menuPath, map] in @menus when 
map.menu - atom.syntax.addGrammar(grammar) for grammar in @grammars + grammar.activate() for grammar in @grammars for [scopedPropertiesPath, selector, properties] in @scopedProperties atom.syntax.addProperties(scopedPropertiesPath, selector, properties) @@ -151,7 +151,7 @@ class AtomPackage extends Package @grammars = [] grammarsDirPath = path.join(@path, 'grammars') for grammarPath in fs.listSync(grammarsDirPath, ['.json', '.cson']) - @grammars.push(atom.syntax.loadGrammarSync(grammarPath)) + @grammars.push(atom.syntax.readGrammarSync(grammarPath)) loadScopedProperties: -> @scopedProperties = [] @@ -179,7 +179,7 @@ class AtomPackage extends Package @configActivated = false deactivateResources: -> - atom.syntax.removeGrammar(grammar) for grammar in @grammars + grammar.deactivate() for grammar in @grammars atom.syntax.removeProperties(scopedPropertiesPath) for [scopedPropertiesPath] in @scopedProperties atom.keymap.remove(keymapPath) for [keymapPath] in @keymaps atom.themes.removeStylesheet(stylesheetPath) for [stylesheetPath] in @stylesheets From 9962ce98593120af25a3c7ab04ad32c19def53a8 Mon Sep 17 00:00:00 2001 From: Kevin Sawicki Date: Tue, 31 Dec 2013 13:42:35 -0800 Subject: [PATCH 37/39] Implement createToken in Syntax --- package.json | 2 +- src/syntax.coffee | 3 +++ src/tokenized-buffer.coffee | 1 - 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/package.json b/package.json index 70791dc4d..b1db26b84 100644 --- a/package.json +++ b/package.json @@ -25,7 +25,7 @@ "coffeestack": "0.6.0", "diff": "git://github.com/benogle/jsdiff.git", "emissary": "0.19.0", - "first-mate": "0.7.0", + "first-mate": "0.9.0", "fs-plus": "0.13.0", "fuzzaldrin": "0.1.0", "git-utils": "0.29.0", diff --git a/src/syntax.coffee b/src/syntax.coffee index 554c02ba7..20d69a9d8 100644 --- a/src/syntax.coffee +++ b/src/syntax.coffee @@ -4,6 +4,7 @@ _ = require 'underscore-plus' {GrammarRegistry, ScopeSelector} = require 'first-mate' {$, $$} = require './space-pen-extensions' +Token = require './token' ### Public ### module.exports = @@ -26,6 +27,8 @@ class Syntax extends GrammarRegistry serialize: -> {deserializer: @constructor.name, @grammarOverridesByPath} + createToken: (value, scopes) -> new Token({value, scopes}) + addProperties: (args...) 
-> name = args.shift() if args.length > 2 [selector, properties] = args diff --git a/src/tokenized-buffer.coffee b/src/tokenized-buffer.coffee index e747d3062..ac3cf01b9 100644 --- a/src/tokenized-buffer.coffee +++ b/src/tokenized-buffer.coffee @@ -203,7 +203,6 @@ class TokenizedBuffer extends Model lineEnding = @buffer.lineEndingForRow(row) tabLength = @getTabLength() { tokens, ruleStack } = @grammar.tokenizeLine(line, ruleStack, row is 0) - tokens = (new Token(token) for token in tokens) new TokenizedLine({tokens, ruleStack, tabLength, lineEnding}) # FIXME: benogle says: These are actually buffer rows as all buffer rows are From b5c8e3e1fee25f9a5e2e1d0703f333b044170e96 Mon Sep 17 00:00:00 2001 From: Kevin Sawicki Date: Tue, 31 Dec 2013 14:13:28 -0800 Subject: [PATCH 38/39] Upgrade to first-mate@0.10.0 --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index b1db26b84..19e336360 100644 --- a/package.json +++ b/package.json @@ -25,7 +25,7 @@ "coffeestack": "0.6.0", "diff": "git://github.com/benogle/jsdiff.git", "emissary": "0.19.0", - "first-mate": "0.9.0", + "first-mate": "0.10.0", "fs-plus": "0.13.0", "fuzzaldrin": "0.1.0", "git-utils": "0.29.0", From b9395d2946032a1a62a8b3d1627917ec854bd226 Mon Sep 17 00:00:00 2001 From: Kevin Sawicki Date: Tue, 31 Dec 2013 17:01:23 -0800 Subject: [PATCH 39/39] Add harmony collections when unavailable grunt could be invoked without harmony collections enabled which would previously cause exceptions to be logged when modules requiring them (emissary) were loaded. --- Gruntfile.coffee | 4 ++++ package.json | 3 ++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/Gruntfile.coffee b/Gruntfile.coffee index c42a15b8c..024c01e48 100644 --- a/Gruntfile.coffee +++ b/Gruntfile.coffee @@ -11,6 +11,10 @@ packageJson = require './package.json' # TODO Remove once all repositories are public process.env.ATOM_ACCESS_TOKEN ?= '362295be4c5258d3f7b967bbabae662a455ca2a7' +# Shim harmony collections in case grunt was invoked without harmony +# collections enabled +_.extend(global, require('harmony-collections')) unless global.WeakMap? + module.exports = (grunt) -> if not grunt.option('verbose') grunt.log.writeln = (args...) -> grunt.log diff --git a/package.json b/package.json index 19e336360..f15021933 100644 --- a/package.json +++ b/package.json @@ -71,7 +71,8 @@ "unzip": "~0.1.9", "rcedit": "~0.1.2", "rimraf": "~2.2.2", - "github-releases": "~0.2.0" + "github-releases": "~0.2.0", + "harmony-collections": "~0.3.8" }, "packageDependencies": { "atom-dark-syntax": "0.10.0",