diff --git a/spec/text-editor-component-spec.coffee b/spec/text-editor-component-spec.coffee
index 68cb388e1..2144eb988 100644
--- a/spec/text-editor-component-spec.coffee
+++ b/spec/text-editor-component-spec.coffee
@@ -277,7 +277,7 @@ describe "TextEditorComponent", ->
expect(leafNodes[0].classList.contains('invisible-character')).toBe true
expect(leafNodes[leafNodes.length - 1].classList.contains('invisible-character')).toBe true
- it "displays newlines as their own token outside of the other tokens' scopes", ->
+ it "displays newlines as their own token outside of the other tokens' scopeDescriptor", ->
editor.setText "var\n"
nextAnimationFrame()
expect(component.lineNodeForScreenRow(0).innerHTML).toBe "var#{invisibles.eol}"
diff --git a/spec/text-editor-spec.coffee b/spec/text-editor-spec.coffee
index 68722a90f..98f1bbef8 100644
--- a/spec/text-editor-spec.coffee
+++ b/spec/text-editor-spec.coffee
@@ -3650,10 +3650,10 @@ describe "TextEditor", ->
{tokens} = grammar.tokenizeLine("var i; // http://github.com")
expect(tokens[0].value).toBe "var"
- expect(tokens[0].scopes).toEqual ["source.js", "storage.modifier.js"]
+ expect(tokens[0].scopeDescriptor).toEqual ["source.js", "storage.modifier.js"]
expect(tokens[6].value).toBe "http://github.com"
- expect(tokens[6].scopes).toEqual ["source.js", "comment.line.double-slash.js", "markup.underline.link.http.hyperlink"]
+ expect(tokens[6].scopeDescriptor).toEqual ["source.js", "comment.line.double-slash.js", "markup.underline.link.http.hyperlink"]
describe "when the grammar is added", ->
it "retokenizes existing buffers that contain tokens that match the injection selector", ->
@@ -3665,7 +3665,7 @@ describe "TextEditor", ->
{tokens} = editor.tokenizedLineForScreenRow(0)
expect(tokens[1].value).toBe " http://github.com"
- expect(tokens[1].scopes).toEqual ["source.js", "comment.line.double-slash.js"]
+ expect(tokens[1].scopeDescriptor).toEqual ["source.js", "comment.line.double-slash.js"]
waitsForPromise ->
atom.packages.activatePackage('language-hyperlink')
@@ -3673,7 +3673,7 @@ describe "TextEditor", ->
runs ->
{tokens} = editor.tokenizedLineForScreenRow(0)
expect(tokens[2].value).toBe "http://github.com"
- expect(tokens[2].scopes).toEqual ["source.js", "comment.line.double-slash.js", "markup.underline.link.http.hyperlink"]
+ expect(tokens[2].scopeDescriptor).toEqual ["source.js", "comment.line.double-slash.js", "markup.underline.link.http.hyperlink"]
describe "when the grammar is updated", ->
it "retokenizes existing buffers that contain tokens that match the injection selector", ->
@@ -3685,7 +3685,7 @@ describe "TextEditor", ->
{tokens} = editor.tokenizedLineForScreenRow(0)
expect(tokens[1].value).toBe " SELECT * FROM OCTOCATS"
- expect(tokens[1].scopes).toEqual ["source.js", "comment.line.double-slash.js"]
+ expect(tokens[1].scopeDescriptor).toEqual ["source.js", "comment.line.double-slash.js"]
waitsForPromise ->
atom.packages.activatePackage('package-with-injection-selector')
@@ -3693,7 +3693,7 @@ describe "TextEditor", ->
runs ->
{tokens} = editor.tokenizedLineForScreenRow(0)
expect(tokens[1].value).toBe " SELECT * FROM OCTOCATS"
- expect(tokens[1].scopes).toEqual ["source.js", "comment.line.double-slash.js"]
+ expect(tokens[1].scopeDescriptor).toEqual ["source.js", "comment.line.double-slash.js"]
waitsForPromise ->
atom.packages.activatePackage('language-sql')
@@ -3701,7 +3701,7 @@ describe "TextEditor", ->
runs ->
{tokens} = editor.tokenizedLineForScreenRow(0)
expect(tokens[2].value).toBe "SELECT"
- expect(tokens[2].scopes).toEqual ["source.js", "comment.line.double-slash.js", "keyword.other.DML.sql"]
+ expect(tokens[2].scopeDescriptor).toEqual ["source.js", "comment.line.double-slash.js", "keyword.other.DML.sql"]
describe ".normalizeTabsInBufferRange()", ->
it "normalizes tabs depending on the editor's soft tab/tab length settings", ->
diff --git a/spec/tokenized-buffer-spec.coffee b/spec/tokenized-buffer-spec.coffee
index 51d48b5eb..626092830 100644
--- a/spec/tokenized-buffer-spec.coffee
+++ b/spec/tokenized-buffer-spec.coffee
@@ -51,12 +51,12 @@ describe "TokenizedBuffer", ->
it "initially creates un-tokenized screen lines, then tokenizes lines chunk at a time in the background", ->
line0 = tokenizedBuffer.tokenizedLineForRow(0)
expect(line0.tokens.length).toBe 1
- expect(line0.tokens[0]).toEqual(value: line0.text, scopes: ['source.js'])
+ expect(line0.tokens[0]).toEqual(value: line0.text, scopeDescriptor: ['source.js'])
line11 = tokenizedBuffer.tokenizedLineForRow(11)
expect(line11.tokens.length).toBe 2
- expect(line11.tokens[0]).toEqual(value: " ", scopes: ['source.js'], isAtomic: true)
- expect(line11.tokens[1]).toEqual(value: "return sort(Array.apply(this, arguments));", scopes: ['source.js'])
+ expect(line11.tokens[0]).toEqual(value: " ", scopeDescriptor: ['source.js'], isAtomic: true)
+ expect(line11.tokens[1]).toEqual(value: "return sort(Array.apply(this, arguments));", scopeDescriptor: ['source.js'])
# background tokenization has not begun
expect(tokenizedBuffer.tokenizedLineForRow(0).ruleStack).toBeUndefined()
@@ -149,10 +149,10 @@ describe "TokenizedBuffer", ->
it "updates tokens to reflect the change", ->
buffer.setTextInRange([[0, 0], [2, 0]], "foo()\n7\n")
- expect(tokenizedBuffer.tokenizedLineForRow(0).tokens[1]).toEqual(value: '(', scopes: ['source.js', 'meta.brace.round.js'])
- expect(tokenizedBuffer.tokenizedLineForRow(1).tokens[0]).toEqual(value: '7', scopes: ['source.js', 'constant.numeric.js'])
+ expect(tokenizedBuffer.tokenizedLineForRow(0).tokens[1]).toEqual(value: '(', scopeDescriptor: ['source.js', 'meta.brace.round.js'])
+ expect(tokenizedBuffer.tokenizedLineForRow(1).tokens[0]).toEqual(value: '7', scopeDescriptor: ['source.js', 'constant.numeric.js'])
# line 2 is unchanged
- expect(tokenizedBuffer.tokenizedLineForRow(2).tokens[2]).toEqual(value: 'if', scopes: ['source.js', 'keyword.control.js'])
+ expect(tokenizedBuffer.tokenizedLineForRow(2).tokens[2]).toEqual(value: 'if', scopeDescriptor: ['source.js', 'keyword.control.js'])
expect(changeHandler).toHaveBeenCalled()
[event] = changeHandler.argsForCall[0]
@@ -164,7 +164,7 @@ describe "TokenizedBuffer", ->
buffer.insert([5, 30], '/* */')
changeHandler.reset()
buffer.insert([2, 0], '/*')
- expect(tokenizedBuffer.tokenizedLineForRow(3).tokens[0].scopes).toEqual ['source.js']
+ expect(tokenizedBuffer.tokenizedLineForRow(3).tokens[0].scopeDescriptor).toEqual ['source.js']
expect(changeHandler).toHaveBeenCalled()
[event] = changeHandler.argsForCall[0]
delete event.bufferChange
@@ -172,9 +172,9 @@ describe "TokenizedBuffer", ->
changeHandler.reset()
advanceClock()
- expect(tokenizedBuffer.tokenizedLineForRow(3).tokens[0].scopes).toEqual ['source.js', 'comment.block.js']
- expect(tokenizedBuffer.tokenizedLineForRow(4).tokens[0].scopes).toEqual ['source.js', 'comment.block.js']
- expect(tokenizedBuffer.tokenizedLineForRow(5).tokens[0].scopes).toEqual ['source.js', 'comment.block.js']
+ expect(tokenizedBuffer.tokenizedLineForRow(3).tokens[0].scopeDescriptor).toEqual ['source.js', 'comment.block.js']
+ expect(tokenizedBuffer.tokenizedLineForRow(4).tokens[0].scopeDescriptor).toEqual ['source.js', 'comment.block.js']
+ expect(tokenizedBuffer.tokenizedLineForRow(5).tokens[0].scopeDescriptor).toEqual ['source.js', 'comment.block.js']
expect(changeHandler).toHaveBeenCalled()
[event] = changeHandler.argsForCall[0]
delete event.bufferChange
@@ -185,23 +185,23 @@ describe "TokenizedBuffer", ->
buffer.insert([5, 0], '*/')
buffer.insert([1, 0], 'var ')
- expect(tokenizedBuffer.tokenizedLineForRow(1).tokens[0].scopes).toEqual ['source.js', 'comment.block.js']
+ expect(tokenizedBuffer.tokenizedLineForRow(1).tokens[0].scopeDescriptor).toEqual ['source.js', 'comment.block.js']
describe "when lines are both updated and removed", ->
it "updates tokens to reflect the change", ->
buffer.setTextInRange([[1, 0], [3, 0]], "foo()")
# previous line 0 remains
- expect(tokenizedBuffer.tokenizedLineForRow(0).tokens[0]).toEqual(value: 'var', scopes: ['source.js', 'storage.modifier.js'])
+ expect(tokenizedBuffer.tokenizedLineForRow(0).tokens[0]).toEqual(value: 'var', scopeDescriptor: ['source.js', 'storage.modifier.js'])
# previous line 3 should be combined with input to form line 1
- expect(tokenizedBuffer.tokenizedLineForRow(1).tokens[0]).toEqual(value: 'foo', scopes: ['source.js'])
- expect(tokenizedBuffer.tokenizedLineForRow(1).tokens[6]).toEqual(value: '=', scopes: ['source.js', 'keyword.operator.js'])
+ expect(tokenizedBuffer.tokenizedLineForRow(1).tokens[0]).toEqual(value: 'foo', scopeDescriptor: ['source.js'])
+ expect(tokenizedBuffer.tokenizedLineForRow(1).tokens[6]).toEqual(value: '=', scopeDescriptor: ['source.js', 'keyword.operator.js'])
# lines below deleted regions should be shifted upward
- expect(tokenizedBuffer.tokenizedLineForRow(2).tokens[2]).toEqual(value: 'while', scopes: ['source.js', 'keyword.control.js'])
- expect(tokenizedBuffer.tokenizedLineForRow(3).tokens[4]).toEqual(value: '=', scopes: ['source.js', 'keyword.operator.js'])
- expect(tokenizedBuffer.tokenizedLineForRow(4).tokens[4]).toEqual(value: '<', scopes: ['source.js', 'keyword.operator.js'])
+ expect(tokenizedBuffer.tokenizedLineForRow(2).tokens[2]).toEqual(value: 'while', scopeDescriptor: ['source.js', 'keyword.control.js'])
+ expect(tokenizedBuffer.tokenizedLineForRow(3).tokens[4]).toEqual(value: '=', scopeDescriptor: ['source.js', 'keyword.operator.js'])
+ expect(tokenizedBuffer.tokenizedLineForRow(4).tokens[4]).toEqual(value: '<', scopeDescriptor: ['source.js', 'keyword.operator.js'])
expect(changeHandler).toHaveBeenCalled()
[event] = changeHandler.argsForCall[0]
@@ -214,8 +214,8 @@ describe "TokenizedBuffer", ->
changeHandler.reset()
buffer.setTextInRange([[2, 0], [3, 0]], '/*')
- expect(tokenizedBuffer.tokenizedLineForRow(2).tokens[0].scopes).toEqual ['source.js', 'comment.block.js', 'punctuation.definition.comment.js']
- expect(tokenizedBuffer.tokenizedLineForRow(3).tokens[0].scopes).toEqual ['source.js']
+ expect(tokenizedBuffer.tokenizedLineForRow(2).tokens[0].scopeDescriptor).toEqual ['source.js', 'comment.block.js', 'punctuation.definition.comment.js']
+ expect(tokenizedBuffer.tokenizedLineForRow(3).tokens[0].scopeDescriptor).toEqual ['source.js']
expect(changeHandler).toHaveBeenCalled()
[event] = changeHandler.argsForCall[0]
delete event.bufferChange
@@ -223,8 +223,8 @@ describe "TokenizedBuffer", ->
changeHandler.reset()
advanceClock()
- expect(tokenizedBuffer.tokenizedLineForRow(3).tokens[0].scopes).toEqual ['source.js', 'comment.block.js']
- expect(tokenizedBuffer.tokenizedLineForRow(4).tokens[0].scopes).toEqual ['source.js', 'comment.block.js']
+ expect(tokenizedBuffer.tokenizedLineForRow(3).tokens[0].scopeDescriptor).toEqual ['source.js', 'comment.block.js']
+ expect(tokenizedBuffer.tokenizedLineForRow(4).tokens[0].scopeDescriptor).toEqual ['source.js', 'comment.block.js']
expect(changeHandler).toHaveBeenCalled()
[event] = changeHandler.argsForCall[0]
delete event.bufferChange
@@ -235,19 +235,19 @@ describe "TokenizedBuffer", ->
buffer.setTextInRange([[1, 0], [2, 0]], "foo()\nbar()\nbaz()\nquux()")
# previous line 0 remains
- expect(tokenizedBuffer.tokenizedLineForRow(0).tokens[0]).toEqual( value: 'var', scopes: ['source.js', 'storage.modifier.js'])
+ expect(tokenizedBuffer.tokenizedLineForRow(0).tokens[0]).toEqual( value: 'var', scopeDescriptor: ['source.js', 'storage.modifier.js'])
# 3 new lines inserted
- expect(tokenizedBuffer.tokenizedLineForRow(1).tokens[0]).toEqual(value: 'foo', scopes: ['source.js'])
- expect(tokenizedBuffer.tokenizedLineForRow(2).tokens[0]).toEqual(value: 'bar', scopes: ['source.js'])
- expect(tokenizedBuffer.tokenizedLineForRow(3).tokens[0]).toEqual(value: 'baz', scopes: ['source.js'])
+ expect(tokenizedBuffer.tokenizedLineForRow(1).tokens[0]).toEqual(value: 'foo', scopeDescriptor: ['source.js'])
+ expect(tokenizedBuffer.tokenizedLineForRow(2).tokens[0]).toEqual(value: 'bar', scopeDescriptor: ['source.js'])
+ expect(tokenizedBuffer.tokenizedLineForRow(3).tokens[0]).toEqual(value: 'baz', scopeDescriptor: ['source.js'])
# previous line 2 is joined with quux() on line 4
- expect(tokenizedBuffer.tokenizedLineForRow(4).tokens[0]).toEqual(value: 'quux', scopes: ['source.js'])
- expect(tokenizedBuffer.tokenizedLineForRow(4).tokens[4]).toEqual(value: 'if', scopes: ['source.js', 'keyword.control.js'])
+ expect(tokenizedBuffer.tokenizedLineForRow(4).tokens[0]).toEqual(value: 'quux', scopeDescriptor: ['source.js'])
+ expect(tokenizedBuffer.tokenizedLineForRow(4).tokens[4]).toEqual(value: 'if', scopeDescriptor: ['source.js', 'keyword.control.js'])
# previous line 3 is pushed down to become line 5
- expect(tokenizedBuffer.tokenizedLineForRow(5).tokens[4]).toEqual(value: '=', scopes: ['source.js', 'keyword.operator.js'])
+ expect(tokenizedBuffer.tokenizedLineForRow(5).tokens[4]).toEqual(value: '=', scopeDescriptor: ['source.js', 'keyword.operator.js'])
expect(changeHandler).toHaveBeenCalled()
[event] = changeHandler.argsForCall[0]
@@ -264,17 +264,17 @@ describe "TokenizedBuffer", ->
[event] = changeHandler.argsForCall[0]
delete event.bufferChange
expect(event).toEqual(start: 2, end: 2, delta: 2)
- expect(tokenizedBuffer.tokenizedLineForRow(2).tokens[0].scopes).toEqual ['source.js', 'comment.block.js', 'punctuation.definition.comment.js']
- expect(tokenizedBuffer.tokenizedLineForRow(3).tokens[0].scopes).toEqual ['source.js', 'comment.block.js']
- expect(tokenizedBuffer.tokenizedLineForRow(4).tokens[0].scopes).toEqual ['source.js', 'comment.block.js']
- expect(tokenizedBuffer.tokenizedLineForRow(5).tokens[0].scopes).toEqual ['source.js']
+ expect(tokenizedBuffer.tokenizedLineForRow(2).tokens[0].scopeDescriptor).toEqual ['source.js', 'comment.block.js', 'punctuation.definition.comment.js']
+ expect(tokenizedBuffer.tokenizedLineForRow(3).tokens[0].scopeDescriptor).toEqual ['source.js', 'comment.block.js']
+ expect(tokenizedBuffer.tokenizedLineForRow(4).tokens[0].scopeDescriptor).toEqual ['source.js', 'comment.block.js']
+ expect(tokenizedBuffer.tokenizedLineForRow(5).tokens[0].scopeDescriptor).toEqual ['source.js']
changeHandler.reset()
advanceClock() # tokenize invalidated lines in background
- expect(tokenizedBuffer.tokenizedLineForRow(5).tokens[0].scopes).toEqual ['source.js', 'comment.block.js']
- expect(tokenizedBuffer.tokenizedLineForRow(6).tokens[0].scopes).toEqual ['source.js', 'comment.block.js']
- expect(tokenizedBuffer.tokenizedLineForRow(7).tokens[0].scopes).toEqual ['source.js', 'comment.block.js']
- expect(tokenizedBuffer.tokenizedLineForRow(8).tokens[0].scopes).not.toBe ['source.js', 'comment.block.js']
+ expect(tokenizedBuffer.tokenizedLineForRow(5).tokens[0].scopeDescriptor).toEqual ['source.js', 'comment.block.js']
+ expect(tokenizedBuffer.tokenizedLineForRow(6).tokens[0].scopeDescriptor).toEqual ['source.js', 'comment.block.js']
+ expect(tokenizedBuffer.tokenizedLineForRow(7).tokens[0].scopeDescriptor).toEqual ['source.js', 'comment.block.js']
+ expect(tokenizedBuffer.tokenizedLineForRow(8).tokens[0].scopeDescriptor).not.toBe ['source.js', 'comment.block.js']
expect(changeHandler).toHaveBeenCalled()
[event] = changeHandler.argsForCall[0]
@@ -343,7 +343,7 @@ describe "TokenizedBuffer", ->
expect(tokens[0].value).toBe "#"
expect(tokens[1].value).toBe " Econ 101"
expect(tokens[2].value).toBe tabAsSpaces
- expect(tokens[2].scopes).toEqual tokens[1].scopes
+ expect(tokens[2].scopeDescriptor).toEqual tokens[1].scopeDescriptor
expect(tokens[2].isAtomic).toBeTruthy()
expect(tokens[3].value).toBe ""
@@ -526,7 +526,7 @@ describe "TokenizedBuffer", ->
fullyTokenize(tokenizedBuffer)
{tokens} = tokenizedBuffer.tokenizedLineForRow(0)
- expect(tokens[0]).toEqual value: "
", scopes: ["text.html.ruby"]
+ expect(tokens[0]).toEqual value: "
", scopeDescriptor: ["text.html.ruby"]
waitsForPromise ->
atom.packages.activatePackage('language-html')
@@ -534,7 +534,7 @@ describe "TokenizedBuffer", ->
runs ->
fullyTokenize(tokenizedBuffer)
{tokens} = tokenizedBuffer.tokenizedLineForRow(0)
- expect(tokens[0]).toEqual value: '<', scopes: ["text.html.ruby","meta.tag.block.any.html","punctuation.definition.tag.begin.html"]
+ expect(tokens[0]).toEqual value: '<', scopeDescriptor: ["text.html.ruby","meta.tag.block.any.html","punctuation.definition.tag.begin.html"]
describe ".tokenForPosition(position)", ->
afterEach ->
@@ -545,9 +545,9 @@ describe "TokenizedBuffer", ->
buffer = atom.project.bufferForPathSync('sample.js')
tokenizedBuffer = new TokenizedBuffer({buffer})
fullyTokenize(tokenizedBuffer)
- expect(tokenizedBuffer.tokenForPosition([1,0]).scopes).toEqual ["source.js"]
- expect(tokenizedBuffer.tokenForPosition([1,1]).scopes).toEqual ["source.js"]
- expect(tokenizedBuffer.tokenForPosition([1,2]).scopes).toEqual ["source.js", "storage.modifier.js"]
+ expect(tokenizedBuffer.tokenForPosition([1,0]).scopeDescriptor).toEqual ["source.js"]
+ expect(tokenizedBuffer.tokenForPosition([1,1]).scopeDescriptor).toEqual ["source.js"]
+ expect(tokenizedBuffer.tokenForPosition([1,2]).scopeDescriptor).toEqual ["source.js", "storage.modifier.js"]
describe ".bufferRangeForScopeAtPosition(selector, position)", ->
beforeEach ->
diff --git a/spec/tokenized-line-spec.coffee b/spec/tokenized-line-spec.coffee
index ec03524be..8604cc336 100644
--- a/spec/tokenized-line-spec.coffee
+++ b/spec/tokenized-line-spec.coffee
@@ -19,16 +19,16 @@ describe "TokenizedLine", ->
expect(editor.tokenizedLineForScreenRow(2).isOnlyWhitespace()).toBe false
describe "::getScopeTree()", ->
- it "returns a tree whose inner nodes are scopes and whose leaf nodes are tokens in those scopes", ->
+ it "returns a tree whose inner nodes are scopeDescriptor and whose leaf nodes are tokens in those scopeDescriptor", ->
[tokens, tokenIndex] = []
- ensureValidScopeTree = (scopeTree, scopes=[]) ->
+ ensureValidScopeTree = (scopeTree, scopeDescriptor=[]) ->
if scopeTree.children?
for child in scopeTree.children
- ensureValidScopeTree(child, scopes.concat([scopeTree.scope]))
+ ensureValidScopeTree(child, scopeDescriptor.concat([scopeTree.scope]))
else
expect(scopeTree).toBe tokens[tokenIndex++]
- expect(scopes).toEqual scopeTree.scopes
+ expect(scopeDescriptor).toEqual scopeTree.scopeDescriptor
waitsForPromise ->
atom.project.open('coffee.coffee').then (o) -> editor = o
diff --git a/src/display-buffer.coffee b/src/display-buffer.coffee
index 4a4fbf709..d64af3270 100644
--- a/src/display-buffer.coffee
+++ b/src/display-buffer.coffee
@@ -650,7 +650,7 @@ class DisplayBuffer extends Model
left = 0
column = 0
for token in @tokenizedLineForScreenRow(targetRow).tokens
- charWidths = @getScopedCharWidths(token.scopes)
+ charWidths = @getScopedCharWidths(token.scopeDescriptor)
for char in token.value
return {top, left} if column is targetColumn
left += charWidths[char] ? defaultCharWidth unless char is '\0'
@@ -668,7 +668,7 @@ class DisplayBuffer extends Model
left = 0
column = 0
for token in @tokenizedLineForScreenRow(row).tokens
- charWidths = @getScopedCharWidths(token.scopes)
+ charWidths = @getScopedCharWidths(token.scopeDescriptor)
for char in token.value
charWidth = charWidths[char] ? defaultCharWidth
break if targetLeft <= left + (charWidth / 2)
@@ -752,7 +752,7 @@ class DisplayBuffer extends Model
[bufferRow] = @rowMap.bufferRowRangeForScreenRow(row)
new Point(bufferRow, @screenLines[row].bufferColumnForScreenColumn(column))
- # Retrieves the grammar's token scopes for a buffer position.
+ # Retrieves the scope descriptor of the grammar's token at a buffer position.
#
# bufferPosition - A {Point} in the {TextBuffer}
#
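
The two hunks above only rename the argument handed to the width cache. As a rough sketch of how that cache is consumed when converting a screen position to pixels — `tokenizedLine`, `editor`, and `defaultCharWidth` are stand-ins for the locals in the hunk, not part of the patch:

```coffee
# Sketch only: widths are cached per scope descriptor, so the lookup table
# switches between tokens rather than per character.
left = 0
for token in tokenizedLine.tokens
  charWidths = editor.getScopedCharWidths(token.scopeDescriptor)
  for char in token.value
    left += charWidths[char] ? defaultCharWidth
```
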
diff --git a/src/language-mode.coffee b/src/language-mode.coffee
index b92cdefed..6716ac836 100644
--- a/src/language-mode.coffee
+++ b/src/language-mode.coffee
@@ -29,8 +29,8 @@ class LanguageMode
#
# Returns an {Array} of the commented {Ranges}.
toggleLineCommentsForBufferRows: (start, end) ->
- scopes = @editor.scopeDescriptorForBufferPosition([start, 0])
- properties = atom.config.settingsForScopeDescriptor(scopes, 'editor.commentStart')[0]
+ scopeDescriptor = @editor.scopeDescriptorForBufferPosition([start, 0])
+ properties = atom.config.settingsForScopeDescriptor(scopeDescriptor, 'editor.commentStart')[0]
return unless properties
commentStartString = _.valueForKeyPath(properties, 'editor.commentStart')
@@ -168,12 +168,12 @@ class LanguageMode
return null unless @isFoldableAtBufferRow(bufferRow)
startIndentLevel = @editor.indentationForBufferRow(bufferRow)
- scopes = @editor.scopeDescriptorForBufferPosition([bufferRow, 0])
+ scopeDescriptor = @editor.scopeDescriptorForBufferPosition([bufferRow, 0])
for row in [(bufferRow + 1)..@editor.getLastBufferRow()]
continue if @editor.isBufferRowBlank(row)
indentation = @editor.indentationForBufferRow(row)
if indentation <= startIndentLevel
- includeRowInFold = indentation == startIndentLevel and @foldEndRegexForScopes(scopes)?.searchSync(@editor.lineTextForBufferRow(row))
+ includeRowInFold = indentation == startIndentLevel and @foldEndRegexForScopeDescriptor(scopeDescriptor)?.searchSync(@editor.lineTextForBufferRow(row))
foldEndRow = row if includeRowInFold
break
@@ -246,8 +246,8 @@ class LanguageMode
# Returns a {Number}.
suggestedIndentForBufferRow: (bufferRow) ->
currentIndentLevel = @editor.indentationForBufferRow(bufferRow)
- scopes = @editor.scopeDescriptorForBufferPosition([bufferRow, 0])
- return currentIndentLevel unless increaseIndentRegex = @increaseIndentRegexForScopes(scopes)
+ scopeDescriptor = @editor.scopeDescriptorForBufferPosition([bufferRow, 0])
+ return currentIndentLevel unless increaseIndentRegex = @increaseIndentRegexForScopeDescriptor(scopeDescriptor)
currentLine = @buffer.lineForRow(bufferRow)
precedingRow = if bufferRow > 0 then bufferRow - 1 else null
@@ -257,7 +257,7 @@ class LanguageMode
desiredIndentLevel = @editor.indentationForBufferRow(precedingRow)
desiredIndentLevel += 1 if increaseIndentRegex.testSync(precedingLine) and not @editor.isBufferRowCommented(precedingRow)
- return desiredIndentLevel unless decreaseIndentRegex = @decreaseIndentRegexForScopes(scopes)
+ return desiredIndentLevel unless decreaseIndentRegex = @decreaseIndentRegexForScopeDescriptor(scopeDescriptor)
desiredIndentLevel -= 1 if decreaseIndentRegex.testSync(currentLine)
Math.max(desiredIndentLevel, 0)
@@ -292,9 +292,9 @@ class LanguageMode
#
# bufferRow - The row {Number}
autoDecreaseIndentForBufferRow: (bufferRow) ->
- scopes = @editor.scopeDescriptorForBufferPosition([bufferRow, 0])
- increaseIndentRegex = @increaseIndentRegexForScopes(scopes)
- decreaseIndentRegex = @decreaseIndentRegexForScopes(scopes)
+ scopeDescriptor = @editor.scopeDescriptorForBufferPosition([bufferRow, 0])
+ increaseIndentRegex = @increaseIndentRegexForScopeDescriptor(scopeDescriptor)
+ decreaseIndentRegex = @decreaseIndentRegexForScopeDescriptor(scopeDescriptor)
return unless increaseIndentRegex and decreaseIndentRegex
line = @buffer.lineForRow(bufferRow)
@@ -311,15 +311,15 @@ class LanguageMode
if desiredIndentLevel >= 0 and desiredIndentLevel < currentIndentLevel
@editor.setIndentationForBufferRow(bufferRow, desiredIndentLevel)
- getRegexForProperty: (scopes, property) ->
- if pattern = atom.config.get(scopes, property)
+ getRegexForProperty: (scopeDescriptor, property) ->
+ if pattern = atom.config.get(scopeDescriptor, property)
new OnigRegExp(pattern)
- increaseIndentRegexForScopes: (scopes) ->
- @getRegexForProperty(scopes, 'editor.increaseIndentPattern')
+ increaseIndentRegexForScopeDescriptor: (scopeDescriptor) ->
+ @getRegexForProperty(scopeDescriptor, 'editor.increaseIndentPattern')
- decreaseIndentRegexForScopes: (scopes) ->
- @getRegexForProperty(scopes, 'editor.decreaseIndentPattern')
+ decreaseIndentRegexForScopeDescriptor: (scopeDescriptor) ->
+ @getRegexForProperty(scopeDescriptor, 'editor.decreaseIndentPattern')
- foldEndRegexForScopes: (scopes) ->
- @getRegexForProperty(scopes, 'editor.foldEndPattern')
+ foldEndRegexForScopeDescriptor: (scopeDescriptor) ->
+ @getRegexForProperty(scopeDescriptor, 'editor.foldEndPattern')
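
For readers unfamiliar with the renamed argument: a scope descriptor is just an array of scope names, outermost first, and it is what the scoped config lookup in `getRegexForProperty` consumes. A hedged sketch with made-up values:

```coffee
# Sketch only — the descriptor and the returned pattern are hypothetical values.
scopeDescriptor = editor.scopeDescriptorForBufferPosition([2, 0])
# => e.g. ['source.js', 'meta.function.js']
atom.config.get(scopeDescriptor, 'editor.increaseIndentPattern')
# => the most specific pattern configured for those scopes, or undefined
```
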
diff --git a/src/lines-component.coffee b/src/lines-component.coffee
index 8ccc6dcdd..911850129 100644
--- a/src/lines-component.coffee
+++ b/src/lines-component.coffee
@@ -200,7 +200,7 @@ LinesComponent = React.createClass
firstTrailingWhitespacePosition = text.search(/\s*$/)
lineIsWhitespaceOnly = firstTrailingWhitespacePosition is 0
for token in tokens
- innerHTML += @updateScopeStack(scopeStack, token.scopes)
+ innerHTML += @updateScopeStack(scopeStack, token.scopeDescriptor)
hasIndentGuide = not mini and showIndentGuide and (token.hasLeadingWhitespace() or (token.hasTrailingWhitespace() and lineIsWhitespaceOnly))
innerHTML += token.getValueAsHtml({hasIndentGuide})
@@ -217,20 +217,20 @@ LinesComponent = React.createClass
html += "#{invisible}"
html
- updateScopeStack: (scopeStack, desiredScopes) ->
+ updateScopeStack: (scopeStack, desiredScopeDescriptor) ->
html = ""
# Find a common prefix
- for scope, i in desiredScopes
- break unless scopeStack[i] is desiredScopes[i]
+ for scope, i in desiredScopeDescriptor
+ break unless scopeStack[i] is desiredScopeDescriptor[i]
- # Pop scopes until we're at the common prefx
+ # Pop scopes until we're at the common prefix
until scopeStack.length is i
html += @popScope(scopeStack)
- # Push onto common prefix until scopeStack equals desiredScopes
- for j in [i...desiredScopes.length]
- html += @pushScope(scopeStack, desiredScopes[j])
+ # Push onto common prefix until scopeStack equals desiredScopeDescriptor
+ for j in [i...desiredScopeDescriptor.length]
+ html += @pushScope(scopeStack, desiredScopeDescriptor[j])
html
@@ -308,8 +308,8 @@ LinesComponent = React.createClass
iterator = null
charIndex = 0
- for {value, scopes}, tokenIndex in tokenizedLine.tokens
- charWidths = editor.getScopedCharWidths(scopes)
+ for {value, scopeDescriptor}, tokenIndex in tokenizedLine.tokens
+ charWidths = editor.getScopedCharWidths(scopeDescriptor)
for char in value
continue if char is '\0'
@@ -331,7 +331,7 @@ LinesComponent = React.createClass
rangeForMeasurement.setStart(textNode, i)
rangeForMeasurement.setEnd(textNode, i + 1)
charWidth = rangeForMeasurement.getBoundingClientRect().width
- editor.setScopedCharWidth(scopes, char, charWidth)
+ editor.setScopedCharWidth(scopeDescriptor, char, charWidth)
charIndex++
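
To make the common-prefix walk in `updateScopeStack` concrete, here is a hedged example with invented scope names; only the differing tail of the stack is closed and reopened between consecutive tokens:

```coffee
# Sketch only: the shared prefix ['source.js'] stays open.
previousScopes = ['source.js', 'meta.function.js']
nextScopes     = ['source.js', 'string.quoted.double.js']
i = 0
i++ while i < nextScopes.length and previousScopes[i] is nextScopes[i]
# i is 1: pop 'meta.function.js' (popScope emits the closing markup),
# then push 'string.quoted.double.js' (pushScope emits the opening markup).
```
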
diff --git a/src/syntax.coffee b/src/syntax.coffee
index 8c04a424a..f14f34dad 100644
--- a/src/syntax.coffee
+++ b/src/syntax.coffee
@@ -32,7 +32,7 @@ class Syntax extends GrammarRegistry
serialize: ->
{deserializer: @constructor.name, @grammarOverridesByPath}
- createToken: (value, scopes) -> new Token({value, scopes})
+ createToken: (value, scopeDescriptor) -> new Token({value, scopeDescriptor})
# Deprecated: Used by settings-view to display snippets for packages
@::accessor 'propertyStore', ->
diff --git a/src/text-editor.coffee b/src/text-editor.coffee
index b4a70d235..62bfe3206 100644
--- a/src/text-editor.coffee
+++ b/src/text-editor.coffee
@@ -2376,7 +2376,7 @@ class TextEditor extends Model
getRootScopeDescriptor: ->
@displayBuffer.getRootScopeDescriptor()
- # Essential: Get the syntactic scopes for the given position in buffer
+ # Essential: Get the syntactic scope descriptor for the given position in buffer
# coordinates. Useful with {Config::get}.
#
# For example, if called with a position inside the parameter list of an
@@ -2404,9 +2404,9 @@ class TextEditor extends Model
# Extended: Determine if the given row is entirely a comment
isBufferRowCommented: (bufferRow) ->
if match = @lineTextForBufferRow(bufferRow).match(/\S/)
- scopes = @tokenForBufferPosition([bufferRow, match.index]).scopes
+ scopeDescriptor = @tokenForBufferPosition([bufferRow, match.index]).scopeDescriptor
@commentScopeSelector ?= new TextMateScopeSelector('comment.*')
- @commentScopeSelector.matches(scopes)
+ @commentScopeSelector.matches(scopeDescriptor)
logCursorScope: ->
console.log @getLastCursor().getScopeDescriptor()
@@ -2416,10 +2416,7 @@ class TextEditor extends Model
scopesAtCursor: ->
deprecate 'Use editor.getLastCursor().scopesAtCursor() instead'
- if cursor = @getLastCursor()
- cursor.getScopes()
- else
- @getRootScopeDescriptor()
+ @getLastCursor().getScopeDescriptor()
getCursorScopes: ->
deprecate 'Use editor.getLastCursor().scopesAtCursor() instead'
@scopesAtCursor()
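
A small usage sketch of the comment check above; the buffer contents and resulting scopes are hypothetical, but the flow matches the hunk: the first non-whitespace token's scope descriptor is matched against the cached `comment.*` selector.

```coffee
# Sketch only (hypothetical buffer contents and scopes).
editor.setText "  // a line comment"
editor.tokenForBufferPosition([0, 2]).scopeDescriptor
# => e.g. ['source.js', 'comment.line.double-slash.js']
editor.isBufferRowCommented(0)
# => true, because the descriptor matches the 'comment.*' scope selector
```
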
diff --git a/src/token.coffee b/src/token.coffee
index a36117ea8..ae1bc11b2 100644
--- a/src/token.coffee
+++ b/src/token.coffee
@@ -13,27 +13,27 @@ module.exports =
class Token
value: null
hasPairedCharacter: false
- scopes: null
+ scopeDescriptor: null
isAtomic: null
isHardTab: null
firstNonWhitespaceIndex: null
firstTrailingWhitespaceIndex: null
hasInvisibleCharacters: false
- constructor: ({@value, @scopes, @isAtomic, @bufferDelta, @isHardTab}) ->
+ constructor: ({@value, @scopeDescriptor, @isAtomic, @bufferDelta, @isHardTab}) ->
@screenDelta = @value.length
@bufferDelta ?= @screenDelta
@hasPairedCharacter = textUtils.hasPairedCharacter(@value)
isEqual: (other) ->
- @value == other.value and _.isEqual(@scopes, other.scopes) and !!@isAtomic == !!other.isAtomic
+ @value == other.value and _.isEqual(@scopeDescriptor, other.scopeDescriptor) and !!@isAtomic == !!other.isAtomic
isBracket: ->
- /^meta\.brace\b/.test(_.last(@scopes))
+ /^meta\.brace\b/.test(_.last(@scopeDescriptor))
splitAt: (splitIndex) ->
- leftToken = new Token(value: @value.substring(0, splitIndex), scopes: @scopes)
- rightToken = new Token(value: @value.substring(splitIndex), scopes: @scopes)
+ leftToken = new Token(value: @value.substring(0, splitIndex), scopeDescriptor: @scopeDescriptor)
+ rightToken = new Token(value: @value.substring(splitIndex), scopeDescriptor: @scopeDescriptor)
if @firstNonWhitespaceIndex?
leftToken.firstNonWhitespaceIndex = Math.min(splitIndex, @firstNonWhitespaceIndex)
@@ -92,7 +92,7 @@ class Token
else
breakOutLeadingSoftTabs = false
value = match[0]
- token = new Token({value, @scopes})
+ token = new Token({value, @scopeDescriptor})
column += token.value.length
outputTokens.push(token)
@@ -106,7 +106,7 @@ class Token
while index < @value.length
if textUtils.isPairedCharacter(@value, index)
if nonPairStart isnt index
- outputTokens.push(new Token({value: @value[nonPairStart...index], @scopes}))
+ outputTokens.push(new Token({value: @value[nonPairStart...index], @scopeDescriptor}))
outputTokens.push(@buildPairedCharacterToken(@value, index))
index += 2
nonPairStart = index
@@ -114,14 +114,14 @@ class Token
index++
if nonPairStart isnt index
- outputTokens.push(new Token({value: @value[nonPairStart...index], @scopes}))
+ outputTokens.push(new Token({value: @value[nonPairStart...index], @scopeDescriptor}))
outputTokens
buildPairedCharacterToken: (value, index) ->
new Token(
value: value[index..index + 1]
- scopes: @scopes
+ scopeDescriptor: @scopeDescriptor
isAtomic: true
)
@@ -135,7 +135,7 @@ class Token
tabStop = tabLength - (column % tabLength)
new Token(
value: _.multiplyString(" ", tabStop)
- scopes: @scopes
+ scopeDescriptor: @scopeDescriptor
bufferDelta: if isHardTab then 1 else tabStop
isAtomic: true
isHardTab: isHardTab
@@ -146,7 +146,7 @@ class Token
matchesScopeSelector: (selector) ->
targetClasses = selector.replace(StartDotRegex, '').split('.')
- _.any @scopes, (scope) ->
+ _.any @scopeDescriptor, (scope) ->
scopeClasses = scope.split('.')
_.isSubset(targetClasses, scopeClasses)
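
A minimal sketch of building a token with the renamed constructor key; the require path is assumed from the file shown above.

```coffee
# Sketch only.
Token = require './token'   # path assumed from src/token.coffee
token = new Token(value: 'var', scopeDescriptor: ['source.js', 'storage.modifier.js'])
token.screenDelta                               # => 3, the length of 'var'
token.matchesScopeSelector('storage.modifier')  # => true: ['storage', 'modifier'] is a
                                                #    subset of 'storage.modifier.js' split on '.'
```
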
diff --git a/src/tokenized-buffer.coffee b/src/tokenized-buffer.coffee
index 650581719..213c63364 100644
--- a/src/tokenized-buffer.coffee
+++ b/src/tokenized-buffer.coffee
@@ -105,7 +105,7 @@ class TokenizedBuffer extends Model
hasTokenForSelector: (selector) ->
for {tokens} in @tokenizedLines
for token in tokens
- return true if selector.matches(token.scopes)
+ return true if selector.matches(token.scopeDescriptor)
false
retokenizeLines: ->
@@ -247,7 +247,7 @@ class TokenizedBuffer extends Model
buildPlaceholderTokenizedLineForRow: (row) ->
line = @buffer.lineForRow(row)
- tokens = [new Token(value: line, scopes: [@grammar.scopeName])]
+ tokens = [new Token(value: line, scopeDescriptor: [@grammar.scopeName])]
tabLength = @getTabLength()
indentLevel = @indentLevelForRow(row)
lineEnding = @buffer.lineEndingForRow(row)
@@ -303,7 +303,7 @@ class TokenizedBuffer extends Model
0
scopeDescriptorForPosition: (position) ->
- @tokenForPosition(position).scopes
+ @tokenForPosition(position).scopeDescriptor
tokenForPosition: (position) ->
{row, column} = Point.fromObject(position)
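
Tying the last hunk back to the spec assertions earlier: `scopeDescriptorForPosition` simply forwards the token's renamed property. The positions below mirror the spec, not new behavior.

```coffee
# Sketch only, mirroring the tokenForPosition expectations in the spec above.
tokenizedBuffer.scopeDescriptorForPosition([1, 2])
# => ['source.js', 'storage.modifier.js']
tokenizedBuffer.tokenForPosition([1, 2]).scopeDescriptor
# => the same array; scopeDescriptorForPosition is a thin wrapper around it
```
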
diff --git a/src/tokenized-line.coffee b/src/tokenized-line.coffee
index f9fa9f004..ddf91578d 100644
--- a/src/tokenized-line.coffee
+++ b/src/tokenized-line.coffee
@@ -194,9 +194,9 @@ class TokenizedLine
isComment: ->
for token in @tokens
- continue if token.scopes.length is 1
+ continue if token.scopeDescriptor.length is 1
continue if token.isOnlyWhitespace()
- for scope in token.scopes
+ for scope in token.scopeDescriptor
return true if _.contains(scope.split('.'), 'comment')
break
false
@@ -226,26 +226,26 @@ class TokenizedLine
scopeStack = []
for token in @tokens
- @updateScopeStack(scopeStack, token.scopes)
+ @updateScopeStack(scopeStack, token.scopeDescriptor)
_.last(scopeStack).children.push(token)
@scopeTree = scopeStack[0]
@updateScopeStack(scopeStack, [])
@scopeTree
- updateScopeStack: (scopeStack, desiredScopes) ->
+ updateScopeStack: (scopeStack, desiredScopeDescriptor) ->
# Find a common prefix
- for scope, i in desiredScopes
- break unless scopeStack[i]?.scope is desiredScopes[i]
+ for scope, i in desiredScopeDescriptor
+ break unless scopeStack[i]?.scope is desiredScopeDescriptor[i]
- # Pop scopes until we're at the common prefx
+ # Pop scopes until we're at the common prefix
until scopeStack.length is i
poppedScope = scopeStack.pop()
_.last(scopeStack)?.children.push(poppedScope)
- # Push onto common prefix until scopeStack equals desiredScopes
- for j in [i...desiredScopes.length]
- scopeStack.push(new Scope(desiredScopes[j]))
+ # Push onto common prefix until scopeStack equals desiredScopeDescriptor
+ for j in [i...desiredScopeDescriptor.length]
+ scopeStack.push(new Scope(desiredScopeDescriptor[j]))
class Scope
constructor: (@scope) ->
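
Finally, a hedged sketch of consuming the scope tree the hunk above builds, in the shape the TokenizedLine spec checks: inner nodes carry `scope` and `children`, while leaves are the tokens themselves, each with its full `scopeDescriptor`.

```coffee
# Sketch only: depth-first walk over TokenizedLine::getScopeTree().
printScopeTree = (node, indent = '') ->
  if node.children?
    console.log "#{indent}#{node.scope}"
    printScopeTree(child, indent + '  ') for child in node.children
  else
    console.log "#{indent}#{node.value} (#{node.scopeDescriptor.join(' ')})"

printScopeTree(editor.tokenizedLineForScreenRow(0).getScopeTree())
```
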