Merge pull request #3817 from atom/bo-rename-scopes

Rename scopes -> scopeDescriptor
Ben Ogle committed 2014-10-13 17:25:49 -07:00
14 changed files with 172 additions and 152 deletions
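
At a high level, every `scopes`-flavored property and method gains a `scopeDescriptor` counterpart, and the old names are kept as deprecated shims via grim. The most common call site, a scoped config read, looks like this after the rename (sketch only; `editor` stands for an assumed TextEditor instance):

```coffee
# The descriptor is still an array of scope names, e.g. ['source.js', 'storage.modifier.js'].
scopeDescriptor = editor.getLastCursor().getScopeDescriptor()   # was: editor.scopesAtCursor()
tabLength = atom.config.get(scopeDescriptor, 'editor.tabLength')
```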


@@ -277,7 +277,7 @@ describe "TextEditorComponent", ->
expect(leafNodes[0].classList.contains('invisible-character')).toBe true
expect(leafNodes[leafNodes.length - 1].classList.contains('invisible-character')).toBe true
it "displays newlines as their own token outside of the other tokens' scopes", ->
it "displays newlines as their own token outside of the other tokens' scopeDescriptor", ->
editor.setText "var\n"
nextAnimationFrame()
expect(component.lineNodeForScreenRow(0).innerHTML).toBe "<span class=\"source js\"><span class=\"storage modifier js\">var</span></span><span class=\"invisible-character\">#{invisibles.eol}</span>"


@@ -3650,10 +3650,10 @@ describe "TextEditor", ->
{tokens} = grammar.tokenizeLine("var i; // http://github.com")
expect(tokens[0].value).toBe "var"
expect(tokens[0].scopes).toEqual ["source.js", "storage.modifier.js"]
expect(tokens[0].scopeDescriptor).toEqual ["source.js", "storage.modifier.js"]
expect(tokens[6].value).toBe "http://github.com"
expect(tokens[6].scopes).toEqual ["source.js", "comment.line.double-slash.js", "markup.underline.link.http.hyperlink"]
expect(tokens[6].scopeDescriptor).toEqual ["source.js", "comment.line.double-slash.js", "markup.underline.link.http.hyperlink"]
describe "when the grammar is added", ->
it "retokenizes existing buffers that contain tokens that match the injection selector", ->
@@ -3665,7 +3665,7 @@ describe "TextEditor", ->
{tokens} = editor.tokenizedLineForScreenRow(0)
expect(tokens[1].value).toBe " http://github.com"
expect(tokens[1].scopes).toEqual ["source.js", "comment.line.double-slash.js"]
expect(tokens[1].scopeDescriptor).toEqual ["source.js", "comment.line.double-slash.js"]
waitsForPromise ->
atom.packages.activatePackage('language-hyperlink')
@@ -3673,7 +3673,7 @@ describe "TextEditor", ->
runs ->
{tokens} = editor.tokenizedLineForScreenRow(0)
expect(tokens[2].value).toBe "http://github.com"
expect(tokens[2].scopes).toEqual ["source.js", "comment.line.double-slash.js", "markup.underline.link.http.hyperlink"]
expect(tokens[2].scopeDescriptor).toEqual ["source.js", "comment.line.double-slash.js", "markup.underline.link.http.hyperlink"]
describe "when the grammar is updated", ->
it "retokenizes existing buffers that contain tokens that match the injection selector", ->
@@ -3685,7 +3685,7 @@ describe "TextEditor", ->
{tokens} = editor.tokenizedLineForScreenRow(0)
expect(tokens[1].value).toBe " SELECT * FROM OCTOCATS"
expect(tokens[1].scopes).toEqual ["source.js", "comment.line.double-slash.js"]
expect(tokens[1].scopeDescriptor).toEqual ["source.js", "comment.line.double-slash.js"]
waitsForPromise ->
atom.packages.activatePackage('package-with-injection-selector')
@@ -3693,7 +3693,7 @@ describe "TextEditor", ->
runs ->
{tokens} = editor.tokenizedLineForScreenRow(0)
expect(tokens[1].value).toBe " SELECT * FROM OCTOCATS"
expect(tokens[1].scopes).toEqual ["source.js", "comment.line.double-slash.js"]
expect(tokens[1].scopeDescriptor).toEqual ["source.js", "comment.line.double-slash.js"]
waitsForPromise ->
atom.packages.activatePackage('language-sql')
@@ -3701,7 +3701,7 @@ describe "TextEditor", ->
runs ->
{tokens} = editor.tokenizedLineForScreenRow(0)
expect(tokens[2].value).toBe "SELECT"
expect(tokens[2].scopes).toEqual ["source.js", "comment.line.double-slash.js", "keyword.other.DML.sql"]
expect(tokens[2].scopeDescriptor).toEqual ["source.js", "comment.line.double-slash.js", "keyword.other.DML.sql"]
describe ".normalizeTabsInBufferRange()", ->
it "normalizes tabs depending on the editor's soft tab/tab length settings", ->


@@ -51,12 +51,12 @@ describe "TokenizedBuffer", ->
it "initially creates un-tokenized screen lines, then tokenizes lines chunk at a time in the background", ->
line0 = tokenizedBuffer.tokenizedLineForRow(0)
expect(line0.tokens.length).toBe 1
expect(line0.tokens[0]).toEqual(value: line0.text, scopes: ['source.js'])
expect(line0.tokens[0]).toEqual(value: line0.text, scopeDescriptor: ['source.js'])
line11 = tokenizedBuffer.tokenizedLineForRow(11)
expect(line11.tokens.length).toBe 2
expect(line11.tokens[0]).toEqual(value: " ", scopes: ['source.js'], isAtomic: true)
expect(line11.tokens[1]).toEqual(value: "return sort(Array.apply(this, arguments));", scopes: ['source.js'])
expect(line11.tokens[0]).toEqual(value: " ", scopeDescriptor: ['source.js'], isAtomic: true)
expect(line11.tokens[1]).toEqual(value: "return sort(Array.apply(this, arguments));", scopeDescriptor: ['source.js'])
# background tokenization has not begun
expect(tokenizedBuffer.tokenizedLineForRow(0).ruleStack).toBeUndefined()
@@ -149,10 +149,10 @@ describe "TokenizedBuffer", ->
it "updates tokens to reflect the change", ->
buffer.setTextInRange([[0, 0], [2, 0]], "foo()\n7\n")
expect(tokenizedBuffer.tokenizedLineForRow(0).tokens[1]).toEqual(value: '(', scopes: ['source.js', 'meta.brace.round.js'])
expect(tokenizedBuffer.tokenizedLineForRow(1).tokens[0]).toEqual(value: '7', scopes: ['source.js', 'constant.numeric.js'])
expect(tokenizedBuffer.tokenizedLineForRow(0).tokens[1]).toEqual(value: '(', scopeDescriptor: ['source.js', 'meta.brace.round.js'])
expect(tokenizedBuffer.tokenizedLineForRow(1).tokens[0]).toEqual(value: '7', scopeDescriptor: ['source.js', 'constant.numeric.js'])
# line 2 is unchanged
expect(tokenizedBuffer.tokenizedLineForRow(2).tokens[2]).toEqual(value: 'if', scopes: ['source.js', 'keyword.control.js'])
expect(tokenizedBuffer.tokenizedLineForRow(2).tokens[2]).toEqual(value: 'if', scopeDescriptor: ['source.js', 'keyword.control.js'])
expect(changeHandler).toHaveBeenCalled()
[event] = changeHandler.argsForCall[0]
@@ -164,7 +164,7 @@ describe "TokenizedBuffer", ->
buffer.insert([5, 30], '/* */')
changeHandler.reset()
buffer.insert([2, 0], '/*')
expect(tokenizedBuffer.tokenizedLineForRow(3).tokens[0].scopes).toEqual ['source.js']
expect(tokenizedBuffer.tokenizedLineForRow(3).tokens[0].scopeDescriptor).toEqual ['source.js']
expect(changeHandler).toHaveBeenCalled()
[event] = changeHandler.argsForCall[0]
delete event.bufferChange
@@ -172,9 +172,9 @@ describe "TokenizedBuffer", ->
changeHandler.reset()
advanceClock()
expect(tokenizedBuffer.tokenizedLineForRow(3).tokens[0].scopes).toEqual ['source.js', 'comment.block.js']
expect(tokenizedBuffer.tokenizedLineForRow(4).tokens[0].scopes).toEqual ['source.js', 'comment.block.js']
expect(tokenizedBuffer.tokenizedLineForRow(5).tokens[0].scopes).toEqual ['source.js', 'comment.block.js']
expect(tokenizedBuffer.tokenizedLineForRow(3).tokens[0].scopeDescriptor).toEqual ['source.js', 'comment.block.js']
expect(tokenizedBuffer.tokenizedLineForRow(4).tokens[0].scopeDescriptor).toEqual ['source.js', 'comment.block.js']
expect(tokenizedBuffer.tokenizedLineForRow(5).tokens[0].scopeDescriptor).toEqual ['source.js', 'comment.block.js']
expect(changeHandler).toHaveBeenCalled()
[event] = changeHandler.argsForCall[0]
delete event.bufferChange
@@ -185,23 +185,23 @@ describe "TokenizedBuffer", ->
buffer.insert([5, 0], '*/')
buffer.insert([1, 0], 'var ')
expect(tokenizedBuffer.tokenizedLineForRow(1).tokens[0].scopes).toEqual ['source.js', 'comment.block.js']
expect(tokenizedBuffer.tokenizedLineForRow(1).tokens[0].scopeDescriptor).toEqual ['source.js', 'comment.block.js']
describe "when lines are both updated and removed", ->
it "updates tokens to reflect the change", ->
buffer.setTextInRange([[1, 0], [3, 0]], "foo()")
# previous line 0 remains
expect(tokenizedBuffer.tokenizedLineForRow(0).tokens[0]).toEqual(value: 'var', scopes: ['source.js', 'storage.modifier.js'])
expect(tokenizedBuffer.tokenizedLineForRow(0).tokens[0]).toEqual(value: 'var', scopeDescriptor: ['source.js', 'storage.modifier.js'])
# previous line 3 should be combined with input to form line 1
expect(tokenizedBuffer.tokenizedLineForRow(1).tokens[0]).toEqual(value: 'foo', scopes: ['source.js'])
expect(tokenizedBuffer.tokenizedLineForRow(1).tokens[6]).toEqual(value: '=', scopes: ['source.js', 'keyword.operator.js'])
expect(tokenizedBuffer.tokenizedLineForRow(1).tokens[0]).toEqual(value: 'foo', scopeDescriptor: ['source.js'])
expect(tokenizedBuffer.tokenizedLineForRow(1).tokens[6]).toEqual(value: '=', scopeDescriptor: ['source.js', 'keyword.operator.js'])
# lines below deleted regions should be shifted upward
expect(tokenizedBuffer.tokenizedLineForRow(2).tokens[2]).toEqual(value: 'while', scopes: ['source.js', 'keyword.control.js'])
expect(tokenizedBuffer.tokenizedLineForRow(3).tokens[4]).toEqual(value: '=', scopes: ['source.js', 'keyword.operator.js'])
expect(tokenizedBuffer.tokenizedLineForRow(4).tokens[4]).toEqual(value: '<', scopes: ['source.js', 'keyword.operator.js'])
expect(tokenizedBuffer.tokenizedLineForRow(2).tokens[2]).toEqual(value: 'while', scopeDescriptor: ['source.js', 'keyword.control.js'])
expect(tokenizedBuffer.tokenizedLineForRow(3).tokens[4]).toEqual(value: '=', scopeDescriptor: ['source.js', 'keyword.operator.js'])
expect(tokenizedBuffer.tokenizedLineForRow(4).tokens[4]).toEqual(value: '<', scopeDescriptor: ['source.js', 'keyword.operator.js'])
expect(changeHandler).toHaveBeenCalled()
[event] = changeHandler.argsForCall[0]
@@ -214,8 +214,8 @@ describe "TokenizedBuffer", ->
changeHandler.reset()
buffer.setTextInRange([[2, 0], [3, 0]], '/*')
expect(tokenizedBuffer.tokenizedLineForRow(2).tokens[0].scopes).toEqual ['source.js', 'comment.block.js', 'punctuation.definition.comment.js']
expect(tokenizedBuffer.tokenizedLineForRow(3).tokens[0].scopes).toEqual ['source.js']
expect(tokenizedBuffer.tokenizedLineForRow(2).tokens[0].scopeDescriptor).toEqual ['source.js', 'comment.block.js', 'punctuation.definition.comment.js']
expect(tokenizedBuffer.tokenizedLineForRow(3).tokens[0].scopeDescriptor).toEqual ['source.js']
expect(changeHandler).toHaveBeenCalled()
[event] = changeHandler.argsForCall[0]
delete event.bufferChange
@@ -223,8 +223,8 @@ describe "TokenizedBuffer", ->
changeHandler.reset()
advanceClock()
expect(tokenizedBuffer.tokenizedLineForRow(3).tokens[0].scopes).toEqual ['source.js', 'comment.block.js']
expect(tokenizedBuffer.tokenizedLineForRow(4).tokens[0].scopes).toEqual ['source.js', 'comment.block.js']
expect(tokenizedBuffer.tokenizedLineForRow(3).tokens[0].scopeDescriptor).toEqual ['source.js', 'comment.block.js']
expect(tokenizedBuffer.tokenizedLineForRow(4).tokens[0].scopeDescriptor).toEqual ['source.js', 'comment.block.js']
expect(changeHandler).toHaveBeenCalled()
[event] = changeHandler.argsForCall[0]
delete event.bufferChange
@@ -235,19 +235,19 @@ describe "TokenizedBuffer", ->
buffer.setTextInRange([[1, 0], [2, 0]], "foo()\nbar()\nbaz()\nquux()")
# previous line 0 remains
expect(tokenizedBuffer.tokenizedLineForRow(0).tokens[0]).toEqual( value: 'var', scopes: ['source.js', 'storage.modifier.js'])
expect(tokenizedBuffer.tokenizedLineForRow(0).tokens[0]).toEqual( value: 'var', scopeDescriptor: ['source.js', 'storage.modifier.js'])
# 3 new lines inserted
expect(tokenizedBuffer.tokenizedLineForRow(1).tokens[0]).toEqual(value: 'foo', scopes: ['source.js'])
expect(tokenizedBuffer.tokenizedLineForRow(2).tokens[0]).toEqual(value: 'bar', scopes: ['source.js'])
expect(tokenizedBuffer.tokenizedLineForRow(3).tokens[0]).toEqual(value: 'baz', scopes: ['source.js'])
expect(tokenizedBuffer.tokenizedLineForRow(1).tokens[0]).toEqual(value: 'foo', scopeDescriptor: ['source.js'])
expect(tokenizedBuffer.tokenizedLineForRow(2).tokens[0]).toEqual(value: 'bar', scopeDescriptor: ['source.js'])
expect(tokenizedBuffer.tokenizedLineForRow(3).tokens[0]).toEqual(value: 'baz', scopeDescriptor: ['source.js'])
# previous line 2 is joined with quux() on line 4
expect(tokenizedBuffer.tokenizedLineForRow(4).tokens[0]).toEqual(value: 'quux', scopes: ['source.js'])
expect(tokenizedBuffer.tokenizedLineForRow(4).tokens[4]).toEqual(value: 'if', scopes: ['source.js', 'keyword.control.js'])
expect(tokenizedBuffer.tokenizedLineForRow(4).tokens[0]).toEqual(value: 'quux', scopeDescriptor: ['source.js'])
expect(tokenizedBuffer.tokenizedLineForRow(4).tokens[4]).toEqual(value: 'if', scopeDescriptor: ['source.js', 'keyword.control.js'])
# previous line 3 is pushed down to become line 5
expect(tokenizedBuffer.tokenizedLineForRow(5).tokens[4]).toEqual(value: '=', scopes: ['source.js', 'keyword.operator.js'])
expect(tokenizedBuffer.tokenizedLineForRow(5).tokens[4]).toEqual(value: '=', scopeDescriptor: ['source.js', 'keyword.operator.js'])
expect(changeHandler).toHaveBeenCalled()
[event] = changeHandler.argsForCall[0]
@@ -264,17 +264,17 @@ describe "TokenizedBuffer", ->
[event] = changeHandler.argsForCall[0]
delete event.bufferChange
expect(event).toEqual(start: 2, end: 2, delta: 2)
expect(tokenizedBuffer.tokenizedLineForRow(2).tokens[0].scopes).toEqual ['source.js', 'comment.block.js', 'punctuation.definition.comment.js']
expect(tokenizedBuffer.tokenizedLineForRow(3).tokens[0].scopes).toEqual ['source.js', 'comment.block.js']
expect(tokenizedBuffer.tokenizedLineForRow(4).tokens[0].scopes).toEqual ['source.js', 'comment.block.js']
expect(tokenizedBuffer.tokenizedLineForRow(5).tokens[0].scopes).toEqual ['source.js']
expect(tokenizedBuffer.tokenizedLineForRow(2).tokens[0].scopeDescriptor).toEqual ['source.js', 'comment.block.js', 'punctuation.definition.comment.js']
expect(tokenizedBuffer.tokenizedLineForRow(3).tokens[0].scopeDescriptor).toEqual ['source.js', 'comment.block.js']
expect(tokenizedBuffer.tokenizedLineForRow(4).tokens[0].scopeDescriptor).toEqual ['source.js', 'comment.block.js']
expect(tokenizedBuffer.tokenizedLineForRow(5).tokens[0].scopeDescriptor).toEqual ['source.js']
changeHandler.reset()
advanceClock() # tokenize invalidated lines in background
expect(tokenizedBuffer.tokenizedLineForRow(5).tokens[0].scopes).toEqual ['source.js', 'comment.block.js']
expect(tokenizedBuffer.tokenizedLineForRow(6).tokens[0].scopes).toEqual ['source.js', 'comment.block.js']
expect(tokenizedBuffer.tokenizedLineForRow(7).tokens[0].scopes).toEqual ['source.js', 'comment.block.js']
expect(tokenizedBuffer.tokenizedLineForRow(8).tokens[0].scopes).not.toBe ['source.js', 'comment.block.js']
expect(tokenizedBuffer.tokenizedLineForRow(5).tokens[0].scopeDescriptor).toEqual ['source.js', 'comment.block.js']
expect(tokenizedBuffer.tokenizedLineForRow(6).tokens[0].scopeDescriptor).toEqual ['source.js', 'comment.block.js']
expect(tokenizedBuffer.tokenizedLineForRow(7).tokens[0].scopeDescriptor).toEqual ['source.js', 'comment.block.js']
expect(tokenizedBuffer.tokenizedLineForRow(8).tokens[0].scopeDescriptor).not.toBe ['source.js', 'comment.block.js']
expect(changeHandler).toHaveBeenCalled()
[event] = changeHandler.argsForCall[0]
@@ -343,7 +343,7 @@ describe "TokenizedBuffer", ->
expect(tokens[0].value).toBe "#"
expect(tokens[1].value).toBe " Econ 101"
expect(tokens[2].value).toBe tabAsSpaces
expect(tokens[2].scopes).toEqual tokens[1].scopes
expect(tokens[2].scopeDescriptor).toEqual tokens[1].scopeDescriptor
expect(tokens[2].isAtomic).toBeTruthy()
expect(tokens[3].value).toBe ""
@@ -526,7 +526,7 @@ describe "TokenizedBuffer", ->
fullyTokenize(tokenizedBuffer)
{tokens} = tokenizedBuffer.tokenizedLineForRow(0)
expect(tokens[0]).toEqual value: "<div class='name'>", scopes: ["text.html.ruby"]
expect(tokens[0]).toEqual value: "<div class='name'>", scopeDescriptor: ["text.html.ruby"]
waitsForPromise ->
atom.packages.activatePackage('language-html')
@@ -534,7 +534,7 @@ describe "TokenizedBuffer", ->
runs ->
fullyTokenize(tokenizedBuffer)
{tokens} = tokenizedBuffer.tokenizedLineForRow(0)
expect(tokens[0]).toEqual value: '<', scopes: ["text.html.ruby","meta.tag.block.any.html","punctuation.definition.tag.begin.html"]
expect(tokens[0]).toEqual value: '<', scopeDescriptor: ["text.html.ruby","meta.tag.block.any.html","punctuation.definition.tag.begin.html"]
describe ".tokenForPosition(position)", ->
afterEach ->
@@ -545,9 +545,9 @@ describe "TokenizedBuffer", ->
buffer = atom.project.bufferForPathSync('sample.js')
tokenizedBuffer = new TokenizedBuffer({buffer})
fullyTokenize(tokenizedBuffer)
expect(tokenizedBuffer.tokenForPosition([1,0]).scopes).toEqual ["source.js"]
expect(tokenizedBuffer.tokenForPosition([1,1]).scopes).toEqual ["source.js"]
expect(tokenizedBuffer.tokenForPosition([1,2]).scopes).toEqual ["source.js", "storage.modifier.js"]
expect(tokenizedBuffer.tokenForPosition([1,0]).scopeDescriptor).toEqual ["source.js"]
expect(tokenizedBuffer.tokenForPosition([1,1]).scopeDescriptor).toEqual ["source.js"]
expect(tokenizedBuffer.tokenForPosition([1,2]).scopeDescriptor).toEqual ["source.js", "storage.modifier.js"]
describe ".bufferRangeForScopeAtPosition(selector, position)", ->
beforeEach ->


@@ -19,16 +19,16 @@ describe "TokenizedLine", ->
expect(editor.tokenizedLineForScreenRow(2).isOnlyWhitespace()).toBe false
describe "::getScopeTree()", ->
it "returns a tree whose inner nodes are scopes and whose leaf nodes are tokens in those scopes", ->
it "returns a tree whose inner nodes are scopeDescriptor and whose leaf nodes are tokens in those scopeDescriptor", ->
[tokens, tokenIndex] = []
ensureValidScopeTree = (scopeTree, scopes=[]) ->
ensureValidScopeTree = (scopeTree, scopeDescriptor=[]) ->
if scopeTree.children?
for child in scopeTree.children
ensureValidScopeTree(child, scopes.concat([scopeTree.scope]))
ensureValidScopeTree(child, scopeDescriptor.concat([scopeTree.scope]))
else
expect(scopeTree).toBe tokens[tokenIndex++]
expect(scopes).toEqual scopeTree.scopes
expect(scopeDescriptor).toEqual scopeTree.scopeDescriptor
waitsForPromise ->
atom.project.open('coffee.coffee').then (o) -> editor = o


@@ -337,7 +337,7 @@ class Config
#
# * `scopeDescriptor` (optional) {Array} of {String}s describing a path from
# the root of the syntax tree to a token. Get one by calling
# {TextEditor::scopesAtCursor}. See {::get} for examples.
# {editor.getLastCursor().getScopeDescriptor()}. See {::get} for examples.
# * `keyPath` {String} name of the key to observe
# * `callback` {Function} to call when the value of the key changes.
# * `value` the new value of the key
@@ -373,7 +373,7 @@ class Config
#
# * `scopeDescriptor` (optional) {Array} of {String}s describing a path from
# the root of the syntax tree to a token. Get one by calling
# {TextEditor::scopesAtCursor}. See {::get} for examples.
# {editor.getLastCursor().getScopeDescriptor()}. See {::get} for examples.
# * `keyPath` (optional) {String} name of the key to observe. Must be
# specified if `scopeDescriptor` is specified.
# * `callback` {Function} to call when the value of the key changes.
@@ -425,16 +425,24 @@ class Config
# atom.config.get(['source.ruby'], 'editor.tabLength') # => 2
# ```
#
# You can get the language scope descriptor via
# {TextEditor::getRootScopeDescriptor}. This will get the setting specifically
# for the editor's language.
#
# ```coffee
# atom.config.get(@editor.getRootScopeDescriptor(), 'editor.tabLength') # => 2
# ```
#
# Additionally, you can get the setting at the specific cursor position.
#
# ```coffee
# scopeDescriptor = @editor.scopesAtCursor()
# scopeDescriptor = @editor.getLastCursor().getScopeDescriptor()
# atom.config.get(scopeDescriptor, 'editor.tabLength') # => 2
# ```
#
# * `scopeDescriptor` (optional) {Array} of {String}s describing a path from
# the root of the syntax tree to a token. Get one by calling
# {TextEditor::scopesAtCursor}
# {editor.getLastCursor().getScopeDescriptor()}
# * `keyPath` The {String} name of the key to retrieve.
#
# Returns the value from Atom's default settings, the user's configuration
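
The `::get` examples above cover reads; the observer methods documented in this hunk take the same optional scope descriptor ahead of the key path (their names fall outside the excerpt). A minimal sketch using `atom.config.onDidChange`, whose scoped form also appears in the TokenizedBuffer hunk further down; `editor` is an assumed TextEditor instance:

```coffee
# Watch the tab length scoped to the editor's language.
rootScopeDescriptor = editor.getRootScopeDescriptor()   # e.g. ['.source.coffee']
subscription = atom.config.onDidChange rootScopeDescriptor, 'editor.tabLength', ({newValue}) ->
  console.log "editor.tabLength is now #{newValue}"

# Dispose when the watcher is no longer needed (the same pattern TokenizedBuffer uses below).
subscription.dispose()
```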


@@ -198,7 +198,7 @@ class Cursor extends Model
[before, after] = @editor.getTextInBufferRange(range)
return false if /\s/.test(before) or /\s/.test(after)
nonWordCharacters = atom.config.get(@getScopes(), 'editor.nonWordCharacters').split('')
nonWordCharacters = atom.config.get(@getScopeDescriptor(), 'editor.nonWordCharacters').split('')
_.contains(nonWordCharacters, before) isnt _.contains(nonWordCharacters, after)
# Public: Returns whether this cursor is between a word's start and end.
@@ -214,11 +214,14 @@ class Cursor extends Model
else
@getBufferColumn()
# Public: Retrieves the grammar's token scopes for the line.
# Public: Retrieves the scope descriptor for the cursor's current position.
#
# Returns an {Array} of {String}s.
getScopeDescriptor: ->
@editor.scopeDescriptorForBufferPosition(@getBufferPosition())
getScopes: ->
@editor.scopesForBufferPosition(@getBufferPosition())
Grim.deprecate 'Use Cursor::getScopeDescriptor() instead'
@getScopeDescriptor()
# Public: Returns true if this cursor has no non-whitespace characters before
# its current position.
@@ -617,7 +620,7 @@ class Cursor extends Model
# Returns a {RegExp}.
wordRegExp: ({includeNonWordCharacters}={}) ->
includeNonWordCharacters ?= true
nonWordCharacters = atom.config.get(@getScopes(), 'editor.nonWordCharacters')
nonWordCharacters = atom.config.get(@getScopeDescriptor(), 'editor.nonWordCharacters')
segments = ["^[\t ]*$"]
segments.push("[^\\s#{_.escapeRegExp(nonWordCharacters)}]+")
if includeNonWordCharacters


@@ -650,7 +650,7 @@ class DisplayBuffer extends Model
left = 0
column = 0
for token in @tokenizedLineForScreenRow(targetRow).tokens
charWidths = @getScopedCharWidths(token.scopes)
charWidths = @getScopedCharWidths(token.scopeDescriptor)
for char in token.value
return {top, left} if column is targetColumn
left += charWidths[char] ? defaultCharWidth unless char is '\0'
@@ -668,7 +668,7 @@ class DisplayBuffer extends Model
left = 0
column = 0
for token in @tokenizedLineForScreenRow(row).tokens
charWidths = @getScopedCharWidths(token.scopes)
charWidths = @getScopedCharWidths(token.scopeDescriptor)
for char in token.value
charWidth = charWidths[char] ? defaultCharWidth
break if targetLeft <= left + (charWidth / 2)
@@ -752,13 +752,13 @@ class DisplayBuffer extends Model
[bufferRow] = @rowMap.bufferRowRangeForScreenRow(row)
new Point(bufferRow, @screenLines[row].bufferColumnForScreenColumn(column))
# Retrieves the grammar's token scopes for a buffer position.
# Retrieves the grammar's token scopeDescriptor for a buffer position.
#
# bufferPosition - A {Point} in the {TextBuffer}
#
# Returns an {Array} of {String}s.
scopesForBufferPosition: (bufferPosition) ->
@tokenizedBuffer.scopesForPosition(bufferPosition)
scopeDescriptorForBufferPosition: (bufferPosition) ->
@tokenizedBuffer.scopeDescriptorForPosition(bufferPosition)
bufferRangeForScopeAtPosition: (selector, position) ->
@tokenizedBuffer.bufferRangeForScopeAtPosition(selector, position)
@@ -1061,7 +1061,7 @@ class DisplayBuffer extends Model
console.log row, @bufferRowForScreenRow(row), line, line.length
getRootScopeDescriptor: ->
@tokenizedBuffer.grammarScopeDescriptor
@tokenizedBuffer.rootScopeDescriptor
handleTokenizedBufferChange: (tokenizedBufferChange) =>
{start, end, delta, bufferChange} = tokenizedBufferChange


@@ -29,8 +29,8 @@ class LanguageMode
#
# Returns an {Array} of the commented {Ranges}.
toggleLineCommentsForBufferRows: (start, end) ->
scopes = @editor.scopesForBufferPosition([start, 0])
properties = atom.config.settingsForScopeDescriptor(scopes, 'editor.commentStart')[0]
scopeDescriptor = @editor.scopeDescriptorForBufferPosition([start, 0])
properties = atom.config.settingsForScopeDescriptor(scopeDescriptor, 'editor.commentStart')[0]
return unless properties
commentStartString = _.valueForKeyPath(properties, 'editor.commentStart')
@@ -168,12 +168,12 @@ class LanguageMode
return null unless @isFoldableAtBufferRow(bufferRow)
startIndentLevel = @editor.indentationForBufferRow(bufferRow)
scopes = @editor.scopesForBufferPosition([bufferRow, 0])
scopeDescriptor = @editor.scopeDescriptorForBufferPosition([bufferRow, 0])
for row in [(bufferRow + 1)..@editor.getLastBufferRow()]
continue if @editor.isBufferRowBlank(row)
indentation = @editor.indentationForBufferRow(row)
if indentation <= startIndentLevel
includeRowInFold = indentation == startIndentLevel and @foldEndRegexForScopes(scopes)?.searchSync(@editor.lineTextForBufferRow(row))
includeRowInFold = indentation == startIndentLevel and @foldEndRegexForScopeDescriptor(scopeDescriptor)?.searchSync(@editor.lineTextForBufferRow(row))
foldEndRow = row if includeRowInFold
break
@@ -246,8 +246,8 @@ class LanguageMode
# Returns a {Number}.
suggestedIndentForBufferRow: (bufferRow) ->
currentIndentLevel = @editor.indentationForBufferRow(bufferRow)
scopes = @editor.scopesForBufferPosition([bufferRow, 0])
return currentIndentLevel unless increaseIndentRegex = @increaseIndentRegexForScopes(scopes)
scopeDescriptor = @editor.scopeDescriptorForBufferPosition([bufferRow, 0])
return currentIndentLevel unless increaseIndentRegex = @increaseIndentRegexForScopeDescriptor(scopeDescriptor)
currentLine = @buffer.lineForRow(bufferRow)
precedingRow = if bufferRow > 0 then bufferRow - 1 else null
@@ -257,7 +257,7 @@ class LanguageMode
desiredIndentLevel = @editor.indentationForBufferRow(precedingRow)
desiredIndentLevel += 1 if increaseIndentRegex.testSync(precedingLine) and not @editor.isBufferRowCommented(precedingRow)
return desiredIndentLevel unless decreaseIndentRegex = @decreaseIndentRegexForScopes(scopes)
return desiredIndentLevel unless decreaseIndentRegex = @decreaseIndentRegexForScopeDescriptor(scopeDescriptor)
desiredIndentLevel -= 1 if decreaseIndentRegex.testSync(currentLine)
Math.max(desiredIndentLevel, 0)
@@ -292,9 +292,9 @@ class LanguageMode
#
# bufferRow - The row {Number}
autoDecreaseIndentForBufferRow: (bufferRow) ->
scopes = @editor.scopesForBufferPosition([bufferRow, 0])
increaseIndentRegex = @increaseIndentRegexForScopes(scopes)
decreaseIndentRegex = @decreaseIndentRegexForScopes(scopes)
scopeDescriptor = @editor.scopeDescriptorForBufferPosition([bufferRow, 0])
increaseIndentRegex = @increaseIndentRegexForScopeDescriptor(scopeDescriptor)
decreaseIndentRegex = @decreaseIndentRegexForScopeDescriptor(scopeDescriptor)
return unless increaseIndentRegex and decreaseIndentRegex
line = @buffer.lineForRow(bufferRow)
@@ -311,15 +311,15 @@ class LanguageMode
if desiredIndentLevel >= 0 and desiredIndentLevel < currentIndentLevel
@editor.setIndentationForBufferRow(bufferRow, desiredIndentLevel)
getRegexForProperty: (scopes, property) ->
if pattern = atom.config.get(scopes, property)
getRegexForProperty: (scopeDescriptor, property) ->
if pattern = atom.config.get(scopeDescriptor, property)
new OnigRegExp(pattern)
increaseIndentRegexForScopes: (scopes) ->
@getRegexForProperty(scopes, 'editor.increaseIndentPattern')
increaseIndentRegexForScopeDescriptor: (scopeDescriptor) ->
@getRegexForProperty(scopeDescriptor, 'editor.increaseIndentPattern')
decreaseIndentRegexForScopes: (scopes) ->
@getRegexForProperty(scopes, 'editor.decreaseIndentPattern')
decreaseIndentRegexForScopeDescriptor: (scopeDescriptor) ->
@getRegexForProperty(scopeDescriptor, 'editor.decreaseIndentPattern')
foldEndRegexForScopes: (scopes) ->
@getRegexForProperty(scopes, 'editor.foldEndPattern')
foldEndRegexForScopeDescriptor: (scopeDescriptor) ->
@getRegexForProperty(scopeDescriptor, 'editor.foldEndPattern')
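
All of the renamed helpers above funnel through `getRegexForProperty`, which reads a scoped setting and wraps it in an `OnigRegExp`. A compressed sketch of that flow outside the class (require path, `editor`, and `bufferRow` are assumptions):

```coffee
{OnigRegExp} = require 'oniguruma'   # assumed: the module that provides OnigRegExp here

# Decide whether a row should increase indentation, using the row's scope descriptor.
scopeDescriptor = editor.scopeDescriptorForBufferPosition([bufferRow, 0])
if pattern = atom.config.get(scopeDescriptor, 'editor.increaseIndentPattern')
  increaseIndentRegex = new OnigRegExp(pattern)
  shouldIndent = increaseIndentRegex.testSync(editor.lineTextForBufferRow(bufferRow))
```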


@@ -200,7 +200,7 @@ LinesComponent = React.createClass
firstTrailingWhitespacePosition = text.search(/\s*$/)
lineIsWhitespaceOnly = firstTrailingWhitespacePosition is 0
for token in tokens
innerHTML += @updateScopeStack(scopeStack, token.scopes)
innerHTML += @updateScopeStack(scopeStack, token.scopeDescriptor)
hasIndentGuide = not mini and showIndentGuide and (token.hasLeadingWhitespace() or (token.hasTrailingWhitespace() and lineIsWhitespaceOnly))
innerHTML += token.getValueAsHtml({hasIndentGuide})
@@ -217,20 +217,20 @@ LinesComponent = React.createClass
html += "<span class='invisible-character'>#{invisible}</span>"
html
updateScopeStack: (scopeStack, desiredScopes) ->
updateScopeStack: (scopeStack, desiredScopeDescriptor) ->
html = ""
# Find a common prefix
for scope, i in desiredScopes
break unless scopeStack[i] is desiredScopes[i]
for scope, i in desiredScopeDescriptor
break unless scopeStack[i] is desiredScopeDescriptor[i]
# Pop scopes until we're at the common prefx
# Pop scopeDescriptor until we're at the common prefx
until scopeStack.length is i
html += @popScope(scopeStack)
# Push onto common prefix until scopeStack equals desiredScopes
for j in [i...desiredScopes.length]
html += @pushScope(scopeStack, desiredScopes[j])
# Push onto common prefix until scopeStack equals desiredScopeDescriptor
for j in [i...desiredScopeDescriptor.length]
html += @pushScope(scopeStack, desiredScopeDescriptor[j])
html
@@ -308,8 +308,8 @@ LinesComponent = React.createClass
iterator = null
charIndex = 0
for {value, scopes}, tokenIndex in tokenizedLine.tokens
charWidths = editor.getScopedCharWidths(scopes)
for {value, scopeDescriptor}, tokenIndex in tokenizedLine.tokens
charWidths = editor.getScopedCharWidths(scopeDescriptor)
for char in value
continue if char is '\0'
@@ -331,7 +331,7 @@ LinesComponent = React.createClass
rangeForMeasurement.setStart(textNode, i)
rangeForMeasurement.setEnd(textNode, i + 1)
charWidth = rangeForMeasurement.getBoundingClientRect().width
editor.setScopedCharWidth(scopes, char, charWidth)
editor.setScopedCharWidth(scopeDescriptor, char, charWidth)
charIndex++


@@ -32,7 +32,7 @@ class Syntax extends GrammarRegistry
serialize: ->
{deserializer: @constructor.name, @grammarOverridesByPath}
createToken: (value, scopes) -> new Token({value, scopes})
createToken: (value, scopeDescriptor) -> new Token({value, scopeDescriptor})
# Deprecated: Used by settings-view to display snippets for packages
@::accessor 'propertyStore', ->


@@ -2370,21 +2370,14 @@ class TextEditor extends Model
Section: Managing Syntax Scopes
###
# Public: Get the syntactic scopes for the most recently added cursor's
# position. See {::scopesForBufferPosition} for more information.
#
# Returns an {Array} of {String}s.
scopesAtCursor: ->
if cursor = @getLastCursor()
cursor.getScopes()
else
@getRootScopeDescriptor()
getCursorScopes: ->
deprecate 'Use TextEditor::scopesAtCursor() instead'
@scopesAtCursor()
# Essential: Returns the scope descriptor that includes the language. eg.
# `['.source.ruby']`, or `['.source.coffee']`. You can use this with
# {Config::get} to get language specific config values.
getRootScopeDescriptor: ->
@displayBuffer.getRootScopeDescriptor()
# Essential: Get the syntactic scopes for the given position in buffer
# coordinates.
# Essential: Get the syntactic scopeDescriptor for the given position in buffer
# coordinates. Useful with {Config::get}.
#
# For example, if called with a position inside the parameter list of an
# anonymous CoffeeScript function, the method returns the following array:
@@ -2393,7 +2386,10 @@ class TextEditor extends Model
# * `bufferPosition` A {Point} or {Array} of [row, column].
#
# Returns an {Array} of {String}s.
scopesForBufferPosition: (bufferPosition) -> @displayBuffer.scopesForBufferPosition(bufferPosition)
scopeDescriptorForBufferPosition: (bufferPosition) -> @displayBuffer.scopeDescriptorForBufferPosition(bufferPosition)
scopesForBufferPosition: (bufferPosition) ->
deprecate 'Use ::scopeDescriptorForBufferPosition instead'
@scopeDescriptorForBufferPosition(bufferPosition)
# Extended: Get the range in buffer coordinates of all tokens surrounding the
# cursor that match the given scope selector.
@@ -2405,21 +2401,25 @@ class TextEditor extends Model
bufferRangeForScopeAtCursor: (selector) ->
@displayBuffer.bufferRangeForScopeAtPosition(selector, @getCursorBufferPosition())
getRootScopeDescriptor: ->
@displayBuffer.getRootScopeDescriptor()
# Extended: Determine if the given row is entirely a comment
isBufferRowCommented: (bufferRow) ->
if match = @lineTextForBufferRow(bufferRow).match(/\S/)
scopeDescriptor = @tokenForBufferPosition([bufferRow, match.index]).scopeDescriptor
@commentScopeSelector ?= new TextMateScopeSelector('comment.*')
@commentScopeSelector.matches(scopeDescriptor)
logCursorScope: ->
console.log @scopesAtCursor()
console.log @getLastCursor().getScopeDescriptor()
# {Delegates to: DisplayBuffer.tokenForBufferPosition}
tokenForBufferPosition: (bufferPosition) -> @displayBuffer.tokenForBufferPosition(bufferPosition)
# Extended: Determine if the given row is entirely a comment
isBufferRowCommented: (bufferRow) ->
if match = @lineTextForBufferRow(bufferRow).match(/\S/)
scopes = @tokenForBufferPosition([bufferRow, match.index]).scopes
@commentScopeSelector ?= new TextMateScopeSelector('comment.*')
@commentScopeSelector.matches(scopes)
scopesAtCursor: ->
deprecate 'Use editor.getLastCursor().scopesAtCursor() instead'
@getLastCursor().getScopeDescriptor()
getCursorScopes: ->
deprecate 'Use editor.getLastCursor().scopesAtCursor() instead'
@scopesAtCursor()
###
Section: Clipboard Operations
@@ -2459,7 +2459,7 @@ class TextEditor extends Model
return
else if atom.config.get(@scopesAtCursor(), "editor.normalizeIndentOnPaste") and metadata?.indentBasis?
else if atom.config.get(@getLastCursor().getScopeDescriptor(), "editor.normalizeIndentOnPaste") and metadata?.indentBasis?
if !@getLastCursor().hasPrecedingCharactersOnLine() or containsNewlines
options.indentBasis ?= metadata.indentBasis
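
The TextEditor hunk above keeps the old entry points as thin `deprecate` wrappers, so callers keep working while they migrate. The mapping, sketched against an assumed `editor` instance:

```coffee
position = editor.getCursorBufferPosition()

# Deprecated (still callable, logs via grim)    New equivalent
editor.scopesForBufferPosition(position)      # editor.scopeDescriptorForBufferPosition(position)
editor.scopesAtCursor()                       # editor.getLastCursor().getScopeDescriptor()
editor.getCursorScopes()                      # editor.getLastCursor().getScopeDescriptor()
```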


@@ -1,4 +1,5 @@
_ = require 'underscore-plus'
{deprecate} = require 'grim'
textUtils = require './text-utils'
WhitespaceRegexesByTabLength = {}
@@ -13,27 +14,35 @@ module.exports =
class Token
value: null
hasPairedCharacter: false
scopes: null
scopeDescriptor: null
isAtomic: null
isHardTab: null
firstNonWhitespaceIndex: null
firstTrailingWhitespaceIndex: null
hasInvisibleCharacters: false
constructor: ({@value, @scopes, @isAtomic, @bufferDelta, @isHardTab}) ->
Object.defineProperty @::, 'scopes', get: ->
deprecate 'Use ::scopeDescriptor instead'
@scopeDescriptor
constructor: ({@value, @scopeDescriptor, @isAtomic, @bufferDelta, @isHardTab}) ->
@screenDelta = @value.length
@bufferDelta ?= @screenDelta
@hasPairedCharacter = textUtils.hasPairedCharacter(@value)
isEqual: (other) ->
@value == other.value and _.isEqual(@scopes, other.scopes) and !!@isAtomic == !!other.isAtomic
# TODO: scopes is deprecated. This is here for the sake of lang package tests
scopeDescriptor = other.scopeDescriptor ? other.scopes
deprecate 'Test the Token for `scopeDescriptor` rather than `scopes`' if other.scopes?
@value == other.value and _.isEqual(@scopeDescriptor, scopeDescriptor) and !!@isAtomic == !!other.isAtomic
isBracket: ->
/^meta\.brace\b/.test(_.last(@scopes))
/^meta\.brace\b/.test(_.last(@scopeDescriptor))
splitAt: (splitIndex) ->
leftToken = new Token(value: @value.substring(0, splitIndex), scopes: @scopes)
rightToken = new Token(value: @value.substring(splitIndex), scopes: @scopes)
leftToken = new Token(value: @value.substring(0, splitIndex), scopeDescriptor: @scopeDescriptor)
rightToken = new Token(value: @value.substring(splitIndex), scopeDescriptor: @scopeDescriptor)
if @firstNonWhitespaceIndex?
leftToken.firstNonWhitespaceIndex = Math.min(splitIndex, @firstNonWhitespaceIndex)
@@ -92,7 +101,7 @@ class Token
else
breakOutLeadingSoftTabs = false
value = match[0]
token = new Token({value, @scopes})
token = new Token({value, @scopeDescriptor})
column += token.value.length
outputTokens.push(token)
@@ -106,7 +115,7 @@ class Token
while index < @value.length
if textUtils.isPairedCharacter(@value, index)
if nonPairStart isnt index
outputTokens.push(new Token({value: @value[nonPairStart...index], @scopes}))
outputTokens.push(new Token({value: @value[nonPairStart...index], @scopeDescriptor}))
outputTokens.push(@buildPairedCharacterToken(@value, index))
index += 2
nonPairStart = index
@@ -114,14 +123,14 @@ class Token
index++
if nonPairStart isnt index
outputTokens.push(new Token({value: @value[nonPairStart...index], @scopes}))
outputTokens.push(new Token({value: @value[nonPairStart...index], @scopeDescriptor}))
outputTokens
buildPairedCharacterToken: (value, index) ->
new Token(
value: value[index..index + 1]
scopes: @scopes
scopeDescriptor: @scopeDescriptor
isAtomic: true
)
@@ -135,7 +144,7 @@ class Token
tabStop = tabLength - (column % tabLength)
new Token(
value: _.multiplyString(" ", tabStop)
scopes: @scopes
scopeDescriptor: @scopeDescriptor
bufferDelta: if isHardTab then 1 else tabStop
isAtomic: true
isHardTab: isHardTab
@@ -146,7 +155,7 @@ class Token
matchesScopeSelector: (selector) ->
targetClasses = selector.replace(StartDotRegex, '').split('.')
_.any @scopes, (scope) ->
_.any @scopeDescriptor, (scope) ->
scopeClasses = scope.split('.')
_.isSubset(targetClasses, scopeClasses)
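
The Token hunk above shows the compatibility shim for the property itself: `scopes` becomes a prototype getter that logs a grim deprecation and returns `scopeDescriptor`, and `isEqual` still accepts expectation objects that use `scopes`. Roughly, from a consumer's point of view (require path assumed):

```coffee
Token = require './token'   # assumed relative path within src/

token = new Token(value: 'var', scopeDescriptor: ['source.js', 'storage.modifier.js'])
token.scopeDescriptor   # => ['source.js', 'storage.modifier.js']
token.scopes            # same array, plus a "Use ::scopeDescriptor instead" deprecation
```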


@@ -79,14 +79,14 @@ class TokenizedBuffer extends Model
return if grammar is @grammar
@unsubscribe(@grammar) if @grammar
@grammar = grammar
@grammarScopeDescriptor = [@grammar.scopeName]
@rootScopeDescriptor = [@grammar.scopeName]
@currentGrammarScore = score ? grammar.getScore(@buffer.getPath(), @buffer.getText())
@subscribe @grammar.onDidUpdate => @retokenizeLines()
@configSettings = tabLength: atom.config.get(@grammarScopeDescriptor, 'editor.tabLength')
@configSettings = tabLength: atom.config.get(@rootScopeDescriptor, 'editor.tabLength')
@grammarTabLengthSubscription?.dispose()
@grammarTabLengthSubscription = atom.config.onDidChange @grammarScopeDescriptor, 'editor.tabLength', ({newValue}) =>
@grammarTabLengthSubscription = atom.config.onDidChange @rootScopeDescriptor, 'editor.tabLength', ({newValue}) =>
@configSettings.tabLength = newValue
@retokenizeLines()
@subscribe @grammarTabLengthSubscription
@@ -105,7 +105,7 @@ class TokenizedBuffer extends Model
hasTokenForSelector: (selector) ->
for {tokens} in @tokenizedLines
for token in tokens
return true if selector.matches(token.scopes)
return true if selector.matches(token.scopeDescriptor)
false
retokenizeLines: ->
@@ -247,7 +247,7 @@ class TokenizedBuffer extends Model
buildPlaceholderTokenizedLineForRow: (row) ->
line = @buffer.lineForRow(row)
tokens = [new Token(value: line, scopes: [@grammar.scopeName])]
tokens = [new Token(value: line, scopeDescriptor: [@grammar.scopeName])]
tabLength = @getTabLength()
indentLevel = @indentLevelForRow(row)
lineEnding = @buffer.lineEndingForRow(row)
@@ -302,8 +302,8 @@ class TokenizedBuffer extends Model
else
0
scopesForPosition: (position) ->
@tokenForPosition(position).scopes
scopeDescriptorForPosition: (position) ->
@tokenForPosition(position).scopeDescriptor
tokenForPosition: (position) ->
{row, column} = Point.fromObject(position)


@@ -194,9 +194,9 @@ class TokenizedLine
isComment: ->
for token in @tokens
continue if token.scopes.length is 1
continue if token.scopeDescriptor.length is 1
continue if token.isOnlyWhitespace()
for scope in token.scopes
for scope in token.scopeDescriptor
return true if _.contains(scope.split('.'), 'comment')
break
false
@@ -226,26 +226,26 @@ class TokenizedLine
scopeStack = []
for token in @tokens
@updateScopeStack(scopeStack, token.scopes)
@updateScopeStack(scopeStack, token.scopeDescriptor)
_.last(scopeStack).children.push(token)
@scopeTree = scopeStack[0]
@updateScopeStack(scopeStack, [])
@scopeTree
updateScopeStack: (scopeStack, desiredScopes) ->
updateScopeStack: (scopeStack, desiredScopeDescriptor) ->
# Find a common prefix
for scope, i in desiredScopes
break unless scopeStack[i]?.scope is desiredScopes[i]
for scope, i in desiredScopeDescriptor
break unless scopeStack[i]?.scope is desiredScopeDescriptor[i]
# Pop scopes until we're at the common prefx
# Pop scopeDescriptor until we're at the common prefx
until scopeStack.length is i
poppedScope = scopeStack.pop()
_.last(scopeStack)?.children.push(poppedScope)
# Push onto common prefix until scopeStack equals desiredScopes
for j in [i...desiredScopes.length]
scopeStack.push(new Scope(desiredScopes[j]))
# Push onto common prefix until scopeStack equals desiredScopeDescriptor
for j in [i...desiredScopeDescriptor.length]
scopeStack.push(new Scope(desiredScopeDescriptor[j]))
class Scope
constructor: (@scope) ->