Merge branch 'master' into as-tiled-rendering
# Conflicts:
#   spec/text-editor-presenter-spec.coffee
#   src/lines-component.coffee
#   src/text-editor-presenter.coffee
@@ -2,6 +2,7 @@ _ = require 'underscore-plus'
|
||||
Serializable = require 'serializable'
|
||||
{CompositeDisposable, Emitter} = require 'event-kit'
|
||||
{Point, Range} = require 'text-buffer'
|
||||
Grim = require 'grim'
|
||||
TokenizedBuffer = require './tokenized-buffer'
|
||||
RowMap = require './row-map'
|
||||
Fold = require './fold'
|
||||
@@ -9,7 +10,6 @@ Model = require './model'
|
||||
Token = require './token'
|
||||
Decoration = require './decoration'
|
||||
Marker = require './marker'
|
||||
Grim = require 'grim'
|
||||
|
||||
class BufferToScreenConversionError extends Error
|
||||
constructor: (@message, @metadata) ->
|
||||
@@ -40,9 +40,10 @@ class DisplayBuffer extends Model
|
||||
@disposables.add @tokenizedBuffer.observeGrammar @subscribeToScopedConfigSettings
|
||||
@disposables.add @tokenizedBuffer.onDidChange @handleTokenizedBufferChange
|
||||
@disposables.add @buffer.onDidCreateMarker @handleBufferMarkerCreated
|
||||
@updateAllScreenLines()
|
||||
@foldMarkerAttributes = Object.freeze({class: 'fold', displayBufferId: @id})
|
||||
@createFoldForMarker(marker) for marker in @buffer.findMarkers(@getFoldMarkerAttributes())
|
||||
folds = (new Fold(this, marker) for marker in @buffer.findMarkers(@getFoldMarkerAttributes()))
|
||||
@updateAllScreenLines()
|
||||
@decorateFold(fold) for fold in folds
|
||||
|
||||
subscribeToScopedConfigSettings: =>
|
||||
@scopedConfigSubscriptions?.dispose()
|
||||
@@ -580,9 +581,17 @@ class DisplayBuffer extends Model
|
||||
# Returns the folds in the given row range (exclusive of end row) that are
|
||||
# not contained by any other folds.
|
||||
outermostFoldsInBufferRowRange: (startRow, endRow) ->
|
||||
@findFoldMarkers(containedInRange: [[startRow, 0], [endRow, 0]])
|
||||
.map (marker) => @foldForMarker(marker)
|
||||
.filter (fold) -> not fold.isInsideLargerFold()
|
||||
folds = []
|
||||
lastFoldEndRow = -1
|
||||
|
||||
for marker in @findFoldMarkers(intersectsRowRange: [startRow, endRow])
|
||||
range = marker.getRange()
|
||||
if range.start.row > lastFoldEndRow
|
||||
lastFoldEndRow = range.end.row
|
||||
if startRow <= range.start.row <= range.end.row < endRow
|
||||
folds.push(@foldForMarker(marker))
|
||||
|
||||
folds
|
||||
|
||||
# Public: Given a buffer row, this returns folds that include it.
|
||||
#
|
||||
@@ -650,16 +659,19 @@ class DisplayBuffer extends Model
|
||||
top = targetRow * @lineHeightInPixels
|
||||
left = 0
|
||||
column = 0
|
||||
for token in @tokenizedLineForScreenRow(targetRow).tokens
|
||||
charWidths = @getScopedCharWidths(token.scopes)
|
||||
|
||||
iterator = @tokenizedLineForScreenRow(targetRow).getTokenIterator()
|
||||
while iterator.next()
|
||||
charWidths = @getScopedCharWidths(iterator.getScopes())
|
||||
valueIndex = 0
|
||||
while valueIndex < token.value.length
|
||||
if token.hasPairedCharacter
|
||||
char = token.value.substr(valueIndex, 2)
|
||||
value = iterator.getText()
|
||||
while valueIndex < value.length
|
||||
if iterator.isPairedCharacter()
|
||||
char = value
|
||||
charLength = 2
|
||||
valueIndex += 2
|
||||
else
|
||||
char = token.value[valueIndex]
|
||||
char = value[valueIndex]
|
||||
charLength = 1
|
||||
valueIndex++
|
||||
|
||||
@@ -680,16 +692,19 @@ class DisplayBuffer extends Model
|
||||
|
||||
left = 0
|
||||
column = 0
|
||||
for token in @tokenizedLineForScreenRow(row).tokens
|
||||
charWidths = @getScopedCharWidths(token.scopes)
|
||||
|
||||
iterator = @tokenizedLineForScreenRow(row).getTokenIterator()
|
||||
while iterator.next()
|
||||
charWidths = @getScopedCharWidths(iterator.getScopes())
|
||||
value = iterator.getText()
|
||||
valueIndex = 0
|
||||
while valueIndex < token.value.length
|
||||
if token.hasPairedCharacter
|
||||
char = token.value.substr(valueIndex, 2)
|
||||
while valueIndex < value.length
|
||||
if iterator.isPairedCharacter()
|
||||
char = value
|
||||
charLength = 2
|
||||
valueIndex += 2
|
||||
else
|
||||
char = token.value[valueIndex]
|
||||
char = value[valueIndex]
|
||||
charLength = 1
|
||||
valueIndex++
|
||||
|
||||
@@ -1132,11 +1147,15 @@ class DisplayBuffer extends Model
|
||||
regions = []
|
||||
rectangularRegion = null
|
||||
|
||||
foldsByStartRow = {}
|
||||
for fold in @outermostFoldsInBufferRowRange(startBufferRow, endBufferRow)
|
||||
foldsByStartRow[fold.getStartRow()] = fold
|
||||
|
||||
bufferRow = startBufferRow
|
||||
while bufferRow < endBufferRow
|
||||
tokenizedLine = @tokenizedBuffer.tokenizedLineForRow(bufferRow)
|
||||
|
||||
if fold = @largestFoldStartingAtBufferRow(bufferRow)
|
||||
if fold = foldsByStartRow[bufferRow]
|
||||
foldLine = tokenizedLine.copy()
|
||||
foldLine.fold = fold
|
||||
screenLines.push(foldLine)
|
||||
@@ -1206,16 +1225,19 @@ class DisplayBuffer extends Model
|
||||
@setScrollLeft(Math.min(@getScrollLeft(), @getMaxScrollLeft()))
|
||||
|
||||
handleBufferMarkerCreated: (textBufferMarker) =>
|
||||
@createFoldForMarker(textBufferMarker) if textBufferMarker.matchesParams(@getFoldMarkerAttributes())
|
||||
if textBufferMarker.matchesParams(@getFoldMarkerAttributes())
|
||||
fold = new Fold(this, textBufferMarker)
|
||||
fold.updateDisplayBuffer()
|
||||
@decorateFold(fold)
|
||||
|
||||
if marker = @getMarker(textBufferMarker.id)
|
||||
# The marker might have been removed in some other handler called before
|
||||
# this one. Only emit when the marker still exists.
|
||||
@emit 'marker-created', marker if Grim.includeDeprecatedAPIs
|
||||
@emitter.emit 'did-create-marker', marker
|
||||
|
||||
createFoldForMarker: (marker) ->
|
||||
@decorateMarker(marker, type: 'line-number', class: 'folded')
|
||||
new Fold(this, marker)
|
||||
decorateFold: (fold) ->
|
||||
@decorateMarker(fold.marker, type: 'line-number', class: 'folded')
|
||||
|
||||
foldForMarker: (marker) ->
|
||||
@foldsByMarkerId[marker.id]
|
||||
|
||||
@@ -13,7 +13,6 @@ class Fold
|
||||
constructor: (@displayBuffer, @marker) ->
|
||||
@id = @marker.id
|
||||
@displayBuffer.foldsByMarkerId[@marker.id] = this
|
||||
@updateDisplayBuffer()
|
||||
@marker.onDidDestroy => @destroyed()
|
||||
@marker.onDidChange ({isValid}) => @destroy() unless isValid
|
||||
|
||||
|
||||
@@ -242,8 +242,9 @@ class LanguageMode
|
||||
@suggestedIndentForTokenizedLineAtBufferRow(bufferRow, tokenizedLine, options)
|
||||
|
||||
suggestedIndentForTokenizedLineAtBufferRow: (bufferRow, tokenizedLine, options) ->
|
||||
scopes = tokenizedLine.tokens[0].scopes
|
||||
scopeDescriptor = new ScopeDescriptor({scopes})
|
||||
iterator = tokenizedLine.getTokenIterator()
|
||||
iterator.next()
|
||||
scopeDescriptor = new ScopeDescriptor(scopes: iterator.getScopes())
|
||||
|
||||
currentIndentLevel = @editor.indentationForBufferRow(bufferRow)
|
||||
return currentIndentLevel unless increaseIndentRegex = @increaseIndentRegexForScopeDescriptor(scopeDescriptor)
|
||||
|
||||
@@ -2,9 +2,12 @@ _ = require 'underscore-plus'
|
||||
{toArray} = require 'underscore-plus'
|
||||
{$$} = require 'space-pen'
|
||||
|
||||
TokenIterator = require './token-iterator'
|
||||
DummyLineNode = $$(-> @div className: 'line', style: 'position: absolute; visibility: hidden;', => @span 'x')[0]
|
||||
AcceptFilter = {acceptNode: -> NodeFilter.FILTER_ACCEPT}
|
||||
WrapperDiv = document.createElement('div')
|
||||
TokenTextEscapeRegex = /[&"'<>]/g
|
||||
MaxTokenLength = 20000
|
||||
|
||||
cloneObject = (object) ->
|
||||
clone = {}
|
||||
@@ -16,6 +19,7 @@ class TileComponent
|
||||
placeholderTextDiv: null
|
||||
|
||||
constructor: ({@presenter, @id}) ->
|
||||
@tokenIterator = new TokenIterator
|
||||
@measuredLines = new Set
|
||||
@lineNodesByLineId = {}
|
||||
@screenRowsByLineId = {}
|
||||
@@ -147,20 +151,116 @@ class TileComponent
|
||||
@buildEndOfLineHTML(id) or ' '
|
||||
|
||||
buildLineInnerHTML: (id) ->
|
||||
{indentGuidesVisible} = @newState
|
||||
{tokens, text, isOnlyWhitespace} = @newTileState.lines[id]
|
||||
lineState = @newTileState.lines[id]
|
||||
{firstNonWhitespaceIndex, firstTrailingWhitespaceIndex, invisibles} = lineState
|
||||
lineIsWhitespaceOnly = firstTrailingWhitespaceIndex is 0
|
||||
|
||||
innerHTML = ""
|
||||
@tokenIterator.reset(lineState)
|
||||
|
||||
scopeStack = []
|
||||
for token in tokens
|
||||
innerHTML += @updateScopeStack(scopeStack, token.scopes)
|
||||
hasIndentGuide = indentGuidesVisible and (token.hasLeadingWhitespace() or (token.hasTrailingWhitespace() and isOnlyWhitespace))
|
||||
innerHTML += token.getValueAsHtml({hasIndentGuide})
|
||||
while @tokenIterator.next()
|
||||
for scope in @tokenIterator.getScopeEnds()
|
||||
innerHTML += "</span>"
|
||||
|
||||
for scope in @tokenIterator.getScopeStarts()
|
||||
innerHTML += "<span class=\"#{scope.replace(/\.+/g, ' ')}\">"
|
||||
|
||||
tokenStart = @tokenIterator.getScreenStart()
|
||||
tokenEnd = @tokenIterator.getScreenEnd()
|
||||
tokenText = @tokenIterator.getText()
|
||||
isHardTab = @tokenIterator.isHardTab()
|
||||
|
||||
if hasLeadingWhitespace = tokenStart < firstNonWhitespaceIndex
|
||||
tokenFirstNonWhitespaceIndex = firstNonWhitespaceIndex - tokenStart
|
||||
else
|
||||
tokenFirstNonWhitespaceIndex = null
|
||||
|
||||
if hasTrailingWhitespace = tokenEnd > firstTrailingWhitespaceIndex
|
||||
tokenFirstTrailingWhitespaceIndex = Math.max(0, firstTrailingWhitespaceIndex - tokenStart)
|
||||
else
|
||||
tokenFirstTrailingWhitespaceIndex = null
|
||||
|
||||
hasIndentGuide =
|
||||
@newState.indentGuidesVisible and
|
||||
(hasLeadingWhitespace or lineIsWhitespaceOnly)
|
||||
|
||||
hasInvisibleCharacters =
|
||||
(invisibles?.tab and isHardTab) or
|
||||
(invisibles?.space and (hasLeadingWhitespace or hasTrailingWhitespace))
|
||||
|
||||
innerHTML += @buildTokenHTML(tokenText, isHardTab, tokenFirstNonWhitespaceIndex, tokenFirstTrailingWhitespaceIndex, hasIndentGuide, hasInvisibleCharacters)
|
||||
|
||||
for scope in @tokenIterator.getScopeEnds()
|
||||
innerHTML += "</span>"
|
||||
|
||||
for scope in @tokenIterator.getScopes()
|
||||
innerHTML += "</span>"
|
||||
|
||||
innerHTML += @popScope(scopeStack) while scopeStack.length > 0
|
||||
innerHTML += @buildEndOfLineHTML(id)
|
||||
innerHTML
|
||||
|
||||
buildTokenHTML: (tokenText, isHardTab, firstNonWhitespaceIndex, firstTrailingWhitespaceIndex, hasIndentGuide, hasInvisibleCharacters) ->
|
||||
if isHardTab
|
||||
classes = 'hard-tab'
|
||||
classes += ' leading-whitespace' if firstNonWhitespaceIndex?
|
||||
classes += ' trailing-whitespace' if firstTrailingWhitespaceIndex?
|
||||
classes += ' indent-guide' if hasIndentGuide
|
||||
classes += ' invisible-character' if hasInvisibleCharacters
|
||||
return "<span class='#{classes}'>#{@escapeTokenText(tokenText)}</span>"
|
||||
else
|
||||
startIndex = 0
|
||||
endIndex = tokenText.length
|
||||
|
||||
leadingHtml = ''
|
||||
trailingHtml = ''
|
||||
|
||||
if firstNonWhitespaceIndex?
|
||||
leadingWhitespace = tokenText.substring(0, firstNonWhitespaceIndex)
|
||||
|
||||
classes = 'leading-whitespace'
|
||||
classes += ' indent-guide' if hasIndentGuide
|
||||
classes += ' invisible-character' if hasInvisibleCharacters
|
||||
|
||||
leadingHtml = "<span class='#{classes}'>#{leadingWhitespace}</span>"
|
||||
startIndex = firstNonWhitespaceIndex
|
||||
|
||||
if firstTrailingWhitespaceIndex?
|
||||
tokenIsOnlyWhitespace = firstTrailingWhitespaceIndex is 0
|
||||
trailingWhitespace = tokenText.substring(firstTrailingWhitespaceIndex)
|
||||
|
||||
classes = 'trailing-whitespace'
|
||||
classes += ' indent-guide' if hasIndentGuide and not firstNonWhitespaceIndex? and tokenIsOnlyWhitespace
|
||||
classes += ' invisible-character' if hasInvisibleCharacters
|
||||
|
||||
trailingHtml = "<span class='#{classes}'>#{trailingWhitespace}</span>"
|
||||
|
||||
endIndex = firstTrailingWhitespaceIndex
|
||||
|
||||
html = leadingHtml
|
||||
if tokenText.length > MaxTokenLength
|
||||
while startIndex < endIndex
|
||||
html += "<span>" + @escapeTokenText(tokenText, startIndex, startIndex + MaxTokenLength) + "</span>"
|
||||
startIndex += MaxTokenLength
|
||||
else
|
||||
html += @escapeTokenText(tokenText, startIndex, endIndex)
|
||||
|
||||
html += trailingHtml
|
||||
html
|
||||
|
||||
escapeTokenText: (tokenText, startIndex, endIndex) ->
|
||||
if startIndex? and endIndex? and startIndex > 0 or endIndex < tokenText.length
|
||||
tokenText = tokenText.slice(startIndex, endIndex)
|
||||
tokenText.replace(TokenTextEscapeRegex, @escapeTokenTextReplace)
|
||||
|
||||
escapeTokenTextReplace: (match) ->
|
||||
switch match
|
||||
when '&' then '&amp;'
when '"' then '&quot;'
when "'" then '&#39;'
when '<' then '&lt;'
when '>' then '&gt;'
else match
|
||||
|
||||
buildEndOfLineHTML: (id) ->
|
||||
{endOfLineInvisibles} = @newTileState.lines[id]
|
||||
|
||||
@@ -170,31 +270,6 @@ class TileComponent
|
||||
html += "<span class='invisible-character'>#{invisible}</span>"
|
||||
html
|
||||
|
||||
updateScopeStack: (scopeStack, desiredScopeDescriptor) ->
|
||||
html = ""
|
||||
|
||||
# Find a common prefix
|
||||
for scope, i in desiredScopeDescriptor
|
||||
break unless scopeStack[i] is desiredScopeDescriptor[i]
|
||||
|
||||
# Pop scopeDescriptor until we're at the common prefix
|
||||
until scopeStack.length is i
|
||||
html += @popScope(scopeStack)
|
||||
|
||||
# Push onto common prefix until scopeStack equals desiredScopeDescriptor
|
||||
for j in [i...desiredScopeDescriptor.length]
|
||||
html += @pushScope(scopeStack, desiredScopeDescriptor[j])
|
||||
|
||||
html
|
||||
|
||||
popScope: (scopeStack) ->
|
||||
scopeStack.pop()
|
||||
"</span>"
|
||||
|
||||
pushScope: (scopeStack, scope) ->
|
||||
scopeStack.push(scope)
|
||||
"<span class=\"#{scope.replace(/\.+/g, ' ')}\">"
|
||||
|
||||
updateLineNode: (id) ->
|
||||
oldLineState = @oldTileState.lines[id]
|
||||
newLineState = @newTileState.lines[id]
|
||||
@@ -243,19 +318,22 @@ class TileComponent
|
||||
iterator = null
|
||||
charIndex = 0
|
||||
|
||||
for {value, scopes, hasPairedCharacter} in tokenizedLine.tokens
|
||||
@tokenIterator.reset(tokenizedLine)
|
||||
while @tokenIterator.next()
|
||||
scopes = @tokenIterator.getScopes()
|
||||
text = @tokenIterator.getText()
|
||||
charWidths = @presenter.getScopedCharacterWidths(scopes)
|
||||
|
||||
valueIndex = 0
|
||||
while valueIndex < value.length
|
||||
if hasPairedCharacter
|
||||
char = value.substr(valueIndex, 2)
|
||||
textIndex = 0
|
||||
while textIndex < text.length
|
||||
if @tokenIterator.isPairedCharacter()
|
||||
char = text
|
||||
charLength = 2
|
||||
valueIndex += 2
|
||||
textIndex += 2
|
||||
else
|
||||
char = value[valueIndex]
|
||||
char = text[textIndex]
|
||||
charLength = 1
|
||||
valueIndex++
|
||||
textIndex++
|
||||
|
||||
continue if char is '\0'
|
||||
|
||||
|
||||
src/special-token-symbols.coffee (new file, 6 lines)
@@ -0,0 +1,6 @@
|
||||
module.exports = {
  SoftTab: Symbol('SoftTab')
  HardTab: Symbol('HardTab')
  PairedCharacter: Symbol('PairedCharacter')
  SoftWrapIndent: Symbol('SoftWrapIndent')
}
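Note: per the TokenizedLine changes later in this diff, these symbols are stored in a specialTokens map keyed by tag index, which TokenIterator consults. A hypothetical illustration (the concrete indices are invented):

# Hypothetical sketch only: a line whose first tag is a leading soft tab and
# whose third tag is a hard tab would carry a map like this; TokenIterator's
# isSoftTab()/isHardTab() check it against the current tag index.
{SoftTab, HardTab} = require './special-token-symbols'
specialTokens =
  0: SoftTab
  2: HardTab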
@@ -247,7 +247,6 @@ atom.commands.add 'atom-text-editor', stopEventPropagation(
|
||||
'core:select-up': -> @selectUp()
|
||||
'core:select-down': -> @selectDown()
|
||||
'core:select-all': -> @selectAll()
|
||||
'editor:move-to-previous-word': -> @moveToPreviousWord()
|
||||
'editor:select-word': -> @selectWordsContainingCursors()
|
||||
'editor:consolidate-selections': (event) -> event.abortKeyBinding() unless @consolidateSelections()
|
||||
'editor:move-to-beginning-of-next-paragraph': -> @moveToBeginningOfNextParagraph()
|
||||
|
||||
@@ -358,9 +358,14 @@ class TextEditorPresenter
|
||||
tileState.lines[line.id] =
|
||||
screenRow: row
|
||||
text: line.text
|
||||
tokens: line.tokens
|
||||
isOnlyWhitespace: line.isOnlyWhitespace()
|
||||
openScopes: line.openScopes
|
||||
tags: line.tags
|
||||
specialTokens: line.specialTokens
|
||||
firstNonWhitespaceIndex: line.firstNonWhitespaceIndex
|
||||
firstTrailingWhitespaceIndex: line.firstTrailingWhitespaceIndex
|
||||
invisibles: line.invisibles
|
||||
endOfLineInvisibles: line.endOfLineInvisibles
|
||||
isOnlyWhitespace: line.isOnlyWhitespace()
|
||||
indentLevel: line.indentLevel
|
||||
tabLength: line.tabLength
|
||||
fold: line.fold
|
||||
@@ -370,6 +375,7 @@ class TextEditorPresenter
|
||||
|
||||
for id, line of tileState.lines
|
||||
delete tileState.lines[id] unless visibleLineIds.hasOwnProperty(id)
|
||||
return
|
||||
|
||||
updateCursorsState: ->
|
||||
@state.content.cursors = {}
|
||||
@@ -1033,17 +1039,20 @@ class TextEditorPresenter
|
||||
top = targetRow * @lineHeight
|
||||
left = 0
|
||||
column = 0
|
||||
for token in @model.tokenizedLineForScreenRow(targetRow).tokens
|
||||
characterWidths = @getScopedCharacterWidths(token.scopes)
|
||||
|
||||
iterator = @model.tokenizedLineForScreenRow(targetRow).getTokenIterator()
|
||||
while iterator.next()
|
||||
characterWidths = @getScopedCharacterWidths(iterator.getScopes())
|
||||
|
||||
valueIndex = 0
|
||||
while valueIndex < token.value.length
|
||||
if token.hasPairedCharacter
|
||||
char = token.value.substr(valueIndex, 2)
|
||||
text = iterator.getText()
|
||||
while valueIndex < text.length
|
||||
if iterator.isPairedCharacter()
|
||||
char = text
|
||||
charLength = 2
|
||||
valueIndex += 2
|
||||
else
|
||||
char = token.value[valueIndex]
|
||||
char = text[valueIndex]
|
||||
charLength = 1
|
||||
valueIndex++
|
||||
|
||||
|
||||
@@ -2457,9 +2457,8 @@ class TextEditor extends Model
|
||||
# Extended: Determine if the given row is entirely a comment
|
||||
isBufferRowCommented: (bufferRow) ->
|
||||
if match = @lineTextForBufferRow(bufferRow).match(/\S/)
|
||||
scopeDescriptor = @tokenForBufferPosition([bufferRow, match.index]).scopes
|
||||
@commentScopeSelector ?= new TextMateScopeSelector('comment.*')
|
||||
@commentScopeSelector.matches(scopeDescriptor)
|
||||
@commentScopeSelector.matches(@scopeDescriptorForBufferPosition([bufferRow, match.index]).scopes)
|
||||
|
||||
logCursorScope: ->
|
||||
scopeDescriptor = @getLastCursor().getScopeDescriptor()
|
||||
|
||||
src/token-iterator.coffee (new file, 83 lines)
@@ -0,0 +1,83 @@
|
||||
{SoftTab, HardTab, PairedCharacter, SoftWrapIndent} = require './special-token-symbols'

module.exports =
class TokenIterator
  constructor: (line) ->
    @reset(line) if line?

  reset: (@line) ->
    @index = null
    @bufferStart = @line.startBufferColumn
    @bufferEnd = @bufferStart
    @screenStart = 0
    @screenEnd = 0
    @scopes = @line.openScopes.map (id) -> atom.grammars.scopeForId(id)
    @scopeStarts = @scopes.slice()
    @scopeEnds = []
    this

  next: ->
    {tags} = @line

    if @index?
      @index++
      @scopeEnds.length = 0
      @scopeStarts.length = 0
      @bufferStart = @bufferEnd
      @screenStart = @screenEnd
    else
      @index = 0

    while @index < tags.length
      tag = tags[@index]
      if tag < 0
        if tag % 2 is 0
          @scopeEnds.push(atom.grammars.scopeForId(tag + 1))
          @scopes.pop()
        else
          scope = atom.grammars.scopeForId(tag)
          @scopeStarts.push(scope)
          @scopes.push(scope)
        @index++
      else
        if @isHardTab()
          @screenEnd = @screenStart + tag
          @bufferEnd = @bufferStart + 1
        else if @isSoftWrapIndentation()
          @screenEnd = @screenStart + tag
          @bufferEnd = @bufferStart + 0
        else
          @screenEnd = @screenStart + tag
          @bufferEnd = @bufferStart + tag
        return true

    false

  getBufferStart: -> @bufferStart
  getBufferEnd: -> @bufferEnd

  getScreenStart: -> @screenStart
  getScreenEnd: -> @screenEnd

  getScopeStarts: -> @scopeStarts
  getScopeEnds: -> @scopeEnds

  getScopes: -> @scopes

  getText: ->
    @line.text.substring(@screenStart, @screenEnd)

  isSoftTab: ->
    @line.specialTokens[@index] is SoftTab

  isHardTab: ->
    @line.specialTokens[@index] is HardTab

  isSoftWrapIndentation: ->
    @line.specialTokens[@index] is SoftWrapIndent

  isPairedCharacter: ->
    @line.specialTokens[@index] is PairedCharacter

  isAtomic: ->
    @isSoftTab() or @isHardTab() or @isSoftWrapIndentation() or @isPairedCharacter()
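The call sites introduced elsewhere in this commit (buildLineInnerHTML, scopeDescriptorForPosition, the presenter's pixel-position loops) suggest the usage pattern sketched below. This is only an illustration; `line` stands in for a real TokenizedLine with text, tags, openScopes, specialTokens, and startBufferColumn already populated.

# Sketch of the intended iteration pattern, assuming `line` is a TokenizedLine.
TokenIterator = require './token-iterator'

iterator = new TokenIterator
iterator.reset(line)
while iterator.next()
  text = iterator.getText()        # screen text covered by this token
  scopes = iterator.getScopes()    # scopes currently open at this token
  console.log text, scopes, [iterator.getScreenStart(), iterator.getScreenEnd()]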
src/token.coffee (199 lines changed)
@@ -1,13 +1,8 @@
|
||||
_ = require 'underscore-plus'
|
||||
textUtils = require './text-utils'
|
||||
|
||||
WhitespaceRegexesByTabLength = {}
|
||||
EscapeRegex = /[&"'<>]/g
|
||||
StartDotRegex = /^\.?/
|
||||
WhitespaceRegex = /\S/
|
||||
|
||||
MaxTokenLength = 20000
|
||||
|
||||
# Represents a single unit of text as selected by a grammar.
|
||||
module.exports =
|
||||
class Token
|
||||
@@ -20,10 +15,14 @@ class Token
|
||||
firstTrailingWhitespaceIndex: null
|
||||
hasInvisibleCharacters: false
|
||||
|
||||
constructor: ({@value, @scopes, @isAtomic, @bufferDelta, @isHardTab, @hasPairedCharacter, @isSoftWrapIndentation}) ->
|
||||
constructor: (properties) ->
|
||||
{@value, @scopes, @isAtomic, @isHardTab, @bufferDelta} = properties
|
||||
{@hasInvisibleCharacters, @hasPairedCharacter, @isSoftWrapIndentation} = properties
|
||||
@firstNonWhitespaceIndex = properties.firstNonWhitespaceIndex ? null
|
||||
@firstTrailingWhitespaceIndex = properties.firstTrailingWhitespaceIndex ? null
|
||||
|
||||
@screenDelta = @value.length
|
||||
@bufferDelta ?= @screenDelta
|
||||
@hasPairedCharacter ?= textUtils.hasPairedCharacter(@value)
|
||||
|
||||
isEqual: (other) ->
|
||||
# TODO: scopes is deprecated. This is here for the sake of lang package tests
|
||||
@@ -32,126 +31,6 @@ class Token
|
||||
isBracket: ->
|
||||
/^meta\.brace\b/.test(_.last(@scopes))
|
||||
|
||||
splitAt: (splitIndex) ->
|
||||
leftToken = new Token(value: @value.substring(0, splitIndex), scopes: @scopes)
|
||||
rightToken = new Token(value: @value.substring(splitIndex), scopes: @scopes)
|
||||
|
||||
if @firstNonWhitespaceIndex?
|
||||
leftToken.firstNonWhitespaceIndex = Math.min(splitIndex, @firstNonWhitespaceIndex)
|
||||
leftToken.hasInvisibleCharacters = @hasInvisibleCharacters
|
||||
|
||||
if @firstNonWhitespaceIndex > splitIndex
|
||||
rightToken.firstNonWhitespaceIndex = @firstNonWhitespaceIndex - splitIndex
|
||||
rightToken.hasInvisibleCharacters = @hasInvisibleCharacters
|
||||
|
||||
if @firstTrailingWhitespaceIndex?
|
||||
rightToken.firstTrailingWhitespaceIndex = Math.max(0, @firstTrailingWhitespaceIndex - splitIndex)
|
||||
rightToken.hasInvisibleCharacters = @hasInvisibleCharacters
|
||||
|
||||
if @firstTrailingWhitespaceIndex < splitIndex
|
||||
leftToken.firstTrailingWhitespaceIndex = @firstTrailingWhitespaceIndex
|
||||
leftToken.hasInvisibleCharacters = @hasInvisibleCharacters
|
||||
|
||||
[leftToken, rightToken]
|
||||
|
||||
whitespaceRegexForTabLength: (tabLength) ->
|
||||
WhitespaceRegexesByTabLength[tabLength] ?= new RegExp("([ ]{#{tabLength}})|(\t)|([^\t]+)", "g")
|
||||
|
||||
breakOutAtomicTokens: (tabLength, breakOutLeadingSoftTabs, startColumn) ->
|
||||
if @hasPairedCharacter
|
||||
outputTokens = []
|
||||
column = startColumn
|
||||
|
||||
for token in @breakOutPairedCharacters()
|
||||
if token.isAtomic
|
||||
outputTokens.push(token)
|
||||
else
|
||||
outputTokens.push(token.breakOutAtomicTokens(tabLength, breakOutLeadingSoftTabs, column)...)
|
||||
breakOutLeadingSoftTabs = token.isOnlyWhitespace() if breakOutLeadingSoftTabs
|
||||
column += token.value.length
|
||||
|
||||
outputTokens
|
||||
else
|
||||
return [this] if @isAtomic
|
||||
|
||||
if breakOutLeadingSoftTabs
|
||||
return [this] unless /^[ ]|\t/.test(@value)
|
||||
else
|
||||
return [this] unless /\t/.test(@value)
|
||||
|
||||
outputTokens = []
|
||||
regex = @whitespaceRegexForTabLength(tabLength)
|
||||
column = startColumn
|
||||
while match = regex.exec(@value)
|
||||
[fullMatch, softTab, hardTab] = match
|
||||
token = null
|
||||
if softTab and breakOutLeadingSoftTabs
|
||||
token = @buildSoftTabToken(tabLength)
|
||||
else if hardTab
|
||||
breakOutLeadingSoftTabs = false
|
||||
token = @buildHardTabToken(tabLength, column)
|
||||
else
|
||||
breakOutLeadingSoftTabs = false
|
||||
value = match[0]
|
||||
token = new Token({value, @scopes})
|
||||
column += token.value.length
|
||||
outputTokens.push(token)
|
||||
|
||||
outputTokens
|
||||
|
||||
breakOutPairedCharacters: ->
|
||||
outputTokens = []
|
||||
index = 0
|
||||
nonPairStart = 0
|
||||
|
||||
while index < @value.length
|
||||
if textUtils.isPairedCharacter(@value, index)
|
||||
if nonPairStart isnt index
|
||||
outputTokens.push(new Token({value: @value[nonPairStart...index], @scopes}))
|
||||
outputTokens.push(@buildPairedCharacterToken(@value, index))
|
||||
index += 2
|
||||
nonPairStart = index
|
||||
else
|
||||
index++
|
||||
|
||||
if nonPairStart isnt index
|
||||
outputTokens.push(new Token({value: @value[nonPairStart...index], @scopes}))
|
||||
|
||||
outputTokens
|
||||
|
||||
buildPairedCharacterToken: (value, index) ->
|
||||
new Token(
|
||||
value: value[index..index + 1]
|
||||
scopes: @scopes
|
||||
isAtomic: true
|
||||
hasPairedCharacter: true
|
||||
)
|
||||
|
||||
buildHardTabToken: (tabLength, column) ->
|
||||
@buildTabToken(tabLength, true, column)
|
||||
|
||||
buildSoftTabToken: (tabLength) ->
|
||||
@buildTabToken(tabLength, false, 0)
|
||||
|
||||
buildTabToken: (tabLength, isHardTab, column=0) ->
|
||||
tabStop = tabLength - (column % tabLength)
|
||||
new Token(
|
||||
value: _.multiplyString(" ", tabStop)
|
||||
scopes: @scopes
|
||||
bufferDelta: if isHardTab then 1 else tabStop
|
||||
isAtomic: true
|
||||
isHardTab: isHardTab
|
||||
)
|
||||
|
||||
buildSoftWrapIndentationToken: (length) ->
|
||||
new Token(
|
||||
value: _.multiplyString(" ", length),
|
||||
scopes: @scopes,
|
||||
bufferDelta: 0,
|
||||
isAtomic: true,
|
||||
isSoftWrapIndentation: true
|
||||
)
|
||||
|
||||
isOnlyWhitespace: ->
|
||||
not WhitespaceRegex.test(@value)
|
||||
|
||||
@@ -161,72 +40,6 @@ class Token
|
||||
scopeClasses = scope.split('.')
|
||||
_.isSubset(targetClasses, scopeClasses)
|
||||
|
||||
getValueAsHtml: ({hasIndentGuide}) ->
|
||||
if @isHardTab
|
||||
classes = 'hard-tab'
|
||||
classes += ' leading-whitespace' if @hasLeadingWhitespace()
|
||||
classes += ' trailing-whitespace' if @hasTrailingWhitespace()
|
||||
classes += ' indent-guide' if hasIndentGuide
|
||||
classes += ' invisible-character' if @hasInvisibleCharacters
|
||||
html = "<span class='#{classes}'>#{@escapeString(@value)}</span>"
|
||||
else
|
||||
startIndex = 0
|
||||
endIndex = @value.length
|
||||
|
||||
leadingHtml = ''
|
||||
trailingHtml = ''
|
||||
|
||||
if @hasLeadingWhitespace()
|
||||
leadingWhitespace = @value.substring(0, @firstNonWhitespaceIndex)
|
||||
|
||||
classes = 'leading-whitespace'
|
||||
classes += ' indent-guide' if hasIndentGuide
|
||||
classes += ' invisible-character' if @hasInvisibleCharacters
|
||||
|
||||
leadingHtml = "<span class='#{classes}'>#{leadingWhitespace}</span>"
|
||||
startIndex = @firstNonWhitespaceIndex
|
||||
|
||||
if @hasTrailingWhitespace()
|
||||
tokenIsOnlyWhitespace = @firstTrailingWhitespaceIndex is 0
|
||||
trailingWhitespace = @value.substring(@firstTrailingWhitespaceIndex)
|
||||
|
||||
classes = 'trailing-whitespace'
|
||||
classes += ' indent-guide' if hasIndentGuide and not @hasLeadingWhitespace() and tokenIsOnlyWhitespace
|
||||
classes += ' invisible-character' if @hasInvisibleCharacters
|
||||
|
||||
trailingHtml = "<span class='#{classes}'>#{trailingWhitespace}</span>"
|
||||
|
||||
endIndex = @firstTrailingWhitespaceIndex
|
||||
|
||||
html = leadingHtml
|
||||
if @value.length > MaxTokenLength
|
||||
while startIndex < endIndex
|
||||
html += "<span>" + @escapeString(@value, startIndex, startIndex + MaxTokenLength) + "</span>"
|
||||
startIndex += MaxTokenLength
|
||||
else
|
||||
html += @escapeString(@value, startIndex, endIndex)
|
||||
|
||||
html += trailingHtml
|
||||
html
|
||||
|
||||
escapeString: (str, startIndex, endIndex) ->
|
||||
strLength = str.length
|
||||
|
||||
startIndex ?= 0
|
||||
endIndex ?= strLength
|
||||
|
||||
str = str.slice(startIndex, endIndex) if startIndex > 0 or endIndex < strLength
|
||||
str.replace(EscapeRegex, @escapeStringReplace)
|
||||
|
||||
escapeStringReplace: (match) ->
|
||||
switch match
|
||||
when '&' then '&amp;'
when '"' then '&quot;'
when "'" then '&#39;'
when '<' then '&lt;'
when '>' then '&gt;'
else match
|
||||
|
||||
hasLeadingWhitespace: ->
|
||||
@firstNonWhitespaceIndex? and @firstNonWhitespaceIndex > 0
|
||||
|
||||
|
||||
@@ -1,9 +1,11 @@
|
||||
_ = require 'underscore-plus'
|
||||
{CompositeDisposable, Emitter} = require 'event-kit'
|
||||
{Point, Range} = require 'text-buffer'
|
||||
{ScopeSelector} = require 'first-mate'
|
||||
Serializable = require 'serializable'
|
||||
Model = require './model'
|
||||
TokenizedLine = require './tokenized-line'
|
||||
TokenIterator = require './token-iterator'
|
||||
Token = require './token'
|
||||
ScopeDescriptor = require './scope-descriptor'
|
||||
Grim = require 'grim'
|
||||
@@ -25,6 +27,7 @@ class TokenizedBuffer extends Model
|
||||
constructor: ({@buffer, @tabLength, @ignoreInvisibles}) ->
|
||||
@emitter = new Emitter
|
||||
@disposables = new CompositeDisposable
|
||||
@tokenIterator = new TokenIterator
|
||||
|
||||
@disposables.add atom.grammars.onDidAddGrammar(@grammarAddedOrUpdated)
|
||||
@disposables.add atom.grammars.onDidUpdateGrammar(@grammarAddedOrUpdated)
|
||||
@@ -167,7 +170,7 @@ class TokenizedBuffer extends Model
|
||||
row = startRow
|
||||
loop
|
||||
previousStack = @stackForRow(row)
|
||||
@tokenizedLines[row] = @buildTokenizedLineForRow(row, @stackForRow(row - 1))
|
||||
@tokenizedLines[row] = @buildTokenizedLineForRow(row, @stackForRow(row - 1), @openScopesForRow(row))
|
||||
if --rowsRemaining is 0
|
||||
filledRegion = false
|
||||
endRow = row
|
||||
@@ -227,7 +230,7 @@ class TokenizedBuffer extends Model
|
||||
|
||||
@updateInvalidRows(start, end, delta)
|
||||
previousEndStack = @stackForRow(end) # used in spill detection below
|
||||
newTokenizedLines = @buildTokenizedLinesForRows(start, end + delta, @stackForRow(start - 1))
|
||||
newTokenizedLines = @buildTokenizedLinesForRows(start, end + delta, @stackForRow(start - 1), @openScopesForRow(start))
|
||||
_.spliceWithArray(@tokenizedLines, start, end - start + 1, newTokenizedLines)
|
||||
|
||||
start = @retokenizeWhitespaceRowsIfIndentLevelChanged(start - 1, -1)
|
||||
@@ -248,7 +251,7 @@ class TokenizedBuffer extends Model
|
||||
line = @tokenizedLines[row]
|
||||
if line?.isOnlyWhitespace() and @indentLevelForRow(row) isnt line.indentLevel
|
||||
while line?.isOnlyWhitespace()
|
||||
@tokenizedLines[row] = @buildTokenizedLineForRow(row, @stackForRow(row - 1))
|
||||
@tokenizedLines[row] = @buildTokenizedLineForRow(row, @stackForRow(row - 1), @openScopesForRow(row))
|
||||
row += increment
|
||||
line = @tokenizedLines[row]
|
||||
|
||||
@@ -290,16 +293,18 @@ class TokenizedBuffer extends Model
|
||||
@tokenizedLineForRow(row).isComment() and
|
||||
@tokenizedLineForRow(nextRow).isComment()
|
||||
|
||||
buildTokenizedLinesForRows: (startRow, endRow, startingStack) ->
|
||||
buildTokenizedLinesForRows: (startRow, endRow, startingStack, startingopenScopes) ->
|
||||
ruleStack = startingStack
|
||||
openScopes = startingopenScopes
|
||||
stopTokenizingAt = startRow + @chunkSize
|
||||
tokenizedLines = for row in [startRow..endRow]
|
||||
if (ruleStack or row is 0) and row < stopTokenizingAt
|
||||
screenLine = @buildTokenizedLineForRow(row, ruleStack)
|
||||
ruleStack = screenLine.ruleStack
|
||||
tokenizedLine = @buildTokenizedLineForRow(row, ruleStack, openScopes)
|
||||
ruleStack = tokenizedLine.ruleStack
|
||||
openScopes = @scopesFromTags(openScopes, tokenizedLine.tags)
|
||||
else
|
||||
screenLine = @buildPlaceholderTokenizedLineForRow(row)
|
||||
screenLine
|
||||
tokenizedLine = @buildPlaceholderTokenizedLineForRow(row, openScopes)
|
||||
tokenizedLine
|
||||
|
||||
if endRow >= stopTokenizingAt
|
||||
@invalidateRow(stopTokenizingAt)
|
||||
@@ -311,22 +316,23 @@ class TokenizedBuffer extends Model
|
||||
@buildPlaceholderTokenizedLineForRow(row) for row in [startRow..endRow]
|
||||
|
||||
buildPlaceholderTokenizedLineForRow: (row) ->
|
||||
line = @buffer.lineForRow(row)
|
||||
tokens = [new Token(value: line, scopes: [@grammar.scopeName])]
|
||||
openScopes = [@grammar.startIdForScope(@grammar.scopeName)]
|
||||
text = @buffer.lineForRow(row)
|
||||
tags = [text.length]
|
||||
tabLength = @getTabLength()
|
||||
indentLevel = @indentLevelForRow(row)
|
||||
lineEnding = @buffer.lineEndingForRow(row)
|
||||
new TokenizedLine({tokens, tabLength, indentLevel, invisibles: @getInvisiblesToShow(), lineEnding})
|
||||
new TokenizedLine({openScopes, text, tags, tabLength, indentLevel, invisibles: @getInvisiblesToShow(), lineEnding, @tokenIterator})
|
||||
|
||||
buildTokenizedLineForRow: (row, ruleStack) ->
|
||||
@buildTokenizedLineForRowWithText(row, @buffer.lineForRow(row), ruleStack)
|
||||
buildTokenizedLineForRow: (row, ruleStack, openScopes) ->
|
||||
@buildTokenizedLineForRowWithText(row, @buffer.lineForRow(row), ruleStack, openScopes)
|
||||
|
||||
buildTokenizedLineForRowWithText: (row, line, ruleStack = @stackForRow(row - 1)) ->
|
||||
buildTokenizedLineForRowWithText: (row, text, ruleStack = @stackForRow(row - 1), openScopes = @openScopesForRow(row)) ->
|
||||
lineEnding = @buffer.lineEndingForRow(row)
|
||||
tabLength = @getTabLength()
|
||||
indentLevel = @indentLevelForRow(row)
|
||||
{tokens, ruleStack} = @grammar.tokenizeLine(line, ruleStack, row is 0)
|
||||
new TokenizedLine({tokens, ruleStack, tabLength, lineEnding, indentLevel, invisibles: @getInvisiblesToShow()})
|
||||
{tags, ruleStack} = @grammar.tokenizeLine(text, ruleStack, row is 0, false)
|
||||
new TokenizedLine({openScopes, text, tags, ruleStack, tabLength, lineEnding, indentLevel, invisibles: @getInvisiblesToShow(), @tokenIterator})
|
||||
|
||||
getInvisiblesToShow: ->
|
||||
if @configSettings.showInvisibles and not @ignoreInvisibles
|
||||
@@ -340,6 +346,25 @@ class TokenizedBuffer extends Model
|
||||
stackForRow: (bufferRow) ->
|
||||
@tokenizedLines[bufferRow]?.ruleStack
|
||||
|
||||
openScopesForRow: (bufferRow) ->
|
||||
if bufferRow > 0
|
||||
precedingLine = @tokenizedLines[bufferRow - 1]
|
||||
@scopesFromTags(precedingLine.openScopes, precedingLine.tags)
|
||||
else
|
||||
[]
|
||||
|
||||
scopesFromTags: (startingScopes, tags) ->
|
||||
scopes = startingScopes.slice()
|
||||
for tag in tags when tag < 0
|
||||
if (tag % 2) is -1
|
||||
scopes.push(tag)
|
||||
else
|
||||
expectedScope = tag + 1
|
||||
poppedScope = scopes.pop()
|
||||
unless poppedScope is expectedScope
|
||||
throw new Error("Encountered an invalid scope end id. Popped #{poppedScope}, expected to pop #{expectedScope}.")
|
||||
scopes
|
||||
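For readers new to the tag encoding used above: non-negative tags are run lengths of line text, while negative tags are scope ids from the grammar registry, with an odd id opening a scope and the matching even id (the open id minus one) closing it, which is why the close handler expects to pop `tag + 1`. A hypothetical worked example (the ids are invented):

# Hypothetical: tags for a line that opens one scope around its last word.
# 2 chars of plain text, open scope -3, 3 chars inside it, close scope -4.
tags = [2, -3, 3, -4]
# scopesFromTags([], tags) pushes -3 when it sees the odd tag and pops it when
# it sees -4 (since -4 + 1 is -3), so the result here is the empty array [].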
|
||||
indentLevelForRow: (bufferRow) ->
|
||||
line = @buffer.lineForRow(bufferRow)
|
||||
indentLevel = 0
|
||||
@@ -376,7 +401,20 @@ class TokenizedBuffer extends Model
|
||||
0
|
||||
|
||||
scopeDescriptorForPosition: (position) ->
|
||||
new ScopeDescriptor(scopes: @tokenForPosition(position).scopes)
|
||||
{row, column} = Point.fromObject(position)
|
||||
|
||||
iterator = @tokenizedLines[row].getTokenIterator()
|
||||
while iterator.next()
|
||||
if iterator.getScreenEnd() > column
|
||||
scopes = iterator.getScopes()
|
||||
break
|
||||
|
||||
# rebuild scope of last token if we iterated off the end
|
||||
unless scopes?
|
||||
scopes = iterator.getScopes()
|
||||
scopes.push(iterator.getScopeEnds().reverse()...)
|
||||
|
||||
new ScopeDescriptor({scopes})
|
||||
|
||||
tokenForPosition: (position) ->
|
||||
{row, column} = Point.fromObject(position)
|
||||
@@ -388,85 +426,53 @@ class TokenizedBuffer extends Model
|
||||
new Point(row, column)
|
||||
|
||||
bufferRangeForScopeAtPosition: (selector, position) ->
|
||||
selector = new ScopeSelector(selector.replace(/^\./, ''))
|
||||
position = Point.fromObject(position)
|
||||
tokenizedLine = @tokenizedLines[position.row]
|
||||
startIndex = tokenizedLine.tokenIndexAtBufferColumn(position.column)
|
||||
|
||||
for index in [startIndex..0]
|
||||
token = tokenizedLine.tokenAtIndex(index)
|
||||
break unless token.matchesScopeSelector(selector)
|
||||
firstToken = token
|
||||
{openScopes, tags} = @tokenizedLines[position.row]
|
||||
scopes = openScopes.map (tag) -> atom.grammars.scopeForId(tag)
|
||||
|
||||
for index in [startIndex...tokenizedLine.getTokenCount()]
|
||||
token = tokenizedLine.tokenAtIndex(index)
|
||||
break unless token.matchesScopeSelector(selector)
|
||||
lastToken = token
|
||||
startColumn = 0
|
||||
for tag, tokenIndex in tags
|
||||
if tag < 0
|
||||
if tag % 2 is -1
|
||||
scopes.push(atom.grammars.scopeForId(tag))
|
||||
else
|
||||
scopes.pop()
|
||||
else
|
||||
endColumn = startColumn + tag
|
||||
if endColumn > position.column
|
||||
break
|
||||
else
|
||||
startColumn = endColumn
|
||||
|
||||
return unless firstToken? and lastToken?
|
||||
return unless selector.matches(scopes)
|
||||
|
||||
startColumn = tokenizedLine.bufferColumnForToken(firstToken)
|
||||
endColumn = tokenizedLine.bufferColumnForToken(lastToken) + lastToken.bufferDelta
|
||||
new Range([position.row, startColumn], [position.row, endColumn])
|
||||
startScopes = scopes.slice()
|
||||
for startTokenIndex in [(tokenIndex - 1)..0] by -1
|
||||
tag = tags[startTokenIndex]
|
||||
if tag < 0
|
||||
if tag % 2 is -1
|
||||
startScopes.pop()
|
||||
else
|
||||
startScopes.push(atom.grammars.scopeForId(tag))
|
||||
else
|
||||
break unless selector.matches(startScopes)
|
||||
startColumn -= tag
|
||||
|
||||
iterateTokensInBufferRange: (bufferRange, iterator) ->
|
||||
bufferRange = Range.fromObject(bufferRange)
|
||||
{start, end} = bufferRange
|
||||
endScopes = scopes.slice()
|
||||
for endTokenIndex in [(tokenIndex + 1)...tags.length] by 1
|
||||
tag = tags[endTokenIndex]
|
||||
if tag < 0
|
||||
if tag % 2 is -1
|
||||
endScopes.push(atom.grammars.scopeForId(tag))
|
||||
else
|
||||
endScopes.pop()
|
||||
else
|
||||
break unless selector.matches(endScopes)
|
||||
endColumn += tag
|
||||
|
||||
keepLooping = true
|
||||
stop = -> keepLooping = false
|
||||
|
||||
for bufferRow in [start.row..end.row]
|
||||
bufferColumn = 0
|
||||
for token in @tokenizedLines[bufferRow].tokens
|
||||
startOfToken = new Point(bufferRow, bufferColumn)
|
||||
iterator(token, startOfToken, {stop}) if bufferRange.containsPoint(startOfToken)
|
||||
return unless keepLooping
|
||||
bufferColumn += token.bufferDelta
|
||||
|
||||
backwardsIterateTokensInBufferRange: (bufferRange, iterator) ->
|
||||
bufferRange = Range.fromObject(bufferRange)
|
||||
{start, end} = bufferRange
|
||||
|
||||
keepLooping = true
|
||||
stop = -> keepLooping = false
|
||||
|
||||
for bufferRow in [end.row..start.row]
|
||||
bufferColumn = @buffer.lineLengthForRow(bufferRow)
|
||||
for token in new Array(@tokenizedLines[bufferRow].tokens...).reverse()
|
||||
bufferColumn -= token.bufferDelta
|
||||
startOfToken = new Point(bufferRow, bufferColumn)
|
||||
iterator(token, startOfToken, {stop}) if bufferRange.containsPoint(startOfToken)
|
||||
return unless keepLooping
|
||||
|
||||
findOpeningBracket: (startBufferPosition) ->
|
||||
range = [[0,0], startBufferPosition]
|
||||
position = null
|
||||
depth = 0
|
||||
@backwardsIterateTokensInBufferRange range, (token, startPosition, {stop}) ->
|
||||
if token.isBracket()
|
||||
if token.value is '}'
|
||||
depth++
|
||||
else if token.value is '{'
|
||||
depth--
|
||||
if depth is 0
|
||||
position = startPosition
|
||||
stop()
|
||||
position
|
||||
|
||||
findClosingBracket: (startBufferPosition) ->
|
||||
range = [startBufferPosition, @buffer.getEndPosition()]
|
||||
position = null
|
||||
depth = 0
|
||||
@iterateTokensInBufferRange range, (token, startPosition, {stop}) ->
|
||||
if token.isBracket()
|
||||
if token.value is '{'
|
||||
depth++
|
||||
else if token.value is '}'
|
||||
depth--
|
||||
if depth is 0
|
||||
position = startPosition
|
||||
stop()
|
||||
position
|
||||
new Range(new Point(position.row, startColumn), new Point(position.row, endColumn))
|
||||
|
||||
# Gets the row number of the last line.
|
||||
#
|
||||
|
||||
@@ -1,10 +1,13 @@
|
||||
_ = require 'underscore-plus'
|
||||
{isPairedCharacter} = require './text-utils'
|
||||
Token = require './token'
|
||||
{SoftTab, HardTab, PairedCharacter, SoftWrapIndent} = require './special-token-symbols'
|
||||
|
||||
NonWhitespaceRegex = /\S/
|
||||
LeadingWhitespaceRegex = /^\s*/
|
||||
TrailingWhitespaceRegex = /\s*$/
|
||||
RepeatedSpaceRegex = /[ ]/g
|
||||
CommentScopeRegex = /(\b|\.)comment/
|
||||
idCounter = 1
|
||||
|
||||
module.exports =
|
||||
@@ -14,32 +17,181 @@ class TokenizedLine
|
||||
firstNonWhitespaceIndex: 0
|
||||
foldable: false
|
||||
|
||||
constructor: ({tokens, @lineEnding, @ruleStack, @startBufferColumn, @fold, @tabLength, @indentLevel, @invisibles}) ->
|
||||
@startBufferColumn ?= 0
|
||||
@tokens = @breakOutAtomicTokens(tokens)
|
||||
@text = @buildText()
|
||||
@bufferDelta = @buildBufferDelta()
|
||||
@softWrapIndentationTokens = @getSoftWrapIndentationTokens()
|
||||
@softWrapIndentationDelta = @buildSoftWrapIndentationDelta()
|
||||
|
||||
constructor: (properties) ->
|
||||
@id = idCounter++
|
||||
@markLeadingAndTrailingWhitespaceTokens()
|
||||
if @invisibles
|
||||
@substituteInvisibleCharacters()
|
||||
@buildEndOfLineInvisibles() if @lineEnding?
|
||||
|
||||
buildText: ->
|
||||
text = ""
|
||||
text += token.value for token in @tokens
|
||||
text
|
||||
return unless properties?
|
||||
|
||||
buildBufferDelta: ->
|
||||
delta = 0
|
||||
delta += token.bufferDelta for token in @tokens
|
||||
delta
|
||||
@specialTokens = {}
|
||||
{@openScopes, @text, @tags, @lineEnding, @ruleStack, @tokenIterator} = properties
|
||||
{@startBufferColumn, @fold, @tabLength, @indentLevel, @invisibles} = properties
|
||||
|
||||
@startBufferColumn ?= 0
|
||||
@bufferDelta = @text.length
|
||||
|
||||
@transformContent()
|
||||
@buildEndOfLineInvisibles() if @invisibles? and @lineEnding?
|
||||
|
||||
transformContent: ->
|
||||
text = ''
|
||||
bufferColumn = 0
|
||||
screenColumn = 0
|
||||
tokenIndex = 0
|
||||
tokenOffset = 0
|
||||
firstNonWhitespaceColumn = null
|
||||
lastNonWhitespaceColumn = null
|
||||
|
||||
while bufferColumn < @text.length
|
||||
# advance to next token if we've iterated over its length
|
||||
if tokenOffset is @tags[tokenIndex]
|
||||
tokenIndex++
|
||||
tokenOffset = 0
|
||||
|
||||
# advance to next token tag
|
||||
tokenIndex++ while @tags[tokenIndex] < 0
|
||||
|
||||
character = @text[bufferColumn]
|
||||
|
||||
# split out unicode surrogate pairs
|
||||
if isPairedCharacter(@text, bufferColumn)
|
||||
prefix = tokenOffset
|
||||
suffix = @tags[tokenIndex] - tokenOffset - 2
|
||||
splitTokens = []
|
||||
splitTokens.push(prefix) if prefix > 0
|
||||
splitTokens.push(2)
|
||||
splitTokens.push(suffix) if suffix > 0
|
||||
|
||||
@tags.splice(tokenIndex, 1, splitTokens...)
|
||||
|
||||
firstNonWhitespaceColumn ?= screenColumn
|
||||
lastNonWhitespaceColumn = screenColumn + 1
|
||||
|
||||
text += @text.substr(bufferColumn, 2)
|
||||
screenColumn += 2
|
||||
bufferColumn += 2
|
||||
|
||||
tokenIndex++ if prefix > 0
|
||||
@specialTokens[tokenIndex] = PairedCharacter
|
||||
tokenIndex++
|
||||
tokenOffset = 0
|
||||
|
||||
# split out leading soft tabs
|
||||
else if character is ' '
|
||||
if firstNonWhitespaceColumn?
|
||||
text += ' '
|
||||
else
|
||||
if (screenColumn + 1) % @tabLength is 0
|
||||
@specialTokens[tokenIndex] = SoftTab
|
||||
suffix = @tags[tokenIndex] - @tabLength
|
||||
@tags.splice(tokenIndex, 1, @tabLength)
|
||||
@tags.splice(tokenIndex + 1, 0, suffix) if suffix > 0
|
||||
text += @invisibles?.space ? ' '
|
||||
|
||||
screenColumn++
|
||||
bufferColumn++
|
||||
tokenOffset++
|
||||
|
||||
# expand hard tabs to the next tab stop
|
||||
else if character is '\t'
|
||||
tabLength = @tabLength - (screenColumn % @tabLength)
|
||||
if @invisibles?.tab
|
||||
text += @invisibles.tab
|
||||
else
|
||||
text += ' '
|
||||
text += ' ' for i in [1...tabLength] by 1
|
||||
|
||||
prefix = tokenOffset
|
||||
suffix = @tags[tokenIndex] - tokenOffset - 1
|
||||
splitTokens = []
|
||||
splitTokens.push(prefix) if prefix > 0
|
||||
splitTokens.push(tabLength)
|
||||
splitTokens.push(suffix) if suffix > 0
|
||||
|
||||
@tags.splice(tokenIndex, 1, splitTokens...)
|
||||
|
||||
screenColumn += tabLength
|
||||
bufferColumn++
|
||||
|
||||
tokenIndex++ if prefix > 0
|
||||
@specialTokens[tokenIndex] = HardTab
|
||||
tokenIndex++
|
||||
tokenOffset = 0
|
||||
|
||||
# continue past any other character
|
||||
else
|
||||
firstNonWhitespaceColumn ?= screenColumn
|
||||
lastNonWhitespaceColumn = screenColumn
|
||||
|
||||
text += character
|
||||
screenColumn++
|
||||
bufferColumn++
|
||||
tokenOffset++
|
||||
|
||||
@text = text
|
||||
|
||||
@firstNonWhitespaceIndex = firstNonWhitespaceColumn
|
||||
if lastNonWhitespaceColumn?
|
||||
if lastNonWhitespaceColumn + 1 < @text.length
|
||||
@firstTrailingWhitespaceIndex = lastNonWhitespaceColumn + 1
|
||||
if @invisibles?.space
|
||||
@text =
|
||||
@text.substring(0, @firstTrailingWhitespaceIndex) +
|
||||
@text.substring(@firstTrailingWhitespaceIndex)
|
||||
.replace(RepeatedSpaceRegex, @invisibles.space)
|
||||
else
|
||||
@lineIsWhitespaceOnly = true
|
||||
@firstTrailingWhitespaceIndex = 0
|
||||
|
||||
getTokenIterator: -> @tokenIterator.reset(this)
|
||||
|
||||
Object.defineProperty @prototype, 'tokens', get: ->
|
||||
iterator = @getTokenIterator()
|
||||
tokens = []
|
||||
|
||||
while iterator.next()
|
||||
properties = {
|
||||
value: iterator.getText()
|
||||
scopes: iterator.getScopes().slice()
|
||||
isAtomic: iterator.isAtomic()
|
||||
isHardTab: iterator.isHardTab()
|
||||
hasPairedCharacter: iterator.isPairedCharacter()
|
||||
isSoftWrapIndentation: iterator.isSoftWrapIndentation()
|
||||
}
|
||||
|
||||
if iterator.isHardTab()
|
||||
properties.bufferDelta = 1
|
||||
properties.hasInvisibleCharacters = true if @invisibles?.tab
|
||||
|
||||
if iterator.getScreenStart() < @firstNonWhitespaceIndex
|
||||
properties.firstNonWhitespaceIndex =
|
||||
Math.min(@firstNonWhitespaceIndex, iterator.getScreenEnd()) - iterator.getScreenStart()
|
||||
properties.hasInvisibleCharacters = true if @invisibles?.space
|
||||
|
||||
if @lineEnding? and iterator.getScreenEnd() > @firstTrailingWhitespaceIndex
|
||||
properties.firstTrailingWhitespaceIndex =
|
||||
Math.max(0, @firstTrailingWhitespaceIndex - iterator.getScreenStart())
|
||||
properties.hasInvisibleCharacters = true if @invisibles?.space
|
||||
|
||||
tokens.push(new Token(properties))
|
||||
|
||||
tokens
|
||||
|
||||
copy: ->
|
||||
new TokenizedLine({@tokens, @lineEnding, @ruleStack, @startBufferColumn, @fold})
|
||||
copy = new TokenizedLine
|
||||
copy.tokenIterator = @tokenIterator
|
||||
copy.indentLevel = @indentLevel
|
||||
copy.openScopes = @openScopes
|
||||
copy.text = @text
|
||||
copy.tags = @tags
|
||||
copy.specialTokens = @specialTokens
|
||||
copy.firstNonWhitespaceIndex = @firstNonWhitespaceIndex
|
||||
copy.firstTrailingWhitespaceIndex = @firstTrailingWhitespaceIndex
|
||||
copy.lineEnding = @lineEnding
|
||||
copy.endOfLineInvisibles = @endOfLineInvisibles
|
||||
copy.ruleStack = @ruleStack
|
||||
copy.startBufferColumn = @startBufferColumn
|
||||
copy.fold = @fold
|
||||
copy
|
||||
|
||||
# This clips a given screen column to a valid column that's within the line
|
||||
# and not in the middle of any atomic tokens.
|
||||
@@ -52,49 +204,58 @@ class TokenizedLine
|
||||
#
|
||||
# Returns a {Number} representing the clipped column.
|
||||
clipScreenColumn: (column, options={}) ->
|
||||
return 0 if @tokens.length is 0
|
||||
return 0 if @tags.length is 0
|
||||
|
||||
{clip} = options
|
||||
column = Math.min(column, @getMaxScreenColumn())
|
||||
|
||||
tokenStartColumn = 0
|
||||
for token in @tokens
|
||||
break if tokenStartColumn + token.screenDelta > column
|
||||
tokenStartColumn += token.screenDelta
|
||||
|
||||
if @isColumnInsideSoftWrapIndentation(tokenStartColumn)
|
||||
@softWrapIndentationDelta
|
||||
else if token.isAtomic and tokenStartColumn < column
|
||||
iterator = @getTokenIterator()
|
||||
while iterator.next()
|
||||
break if iterator.getScreenEnd() > column
|
||||
|
||||
if iterator.isSoftWrapIndentation()
|
||||
iterator.next() while iterator.isSoftWrapIndentation()
|
||||
iterator.getScreenStart()
|
||||
else if iterator.isAtomic() and iterator.getScreenStart() < column
|
||||
if clip is 'forward'
|
||||
tokenStartColumn + token.screenDelta
|
||||
iterator.getScreenEnd()
|
||||
else if clip is 'backward'
|
||||
tokenStartColumn
|
||||
iterator.getScreenStart()
|
||||
else #'closest'
|
||||
if column > tokenStartColumn + (token.screenDelta / 2)
|
||||
tokenStartColumn + token.screenDelta
|
||||
if column > ((iterator.getScreenStart() + iterator.getScreenEnd()) / 2)
|
||||
iterator.getScreenEnd()
|
||||
else
|
||||
tokenStartColumn
|
||||
iterator.getScreenStart()
|
||||
else
|
||||
column
|
||||
|
||||
screenColumnForBufferColumn: (bufferColumn, options) ->
|
||||
bufferColumn = bufferColumn - @startBufferColumn
|
||||
screenColumn = 0
|
||||
currentBufferColumn = 0
|
||||
for token in @tokens
|
||||
break if currentBufferColumn + token.bufferDelta > bufferColumn
|
||||
screenColumn += token.screenDelta
|
||||
currentBufferColumn += token.bufferDelta
|
||||
@clipScreenColumn(screenColumn + (bufferColumn - currentBufferColumn))
|
||||
screenColumnForBufferColumn: (targetBufferColumn, options) ->
|
||||
iterator = @getTokenIterator()
|
||||
while iterator.next()
|
||||
tokenBufferStart = iterator.getBufferStart()
|
||||
tokenBufferEnd = iterator.getBufferEnd()
|
||||
if tokenBufferStart <= targetBufferColumn < tokenBufferEnd
|
||||
overshoot = targetBufferColumn - tokenBufferStart
|
||||
return Math.min(
|
||||
iterator.getScreenStart() + overshoot,
|
||||
iterator.getScreenEnd()
|
||||
)
|
||||
iterator.getScreenEnd()
|
||||
|
||||
bufferColumnForScreenColumn: (screenColumn, options) ->
|
||||
bufferColumn = @startBufferColumn
|
||||
currentScreenColumn = 0
|
||||
for token in @tokens
|
||||
break if currentScreenColumn + token.screenDelta > screenColumn
|
||||
bufferColumn += token.bufferDelta
|
||||
currentScreenColumn += token.screenDelta
|
||||
bufferColumn + (screenColumn - currentScreenColumn)
|
||||
bufferColumnForScreenColumn: (targetScreenColumn) ->
|
||||
iterator = @getTokenIterator()
|
||||
while iterator.next()
|
||||
tokenScreenStart = iterator.getScreenStart()
|
||||
tokenScreenEnd = iterator.getScreenEnd()
|
||||
if tokenScreenStart <= targetScreenColumn < tokenScreenEnd
|
||||
overshoot = targetScreenColumn - tokenScreenStart
|
||||
return Math.min(
|
||||
iterator.getBufferStart() + overshoot,
|
||||
iterator.getBufferEnd()
|
||||
)
|
||||
iterator.getBufferEnd()
|
||||
|
||||
getMaxScreenColumn: ->
|
||||
if @fold
|
||||
@@ -128,69 +289,128 @@ class TokenizedLine
|
||||
|
||||
return maxColumn
|
||||
|
||||
buildSoftWrapIndentationTokens: (token, hangingIndent) ->
|
||||
totalIndentSpaces = (@indentLevel * @tabLength) + hangingIndent
|
||||
indentTokens = []
|
||||
while totalIndentSpaces > 0
|
||||
tokenLength = Math.min(@tabLength, totalIndentSpaces)
|
||||
indentToken = token.buildSoftWrapIndentationToken(tokenLength)
|
||||
indentTokens.push(indentToken)
|
||||
totalIndentSpaces -= tokenLength
|
||||
|
||||
indentTokens
|
||||
|
||||
softWrapAt: (column, hangingIndent) ->
|
||||
return [new TokenizedLine([], '', [0, 0], [0, 0]), this] if column is 0
|
||||
return [null, this] if column is 0
|
||||
|
||||
rightTokens = new Array(@tokens...)
|
||||
leftTokens = []
|
||||
leftScreenColumn = 0
|
||||
leftText = @text.substring(0, column)
|
||||
rightText = @text.substring(column)
|
||||
|
||||
while leftScreenColumn < column
|
||||
if leftScreenColumn + rightTokens[0].screenDelta > column
|
||||
rightTokens[0..0] = rightTokens[0].splitAt(column - leftScreenColumn)
|
||||
nextToken = rightTokens.shift()
|
||||
leftScreenColumn += nextToken.screenDelta
|
||||
leftTokens.push nextToken
|
||||
leftTags = []
|
||||
rightTags = []
|
||||
|
||||
indentationTokens = @buildSoftWrapIndentationTokens(leftTokens[0], hangingIndent)
|
||||
leftSpecialTokens = {}
|
||||
rightSpecialTokens = {}
|
||||
|
||||
rightOpenScopes = @openScopes.slice()
|
||||
|
||||
screenColumn = 0
|
||||
|
||||
for tag, index in @tags
|
||||
# tag represents a token
|
||||
if tag >= 0
|
||||
# token ends before the soft wrap column
|
||||
if screenColumn + tag <= column
|
||||
if specialToken = @specialTokens[index]
|
||||
leftSpecialTokens[index] = specialToken
|
||||
leftTags.push(tag)
|
||||
screenColumn += tag
|
||||
|
||||
# token starts before and ends after the split column
|
||||
else if screenColumn <= column
|
||||
leftSuffix = column - screenColumn
|
||||
rightPrefix = screenColumn + tag - column
|
||||
|
||||
leftTags.push(leftSuffix) if leftSuffix > 0
|
||||
|
||||
softWrapIndent = @indentLevel * @tabLength + (hangingIndent ? 0)
|
||||
for i in [0...softWrapIndent] by 1
|
||||
rightText = ' ' + rightText
|
||||
remainingSoftWrapIndent = softWrapIndent
|
||||
while remainingSoftWrapIndent > 0
|
||||
indentToken = Math.min(remainingSoftWrapIndent, @tabLength)
|
||||
rightSpecialTokens[rightTags.length] = SoftWrapIndent
|
||||
rightTags.push(indentToken)
|
||||
remainingSoftWrapIndent -= indentToken
|
||||
|
||||
rightTags.push(rightPrefix) if rightPrefix > 0
|
||||
|
||||
screenColumn += tag
|
||||
|
||||
# token is after split column
|
||||
else
|
||||
if specialToken = @specialTokens[index]
|
||||
rightSpecialTokens[rightTags.length] = specialToken
|
||||
rightTags.push(tag)
|
||||
|
||||
# tag represents the start or end of a scope
|
||||
else if (tag % 2) is -1
|
||||
if screenColumn < column
|
||||
leftTags.push(tag)
|
||||
rightOpenScopes.push(tag)
|
||||
else
|
||||
rightTags.push(tag)
|
||||
else
|
||||
if screenColumn < column
|
||||
leftTags.push(tag)
|
||||
rightOpenScopes.pop()
|
||||
else
|
||||
rightTags.push(tag)
|
||||
|
||||
splitBufferColumn = @bufferColumnForScreenColumn(column)
|
||||
|
||||
leftFragment = new TokenizedLine
|
||||
leftFragment.tokenIterator = @tokenIterator
|
||||
leftFragment.openScopes = @openScopes
|
||||
leftFragment.text = leftText
|
||||
leftFragment.tags = leftTags
|
||||
leftFragment.specialTokens = leftSpecialTokens
|
||||
leftFragment.startBufferColumn = @startBufferColumn
|
||||
leftFragment.bufferDelta = splitBufferColumn - @startBufferColumn
|
||||
leftFragment.ruleStack = @ruleStack
|
||||
leftFragment.invisibles = @invisibles
|
||||
leftFragment.lineEnding = null
|
||||
leftFragment.indentLevel = @indentLevel
|
||||
leftFragment.tabLength = @tabLength
|
||||
leftFragment.firstNonWhitespaceIndex = Math.min(column, @firstNonWhitespaceIndex)
|
||||
leftFragment.firstTrailingWhitespaceIndex = Math.min(column, @firstTrailingWhitespaceIndex)
|
||||
|
||||
rightFragment = new TokenizedLine
|
||||
rightFragment.tokenIterator = @tokenIterator
|
||||
rightFragment.openScopes = rightOpenScopes
|
||||
rightFragment.text = rightText
|
||||
rightFragment.tags = rightTags
|
||||
rightFragment.specialTokens = rightSpecialTokens
|
||||
rightFragment.startBufferColumn = splitBufferColumn
|
||||
rightFragment.bufferDelta = @bufferDelta - splitBufferColumn
|
||||
rightFragment.ruleStack = @ruleStack
|
||||
rightFragment.invisibles = @invisibles
|
||||
rightFragment.lineEnding = @lineEnding
|
||||
rightFragment.indentLevel = @indentLevel
|
||||
rightFragment.tabLength = @tabLength
|
||||
rightFragment.endOfLineInvisibles = @endOfLineInvisibles
|
||||
rightFragment.firstNonWhitespaceIndex = Math.max(softWrapIndent, @firstNonWhitespaceIndex - column + softWrapIndent)
|
||||
rightFragment.firstTrailingWhitespaceIndex = Math.max(softWrapIndent, @firstTrailingWhitespaceIndex - column + softWrapIndent)
|
||||
|
||||
leftFragment = new TokenizedLine(
|
||||
tokens: leftTokens
|
||||
startBufferColumn: @startBufferColumn
|
||||
ruleStack: @ruleStack
|
||||
invisibles: @invisibles
|
||||
lineEnding: null,
|
||||
indentLevel: @indentLevel,
|
||||
tabLength: @tabLength
|
||||
)
|
||||
rightFragment = new TokenizedLine(
|
||||
tokens: indentationTokens.concat(rightTokens)
|
||||
startBufferColumn: @bufferColumnForScreenColumn(column)
|
||||
ruleStack: @ruleStack
|
||||
invisibles: @invisibles
|
||||
lineEnding: @lineEnding,
|
||||
indentLevel: @indentLevel,
|
||||
tabLength: @tabLength
|
||||
)
|
||||
[leftFragment, rightFragment]
|
||||
|
||||
isSoftWrapped: ->
|
||||
@lineEnding is null
|
||||
|
||||
isColumnInsideSoftWrapIndentation: (column) ->
|
||||
return false if @softWrapIndentationTokens.length is 0
|
||||
isColumnInsideSoftWrapIndentation: (targetColumn) ->
|
||||
targetColumn < @getSoftWrapIndentationDelta()
|
||||
|
||||
column < @softWrapIndentationDelta
|
||||
|
||||
getSoftWrapIndentationTokens: ->
|
||||
_.select(@tokens, (token) -> token.isSoftWrapIndentation)
|
||||
|
||||
buildSoftWrapIndentationDelta: ->
|
||||
_.reduce @softWrapIndentationTokens, ((acc, token) -> acc + token.screenDelta), 0
|
||||
getSoftWrapIndentationDelta: ->
|
||||
delta = 0
|
||||
for tag, index in @tags
|
||||
if tag >= 0
|
||||
if @specialTokens[index] is SoftWrapIndent
|
||||
delta += tag
|
||||
else
|
||||
break
|
||||
delta
|
||||
|
||||
hasOnlySoftWrapIndentation: ->
|
||||
@tokens.length is @softWrapIndentationTokens.length
|
||||
@getSoftWrapIndentationDelta() is @text.length
|
||||
|
||||
tokenAtBufferColumn: (bufferColumn) ->
|
||||
@tokens[@tokenIndexAtBufferColumn(bufferColumn)]
|
||||
@@ -210,58 +430,6 @@ class TokenizedLine
|
||||
delta = nextDelta
|
||||
delta
|
||||
|
||||
breakOutAtomicTokens: (inputTokens) ->
|
||||
outputTokens = []
|
||||
breakOutLeadingSoftTabs = true
|
||||
column = @startBufferColumn
|
||||
for token in inputTokens
|
||||
newTokens = token.breakOutAtomicTokens(@tabLength, breakOutLeadingSoftTabs, column)
|
||||
column += newToken.value.length for newToken in newTokens
|
||||
outputTokens.push(newTokens...)
|
||||
breakOutLeadingSoftTabs = token.isOnlyWhitespace() if breakOutLeadingSoftTabs
|
||||
outputTokens
|
||||
|
||||
markLeadingAndTrailingWhitespaceTokens: ->
|
||||
@firstNonWhitespaceIndex = @text.search(NonWhitespaceRegex)
|
||||
if @firstNonWhitespaceIndex > 0 and isPairedCharacter(@text, @firstNonWhitespaceIndex - 1)
|
||||
@firstNonWhitespaceIndex--
|
||||
firstTrailingWhitespaceIndex = @text.search(TrailingWhitespaceRegex)
|
||||
@lineIsWhitespaceOnly = firstTrailingWhitespaceIndex is 0
|
||||
index = 0
|
||||
for token in @tokens
|
||||
if index < @firstNonWhitespaceIndex
|
||||
token.firstNonWhitespaceIndex = Math.min(index + token.value.length, @firstNonWhitespaceIndex - index)
|
||||
# Only the *last* segment of a soft-wrapped line can have trailing whitespace
|
||||
if @lineEnding? and (index + token.value.length > firstTrailingWhitespaceIndex)
|
||||
token.firstTrailingWhitespaceIndex = Math.max(0, firstTrailingWhitespaceIndex - index)
|
||||
index += token.value.length
|
||||
return
|
||||
|
||||
substituteInvisibleCharacters: ->
|
||||
invisibles = @invisibles
|
||||
changedText = false
|
||||
|
||||
for token, i in @tokens
|
||||
if token.isHardTab
|
||||
if invisibles.tab
|
||||
token.value = invisibles.tab + token.value.substring(invisibles.tab.length)
|
||||
token.hasInvisibleCharacters = true
|
||||
changedText = true
|
||||
else
|
||||
if invisibles.space
|
||||
if token.hasLeadingWhitespace() and not token.isSoftWrapIndentation
|
||||
token.value = token.value.replace LeadingWhitespaceRegex, (leadingWhitespace) ->
|
||||
leadingWhitespace.replace RepeatedSpaceRegex, invisibles.space
|
||||
token.hasInvisibleCharacters = true
|
||||
changedText = true
|
||||
if token.hasTrailingWhitespace()
|
||||
token.value = token.value.replace TrailingWhitespaceRegex, (leadingWhitespace) ->
|
||||
leadingWhitespace.replace RepeatedSpaceRegex, invisibles.space
|
||||
token.hasInvisibleCharacters = true
|
||||
changedText = true
|
||||
|
||||
@text = @buildText() if changedText
|
||||
|
||||
buildEndOfLineInvisibles: ->
|
||||
@endOfLineInvisibles = []
|
||||
{cr, eol} = @invisibles
|
||||
@@ -274,11 +442,13 @@ class TokenizedLine
|
||||
@endOfLineInvisibles.push(eol) if eol
|
||||
|
||||
isComment: ->
|
||||
for token in @tokens
|
||||
continue if token.scopes.length is 1
|
||||
continue if token.isOnlyWhitespace()
|
||||
for scope in token.scopes
|
||||
return true if _.contains(scope.split('.'), 'comment')
|
||||
iterator = @getTokenIterator()
|
||||
while iterator.next()
|
||||
scopes = iterator.getScopes()
|
||||
continue if scopes.length is 1
|
||||
continue unless NonWhitespaceRegex.test(iterator.getText())
|
||||
for scope in scopes
|
||||
return true if CommentScopeRegex.test(scope)
|
||||
break
|
||||
false
|
||||
|
||||
@@ -289,42 +459,6 @@ class TokenizedLine
|
||||
@tokens[index]
|
||||
|
||||
getTokenCount: ->
|
||||
@tokens.length
|
||||
|
||||
bufferColumnForToken: (targetToken) ->
|
||||
column = 0
|
||||
for token in @tokens
|
||||
return column if token is targetToken
|
||||
column += token.bufferDelta
|
||||
|
||||
getScopeTree: ->
|
||||
return @scopeTree if @scopeTree?
|
||||
|
||||
scopeStack = []
|
||||
for token in @tokens
|
||||
@updateScopeStack(scopeStack, token.scopes)
|
||||
_.last(scopeStack).children.push(token)
|
||||
|
||||
@scopeTree = scopeStack[0]
|
||||
@updateScopeStack(scopeStack, [])
|
||||
@scopeTree
|
||||
|
||||
updateScopeStack: (scopeStack, desiredScopeDescriptor) ->
|
||||
# Find a common prefix
|
||||
for scope, i in desiredScopeDescriptor
|
||||
break unless scopeStack[i]?.scope is desiredScopeDescriptor[i]
|
||||
|
||||
# Pop scopeDescriptor until we're at the common prefix
|
||||
until scopeStack.length is i
|
||||
poppedScope = scopeStack.pop()
|
||||
_.last(scopeStack)?.children.push(poppedScope)
|
||||
|
||||
# Push onto common prefix until scopeStack equals desiredScopeDescriptor
|
||||
for j in [i...desiredScopeDescriptor.length]
|
||||
scopeStack.push(new Scope(desiredScopeDescriptor[j]))
|
||||
|
||||
return
|
||||
|
||||
class Scope
|
||||
constructor: (@scope) ->
|
||||
@children = []
|
||||
count = 0
|
||||
count++ for tag in @tags when tag >= 0
|
||||
count
|
||||
|
||||