From f919bc40dc033c914dcec014597c7b59da1617c5 Mon Sep 17 00:00:00 2001
From: Machiste Quintana
Date: Thu, 21 May 2015 08:35:01 -0400
Subject: [PATCH 01/12] Add lsb_release as Linux dependency

---
 resources/linux/debian/control.in   | 1 +
 resources/linux/redhat/atom.spec.in | 2 ++
 2 files changed, 3 insertions(+)

diff --git a/resources/linux/debian/control.in b/resources/linux/debian/control.in
index 1578f544b..cf2356a5d 100644
--- a/resources/linux/debian/control.in
+++ b/resources/linux/debian/control.in
@@ -1,6 +1,7 @@
 Package: <%= name %>
 Version: <%= version %>
 Depends: git, gconf2, gconf-service, libgtk2.0-0, libudev0 | libudev1, libgcrypt11 | libgcrypt20, libnotify4, libxtst6, libnss3, python, gvfs-bin, xdg-utils
+Recommends: lsb-release
 Suggests: libgnome-keyring0, gir1.2-gnomekeyring-1.0
 Section: <%= section %>
 Priority: optional
diff --git a/resources/linux/redhat/atom.spec.in b/resources/linux/redhat/atom.spec.in
index 369aeea70..3bc37e83a 100644
--- a/resources/linux/redhat/atom.spec.in
+++ b/resources/linux/redhat/atom.spec.in
@@ -7,6 +7,8 @@ URL: https://atom.io/
 AutoReqProv: no # Avoid libchromiumcontent.so missing dependency
 Prefix: <%= installDir %>
+Requires: redhat-lsb-core
+
 %description
 <%= description %>

From 7cb0bc3bc2aaa0f01b26c8a71f7e5890f8e570d3 Mon Sep 17 00:00:00 2001
From: Nathan Sobo
Date: Thu, 21 May 2015 16:25:23 +0200
Subject: [PATCH 02/12] Revert "Merge pull request #6757 from atom/ns-less-memory-for-tokens"

This reverts commit 0cd1f110b581e4c29fa4cec432cdbb93ecba2409, reversing changes made to d75d202d337142e206e500c10fed34151b4cc750.

Conflicts:
	package.json
---
 package.json                           |   4 +-
 spec/text-editor-presenter-spec.coffee |  36 +-
 spec/text-editor-spec.coffee           |   3 +-
 spec/tokenized-buffer-spec.coffee      |  28 +-
 spec/tokenized-line-spec.coffee        |  21 +
 src/display-buffer.coffee              |  32 +-
 src/language-mode.coffee               |   5 +-
 src/lines-component.coffee             | 160 ++------
 src/special-token-symbols.coffee       |   6 -
 src/text-editor-presenter.coffee       |  24 +-
 src/text-editor.coffee                 |   3 +-
 src/token-iterator.coffee              |  83 ----
 src/token.coffee                       | 199 +++++++++-
 src/tokenized-buffer.coffee            | 184 +++++----
 src/tokenized-line.coffee              | 524 +++++++++----------
 15 files changed, 601 insertions(+), 711 deletions(-)
 delete mode 100644 src/special-token-symbols.coffee
 delete mode 100644 src/token-iterator.coffee

diff --git a/package.json b/package.json
index 7e6619ab8..15eb53b29 100644
--- a/package.json
+++ b/package.json
@@ -32,7 +32,7 @@
   "delegato": "^1",
   "emissary": "^1.3.3",
   "event-kit": "^1.2.0",
-  "first-mate": "^4.1.4",
+  "first-mate": "^3.1",
   "fs-plus": "^2.8.0",
   "fstream": "0.1.24",
   "fuzzaldrin": "^2.1",
@@ -151,7 +151,7 @@
   "language-ruby": "0.54.0",
   "language-ruby-on-rails": "0.21.0",
   "language-sass": "0.38.0",
-  "language-shellscript": "0.15.0",
+  "language-shellscript": "0.14.0",
   "language-source": "0.9.0",
   "language-sql": "0.15.0",
   "language-text": "0.6.0",
diff --git a/spec/text-editor-presenter-spec.coffee b/spec/text-editor-presenter-spec.coffee
index d15c4759d..7da866ab4 100644
--- a/spec/text-editor-presenter-spec.coffee
+++ b/spec/text-editor-presenter-spec.coffee
@@ -670,11 +670,7 @@ describe "TextEditorPresenter", ->
         expectValues lineStateForScreenRow(presenter, 4), {
           screenRow: 4
           text: line4.text
-          tags: line4.tags
-          specialTokens: line4.specialTokens
-          firstNonWhitespaceIndex: line4.firstNonWhitespaceIndex
-          firstTrailingWhitespaceIndex: line4.firstTrailingWhitespaceIndex
-          invisibles: line4.invisibles
+          tokens: line4.tokens
           top: 10 * 4
         }

@@ -682,11 +678,7 @@ describe
"TextEditorPresenter", -> expectValues lineStateForScreenRow(presenter, 5), { screenRow: 5 text: line5.text - tags: line5.tags - specialTokens: line5.specialTokens - firstNonWhitespaceIndex: line5.firstNonWhitespaceIndex - firstTrailingWhitespaceIndex: line5.firstTrailingWhitespaceIndex - invisibles: line5.invisibles + tokens: line5.tokens top: 10 * 5 } @@ -694,11 +686,7 @@ describe "TextEditorPresenter", -> expectValues lineStateForScreenRow(presenter, 6), { screenRow: 6 text: line6.text - tags: line6.tags - specialTokens: line6.specialTokens - firstNonWhitespaceIndex: line6.firstNonWhitespaceIndex - firstTrailingWhitespaceIndex: line6.firstTrailingWhitespaceIndex - invisibles: line6.invisibles + tokens: line6.tokens top: 10 * 6 } @@ -706,11 +694,7 @@ describe "TextEditorPresenter", -> expectValues lineStateForScreenRow(presenter, 7), { screenRow: 7 text: line7.text - tags: line7.tags - specialTokens: line7.specialTokens - firstNonWhitespaceIndex: line7.firstNonWhitespaceIndex - firstTrailingWhitespaceIndex: line7.firstTrailingWhitespaceIndex - invisibles: line7.invisibles + tokens: line7.tokens top: 10 * 7 } @@ -718,11 +702,7 @@ describe "TextEditorPresenter", -> expectValues lineStateForScreenRow(presenter, 8), { screenRow: 8 text: line8.text - tags: line8.tags - specialTokens: line8.specialTokens - firstNonWhitespaceIndex: line8.firstNonWhitespaceIndex - firstTrailingWhitespaceIndex: line8.firstTrailingWhitespaceIndex - invisibles: line8.invisibles + tokens: line8.tokens top: 10 * 8 } @@ -817,19 +797,19 @@ describe "TextEditorPresenter", -> line1 = editor.tokenizedLineForScreenRow(1) expectValues lineStateForScreenRow(presenter, 1), { text: line1.text - tags: line1.tags + tokens: line1.tokens } line2 = editor.tokenizedLineForScreenRow(2) expectValues lineStateForScreenRow(presenter, 2), { text: line2.text - tags: line2.tags + tokens: line2.tokens } line3 = editor.tokenizedLineForScreenRow(3) expectValues lineStateForScreenRow(presenter, 3), { text: line3.text - tags: line3.tags + tokens: line3.tokens } it "does not remove out-of-view lines corresponding to ::mouseWheelScreenRow until ::stoppedScrollingDelay elapses", -> diff --git a/spec/text-editor-spec.coffee b/spec/text-editor-spec.coffee index a845619ba..d1d311088 100644 --- a/spec/text-editor-spec.coffee +++ b/spec/text-editor-spec.coffee @@ -4110,9 +4110,8 @@ describe "TextEditor", -> runs -> grammar = atom.grammars.selectGrammar("text.js") - {line, tags} = grammar.tokenizeLine("var i; // http://github.com") + {tokens} = grammar.tokenizeLine("var i; // http://github.com") - tokens = atom.grammars.decodeTokens(line, tags) expect(tokens[0].value).toBe "var" expect(tokens[0].scopes).toEqual ["source.js", "storage.modifier.js"] diff --git a/spec/tokenized-buffer-spec.coffee b/spec/tokenized-buffer-spec.coffee index 45cc03a44..9d92335af 100644 --- a/spec/tokenized-buffer-spec.coffee +++ b/spec/tokenized-buffer-spec.coffee @@ -296,6 +296,14 @@ describe "TokenizedBuffer", -> expect(tokenizedBuffer.tokenizedLineForRow(5).ruleStack?).toBeTruthy() expect(tokenizedBuffer.tokenizedLineForRow(6).ruleStack?).toBeTruthy() + describe ".findOpeningBracket(closingBufferPosition)", -> + it "returns the position of the matching bracket, skipping any nested brackets", -> + expect(tokenizedBuffer.findOpeningBracket([9, 2])).toEqual [1, 29] + + describe ".findClosingBracket(startBufferPosition)", -> + it "returns the position of the matching bracket, skipping any nested brackets", -> + expect(tokenizedBuffer.findClosingBracket([1, 29])).toEqual [9, 2] 
+ it "tokenizes leading whitespace based on the new tab length", -> expect(tokenizedBuffer.tokenizedLineForRow(5).tokens[0].isAtomic).toBeTruthy() expect(tokenizedBuffer.tokenizedLineForRow(5).tokens[0].value).toBe " " @@ -572,7 +580,7 @@ describe "TokenizedBuffer", -> describe "when the selector matches a run of multiple tokens at the position", -> it "returns the range covered by all contigous tokens (within a single line)", -> - expect(tokenizedBuffer.bufferRangeForScopeAtPosition('.meta.function', [1, 18])).toEqual [[1, 6], [1, 28]] + expect(tokenizedBuffer.bufferRangeForScopeAtPosition('.function', [1, 18])).toEqual [[1, 6], [1, 28]] describe "when the editor.tabLength config value changes", -> it "updates the tab length of the tokenized lines", -> @@ -689,6 +697,22 @@ describe "TokenizedBuffer", -> expect(line.tokens[0].firstNonWhitespaceIndex).toBe 2 expect(line.tokens[line.tokens.length - 1].firstTrailingWhitespaceIndex).toBe 0 + it "sets the ::firstNonWhitespaceIndex and ::firstTrailingWhitespaceIndex correctly when tokens are split for soft-wrapping", -> + atom.config.set("editor.showInvisibles", true) + atom.config.set("editor.invisibles", space: 'S') + buffer.setText(" token ") + fullyTokenize(tokenizedBuffer) + token = tokenizedBuffer.tokenizedLines[0].tokens[0] + + [leftToken, rightToken] = token.splitAt(1) + expect(leftToken.hasInvisibleCharacters).toBe true + expect(leftToken.firstNonWhitespaceIndex).toBe 1 + expect(leftToken.firstTrailingWhitespaceIndex).toBe null + + expect(leftToken.hasInvisibleCharacters).toBe true + expect(rightToken.firstNonWhitespaceIndex).toBe null + expect(rightToken.firstTrailingWhitespaceIndex).toBe 5 + describe ".indentLevel on tokenized lines", -> beforeEach -> buffer = atom.project.bufferForPathSync('sample.js') @@ -728,7 +752,7 @@ describe "TokenizedBuffer", -> it "updates empty line indent guides when the empty line is the last line", -> buffer.insert([12, 2], '\n') - # The newline and the tab need to be in two different operations to surface the bug + # The newline and he tab need to be in two different operations to surface the bug buffer.insert([12, 0], ' ') expect(tokenizedBuffer.tokenizedLineForRow(13).indentLevel).toBe 1 diff --git a/spec/tokenized-line-spec.coffee b/spec/tokenized-line-spec.coffee index 2914ec089..0da83c91c 100644 --- a/spec/tokenized-line-spec.coffee +++ b/spec/tokenized-line-spec.coffee @@ -17,3 +17,24 @@ describe "TokenizedLine", -> it "returns false when the line is not only whitespace", -> expect(editor.tokenizedLineForScreenRow(0).isOnlyWhitespace()).toBe false expect(editor.tokenizedLineForScreenRow(2).isOnlyWhitespace()).toBe false + + describe "::getScopeTree()", -> + it "returns a tree whose inner nodes are scopeDescriptor and whose leaf nodes are tokens in those scopeDescriptor", -> + [tokens, tokenIndex] = [] + + ensureValidScopeTree = (scopeTree, scopeDescriptor=[]) -> + if scopeTree.children? 
+ for child in scopeTree.children + ensureValidScopeTree(child, scopeDescriptor.concat([scopeTree.scope])) + else + expect(scopeTree).toBe tokens[tokenIndex++] + expect(scopeDescriptor).toEqual scopeTree.scopes + + waitsForPromise -> + atom.project.open('coffee.coffee').then (o) -> editor = o + + runs -> + tokenIndex = 0 + tokens = editor.tokenizedLineForScreenRow(1).tokens + scopeTree = editor.tokenizedLineForScreenRow(1).getScopeTree() + ensureValidScopeTree(scopeTree) diff --git a/src/display-buffer.coffee b/src/display-buffer.coffee index b2460addc..26bf43dce 100644 --- a/src/display-buffer.coffee +++ b/src/display-buffer.coffee @@ -2,7 +2,6 @@ _ = require 'underscore-plus' Serializable = require 'serializable' {CompositeDisposable, Emitter} = require 'event-kit' {Point, Range} = require 'text-buffer' -Grim = require 'grim' TokenizedBuffer = require './tokenized-buffer' RowMap = require './row-map' Fold = require './fold' @@ -10,6 +9,7 @@ Model = require './model' Token = require './token' Decoration = require './decoration' Marker = require './marker' +Grim = require 'grim' class BufferToScreenConversionError extends Error constructor: (@message, @metadata) -> @@ -659,19 +659,16 @@ class DisplayBuffer extends Model top = targetRow * @lineHeightInPixels left = 0 column = 0 - - iterator = @tokenizedLineForScreenRow(targetRow).getTokenIterator() - while iterator.next() - charWidths = @getScopedCharWidths(iterator.getScopes()) + for token in @tokenizedLineForScreenRow(targetRow).tokens + charWidths = @getScopedCharWidths(token.scopes) valueIndex = 0 - value = iterator.getText() - while valueIndex < value.length - if iterator.isPairedCharacter() - char = value + while valueIndex < token.value.length + if token.hasPairedCharacter + char = token.value.substr(valueIndex, 2) charLength = 2 valueIndex += 2 else - char = value[valueIndex] + char = token.value[valueIndex] charLength = 1 valueIndex++ @@ -692,19 +689,16 @@ class DisplayBuffer extends Model left = 0 column = 0 - - iterator = @tokenizedLineForScreenRow(row).getTokenIterator() - while iterator.next() - charWidths = @getScopedCharWidths(iterator.getScopes()) - value = iterator.getText() + for token in @tokenizedLineForScreenRow(row).tokens + charWidths = @getScopedCharWidths(token.scopes) valueIndex = 0 - while valueIndex < value.length - if iterator.isPairedCharacter() - char = value + while valueIndex < token.value.length + if token.hasPairedCharacter + char = token.value.substr(valueIndex, 2) charLength = 2 valueIndex += 2 else - char = value[valueIndex] + char = token.value[valueIndex] charLength = 1 valueIndex++ diff --git a/src/language-mode.coffee b/src/language-mode.coffee index c9401550b..b5529a05e 100644 --- a/src/language-mode.coffee +++ b/src/language-mode.coffee @@ -242,9 +242,8 @@ class LanguageMode @suggestedIndentForTokenizedLineAtBufferRow(bufferRow, tokenizedLine, options) suggestedIndentForTokenizedLineAtBufferRow: (bufferRow, tokenizedLine, options) -> - iterator = tokenizedLine.getTokenIterator() - iterator.next() - scopeDescriptor = new ScopeDescriptor(scopes: iterator.getScopes()) + scopes = tokenizedLine.tokens[0].scopes + scopeDescriptor = new ScopeDescriptor({scopes}) currentIndentLevel = @editor.indentationForBufferRow(bufferRow) return currentIndentLevel unless increaseIndentRegex = @increaseIndentRegexForScopeDescriptor(scopeDescriptor) diff --git a/src/lines-component.coffee b/src/lines-component.coffee index 17c904e99..fbec40b79 100644 --- a/src/lines-component.coffee +++ b/src/lines-component.coffee @@ 
-4,13 +4,10 @@ _ = require 'underscore-plus' CursorsComponent = require './cursors-component' HighlightsComponent = require './highlights-component' -TokenIterator = require './token-iterator' DummyLineNode = $$(-> @div className: 'line', style: 'position: absolute; visibility: hidden;', => @span 'x')[0] AcceptFilter = {acceptNode: -> NodeFilter.FILTER_ACCEPT} WrapperDiv = document.createElement('div') -TokenTextEscapeRegex = /[&"'<>]/g -MaxTokenLength = 20000 cloneObject = (object) -> clone = {} @@ -22,7 +19,6 @@ class LinesComponent placeholderTextDiv: null constructor: ({@presenter, @hostElement, @useShadowDOM, visible}) -> - @tokenIterator = new TokenIterator @measuredLines = new Set @lineNodesByLineId = {} @screenRowsByLineId = {} @@ -171,116 +167,20 @@ class LinesComponent @buildEndOfLineHTML(id) or ' ' buildLineInnerHTML: (id) -> - lineState = @newState.lines[id] - {firstNonWhitespaceIndex, firstTrailingWhitespaceIndex, invisibles} = lineState - lineIsWhitespaceOnly = firstTrailingWhitespaceIndex is 0 - + {indentGuidesVisible} = @newState + {tokens, text, isOnlyWhitespace} = @newState.lines[id] innerHTML = "" - @tokenIterator.reset(lineState) - while @tokenIterator.next() - for scope in @tokenIterator.getScopeEnds() - innerHTML += "" - - for scope in @tokenIterator.getScopeStarts() - innerHTML += "" - - tokenStart = @tokenIterator.getScreenStart() - tokenEnd = @tokenIterator.getScreenEnd() - tokenText = @tokenIterator.getText() - isHardTab = @tokenIterator.isHardTab() - - if hasLeadingWhitespace = tokenStart < firstNonWhitespaceIndex - tokenFirstNonWhitespaceIndex = firstNonWhitespaceIndex - tokenStart - else - tokenFirstNonWhitespaceIndex = null - - if hasTrailingWhitespace = tokenEnd > firstTrailingWhitespaceIndex - tokenFirstTrailingWhitespaceIndex = Math.max(0, firstTrailingWhitespaceIndex - tokenStart) - else - tokenFirstTrailingWhitespaceIndex = null - - hasIndentGuide = - @newState.indentGuidesVisible and - (hasLeadingWhitespace or lineIsWhitespaceOnly) - - hasInvisibleCharacters = - (invisibles?.tab and isHardTab) or - (invisibles?.space and (hasLeadingWhitespace or hasTrailingWhitespace)) - - innerHTML += @buildTokenHTML(tokenText, isHardTab, tokenFirstNonWhitespaceIndex, tokenFirstTrailingWhitespaceIndex, hasIndentGuide, hasInvisibleCharacters) - - for scope in @tokenIterator.getScopeEnds() - innerHTML += "" - - for scope in @tokenIterator.getScopes() - innerHTML += "" + scopeStack = [] + for token in tokens + innerHTML += @updateScopeStack(scopeStack, token.scopes) + hasIndentGuide = indentGuidesVisible and (token.hasLeadingWhitespace() or (token.hasTrailingWhitespace() and isOnlyWhitespace)) + innerHTML += token.getValueAsHtml({hasIndentGuide}) + innerHTML += @popScope(scopeStack) while scopeStack.length > 0 innerHTML += @buildEndOfLineHTML(id) innerHTML - buildTokenHTML: (tokenText, isHardTab, firstNonWhitespaceIndex, firstTrailingWhitespaceIndex, hasIndentGuide, hasInvisibleCharacters) -> - if isHardTab - classes = 'hard-tab' - classes += ' leading-whitespace' if firstNonWhitespaceIndex? - classes += ' trailing-whitespace' if firstTrailingWhitespaceIndex? - classes += ' indent-guide' if hasIndentGuide - classes += ' invisible-character' if hasInvisibleCharacters - return "#{@escapeTokenText(tokenText)}" - else - startIndex = 0 - endIndex = tokenText.length - - leadingHtml = '' - trailingHtml = '' - - if firstNonWhitespaceIndex? 
- leadingWhitespace = tokenText.substring(0, firstNonWhitespaceIndex) - - classes = 'leading-whitespace' - classes += ' indent-guide' if hasIndentGuide - classes += ' invisible-character' if hasInvisibleCharacters - - leadingHtml = "#{leadingWhitespace}" - startIndex = firstNonWhitespaceIndex - - if firstTrailingWhitespaceIndex? - tokenIsOnlyWhitespace = firstTrailingWhitespaceIndex is 0 - trailingWhitespace = tokenText.substring(firstTrailingWhitespaceIndex) - - classes = 'trailing-whitespace' - classes += ' indent-guide' if hasIndentGuide and not firstNonWhitespaceIndex? and tokenIsOnlyWhitespace - classes += ' invisible-character' if hasInvisibleCharacters - - trailingHtml = "#{trailingWhitespace}" - - endIndex = firstTrailingWhitespaceIndex - - html = leadingHtml - if tokenText.length > MaxTokenLength - while startIndex < endIndex - html += "" + @escapeTokenText(tokenText, startIndex, startIndex + MaxTokenLength) + "" - startIndex += MaxTokenLength - else - html += @escapeTokenText(tokenText, startIndex, endIndex) - - html += trailingHtml - html - - escapeTokenText: (tokenText, startIndex, endIndex) -> - if startIndex? and endIndex? and startIndex > 0 or endIndex < tokenText.length - tokenText = tokenText.slice(startIndex, endIndex) - tokenText.replace(TokenTextEscapeRegex, @escapeTokenTextReplace) - - escapeTokenTextReplace: (match) -> - switch match - when '&' then '&' - when '"' then '"' - when "'" then ''' - when '<' then '<' - when '>' then '>' - else match - buildEndOfLineHTML: (id) -> {endOfLineInvisibles} = @newState.lines[id] @@ -290,6 +190,31 @@ class LinesComponent html += "#{invisible}" html + updateScopeStack: (scopeStack, desiredScopeDescriptor) -> + html = "" + + # Find a common prefix + for scope, i in desiredScopeDescriptor + break unless scopeStack[i] is desiredScopeDescriptor[i] + + # Pop scopeDescriptor until we're at the common prefx + until scopeStack.length is i + html += @popScope(scopeStack) + + # Push onto common prefix until scopeStack equals desiredScopeDescriptor + for j in [i...desiredScopeDescriptor.length] + html += @pushScope(scopeStack, desiredScopeDescriptor[j]) + + html + + popScope: (scopeStack) -> + scopeStack.pop() + "" + + pushScope: (scopeStack, scope) -> + scopeStack.push(scope) + "" + updateLineNode: (id) -> oldLineState = @oldState.lines[id] newLineState = @newState.lines[id] @@ -354,22 +279,19 @@ class LinesComponent iterator = null charIndex = 0 - @tokenIterator.reset(tokenizedLine) - while @tokenIterator.next() - scopes = @tokenIterator.getScopes() - text = @tokenIterator.getText() + for {value, scopes, hasPairedCharacter} in tokenizedLine.tokens charWidths = @presenter.getScopedCharacterWidths(scopes) - textIndex = 0 - while textIndex < text.length - if @tokenIterator.isPairedCharacter() - char = text + valueIndex = 0 + while valueIndex < value.length + if hasPairedCharacter + char = value.substr(valueIndex, 2) charLength = 2 - textIndex += 2 + valueIndex += 2 else - char = text[textIndex] + char = value[valueIndex] charLength = 1 - textIndex++ + valueIndex++ continue if char is '\0' diff --git a/src/special-token-symbols.coffee b/src/special-token-symbols.coffee deleted file mode 100644 index 06884b85f..000000000 --- a/src/special-token-symbols.coffee +++ /dev/null @@ -1,6 +0,0 @@ -module.exports = { - SoftTab: Symbol('SoftTab') - HardTab: Symbol('HardTab') - PairedCharacter: Symbol('PairedCharacter') - SoftWrapIndent: Symbol('SoftWrapIndent') -} diff --git a/src/text-editor-presenter.coffee b/src/text-editor-presenter.coffee index 
3aea57f29..70c26a1a3 100644 --- a/src/text-editor-presenter.coffee +++ b/src/text-editor-presenter.coffee @@ -336,14 +336,9 @@ class TextEditorPresenter @state.content.lines[line.id] = screenRow: row text: line.text - openScopes: line.openScopes - tags: line.tags - specialTokens: line.specialTokens - firstNonWhitespaceIndex: line.firstNonWhitespaceIndex - firstTrailingWhitespaceIndex: line.firstTrailingWhitespaceIndex - invisibles: line.invisibles - endOfLineInvisibles: line.endOfLineInvisibles + tokens: line.tokens isOnlyWhitespace: line.isOnlyWhitespace() + endOfLineInvisibles: line.endOfLineInvisibles indentLevel: line.indentLevel tabLength: line.tabLength fold: line.fold @@ -1011,20 +1006,17 @@ class TextEditorPresenter top = targetRow * @lineHeight left = 0 column = 0 - - iterator = @model.tokenizedLineForScreenRow(targetRow).getTokenIterator() - while iterator.next() - characterWidths = @getScopedCharacterWidths(iterator.getScopes()) + for token in @model.tokenizedLineForScreenRow(targetRow).tokens + characterWidths = @getScopedCharacterWidths(token.scopes) valueIndex = 0 - text = iterator.getText() - while valueIndex < text.length - if iterator.isPairedCharacter() - char = text + while valueIndex < token.value.length + if token.hasPairedCharacter + char = token.value.substr(valueIndex, 2) charLength = 2 valueIndex += 2 else - char = text[valueIndex] + char = token.value[valueIndex] charLength = 1 valueIndex++ diff --git a/src/text-editor.coffee b/src/text-editor.coffee index 4489d82af..d2bd77522 100644 --- a/src/text-editor.coffee +++ b/src/text-editor.coffee @@ -2457,8 +2457,9 @@ class TextEditor extends Model # Extended: Determine if the given row is entirely a comment isBufferRowCommented: (bufferRow) -> if match = @lineTextForBufferRow(bufferRow).match(/\S/) + scopeDescriptor = @tokenForBufferPosition([bufferRow, match.index]).scopes @commentScopeSelector ?= new TextMateScopeSelector('comment.*') - @commentScopeSelector.matches(@scopeDescriptorForBufferPosition([bufferRow, match.index]).scopes) + @commentScopeSelector.matches(scopeDescriptor) logCursorScope: -> scopeDescriptor = @getLastCursor().getScopeDescriptor() diff --git a/src/token-iterator.coffee b/src/token-iterator.coffee deleted file mode 100644 index 202b044ba..000000000 --- a/src/token-iterator.coffee +++ /dev/null @@ -1,83 +0,0 @@ -{SoftTab, HardTab, PairedCharacter, SoftWrapIndent} = require './special-token-symbols' - -module.exports = -class TokenIterator - constructor: (line) -> - @reset(line) if line? - - reset: (@line) -> - @index = null - @bufferStart = @line.startBufferColumn - @bufferEnd = @bufferStart - @screenStart = 0 - @screenEnd = 0 - @scopes = @line.openScopes.map (id) -> atom.grammars.scopeForId(id) - @scopeStarts = @scopes.slice() - @scopeEnds = [] - this - - next: -> - {tags} = @line - - if @index? 
- @index++ - @scopeEnds.length = 0 - @scopeStarts.length = 0 - @bufferStart = @bufferEnd - @screenStart = @screenEnd - else - @index = 0 - - while @index < tags.length - tag = tags[@index] - if tag < 0 - if tag % 2 is 0 - @scopeEnds.push(atom.grammars.scopeForId(tag + 1)) - @scopes.pop() - else - scope = atom.grammars.scopeForId(tag) - @scopeStarts.push(scope) - @scopes.push(scope) - @index++ - else - if @isHardTab() - @screenEnd = @screenStart + tag - @bufferEnd = @bufferStart + 1 - else if @isSoftWrapIndentation() - @screenEnd = @screenStart + tag - @bufferEnd = @bufferStart + 0 - else - @screenEnd = @screenStart + tag - @bufferEnd = @bufferStart + tag - return true - - false - - getBufferStart: -> @bufferStart - getBufferEnd: -> @bufferEnd - - getScreenStart: -> @screenStart - getScreenEnd: -> @screenEnd - - getScopeStarts: -> @scopeStarts - getScopeEnds: -> @scopeEnds - - getScopes: -> @scopes - - getText: -> - @line.text.substring(@screenStart, @screenEnd) - - isSoftTab: -> - @line.specialTokens[@index] is SoftTab - - isHardTab: -> - @line.specialTokens[@index] is HardTab - - isSoftWrapIndentation: -> - @line.specialTokens[@index] is SoftWrapIndent - - isPairedCharacter: -> - @line.specialTokens[@index] is PairedCharacter - - isAtomic: -> - @isSoftTab() or @isHardTab() or @isSoftWrapIndentation() or @isPairedCharacter() diff --git a/src/token.coffee b/src/token.coffee index 60e8194f8..8aa4a8706 100644 --- a/src/token.coffee +++ b/src/token.coffee @@ -1,8 +1,13 @@ _ = require 'underscore-plus' +textUtils = require './text-utils' +WhitespaceRegexesByTabLength = {} +EscapeRegex = /[&"'<>]/g StartDotRegex = /^\.?/ WhitespaceRegex = /\S/ +MaxTokenLength = 20000 + # Represents a single unit of text as selected by a grammar. module.exports = class Token @@ -15,14 +20,10 @@ class Token firstTrailingWhitespaceIndex: null hasInvisibleCharacters: false - constructor: (properties) -> - {@value, @scopes, @isAtomic, @isHardTab, @bufferDelta} = properties - {@hasInvisibleCharacters, @hasPairedCharacter, @isSoftWrapIndentation} = properties - @firstNonWhitespaceIndex = properties.firstNonWhitespaceIndex ? null - @firstTrailingWhitespaceIndex = properties.firstTrailingWhitespaceIndex ? null - + constructor: ({@value, @scopes, @isAtomic, @bufferDelta, @isHardTab, @hasPairedCharacter, @isSoftWrapIndentation}) -> @screenDelta = @value.length @bufferDelta ?= @screenDelta + @hasPairedCharacter ?= textUtils.hasPairedCharacter(@value) isEqual: (other) -> # TODO: scopes is deprecated. This is here for the sake of lang package tests @@ -31,6 +32,126 @@ class Token isBracket: -> /^meta\.brace\b/.test(_.last(@scopes)) + splitAt: (splitIndex) -> + leftToken = new Token(value: @value.substring(0, splitIndex), scopes: @scopes) + rightToken = new Token(value: @value.substring(splitIndex), scopes: @scopes) + + if @firstNonWhitespaceIndex? + leftToken.firstNonWhitespaceIndex = Math.min(splitIndex, @firstNonWhitespaceIndex) + leftToken.hasInvisibleCharacters = @hasInvisibleCharacters + + if @firstNonWhitespaceIndex > splitIndex + rightToken.firstNonWhitespaceIndex = @firstNonWhitespaceIndex - splitIndex + rightToken.hasInvisibleCharacters = @hasInvisibleCharacters + + if @firstTrailingWhitespaceIndex? 
+ rightToken.firstTrailingWhitespaceIndex = Math.max(0, @firstTrailingWhitespaceIndex - splitIndex) + rightToken.hasInvisibleCharacters = @hasInvisibleCharacters + + if @firstTrailingWhitespaceIndex < splitIndex + leftToken.firstTrailingWhitespaceIndex = @firstTrailingWhitespaceIndex + leftToken.hasInvisibleCharacters = @hasInvisibleCharacters + + [leftToken, rightToken] + + whitespaceRegexForTabLength: (tabLength) -> + WhitespaceRegexesByTabLength[tabLength] ?= new RegExp("([ ]{#{tabLength}})|(\t)|([^\t]+)", "g") + + breakOutAtomicTokens: (tabLength, breakOutLeadingSoftTabs, startColumn) -> + if @hasPairedCharacter + outputTokens = [] + column = startColumn + + for token in @breakOutPairedCharacters() + if token.isAtomic + outputTokens.push(token) + else + outputTokens.push(token.breakOutAtomicTokens(tabLength, breakOutLeadingSoftTabs, column)...) + breakOutLeadingSoftTabs = token.isOnlyWhitespace() if breakOutLeadingSoftTabs + column += token.value.length + + outputTokens + else + return [this] if @isAtomic + + if breakOutLeadingSoftTabs + return [this] unless /^[ ]|\t/.test(@value) + else + return [this] unless /\t/.test(@value) + + outputTokens = [] + regex = @whitespaceRegexForTabLength(tabLength) + column = startColumn + while match = regex.exec(@value) + [fullMatch, softTab, hardTab] = match + token = null + if softTab and breakOutLeadingSoftTabs + token = @buildSoftTabToken(tabLength) + else if hardTab + breakOutLeadingSoftTabs = false + token = @buildHardTabToken(tabLength, column) + else + breakOutLeadingSoftTabs = false + value = match[0] + token = new Token({value, @scopes}) + column += token.value.length + outputTokens.push(token) + + outputTokens + + breakOutPairedCharacters: -> + outputTokens = [] + index = 0 + nonPairStart = 0 + + while index < @value.length + if textUtils.isPairedCharacter(@value, index) + if nonPairStart isnt index + outputTokens.push(new Token({value: @value[nonPairStart...index], @scopes})) + outputTokens.push(@buildPairedCharacterToken(@value, index)) + index += 2 + nonPairStart = index + else + index++ + + if nonPairStart isnt index + outputTokens.push(new Token({value: @value[nonPairStart...index], @scopes})) + + outputTokens + + buildPairedCharacterToken: (value, index) -> + new Token( + value: value[index..index + 1] + scopes: @scopes + isAtomic: true + hasPairedCharacter: true + ) + + buildHardTabToken: (tabLength, column) -> + @buildTabToken(tabLength, true, column) + + buildSoftTabToken: (tabLength) -> + @buildTabToken(tabLength, false, 0) + + buildTabToken: (tabLength, isHardTab, column=0) -> + tabStop = tabLength - (column % tabLength) + new Token( + value: _.multiplyString(" ", tabStop) + scopes: @scopes + bufferDelta: if isHardTab then 1 else tabStop + isAtomic: true + isHardTab: isHardTab + ) + + buildSoftWrapIndentationToken: (length) -> + new Token( + value: _.multiplyString(" ", length), + scopes: @scopes, + bufferDelta: 0, + isAtomic: true, + isSoftWrapIndentation: true + ) + isOnlyWhitespace: -> not WhitespaceRegex.test(@value) @@ -40,6 +161,72 @@ class Token scopeClasses = scope.split('.') _.isSubset(targetClasses, scopeClasses) + getValueAsHtml: ({hasIndentGuide}) -> + if @isHardTab + classes = 'hard-tab' + classes += ' leading-whitespace' if @hasLeadingWhitespace() + classes += ' trailing-whitespace' if @hasTrailingWhitespace() + classes += ' indent-guide' if hasIndentGuide + classes += ' invisible-character' if @hasInvisibleCharacters + html = "#{@escapeString(@value)}" + else + startIndex = 0 + endIndex = @value.length + + 
leadingHtml = '' + trailingHtml = '' + + if @hasLeadingWhitespace() + leadingWhitespace = @value.substring(0, @firstNonWhitespaceIndex) + + classes = 'leading-whitespace' + classes += ' indent-guide' if hasIndentGuide + classes += ' invisible-character' if @hasInvisibleCharacters + + leadingHtml = "#{leadingWhitespace}" + startIndex = @firstNonWhitespaceIndex + + if @hasTrailingWhitespace() + tokenIsOnlyWhitespace = @firstTrailingWhitespaceIndex is 0 + trailingWhitespace = @value.substring(@firstTrailingWhitespaceIndex) + + classes = 'trailing-whitespace' + classes += ' indent-guide' if hasIndentGuide and not @hasLeadingWhitespace() and tokenIsOnlyWhitespace + classes += ' invisible-character' if @hasInvisibleCharacters + + trailingHtml = "#{trailingWhitespace}" + + endIndex = @firstTrailingWhitespaceIndex + + html = leadingHtml + if @value.length > MaxTokenLength + while startIndex < endIndex + html += "" + @escapeString(@value, startIndex, startIndex + MaxTokenLength) + "" + startIndex += MaxTokenLength + else + html += @escapeString(@value, startIndex, endIndex) + + html += trailingHtml + html + + escapeString: (str, startIndex, endIndex) -> + strLength = str.length + + startIndex ?= 0 + endIndex ?= strLength + + str = str.slice(startIndex, endIndex) if startIndex > 0 or endIndex < strLength + str.replace(EscapeRegex, @escapeStringReplace) + + escapeStringReplace: (match) -> + switch match + when '&' then '&' + when '"' then '"' + when "'" then ''' + when '<' then '<' + when '>' then '>' + else match + hasLeadingWhitespace: -> @firstNonWhitespaceIndex? and @firstNonWhitespaceIndex > 0 diff --git a/src/tokenized-buffer.coffee b/src/tokenized-buffer.coffee index 60ebe16f0..6d8f0c018 100644 --- a/src/tokenized-buffer.coffee +++ b/src/tokenized-buffer.coffee @@ -1,11 +1,9 @@ _ = require 'underscore-plus' {CompositeDisposable, Emitter} = require 'event-kit' {Point, Range} = require 'text-buffer' -{ScopeSelector} = require 'first-mate' Serializable = require 'serializable' Model = require './model' TokenizedLine = require './tokenized-line' -TokenIterator = require './token-iterator' Token = require './token' ScopeDescriptor = require './scope-descriptor' Grim = require 'grim' @@ -27,7 +25,6 @@ class TokenizedBuffer extends Model constructor: ({@buffer, @tabLength, @ignoreInvisibles}) -> @emitter = new Emitter @disposables = new CompositeDisposable - @tokenIterator = new TokenIterator @disposables.add atom.grammars.onDidAddGrammar(@grammarAddedOrUpdated) @disposables.add atom.grammars.onDidUpdateGrammar(@grammarAddedOrUpdated) @@ -170,7 +167,7 @@ class TokenizedBuffer extends Model row = startRow loop previousStack = @stackForRow(row) - @tokenizedLines[row] = @buildTokenizedLineForRow(row, @stackForRow(row - 1), @openScopesForRow(row)) + @tokenizedLines[row] = @buildTokenizedLineForRow(row, @stackForRow(row - 1)) if --rowsRemaining is 0 filledRegion = false endRow = row @@ -230,7 +227,7 @@ class TokenizedBuffer extends Model @updateInvalidRows(start, end, delta) previousEndStack = @stackForRow(end) # used in spill detection below - newTokenizedLines = @buildTokenizedLinesForRows(start, end + delta, @stackForRow(start - 1), @openScopesForRow(start)) + newTokenizedLines = @buildTokenizedLinesForRows(start, end + delta, @stackForRow(start - 1)) _.spliceWithArray(@tokenizedLines, start, end - start + 1, newTokenizedLines) start = @retokenizeWhitespaceRowsIfIndentLevelChanged(start - 1, -1) @@ -251,7 +248,7 @@ class TokenizedBuffer extends Model line = @tokenizedLines[row] if 
line?.isOnlyWhitespace() and @indentLevelForRow(row) isnt line.indentLevel while line?.isOnlyWhitespace() - @tokenizedLines[row] = @buildTokenizedLineForRow(row, @stackForRow(row - 1), @openScopesForRow(row)) + @tokenizedLines[row] = @buildTokenizedLineForRow(row, @stackForRow(row - 1)) row += increment line = @tokenizedLines[row] @@ -293,18 +290,16 @@ class TokenizedBuffer extends Model @tokenizedLineForRow(row).isComment() and @tokenizedLineForRow(nextRow).isComment() - buildTokenizedLinesForRows: (startRow, endRow, startingStack, startingopenScopes) -> + buildTokenizedLinesForRows: (startRow, endRow, startingStack) -> ruleStack = startingStack - openScopes = startingopenScopes stopTokenizingAt = startRow + @chunkSize tokenizedLines = for row in [startRow..endRow] if (ruleStack or row is 0) and row < stopTokenizingAt - tokenizedLine = @buildTokenizedLineForRow(row, ruleStack, openScopes) - ruleStack = tokenizedLine.ruleStack - openScopes = @scopesFromTags(openScopes, tokenizedLine.tags) + screenLine = @buildTokenizedLineForRow(row, ruleStack) + ruleStack = screenLine.ruleStack else - tokenizedLine = @buildPlaceholderTokenizedLineForRow(row, openScopes) - tokenizedLine + screenLine = @buildPlaceholderTokenizedLineForRow(row) + screenLine if endRow >= stopTokenizingAt @invalidateRow(stopTokenizingAt) @@ -316,23 +311,22 @@ class TokenizedBuffer extends Model @buildPlaceholderTokenizedLineForRow(row) for row in [startRow..endRow] buildPlaceholderTokenizedLineForRow: (row) -> - openScopes = [@grammar.startIdForScope(@grammar.scopeName)] - text = @buffer.lineForRow(row) - tags = [text.length] + line = @buffer.lineForRow(row) + tokens = [new Token(value: line, scopes: [@grammar.scopeName])] tabLength = @getTabLength() indentLevel = @indentLevelForRow(row) lineEnding = @buffer.lineEndingForRow(row) - new TokenizedLine({openScopes, text, tags, tabLength, indentLevel, invisibles: @getInvisiblesToShow(), lineEnding, @tokenIterator}) + new TokenizedLine({tokens, tabLength, indentLevel, invisibles: @getInvisiblesToShow(), lineEnding}) - buildTokenizedLineForRow: (row, ruleStack, openScopes) -> - @buildTokenizedLineForRowWithText(row, @buffer.lineForRow(row), ruleStack, openScopes) + buildTokenizedLineForRow: (row, ruleStack) -> + @buildTokenizedLineForRowWithText(row, @buffer.lineForRow(row), ruleStack) - buildTokenizedLineForRowWithText: (row, text, ruleStack = @stackForRow(row - 1), openScopes = @openScopesForRow(row)) -> + buildTokenizedLineForRowWithText: (row, line, ruleStack = @stackForRow(row - 1)) -> lineEnding = @buffer.lineEndingForRow(row) tabLength = @getTabLength() indentLevel = @indentLevelForRow(row) - {tags, ruleStack} = @grammar.tokenizeLine(text, ruleStack, row is 0, false) - new TokenizedLine({openScopes, text, tags, ruleStack, tabLength, lineEnding, indentLevel, invisibles: @getInvisiblesToShow(), @tokenIterator}) + {tokens, ruleStack} = @grammar.tokenizeLine(line, ruleStack, row is 0) + new TokenizedLine({tokens, ruleStack, tabLength, lineEnding, indentLevel, invisibles: @getInvisiblesToShow()}) getInvisiblesToShow: -> if @configSettings.showInvisibles and not @ignoreInvisibles @@ -346,25 +340,6 @@ class TokenizedBuffer extends Model stackForRow: (bufferRow) -> @tokenizedLines[bufferRow]?.ruleStack - openScopesForRow: (bufferRow) -> - if bufferRow > 0 - precedingLine = @tokenizedLines[bufferRow - 1] - @scopesFromTags(precedingLine.openScopes, precedingLine.tags) - else - [] - - scopesFromTags: (startingScopes, tags) -> - scopes = startingScopes.slice() - for tag in tags when tag 
< 0 - if (tag % 2) is -1 - scopes.push(tag) - else - expectedScope = tag + 1 - poppedScope = scopes.pop() - unless poppedScope is expectedScope - throw new Error("Encountered an invalid scope end id. Popped #{poppedScope}, expected to pop #{expectedScope}.") - scopes - indentLevelForRow: (bufferRow) -> line = @buffer.lineForRow(bufferRow) indentLevel = 0 @@ -401,20 +376,7 @@ class TokenizedBuffer extends Model 0 scopeDescriptorForPosition: (position) -> - {row, column} = Point.fromObject(position) - - iterator = @tokenizedLines[row].getTokenIterator() - while iterator.next() - if iterator.getScreenEnd() > column - scopes = iterator.getScopes() - break - - # rebuild scope of last token if we iterated off the end - unless scopes? - scopes = iterator.getScopes() - scopes.push(iterator.getScopeEnds().reverse()...) - - new ScopeDescriptor({scopes}) + new ScopeDescriptor(scopes: @tokenForPosition(position).scopes) tokenForPosition: (position) -> {row, column} = Point.fromObject(position) @@ -426,53 +388,85 @@ class TokenizedBuffer extends Model new Point(row, column) bufferRangeForScopeAtPosition: (selector, position) -> - selector = new ScopeSelector(selector.replace(/^\./, '')) position = Point.fromObject(position) + tokenizedLine = @tokenizedLines[position.row] + startIndex = tokenizedLine.tokenIndexAtBufferColumn(position.column) - {openScopes, tags} = @tokenizedLines[position.row] - scopes = openScopes.map (tag) -> atom.grammars.scopeForId(tag) + for index in [startIndex..0] + token = tokenizedLine.tokenAtIndex(index) + break unless token.matchesScopeSelector(selector) + firstToken = token - startColumn = 0 - for tag, tokenIndex in tags - if tag < 0 - if tag % 2 is -1 - scopes.push(atom.grammars.scopeForId(tag)) - else - scopes.pop() - else - endColumn = startColumn + tag - if endColumn > position.column - break - else - startColumn = endColumn + for index in [startIndex...tokenizedLine.getTokenCount()] + token = tokenizedLine.tokenAtIndex(index) + break unless token.matchesScopeSelector(selector) + lastToken = token - return unless selector.matches(scopes) + return unless firstToken? and lastToken? 
- startScopes = scopes.slice() - for startTokenIndex in [(tokenIndex - 1)..0] by -1 - tag = tags[startTokenIndex] - if tag < 0 - if tag % 2 is -1 - startScopes.pop() - else - startScopes.push(atom.grammars.scopeForId(tag)) - else - break unless selector.matches(startScopes) - startColumn -= tag + startColumn = tokenizedLine.bufferColumnForToken(firstToken) + endColumn = tokenizedLine.bufferColumnForToken(lastToken) + lastToken.bufferDelta + new Range([position.row, startColumn], [position.row, endColumn]) - endScopes = scopes.slice() - for endTokenIndex in [(tokenIndex + 1)...tags.length] by 1 - tag = tags[endTokenIndex] - if tag < 0 - if tag % 2 is -1 - endScopes.push(atom.grammars.scopeForId(tag)) - else - endScopes.pop() - else - break unless selector.matches(endScopes) - endColumn += tag + iterateTokensInBufferRange: (bufferRange, iterator) -> + bufferRange = Range.fromObject(bufferRange) + {start, end} = bufferRange - new Range(new Point(position.row, startColumn), new Point(position.row, endColumn)) + keepLooping = true + stop = -> keepLooping = false + + for bufferRow in [start.row..end.row] + bufferColumn = 0 + for token in @tokenizedLines[bufferRow].tokens + startOfToken = new Point(bufferRow, bufferColumn) + iterator(token, startOfToken, {stop}) if bufferRange.containsPoint(startOfToken) + return unless keepLooping + bufferColumn += token.bufferDelta + + backwardsIterateTokensInBufferRange: (bufferRange, iterator) -> + bufferRange = Range.fromObject(bufferRange) + {start, end} = bufferRange + + keepLooping = true + stop = -> keepLooping = false + + for bufferRow in [end.row..start.row] + bufferColumn = @buffer.lineLengthForRow(bufferRow) + for token in new Array(@tokenizedLines[bufferRow].tokens...).reverse() + bufferColumn -= token.bufferDelta + startOfToken = new Point(bufferRow, bufferColumn) + iterator(token, startOfToken, {stop}) if bufferRange.containsPoint(startOfToken) + return unless keepLooping + + findOpeningBracket: (startBufferPosition) -> + range = [[0,0], startBufferPosition] + position = null + depth = 0 + @backwardsIterateTokensInBufferRange range, (token, startPosition, {stop}) -> + if token.isBracket() + if token.value is '}' + depth++ + else if token.value is '{' + depth-- + if depth is 0 + position = startPosition + stop() + position + + findClosingBracket: (startBufferPosition) -> + range = [startBufferPosition, @buffer.getEndPosition()] + position = null + depth = 0 + @iterateTokensInBufferRange range, (token, startPosition, {stop}) -> + if token.isBracket() + if token.value is '{' + depth++ + else if token.value is '}' + depth-- + if depth is 0 + position = startPosition + stop() + position # Gets the row number of the last line. # diff --git a/src/tokenized-line.coffee b/src/tokenized-line.coffee index 45af81e57..b81d972a0 100644 --- a/src/tokenized-line.coffee +++ b/src/tokenized-line.coffee @@ -1,13 +1,10 @@ _ = require 'underscore-plus' {isPairedCharacter} = require './text-utils' -Token = require './token' -{SoftTab, HardTab, PairedCharacter, SoftWrapIndent} = require './special-token-symbols' NonWhitespaceRegex = /\S/ LeadingWhitespaceRegex = /^\s*/ TrailingWhitespaceRegex = /\s*$/ RepeatedSpaceRegex = /[ ]/g -CommentScopeRegex = /(\b|\.)comment/ idCounter = 1 module.exports = @@ -17,181 +14,32 @@ class TokenizedLine firstNonWhitespaceIndex: 0 foldable: false - constructor: (properties) -> - @id = idCounter++ - - return unless properties? 
- - @specialTokens = {} - {@openScopes, @text, @tags, @lineEnding, @ruleStack, @tokenIterator} = properties - {@startBufferColumn, @fold, @tabLength, @indentLevel, @invisibles} = properties - + constructor: ({tokens, @lineEnding, @ruleStack, @startBufferColumn, @fold, @tabLength, @indentLevel, @invisibles}) -> @startBufferColumn ?= 0 - @bufferDelta = @text.length + @tokens = @breakOutAtomicTokens(tokens) + @text = @buildText() + @bufferDelta = @buildBufferDelta() + @softWrapIndentationTokens = @getSoftWrapIndentationTokens() + @softWrapIndentationDelta = @buildSoftWrapIndentationDelta() - @transformContent() - @buildEndOfLineInvisibles() if @invisibles? and @lineEnding? + @id = idCounter++ + @markLeadingAndTrailingWhitespaceTokens() + if @invisibles + @substituteInvisibleCharacters() + @buildEndOfLineInvisibles() if @lineEnding? - transformContent: -> - text = '' - bufferColumn = 0 - screenColumn = 0 - tokenIndex = 0 - tokenOffset = 0 - firstNonWhitespaceColumn = null - lastNonWhitespaceColumn = null + buildText: -> + text = "" + text += token.value for token in @tokens + text - while bufferColumn < @text.length - # advance to next token if we've iterated over its length - if tokenOffset is @tags[tokenIndex] - tokenIndex++ - tokenOffset = 0 - - # advance to next token tag - tokenIndex++ while @tags[tokenIndex] < 0 - - character = @text[bufferColumn] - - # split out unicode surrogate pairs - if isPairedCharacter(@text, bufferColumn) - prefix = tokenOffset - suffix = @tags[tokenIndex] - tokenOffset - 2 - splitTokens = [] - splitTokens.push(prefix) if prefix > 0 - splitTokens.push(2) - splitTokens.push(suffix) if suffix > 0 - - @tags.splice(tokenIndex, 1, splitTokens...) - - firstNonWhitespaceColumn ?= screenColumn - lastNonWhitespaceColumn = screenColumn + 1 - - text += @text.substr(bufferColumn, 2) - screenColumn += 2 - bufferColumn += 2 - - tokenIndex++ if prefix > 0 - @specialTokens[tokenIndex] = PairedCharacter - tokenIndex++ - tokenOffset = 0 - - # split out leading soft tabs - else if character is ' ' - if firstNonWhitespaceColumn? - text += ' ' - else - if (screenColumn + 1) % @tabLength is 0 - @specialTokens[tokenIndex] = SoftTab - suffix = @tags[tokenIndex] - @tabLength - @tags.splice(tokenIndex, 1, @tabLength) - @tags.splice(tokenIndex + 1, 0, suffix) if suffix > 0 - text += @invisibles?.space ? ' ' - - screenColumn++ - bufferColumn++ - tokenOffset++ - - # expand hard tabs to the next tab stop - else if character is '\t' - tabLength = @tabLength - (screenColumn % @tabLength) - if @invisibles?.tab - text += @invisibles.tab - else - text += ' ' - text += ' ' for i in [1...tabLength] by 1 - - prefix = tokenOffset - suffix = @tags[tokenIndex] - tokenOffset - 1 - splitTokens = [] - splitTokens.push(prefix) if prefix > 0 - splitTokens.push(tabLength) - splitTokens.push(suffix) if suffix > 0 - - @tags.splice(tokenIndex, 1, splitTokens...) - - screenColumn += tabLength - bufferColumn++ - - tokenIndex++ if prefix > 0 - @specialTokens[tokenIndex] = HardTab - tokenIndex++ - tokenOffset = 0 - - # continue past any other character - else - firstNonWhitespaceColumn ?= screenColumn - lastNonWhitespaceColumn = screenColumn - - text += character - screenColumn++ - bufferColumn++ - tokenOffset++ - - @text = text - - @firstNonWhitespaceIndex = firstNonWhitespaceColumn - if lastNonWhitespaceColumn? 
- if lastNonWhitespaceColumn + 1 < @text.length - @firstTrailingWhitespaceIndex = lastNonWhitespaceColumn + 1 - if @invisibles?.space - @text = - @text.substring(0, @firstTrailingWhitespaceIndex) + - @text.substring(@firstTrailingWhitespaceIndex) - .replace(RepeatedSpaceRegex, @invisibles.space) - else - @lineIsWhitespaceOnly = true - @firstTrailingWhitespaceIndex = 0 - - getTokenIterator: -> @tokenIterator.reset(this) - - Object.defineProperty @prototype, 'tokens', get: -> - iterator = @getTokenIterator() - tokens = [] - - while iterator.next() - properties = { - value: iterator.getText() - scopes: iterator.getScopes().slice() - isAtomic: iterator.isAtomic() - isHardTab: iterator.isHardTab() - hasPairedCharacter: iterator.isPairedCharacter() - isSoftWrapIndentation: iterator.isSoftWrapIndentation() - } - - if iterator.isHardTab() - properties.bufferDelta = 1 - properties.hasInvisibleCharacters = true if @invisibles?.tab - - if iterator.getScreenStart() < @firstNonWhitespaceIndex - properties.firstNonWhitespaceIndex = - Math.min(@firstNonWhitespaceIndex, iterator.getScreenEnd()) - iterator.getScreenStart() - properties.hasInvisibleCharacters = true if @invisibles?.space - - if @lineEnding? and iterator.getScreenEnd() > @firstTrailingWhitespaceIndex - properties.firstTrailingWhitespaceIndex = - Math.max(0, @firstTrailingWhitespaceIndex - iterator.getScreenStart()) - properties.hasInvisibleCharacters = true if @invisibles?.space - - tokens.push(new Token(properties)) - - tokens + buildBufferDelta: -> + delta = 0 + delta += token.bufferDelta for token in @tokens + delta copy: -> - copy = new TokenizedLine - copy.tokenIterator = @tokenIterator - copy.indentLevel = @indentLevel - copy.openScopes = @openScopes - copy.text = @text - copy.tags = @tags - copy.specialTokens = @specialTokens - copy.firstNonWhitespaceIndex = @firstNonWhitespaceIndex - copy.firstTrailingWhitespaceIndex = @firstTrailingWhitespaceIndex - copy.lineEnding = @lineEnding - copy.endOfLineInvisibles = @endOfLineInvisibles - copy.ruleStack = @ruleStack - copy.startBufferColumn = @startBufferColumn - copy.fold = @fold - copy + new TokenizedLine({@tokens, @lineEnding, @ruleStack, @startBufferColumn, @fold}) # This clips a given screen column to a valid column that's within the line # and not in the middle of any atomic tokens. @@ -204,58 +52,49 @@ class TokenizedLine # # Returns a {Number} representing the clipped column. 
clipScreenColumn: (column, options={}) -> - return 0 if @tags.length is 0 + return 0 if @tokens.length is 0 {clip} = options column = Math.min(column, @getMaxScreenColumn()) tokenStartColumn = 0 + for token in @tokens + break if tokenStartColumn + token.screenDelta > column + tokenStartColumn += token.screenDelta - iterator = @getTokenIterator() - while iterator.next() - break if iterator.getScreenEnd() > column - - if iterator.isSoftWrapIndentation() - iterator.next() while iterator.isSoftWrapIndentation() - iterator.getScreenStart() - else if iterator.isAtomic() and iterator.getScreenStart() < column + if @isColumnInsideSoftWrapIndentation(tokenStartColumn) + @softWrapIndentationDelta + else if token.isAtomic and tokenStartColumn < column if clip is 'forward' - iterator.getScreenEnd() + tokenStartColumn + token.screenDelta else if clip is 'backward' - iterator.getScreenStart() + tokenStartColumn else #'closest' - if column > ((iterator.getScreenStart() + iterator.getScreenEnd()) / 2) - iterator.getScreenEnd() + if column > tokenStartColumn + (token.screenDelta / 2) + tokenStartColumn + token.screenDelta else - iterator.getScreenStart() + tokenStartColumn else column - screenColumnForBufferColumn: (targetBufferColumn, options) -> - iterator = @getTokenIterator() - while iterator.next() - tokenBufferStart = iterator.getBufferStart() - tokenBufferEnd = iterator.getBufferEnd() - if tokenBufferStart <= targetBufferColumn < tokenBufferEnd - overshoot = targetBufferColumn - tokenBufferStart - return Math.min( - iterator.getScreenStart() + overshoot, - iterator.getScreenEnd() - ) - iterator.getScreenEnd() + screenColumnForBufferColumn: (bufferColumn, options) -> + bufferColumn = bufferColumn - @startBufferColumn + screenColumn = 0 + currentBufferColumn = 0 + for token in @tokens + break if currentBufferColumn + token.bufferDelta > bufferColumn + screenColumn += token.screenDelta + currentBufferColumn += token.bufferDelta + @clipScreenColumn(screenColumn + (bufferColumn - currentBufferColumn)) - bufferColumnForScreenColumn: (targetScreenColumn) -> - iterator = @getTokenIterator() - while iterator.next() - tokenScreenStart = iterator.getScreenStart() - tokenScreenEnd = iterator.getScreenEnd() - if tokenScreenStart <= targetScreenColumn < tokenScreenEnd - overshoot = targetScreenColumn - tokenScreenStart - return Math.min( - iterator.getBufferStart() + overshoot, - iterator.getBufferEnd() - ) - iterator.getBufferEnd() + bufferColumnForScreenColumn: (screenColumn, options) -> + bufferColumn = @startBufferColumn + currentScreenColumn = 0 + for token in @tokens + break if currentScreenColumn + token.screenDelta > screenColumn + bufferColumn += token.bufferDelta + currentScreenColumn += token.screenDelta + bufferColumn + (screenColumn - currentScreenColumn) getMaxScreenColumn: -> if @fold @@ -289,128 +128,69 @@ class TokenizedLine return maxColumn + buildSoftWrapIndentationTokens: (token, hangingIndent) -> + totalIndentSpaces = (@indentLevel * @tabLength) + hangingIndent + indentTokens = [] + while totalIndentSpaces > 0 + tokenLength = Math.min(@tabLength, totalIndentSpaces) + indentToken = token.buildSoftWrapIndentationToken(tokenLength) + indentTokens.push(indentToken) + totalIndentSpaces -= tokenLength + + indentTokens + softWrapAt: (column, hangingIndent) -> - return [null, this] if column is 0 + return [new TokenizedLine([], '', [0, 0], [0, 0]), this] if column is 0 - leftText = @text.substring(0, column) - rightText = @text.substring(column) + rightTokens = new Array(@tokens...) 
+ leftTokens = [] + leftScreenColumn = 0 - leftTags = [] - rightTags = [] + while leftScreenColumn < column + if leftScreenColumn + rightTokens[0].screenDelta > column + rightTokens[0..0] = rightTokens[0].splitAt(column - leftScreenColumn) + nextToken = rightTokens.shift() + leftScreenColumn += nextToken.screenDelta + leftTokens.push nextToken - leftSpecialTokens = {} - rightSpecialTokens = {} - - rightOpenScopes = @openScopes.slice() - - screenColumn = 0 - - for tag, index in @tags - # tag represents a token - if tag >= 0 - # token ends before the soft wrap column - if screenColumn + tag <= column - if specialToken = @specialTokens[index] - leftSpecialTokens[index] = specialToken - leftTags.push(tag) - screenColumn += tag - - # token starts before and ends after the split column - else if screenColumn <= column - leftSuffix = column - screenColumn - rightPrefix = screenColumn + tag - column - - leftTags.push(leftSuffix) if leftSuffix > 0 - - softWrapIndent = @indentLevel * @tabLength + (hangingIndent ? 0) - for i in [0...softWrapIndent] by 1 - rightText = ' ' + rightText - remainingSoftWrapIndent = softWrapIndent - while remainingSoftWrapIndent > 0 - indentToken = Math.min(remainingSoftWrapIndent, @tabLength) - rightSpecialTokens[rightTags.length] = SoftWrapIndent - rightTags.push(indentToken) - remainingSoftWrapIndent -= indentToken - - rightTags.push(rightPrefix) if rightPrefix > 0 - - screenColumn += tag - - # token is after split column - else - if specialToken = @specialTokens[index] - rightSpecialTokens[rightTags.length] = specialToken - rightTags.push(tag) - - # tag represents the start or end of a scop - else if (tag % 2) is -1 - if screenColumn < column - leftTags.push(tag) - rightOpenScopes.push(tag) - else - rightTags.push(tag) - else - if screenColumn < column - leftTags.push(tag) - rightOpenScopes.pop() - else - rightTags.push(tag) - - splitBufferColumn = @bufferColumnForScreenColumn(column) - - leftFragment = new TokenizedLine - leftFragment.tokenIterator = @tokenIterator - leftFragment.openScopes = @openScopes - leftFragment.text = leftText - leftFragment.tags = leftTags - leftFragment.specialTokens = leftSpecialTokens - leftFragment.startBufferColumn = @startBufferColumn - leftFragment.bufferDelta = splitBufferColumn - @startBufferColumn - leftFragment.ruleStack = @ruleStack - leftFragment.invisibles = @invisibles - leftFragment.lineEnding = null - leftFragment.indentLevel = @indentLevel - leftFragment.tabLength = @tabLength - leftFragment.firstNonWhitespaceIndex = Math.min(column, @firstNonWhitespaceIndex) - leftFragment.firstTrailingWhitespaceIndex = Math.min(column, @firstTrailingWhitespaceIndex) - - rightFragment = new TokenizedLine - rightFragment.tokenIterator = @tokenIterator - rightFragment.openScopes = rightOpenScopes - rightFragment.text = rightText - rightFragment.tags = rightTags - rightFragment.specialTokens = rightSpecialTokens - rightFragment.startBufferColumn = splitBufferColumn - rightFragment.bufferDelta = @bufferDelta - splitBufferColumn - rightFragment.ruleStack = @ruleStack - rightFragment.invisibles = @invisibles - rightFragment.lineEnding = @lineEnding - rightFragment.indentLevel = @indentLevel - rightFragment.tabLength = @tabLength - rightFragment.endOfLineInvisibles = @endOfLineInvisibles - rightFragment.firstNonWhitespaceIndex = Math.max(softWrapIndent, @firstNonWhitespaceIndex - column + softWrapIndent) - rightFragment.firstTrailingWhitespaceIndex = Math.max(softWrapIndent, @firstTrailingWhitespaceIndex - column + softWrapIndent) + 
indentationTokens = @buildSoftWrapIndentationTokens(leftTokens[0], hangingIndent) + leftFragment = new TokenizedLine( + tokens: leftTokens + startBufferColumn: @startBufferColumn + ruleStack: @ruleStack + invisibles: @invisibles + lineEnding: null, + indentLevel: @indentLevel, + tabLength: @tabLength + ) + rightFragment = new TokenizedLine( + tokens: indentationTokens.concat(rightTokens) + startBufferColumn: @bufferColumnForScreenColumn(column) + ruleStack: @ruleStack + invisibles: @invisibles + lineEnding: @lineEnding, + indentLevel: @indentLevel, + tabLength: @tabLength + ) [leftFragment, rightFragment] isSoftWrapped: -> @lineEnding is null - isColumnInsideSoftWrapIndentation: (targetColumn) -> - targetColumn < @getSoftWrapIndentationDelta() + isColumnInsideSoftWrapIndentation: (column) -> + return false if @softWrapIndentationTokens.length is 0 - getSoftWrapIndentationDelta: -> - delta = 0 - for tag, index in @tags - if tag >= 0 - if @specialTokens[index] is SoftWrapIndent - delta += tag - else - break - delta + column < @softWrapIndentationDelta + + getSoftWrapIndentationTokens: -> + _.select(@tokens, (token) -> token.isSoftWrapIndentation) + + buildSoftWrapIndentationDelta: -> + _.reduce @softWrapIndentationTokens, ((acc, token) -> acc + token.screenDelta), 0 hasOnlySoftWrapIndentation: -> - @getSoftWrapIndentationDelta() is @text.length + @tokens.length is @softWrapIndentationTokens.length tokenAtBufferColumn: (bufferColumn) -> @tokens[@tokenIndexAtBufferColumn(bufferColumn)] @@ -430,6 +210,58 @@ class TokenizedLine delta = nextDelta delta + breakOutAtomicTokens: (inputTokens) -> + outputTokens = [] + breakOutLeadingSoftTabs = true + column = @startBufferColumn + for token in inputTokens + newTokens = token.breakOutAtomicTokens(@tabLength, breakOutLeadingSoftTabs, column) + column += newToken.value.length for newToken in newTokens + outputTokens.push(newTokens...) + breakOutLeadingSoftTabs = token.isOnlyWhitespace() if breakOutLeadingSoftTabs + outputTokens + + markLeadingAndTrailingWhitespaceTokens: -> + @firstNonWhitespaceIndex = @text.search(NonWhitespaceRegex) + if @firstNonWhitespaceIndex > 0 and isPairedCharacter(@text, @firstNonWhitespaceIndex - 1) + @firstNonWhitespaceIndex-- + firstTrailingWhitespaceIndex = @text.search(TrailingWhitespaceRegex) + @lineIsWhitespaceOnly = firstTrailingWhitespaceIndex is 0 + index = 0 + for token in @tokens + if index < @firstNonWhitespaceIndex + token.firstNonWhitespaceIndex = Math.min(index + token.value.length, @firstNonWhitespaceIndex - index) + # Only the *last* segment of a soft-wrapped line can have trailing whitespace + if @lineEnding? 
and (index + token.value.length > firstTrailingWhitespaceIndex) + token.firstTrailingWhitespaceIndex = Math.max(0, firstTrailingWhitespaceIndex - index) + index += token.value.length + return + + substituteInvisibleCharacters: -> + invisibles = @invisibles + changedText = false + + for token, i in @tokens + if token.isHardTab + if invisibles.tab + token.value = invisibles.tab + token.value.substring(invisibles.tab.length) + token.hasInvisibleCharacters = true + changedText = true + else + if invisibles.space + if token.hasLeadingWhitespace() and not token.isSoftWrapIndentation + token.value = token.value.replace LeadingWhitespaceRegex, (leadingWhitespace) -> + leadingWhitespace.replace RepeatedSpaceRegex, invisibles.space + token.hasInvisibleCharacters = true + changedText = true + if token.hasTrailingWhitespace() + token.value = token.value.replace TrailingWhitespaceRegex, (leadingWhitespace) -> + leadingWhitespace.replace RepeatedSpaceRegex, invisibles.space + token.hasInvisibleCharacters = true + changedText = true + + @text = @buildText() if changedText + buildEndOfLineInvisibles: -> @endOfLineInvisibles = [] {cr, eol} = @invisibles @@ -442,13 +274,11 @@ class TokenizedLine @endOfLineInvisibles.push(eol) if eol isComment: -> - iterator = @getTokenIterator() - while iterator.next() - scopes = iterator.getScopes() - continue if scopes.length is 1 - continue unless NonWhitespaceRegex.test(iterator.getText()) - for scope in scopes - return true if CommentScopeRegex.test(scope) + for token in @tokens + continue if token.scopes.length is 1 + continue if token.isOnlyWhitespace() + for scope in token.scopes + return true if _.contains(scope.split('.'), 'comment') break false @@ -459,6 +289,42 @@ class TokenizedLine @tokens[index] getTokenCount: -> - count = 0 - count++ for tag in @tags when tag >= 0 - count + @tokens.length + + bufferColumnForToken: (targetToken) -> + column = 0 + for token in @tokens + return column if token is targetToken + column += token.bufferDelta + + getScopeTree: -> + return @scopeTree if @scopeTree? 
+ + scopeStack = [] + for token in @tokens + @updateScopeStack(scopeStack, token.scopes) + _.last(scopeStack).children.push(token) + + @scopeTree = scopeStack[0] + @updateScopeStack(scopeStack, []) + @scopeTree + + updateScopeStack: (scopeStack, desiredScopeDescriptor) -> + # Find a common prefix + for scope, i in desiredScopeDescriptor + break unless scopeStack[i]?.scope is desiredScopeDescriptor[i] + + # Pop scopeDescriptor until we're at the common prefx + until scopeStack.length is i + poppedScope = scopeStack.pop() + _.last(scopeStack)?.children.push(poppedScope) + + # Push onto common prefix until scopeStack equals desiredScopeDescriptor + for j in [i...desiredScopeDescriptor.length] + scopeStack.push(new Scope(desiredScopeDescriptor[j])) + + return + +class Scope + constructor: (@scope) -> + @children = [] From 05d4b3da2786bb3160072d8b71771ee6df86076c Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Thu, 21 May 2015 10:24:31 -0700 Subject: [PATCH 03/12] :arrow_up: snippets --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 15eb53b29..807a67939 100644 --- a/package.json +++ b/package.json @@ -115,7 +115,7 @@ "package-generator": "0.39.0", "release-notes": "0.52.0", "settings-view": "0.204.0", - "snippets": "0.89.0", + "snippets": "0.90.0", "spell-check": "0.58.0", "status-bar": "0.72.0", "styleguide": "0.44.0", From 2379b3803fbc9e09dce4192dc4c01b93d1c23249 Mon Sep 17 00:00:00 2001 From: Nathan Sobo Date: Thu, 21 May 2015 16:33:17 +0200 Subject: [PATCH 04/12] Revert "Revert "Merge pull request #6757 from atom/ns-less-memory-for-tokens"" This reverts commit 7cb0bc3bc2aaa0f01b26c8a71f7e5890f8e570d3. --- package.json | 4 +- spec/text-editor-presenter-spec.coffee | 36 +- spec/text-editor-spec.coffee | 3 +- spec/tokenized-buffer-spec.coffee | 28 +- spec/tokenized-line-spec.coffee | 21 - src/display-buffer.coffee | 32 +- src/language-mode.coffee | 5 +- src/lines-component.coffee | 160 ++++++-- src/special-token-symbols.coffee | 6 + src/text-editor-presenter.coffee | 24 +- src/text-editor.coffee | 3 +- src/token-iterator.coffee | 83 ++++ src/token.coffee | 199 +--------- src/tokenized-buffer.coffee | 184 ++++----- src/tokenized-line.coffee | 526 ++++++++++++++++--------- 15 files changed, 712 insertions(+), 602 deletions(-) create mode 100644 src/special-token-symbols.coffee create mode 100644 src/token-iterator.coffee diff --git a/package.json b/package.json index 807a67939..70526eb98 100644 --- a/package.json +++ b/package.json @@ -32,7 +32,7 @@ "delegato": "^1", "emissary": "^1.3.3", "event-kit": "^1.2.0", - "first-mate": "^3.1", + "first-mate": "^4.1.4", "fs-plus": "^2.8.0", "fstream": "0.1.24", "fuzzaldrin": "^2.1", @@ -151,7 +151,7 @@ "language-ruby": "0.54.0", "language-ruby-on-rails": "0.21.0", "language-sass": "0.38.0", - "language-shellscript": "0.14.0", + "language-shellscript": "0.15.0", "language-source": "0.9.0", "language-sql": "0.15.0", "language-text": "0.6.0", diff --git a/spec/text-editor-presenter-spec.coffee b/spec/text-editor-presenter-spec.coffee index 7da866ab4..d15c4759d 100644 --- a/spec/text-editor-presenter-spec.coffee +++ b/spec/text-editor-presenter-spec.coffee @@ -670,7 +670,11 @@ describe "TextEditorPresenter", -> expectValues lineStateForScreenRow(presenter, 4), { screenRow: 4 text: line4.text - tokens: line4.tokens + tags: line4.tags + specialTokens: line4.specialTokens + firstNonWhitespaceIndex: line4.firstNonWhitespaceIndex + firstTrailingWhitespaceIndex: line4.firstTrailingWhitespaceIndex 
+ invisibles: line4.invisibles top: 10 * 4 } @@ -678,7 +682,11 @@ describe "TextEditorPresenter", -> expectValues lineStateForScreenRow(presenter, 5), { screenRow: 5 text: line5.text - tokens: line5.tokens + tags: line5.tags + specialTokens: line5.specialTokens + firstNonWhitespaceIndex: line5.firstNonWhitespaceIndex + firstTrailingWhitespaceIndex: line5.firstTrailingWhitespaceIndex + invisibles: line5.invisibles top: 10 * 5 } @@ -686,7 +694,11 @@ describe "TextEditorPresenter", -> expectValues lineStateForScreenRow(presenter, 6), { screenRow: 6 text: line6.text - tokens: line6.tokens + tags: line6.tags + specialTokens: line6.specialTokens + firstNonWhitespaceIndex: line6.firstNonWhitespaceIndex + firstTrailingWhitespaceIndex: line6.firstTrailingWhitespaceIndex + invisibles: line6.invisibles top: 10 * 6 } @@ -694,7 +706,11 @@ describe "TextEditorPresenter", -> expectValues lineStateForScreenRow(presenter, 7), { screenRow: 7 text: line7.text - tokens: line7.tokens + tags: line7.tags + specialTokens: line7.specialTokens + firstNonWhitespaceIndex: line7.firstNonWhitespaceIndex + firstTrailingWhitespaceIndex: line7.firstTrailingWhitespaceIndex + invisibles: line7.invisibles top: 10 * 7 } @@ -702,7 +718,11 @@ describe "TextEditorPresenter", -> expectValues lineStateForScreenRow(presenter, 8), { screenRow: 8 text: line8.text - tokens: line8.tokens + tags: line8.tags + specialTokens: line8.specialTokens + firstNonWhitespaceIndex: line8.firstNonWhitespaceIndex + firstTrailingWhitespaceIndex: line8.firstTrailingWhitespaceIndex + invisibles: line8.invisibles top: 10 * 8 } @@ -797,19 +817,19 @@ describe "TextEditorPresenter", -> line1 = editor.tokenizedLineForScreenRow(1) expectValues lineStateForScreenRow(presenter, 1), { text: line1.text - tokens: line1.tokens + tags: line1.tags } line2 = editor.tokenizedLineForScreenRow(2) expectValues lineStateForScreenRow(presenter, 2), { text: line2.text - tokens: line2.tokens + tags: line2.tags } line3 = editor.tokenizedLineForScreenRow(3) expectValues lineStateForScreenRow(presenter, 3), { text: line3.text - tokens: line3.tokens + tags: line3.tags } it "does not remove out-of-view lines corresponding to ::mouseWheelScreenRow until ::stoppedScrollingDelay elapses", -> diff --git a/spec/text-editor-spec.coffee b/spec/text-editor-spec.coffee index d1d311088..a845619ba 100644 --- a/spec/text-editor-spec.coffee +++ b/spec/text-editor-spec.coffee @@ -4110,8 +4110,9 @@ describe "TextEditor", -> runs -> grammar = atom.grammars.selectGrammar("text.js") - {tokens} = grammar.tokenizeLine("var i; // http://github.com") + {line, tags} = grammar.tokenizeLine("var i; // http://github.com") + tokens = atom.grammars.decodeTokens(line, tags) expect(tokens[0].value).toBe "var" expect(tokens[0].scopes).toEqual ["source.js", "storage.modifier.js"] diff --git a/spec/tokenized-buffer-spec.coffee b/spec/tokenized-buffer-spec.coffee index 9d92335af..45cc03a44 100644 --- a/spec/tokenized-buffer-spec.coffee +++ b/spec/tokenized-buffer-spec.coffee @@ -296,14 +296,6 @@ describe "TokenizedBuffer", -> expect(tokenizedBuffer.tokenizedLineForRow(5).ruleStack?).toBeTruthy() expect(tokenizedBuffer.tokenizedLineForRow(6).ruleStack?).toBeTruthy() - describe ".findOpeningBracket(closingBufferPosition)", -> - it "returns the position of the matching bracket, skipping any nested brackets", -> - expect(tokenizedBuffer.findOpeningBracket([9, 2])).toEqual [1, 29] - - describe ".findClosingBracket(startBufferPosition)", -> - it "returns the position of the matching bracket, skipping any nested 
brackets", -> - expect(tokenizedBuffer.findClosingBracket([1, 29])).toEqual [9, 2] - it "tokenizes leading whitespace based on the new tab length", -> expect(tokenizedBuffer.tokenizedLineForRow(5).tokens[0].isAtomic).toBeTruthy() expect(tokenizedBuffer.tokenizedLineForRow(5).tokens[0].value).toBe " " @@ -580,7 +572,7 @@ describe "TokenizedBuffer", -> describe "when the selector matches a run of multiple tokens at the position", -> it "returns the range covered by all contigous tokens (within a single line)", -> - expect(tokenizedBuffer.bufferRangeForScopeAtPosition('.function', [1, 18])).toEqual [[1, 6], [1, 28]] + expect(tokenizedBuffer.bufferRangeForScopeAtPosition('.meta.function', [1, 18])).toEqual [[1, 6], [1, 28]] describe "when the editor.tabLength config value changes", -> it "updates the tab length of the tokenized lines", -> @@ -697,22 +689,6 @@ describe "TokenizedBuffer", -> expect(line.tokens[0].firstNonWhitespaceIndex).toBe 2 expect(line.tokens[line.tokens.length - 1].firstTrailingWhitespaceIndex).toBe 0 - it "sets the ::firstNonWhitespaceIndex and ::firstTrailingWhitespaceIndex correctly when tokens are split for soft-wrapping", -> - atom.config.set("editor.showInvisibles", true) - atom.config.set("editor.invisibles", space: 'S') - buffer.setText(" token ") - fullyTokenize(tokenizedBuffer) - token = tokenizedBuffer.tokenizedLines[0].tokens[0] - - [leftToken, rightToken] = token.splitAt(1) - expect(leftToken.hasInvisibleCharacters).toBe true - expect(leftToken.firstNonWhitespaceIndex).toBe 1 - expect(leftToken.firstTrailingWhitespaceIndex).toBe null - - expect(leftToken.hasInvisibleCharacters).toBe true - expect(rightToken.firstNonWhitespaceIndex).toBe null - expect(rightToken.firstTrailingWhitespaceIndex).toBe 5 - describe ".indentLevel on tokenized lines", -> beforeEach -> buffer = atom.project.bufferForPathSync('sample.js') @@ -752,7 +728,7 @@ describe "TokenizedBuffer", -> it "updates empty line indent guides when the empty line is the last line", -> buffer.insert([12, 2], '\n') - # The newline and he tab need to be in two different operations to surface the bug + # The newline and the tab need to be in two different operations to surface the bug buffer.insert([12, 0], ' ') expect(tokenizedBuffer.tokenizedLineForRow(13).indentLevel).toBe 1 diff --git a/spec/tokenized-line-spec.coffee b/spec/tokenized-line-spec.coffee index 0da83c91c..2914ec089 100644 --- a/spec/tokenized-line-spec.coffee +++ b/spec/tokenized-line-spec.coffee @@ -17,24 +17,3 @@ describe "TokenizedLine", -> it "returns false when the line is not only whitespace", -> expect(editor.tokenizedLineForScreenRow(0).isOnlyWhitespace()).toBe false expect(editor.tokenizedLineForScreenRow(2).isOnlyWhitespace()).toBe false - - describe "::getScopeTree()", -> - it "returns a tree whose inner nodes are scopeDescriptor and whose leaf nodes are tokens in those scopeDescriptor", -> - [tokens, tokenIndex] = [] - - ensureValidScopeTree = (scopeTree, scopeDescriptor=[]) -> - if scopeTree.children? 
- for child in scopeTree.children - ensureValidScopeTree(child, scopeDescriptor.concat([scopeTree.scope])) - else - expect(scopeTree).toBe tokens[tokenIndex++] - expect(scopeDescriptor).toEqual scopeTree.scopes - - waitsForPromise -> - atom.project.open('coffee.coffee').then (o) -> editor = o - - runs -> - tokenIndex = 0 - tokens = editor.tokenizedLineForScreenRow(1).tokens - scopeTree = editor.tokenizedLineForScreenRow(1).getScopeTree() - ensureValidScopeTree(scopeTree) diff --git a/src/display-buffer.coffee b/src/display-buffer.coffee index 26bf43dce..b2460addc 100644 --- a/src/display-buffer.coffee +++ b/src/display-buffer.coffee @@ -2,6 +2,7 @@ _ = require 'underscore-plus' Serializable = require 'serializable' {CompositeDisposable, Emitter} = require 'event-kit' {Point, Range} = require 'text-buffer' +Grim = require 'grim' TokenizedBuffer = require './tokenized-buffer' RowMap = require './row-map' Fold = require './fold' @@ -9,7 +10,6 @@ Model = require './model' Token = require './token' Decoration = require './decoration' Marker = require './marker' -Grim = require 'grim' class BufferToScreenConversionError extends Error constructor: (@message, @metadata) -> @@ -659,16 +659,19 @@ class DisplayBuffer extends Model top = targetRow * @lineHeightInPixels left = 0 column = 0 - for token in @tokenizedLineForScreenRow(targetRow).tokens - charWidths = @getScopedCharWidths(token.scopes) + + iterator = @tokenizedLineForScreenRow(targetRow).getTokenIterator() + while iterator.next() + charWidths = @getScopedCharWidths(iterator.getScopes()) valueIndex = 0 - while valueIndex < token.value.length - if token.hasPairedCharacter - char = token.value.substr(valueIndex, 2) + value = iterator.getText() + while valueIndex < value.length + if iterator.isPairedCharacter() + char = value charLength = 2 valueIndex += 2 else - char = token.value[valueIndex] + char = value[valueIndex] charLength = 1 valueIndex++ @@ -689,16 +692,19 @@ class DisplayBuffer extends Model left = 0 column = 0 - for token in @tokenizedLineForScreenRow(row).tokens - charWidths = @getScopedCharWidths(token.scopes) + + iterator = @tokenizedLineForScreenRow(row).getTokenIterator() + while iterator.next() + charWidths = @getScopedCharWidths(iterator.getScopes()) + value = iterator.getText() valueIndex = 0 - while valueIndex < token.value.length - if token.hasPairedCharacter - char = token.value.substr(valueIndex, 2) + while valueIndex < value.length + if iterator.isPairedCharacter() + char = value charLength = 2 valueIndex += 2 else - char = token.value[valueIndex] + char = value[valueIndex] charLength = 1 valueIndex++ diff --git a/src/language-mode.coffee b/src/language-mode.coffee index b5529a05e..c9401550b 100644 --- a/src/language-mode.coffee +++ b/src/language-mode.coffee @@ -242,8 +242,9 @@ class LanguageMode @suggestedIndentForTokenizedLineAtBufferRow(bufferRow, tokenizedLine, options) suggestedIndentForTokenizedLineAtBufferRow: (bufferRow, tokenizedLine, options) -> - scopes = tokenizedLine.tokens[0].scopes - scopeDescriptor = new ScopeDescriptor({scopes}) + iterator = tokenizedLine.getTokenIterator() + iterator.next() + scopeDescriptor = new ScopeDescriptor(scopes: iterator.getScopes()) currentIndentLevel = @editor.indentationForBufferRow(bufferRow) return currentIndentLevel unless increaseIndentRegex = @increaseIndentRegexForScopeDescriptor(scopeDescriptor) diff --git a/src/lines-component.coffee b/src/lines-component.coffee index fbec40b79..17c904e99 100644 --- a/src/lines-component.coffee +++ b/src/lines-component.coffee @@ 
-4,10 +4,13 @@ _ = require 'underscore-plus' CursorsComponent = require './cursors-component' HighlightsComponent = require './highlights-component' +TokenIterator = require './token-iterator' DummyLineNode = $$(-> @div className: 'line', style: 'position: absolute; visibility: hidden;', => @span 'x')[0] AcceptFilter = {acceptNode: -> NodeFilter.FILTER_ACCEPT} WrapperDiv = document.createElement('div') +TokenTextEscapeRegex = /[&"'<>]/g +MaxTokenLength = 20000 cloneObject = (object) -> clone = {} @@ -19,6 +22,7 @@ class LinesComponent placeholderTextDiv: null constructor: ({@presenter, @hostElement, @useShadowDOM, visible}) -> + @tokenIterator = new TokenIterator @measuredLines = new Set @lineNodesByLineId = {} @screenRowsByLineId = {} @@ -167,20 +171,116 @@ class LinesComponent @buildEndOfLineHTML(id) or ' ' buildLineInnerHTML: (id) -> - {indentGuidesVisible} = @newState - {tokens, text, isOnlyWhitespace} = @newState.lines[id] + lineState = @newState.lines[id] + {firstNonWhitespaceIndex, firstTrailingWhitespaceIndex, invisibles} = lineState + lineIsWhitespaceOnly = firstTrailingWhitespaceIndex is 0 + innerHTML = "" + @tokenIterator.reset(lineState) - scopeStack = [] - for token in tokens - innerHTML += @updateScopeStack(scopeStack, token.scopes) - hasIndentGuide = indentGuidesVisible and (token.hasLeadingWhitespace() or (token.hasTrailingWhitespace() and isOnlyWhitespace)) - innerHTML += token.getValueAsHtml({hasIndentGuide}) + while @tokenIterator.next() + for scope in @tokenIterator.getScopeEnds() + innerHTML += "" + + for scope in @tokenIterator.getScopeStarts() + innerHTML += "" + + tokenStart = @tokenIterator.getScreenStart() + tokenEnd = @tokenIterator.getScreenEnd() + tokenText = @tokenIterator.getText() + isHardTab = @tokenIterator.isHardTab() + + if hasLeadingWhitespace = tokenStart < firstNonWhitespaceIndex + tokenFirstNonWhitespaceIndex = firstNonWhitespaceIndex - tokenStart + else + tokenFirstNonWhitespaceIndex = null + + if hasTrailingWhitespace = tokenEnd > firstTrailingWhitespaceIndex + tokenFirstTrailingWhitespaceIndex = Math.max(0, firstTrailingWhitespaceIndex - tokenStart) + else + tokenFirstTrailingWhitespaceIndex = null + + hasIndentGuide = + @newState.indentGuidesVisible and + (hasLeadingWhitespace or lineIsWhitespaceOnly) + + hasInvisibleCharacters = + (invisibles?.tab and isHardTab) or + (invisibles?.space and (hasLeadingWhitespace or hasTrailingWhitespace)) + + innerHTML += @buildTokenHTML(tokenText, isHardTab, tokenFirstNonWhitespaceIndex, tokenFirstTrailingWhitespaceIndex, hasIndentGuide, hasInvisibleCharacters) + + for scope in @tokenIterator.getScopeEnds() + innerHTML += "" + + for scope in @tokenIterator.getScopes() + innerHTML += "" - innerHTML += @popScope(scopeStack) while scopeStack.length > 0 innerHTML += @buildEndOfLineHTML(id) innerHTML + buildTokenHTML: (tokenText, isHardTab, firstNonWhitespaceIndex, firstTrailingWhitespaceIndex, hasIndentGuide, hasInvisibleCharacters) -> + if isHardTab + classes = 'hard-tab' + classes += ' leading-whitespace' if firstNonWhitespaceIndex? + classes += ' trailing-whitespace' if firstTrailingWhitespaceIndex? + classes += ' indent-guide' if hasIndentGuide + classes += ' invisible-character' if hasInvisibleCharacters + return "#{@escapeTokenText(tokenText)}" + else + startIndex = 0 + endIndex = tokenText.length + + leadingHtml = '' + trailingHtml = '' + + if firstNonWhitespaceIndex? 
+ leadingWhitespace = tokenText.substring(0, firstNonWhitespaceIndex) + + classes = 'leading-whitespace' + classes += ' indent-guide' if hasIndentGuide + classes += ' invisible-character' if hasInvisibleCharacters + + leadingHtml = "#{leadingWhitespace}" + startIndex = firstNonWhitespaceIndex + + if firstTrailingWhitespaceIndex? + tokenIsOnlyWhitespace = firstTrailingWhitespaceIndex is 0 + trailingWhitespace = tokenText.substring(firstTrailingWhitespaceIndex) + + classes = 'trailing-whitespace' + classes += ' indent-guide' if hasIndentGuide and not firstNonWhitespaceIndex? and tokenIsOnlyWhitespace + classes += ' invisible-character' if hasInvisibleCharacters + + trailingHtml = "#{trailingWhitespace}" + + endIndex = firstTrailingWhitespaceIndex + + html = leadingHtml + if tokenText.length > MaxTokenLength + while startIndex < endIndex + html += "" + @escapeTokenText(tokenText, startIndex, startIndex + MaxTokenLength) + "" + startIndex += MaxTokenLength + else + html += @escapeTokenText(tokenText, startIndex, endIndex) + + html += trailingHtml + html + + escapeTokenText: (tokenText, startIndex, endIndex) -> + if startIndex? and endIndex? and startIndex > 0 or endIndex < tokenText.length + tokenText = tokenText.slice(startIndex, endIndex) + tokenText.replace(TokenTextEscapeRegex, @escapeTokenTextReplace) + + escapeTokenTextReplace: (match) -> + switch match + when '&' then '&' + when '"' then '"' + when "'" then ''' + when '<' then '<' + when '>' then '>' + else match + buildEndOfLineHTML: (id) -> {endOfLineInvisibles} = @newState.lines[id] @@ -190,31 +290,6 @@ class LinesComponent html += "#{invisible}" html - updateScopeStack: (scopeStack, desiredScopeDescriptor) -> - html = "" - - # Find a common prefix - for scope, i in desiredScopeDescriptor - break unless scopeStack[i] is desiredScopeDescriptor[i] - - # Pop scopeDescriptor until we're at the common prefx - until scopeStack.length is i - html += @popScope(scopeStack) - - # Push onto common prefix until scopeStack equals desiredScopeDescriptor - for j in [i...desiredScopeDescriptor.length] - html += @pushScope(scopeStack, desiredScopeDescriptor[j]) - - html - - popScope: (scopeStack) -> - scopeStack.pop() - "" - - pushScope: (scopeStack, scope) -> - scopeStack.push(scope) - "" - updateLineNode: (id) -> oldLineState = @oldState.lines[id] newLineState = @newState.lines[id] @@ -279,19 +354,22 @@ class LinesComponent iterator = null charIndex = 0 - for {value, scopes, hasPairedCharacter} in tokenizedLine.tokens + @tokenIterator.reset(tokenizedLine) + while @tokenIterator.next() + scopes = @tokenIterator.getScopes() + text = @tokenIterator.getText() charWidths = @presenter.getScopedCharacterWidths(scopes) - valueIndex = 0 - while valueIndex < value.length - if hasPairedCharacter - char = value.substr(valueIndex, 2) + textIndex = 0 + while textIndex < text.length + if @tokenIterator.isPairedCharacter() + char = text charLength = 2 - valueIndex += 2 + textIndex += 2 else - char = value[valueIndex] + char = text[textIndex] charLength = 1 - valueIndex++ + textIndex++ continue if char is '\0' diff --git a/src/special-token-symbols.coffee b/src/special-token-symbols.coffee new file mode 100644 index 000000000..06884b85f --- /dev/null +++ b/src/special-token-symbols.coffee @@ -0,0 +1,6 @@ +module.exports = { + SoftTab: Symbol('SoftTab') + HardTab: Symbol('HardTab') + PairedCharacter: Symbol('PairedCharacter') + SoftWrapIndent: Symbol('SoftWrapIndent') +} diff --git a/src/text-editor-presenter.coffee b/src/text-editor-presenter.coffee index 
70c26a1a3..3aea57f29 100644 --- a/src/text-editor-presenter.coffee +++ b/src/text-editor-presenter.coffee @@ -336,9 +336,14 @@ class TextEditorPresenter @state.content.lines[line.id] = screenRow: row text: line.text - tokens: line.tokens - isOnlyWhitespace: line.isOnlyWhitespace() + openScopes: line.openScopes + tags: line.tags + specialTokens: line.specialTokens + firstNonWhitespaceIndex: line.firstNonWhitespaceIndex + firstTrailingWhitespaceIndex: line.firstTrailingWhitespaceIndex + invisibles: line.invisibles endOfLineInvisibles: line.endOfLineInvisibles + isOnlyWhitespace: line.isOnlyWhitespace() indentLevel: line.indentLevel tabLength: line.tabLength fold: line.fold @@ -1006,17 +1011,20 @@ class TextEditorPresenter top = targetRow * @lineHeight left = 0 column = 0 - for token in @model.tokenizedLineForScreenRow(targetRow).tokens - characterWidths = @getScopedCharacterWidths(token.scopes) + + iterator = @model.tokenizedLineForScreenRow(targetRow).getTokenIterator() + while iterator.next() + characterWidths = @getScopedCharacterWidths(iterator.getScopes()) valueIndex = 0 - while valueIndex < token.value.length - if token.hasPairedCharacter - char = token.value.substr(valueIndex, 2) + text = iterator.getText() + while valueIndex < text.length + if iterator.isPairedCharacter() + char = text charLength = 2 valueIndex += 2 else - char = token.value[valueIndex] + char = text[valueIndex] charLength = 1 valueIndex++ diff --git a/src/text-editor.coffee b/src/text-editor.coffee index d2bd77522..4489d82af 100644 --- a/src/text-editor.coffee +++ b/src/text-editor.coffee @@ -2457,9 +2457,8 @@ class TextEditor extends Model # Extended: Determine if the given row is entirely a comment isBufferRowCommented: (bufferRow) -> if match = @lineTextForBufferRow(bufferRow).match(/\S/) - scopeDescriptor = @tokenForBufferPosition([bufferRow, match.index]).scopes @commentScopeSelector ?= new TextMateScopeSelector('comment.*') - @commentScopeSelector.matches(scopeDescriptor) + @commentScopeSelector.matches(@scopeDescriptorForBufferPosition([bufferRow, match.index]).scopes) logCursorScope: -> scopeDescriptor = @getLastCursor().getScopeDescriptor() diff --git a/src/token-iterator.coffee b/src/token-iterator.coffee new file mode 100644 index 000000000..202b044ba --- /dev/null +++ b/src/token-iterator.coffee @@ -0,0 +1,83 @@ +{SoftTab, HardTab, PairedCharacter, SoftWrapIndent} = require './special-token-symbols' + +module.exports = +class TokenIterator + constructor: (line) -> + @reset(line) if line? + + reset: (@line) -> + @index = null + @bufferStart = @line.startBufferColumn + @bufferEnd = @bufferStart + @screenStart = 0 + @screenEnd = 0 + @scopes = @line.openScopes.map (id) -> atom.grammars.scopeForId(id) + @scopeStarts = @scopes.slice() + @scopeEnds = [] + this + + next: -> + {tags} = @line + + if @index? 
+ @index++ + @scopeEnds.length = 0 + @scopeStarts.length = 0 + @bufferStart = @bufferEnd + @screenStart = @screenEnd + else + @index = 0 + + while @index < tags.length + tag = tags[@index] + if tag < 0 + if tag % 2 is 0 + @scopeEnds.push(atom.grammars.scopeForId(tag + 1)) + @scopes.pop() + else + scope = atom.grammars.scopeForId(tag) + @scopeStarts.push(scope) + @scopes.push(scope) + @index++ + else + if @isHardTab() + @screenEnd = @screenStart + tag + @bufferEnd = @bufferStart + 1 + else if @isSoftWrapIndentation() + @screenEnd = @screenStart + tag + @bufferEnd = @bufferStart + 0 + else + @screenEnd = @screenStart + tag + @bufferEnd = @bufferStart + tag + return true + + false + + getBufferStart: -> @bufferStart + getBufferEnd: -> @bufferEnd + + getScreenStart: -> @screenStart + getScreenEnd: -> @screenEnd + + getScopeStarts: -> @scopeStarts + getScopeEnds: -> @scopeEnds + + getScopes: -> @scopes + + getText: -> + @line.text.substring(@screenStart, @screenEnd) + + isSoftTab: -> + @line.specialTokens[@index] is SoftTab + + isHardTab: -> + @line.specialTokens[@index] is HardTab + + isSoftWrapIndentation: -> + @line.specialTokens[@index] is SoftWrapIndent + + isPairedCharacter: -> + @line.specialTokens[@index] is PairedCharacter + + isAtomic: -> + @isSoftTab() or @isHardTab() or @isSoftWrapIndentation() or @isPairedCharacter() diff --git a/src/token.coffee b/src/token.coffee index 8aa4a8706..60e8194f8 100644 --- a/src/token.coffee +++ b/src/token.coffee @@ -1,13 +1,8 @@ _ = require 'underscore-plus' -textUtils = require './text-utils' -WhitespaceRegexesByTabLength = {} -EscapeRegex = /[&"'<>]/g StartDotRegex = /^\.?/ WhitespaceRegex = /\S/ -MaxTokenLength = 20000 - # Represents a single unit of text as selected by a grammar. module.exports = class Token @@ -20,10 +15,14 @@ class Token firstTrailingWhitespaceIndex: null hasInvisibleCharacters: false - constructor: ({@value, @scopes, @isAtomic, @bufferDelta, @isHardTab, @hasPairedCharacter, @isSoftWrapIndentation}) -> + constructor: (properties) -> + {@value, @scopes, @isAtomic, @isHardTab, @bufferDelta} = properties + {@hasInvisibleCharacters, @hasPairedCharacter, @isSoftWrapIndentation} = properties + @firstNonWhitespaceIndex = properties.firstNonWhitespaceIndex ? null + @firstTrailingWhitespaceIndex = properties.firstTrailingWhitespaceIndex ? null + @screenDelta = @value.length @bufferDelta ?= @screenDelta - @hasPairedCharacter ?= textUtils.hasPairedCharacter(@value) isEqual: (other) -> # TODO: scopes is deprecated. This is here for the sake of lang package tests @@ -32,126 +31,6 @@ class Token isBracket: -> /^meta\.brace\b/.test(_.last(@scopes)) - splitAt: (splitIndex) -> - leftToken = new Token(value: @value.substring(0, splitIndex), scopes: @scopes) - rightToken = new Token(value: @value.substring(splitIndex), scopes: @scopes) - - if @firstNonWhitespaceIndex? - leftToken.firstNonWhitespaceIndex = Math.min(splitIndex, @firstNonWhitespaceIndex) - leftToken.hasInvisibleCharacters = @hasInvisibleCharacters - - if @firstNonWhitespaceIndex > splitIndex - rightToken.firstNonWhitespaceIndex = @firstNonWhitespaceIndex - splitIndex - rightToken.hasInvisibleCharacters = @hasInvisibleCharacters - - if @firstTrailingWhitespaceIndex? 
- rightToken.firstTrailingWhitespaceIndex = Math.max(0, @firstTrailingWhitespaceIndex - splitIndex) - rightToken.hasInvisibleCharacters = @hasInvisibleCharacters - - if @firstTrailingWhitespaceIndex < splitIndex - leftToken.firstTrailingWhitespaceIndex = @firstTrailingWhitespaceIndex - leftToken.hasInvisibleCharacters = @hasInvisibleCharacters - - [leftToken, rightToken] - - whitespaceRegexForTabLength: (tabLength) -> - WhitespaceRegexesByTabLength[tabLength] ?= new RegExp("([ ]{#{tabLength}})|(\t)|([^\t]+)", "g") - - breakOutAtomicTokens: (tabLength, breakOutLeadingSoftTabs, startColumn) -> - if @hasPairedCharacter - outputTokens = [] - column = startColumn - - for token in @breakOutPairedCharacters() - if token.isAtomic - outputTokens.push(token) - else - outputTokens.push(token.breakOutAtomicTokens(tabLength, breakOutLeadingSoftTabs, column)...) - breakOutLeadingSoftTabs = token.isOnlyWhitespace() if breakOutLeadingSoftTabs - column += token.value.length - - outputTokens - else - return [this] if @isAtomic - - if breakOutLeadingSoftTabs - return [this] unless /^[ ]|\t/.test(@value) - else - return [this] unless /\t/.test(@value) - - outputTokens = [] - regex = @whitespaceRegexForTabLength(tabLength) - column = startColumn - while match = regex.exec(@value) - [fullMatch, softTab, hardTab] = match - token = null - if softTab and breakOutLeadingSoftTabs - token = @buildSoftTabToken(tabLength) - else if hardTab - breakOutLeadingSoftTabs = false - token = @buildHardTabToken(tabLength, column) - else - breakOutLeadingSoftTabs = false - value = match[0] - token = new Token({value, @scopes}) - column += token.value.length - outputTokens.push(token) - - outputTokens - - breakOutPairedCharacters: -> - outputTokens = [] - index = 0 - nonPairStart = 0 - - while index < @value.length - if textUtils.isPairedCharacter(@value, index) - if nonPairStart isnt index - outputTokens.push(new Token({value: @value[nonPairStart...index], @scopes})) - outputTokens.push(@buildPairedCharacterToken(@value, index)) - index += 2 - nonPairStart = index - else - index++ - - if nonPairStart isnt index - outputTokens.push(new Token({value: @value[nonPairStart...index], @scopes})) - - outputTokens - - buildPairedCharacterToken: (value, index) -> - new Token( - value: value[index..index + 1] - scopes: @scopes - isAtomic: true - hasPairedCharacter: true - ) - - buildHardTabToken: (tabLength, column) -> - @buildTabToken(tabLength, true, column) - - buildSoftTabToken: (tabLength) -> - @buildTabToken(tabLength, false, 0) - - buildTabToken: (tabLength, isHardTab, column=0) -> - tabStop = tabLength - (column % tabLength) - new Token( - value: _.multiplyString(" ", tabStop) - scopes: @scopes - bufferDelta: if isHardTab then 1 else tabStop - isAtomic: true - isHardTab: isHardTab - ) - - buildSoftWrapIndentationToken: (length) -> - new Token( - value: _.multiplyString(" ", length), - scopes: @scopes, - bufferDelta: 0, - isAtomic: true, - isSoftWrapIndentation: true - ) - isOnlyWhitespace: -> not WhitespaceRegex.test(@value) @@ -161,72 +40,6 @@ class Token scopeClasses = scope.split('.') _.isSubset(targetClasses, scopeClasses) - getValueAsHtml: ({hasIndentGuide}) -> - if @isHardTab - classes = 'hard-tab' - classes += ' leading-whitespace' if @hasLeadingWhitespace() - classes += ' trailing-whitespace' if @hasTrailingWhitespace() - classes += ' indent-guide' if hasIndentGuide - classes += ' invisible-character' if @hasInvisibleCharacters - html = "#{@escapeString(@value)}" - else - startIndex = 0 - endIndex = @value.length - - 
leadingHtml = '' - trailingHtml = '' - - if @hasLeadingWhitespace() - leadingWhitespace = @value.substring(0, @firstNonWhitespaceIndex) - - classes = 'leading-whitespace' - classes += ' indent-guide' if hasIndentGuide - classes += ' invisible-character' if @hasInvisibleCharacters - - leadingHtml = "#{leadingWhitespace}" - startIndex = @firstNonWhitespaceIndex - - if @hasTrailingWhitespace() - tokenIsOnlyWhitespace = @firstTrailingWhitespaceIndex is 0 - trailingWhitespace = @value.substring(@firstTrailingWhitespaceIndex) - - classes = 'trailing-whitespace' - classes += ' indent-guide' if hasIndentGuide and not @hasLeadingWhitespace() and tokenIsOnlyWhitespace - classes += ' invisible-character' if @hasInvisibleCharacters - - trailingHtml = "#{trailingWhitespace}" - - endIndex = @firstTrailingWhitespaceIndex - - html = leadingHtml - if @value.length > MaxTokenLength - while startIndex < endIndex - html += "" + @escapeString(@value, startIndex, startIndex + MaxTokenLength) + "" - startIndex += MaxTokenLength - else - html += @escapeString(@value, startIndex, endIndex) - - html += trailingHtml - html - - escapeString: (str, startIndex, endIndex) -> - strLength = str.length - - startIndex ?= 0 - endIndex ?= strLength - - str = str.slice(startIndex, endIndex) if startIndex > 0 or endIndex < strLength - str.replace(EscapeRegex, @escapeStringReplace) - - escapeStringReplace: (match) -> - switch match - when '&' then '&' - when '"' then '"' - when "'" then ''' - when '<' then '<' - when '>' then '>' - else match - hasLeadingWhitespace: -> @firstNonWhitespaceIndex? and @firstNonWhitespaceIndex > 0 diff --git a/src/tokenized-buffer.coffee b/src/tokenized-buffer.coffee index 6d8f0c018..60ebe16f0 100644 --- a/src/tokenized-buffer.coffee +++ b/src/tokenized-buffer.coffee @@ -1,9 +1,11 @@ _ = require 'underscore-plus' {CompositeDisposable, Emitter} = require 'event-kit' {Point, Range} = require 'text-buffer' +{ScopeSelector} = require 'first-mate' Serializable = require 'serializable' Model = require './model' TokenizedLine = require './tokenized-line' +TokenIterator = require './token-iterator' Token = require './token' ScopeDescriptor = require './scope-descriptor' Grim = require 'grim' @@ -25,6 +27,7 @@ class TokenizedBuffer extends Model constructor: ({@buffer, @tabLength, @ignoreInvisibles}) -> @emitter = new Emitter @disposables = new CompositeDisposable + @tokenIterator = new TokenIterator @disposables.add atom.grammars.onDidAddGrammar(@grammarAddedOrUpdated) @disposables.add atom.grammars.onDidUpdateGrammar(@grammarAddedOrUpdated) @@ -167,7 +170,7 @@ class TokenizedBuffer extends Model row = startRow loop previousStack = @stackForRow(row) - @tokenizedLines[row] = @buildTokenizedLineForRow(row, @stackForRow(row - 1)) + @tokenizedLines[row] = @buildTokenizedLineForRow(row, @stackForRow(row - 1), @openScopesForRow(row)) if --rowsRemaining is 0 filledRegion = false endRow = row @@ -227,7 +230,7 @@ class TokenizedBuffer extends Model @updateInvalidRows(start, end, delta) previousEndStack = @stackForRow(end) # used in spill detection below - newTokenizedLines = @buildTokenizedLinesForRows(start, end + delta, @stackForRow(start - 1)) + newTokenizedLines = @buildTokenizedLinesForRows(start, end + delta, @stackForRow(start - 1), @openScopesForRow(start)) _.spliceWithArray(@tokenizedLines, start, end - start + 1, newTokenizedLines) start = @retokenizeWhitespaceRowsIfIndentLevelChanged(start - 1, -1) @@ -248,7 +251,7 @@ class TokenizedBuffer extends Model line = @tokenizedLines[row] if 
line?.isOnlyWhitespace() and @indentLevelForRow(row) isnt line.indentLevel while line?.isOnlyWhitespace() - @tokenizedLines[row] = @buildTokenizedLineForRow(row, @stackForRow(row - 1)) + @tokenizedLines[row] = @buildTokenizedLineForRow(row, @stackForRow(row - 1), @openScopesForRow(row)) row += increment line = @tokenizedLines[row] @@ -290,16 +293,18 @@ class TokenizedBuffer extends Model @tokenizedLineForRow(row).isComment() and @tokenizedLineForRow(nextRow).isComment() - buildTokenizedLinesForRows: (startRow, endRow, startingStack) -> + buildTokenizedLinesForRows: (startRow, endRow, startingStack, startingopenScopes) -> ruleStack = startingStack + openScopes = startingopenScopes stopTokenizingAt = startRow + @chunkSize tokenizedLines = for row in [startRow..endRow] if (ruleStack or row is 0) and row < stopTokenizingAt - screenLine = @buildTokenizedLineForRow(row, ruleStack) - ruleStack = screenLine.ruleStack + tokenizedLine = @buildTokenizedLineForRow(row, ruleStack, openScopes) + ruleStack = tokenizedLine.ruleStack + openScopes = @scopesFromTags(openScopes, tokenizedLine.tags) else - screenLine = @buildPlaceholderTokenizedLineForRow(row) - screenLine + tokenizedLine = @buildPlaceholderTokenizedLineForRow(row, openScopes) + tokenizedLine if endRow >= stopTokenizingAt @invalidateRow(stopTokenizingAt) @@ -311,22 +316,23 @@ class TokenizedBuffer extends Model @buildPlaceholderTokenizedLineForRow(row) for row in [startRow..endRow] buildPlaceholderTokenizedLineForRow: (row) -> - line = @buffer.lineForRow(row) - tokens = [new Token(value: line, scopes: [@grammar.scopeName])] + openScopes = [@grammar.startIdForScope(@grammar.scopeName)] + text = @buffer.lineForRow(row) + tags = [text.length] tabLength = @getTabLength() indentLevel = @indentLevelForRow(row) lineEnding = @buffer.lineEndingForRow(row) - new TokenizedLine({tokens, tabLength, indentLevel, invisibles: @getInvisiblesToShow(), lineEnding}) + new TokenizedLine({openScopes, text, tags, tabLength, indentLevel, invisibles: @getInvisiblesToShow(), lineEnding, @tokenIterator}) - buildTokenizedLineForRow: (row, ruleStack) -> - @buildTokenizedLineForRowWithText(row, @buffer.lineForRow(row), ruleStack) + buildTokenizedLineForRow: (row, ruleStack, openScopes) -> + @buildTokenizedLineForRowWithText(row, @buffer.lineForRow(row), ruleStack, openScopes) - buildTokenizedLineForRowWithText: (row, line, ruleStack = @stackForRow(row - 1)) -> + buildTokenizedLineForRowWithText: (row, text, ruleStack = @stackForRow(row - 1), openScopes = @openScopesForRow(row)) -> lineEnding = @buffer.lineEndingForRow(row) tabLength = @getTabLength() indentLevel = @indentLevelForRow(row) - {tokens, ruleStack} = @grammar.tokenizeLine(line, ruleStack, row is 0) - new TokenizedLine({tokens, ruleStack, tabLength, lineEnding, indentLevel, invisibles: @getInvisiblesToShow()}) + {tags, ruleStack} = @grammar.tokenizeLine(text, ruleStack, row is 0, false) + new TokenizedLine({openScopes, text, tags, ruleStack, tabLength, lineEnding, indentLevel, invisibles: @getInvisiblesToShow(), @tokenIterator}) getInvisiblesToShow: -> if @configSettings.showInvisibles and not @ignoreInvisibles @@ -340,6 +346,25 @@ class TokenizedBuffer extends Model stackForRow: (bufferRow) -> @tokenizedLines[bufferRow]?.ruleStack + openScopesForRow: (bufferRow) -> + if bufferRow > 0 + precedingLine = @tokenizedLines[bufferRow - 1] + @scopesFromTags(precedingLine.openScopes, precedingLine.tags) + else + [] + + scopesFromTags: (startingScopes, tags) -> + scopes = startingScopes.slice() + for tag in tags when tag 
< 0 + if (tag % 2) is -1 + scopes.push(tag) + else + expectedScope = tag + 1 + poppedScope = scopes.pop() + unless poppedScope is expectedScope + throw new Error("Encountered an invalid scope end id. Popped #{poppedScope}, expected to pop #{expectedScope}.") + scopes + indentLevelForRow: (bufferRow) -> line = @buffer.lineForRow(bufferRow) indentLevel = 0 @@ -376,7 +401,20 @@ class TokenizedBuffer extends Model 0 scopeDescriptorForPosition: (position) -> - new ScopeDescriptor(scopes: @tokenForPosition(position).scopes) + {row, column} = Point.fromObject(position) + + iterator = @tokenizedLines[row].getTokenIterator() + while iterator.next() + if iterator.getScreenEnd() > column + scopes = iterator.getScopes() + break + + # rebuild scope of last token if we iterated off the end + unless scopes? + scopes = iterator.getScopes() + scopes.push(iterator.getScopeEnds().reverse()...) + + new ScopeDescriptor({scopes}) tokenForPosition: (position) -> {row, column} = Point.fromObject(position) @@ -388,85 +426,53 @@ class TokenizedBuffer extends Model new Point(row, column) bufferRangeForScopeAtPosition: (selector, position) -> + selector = new ScopeSelector(selector.replace(/^\./, '')) position = Point.fromObject(position) - tokenizedLine = @tokenizedLines[position.row] - startIndex = tokenizedLine.tokenIndexAtBufferColumn(position.column) - for index in [startIndex..0] - token = tokenizedLine.tokenAtIndex(index) - break unless token.matchesScopeSelector(selector) - firstToken = token + {openScopes, tags} = @tokenizedLines[position.row] + scopes = openScopes.map (tag) -> atom.grammars.scopeForId(tag) - for index in [startIndex...tokenizedLine.getTokenCount()] - token = tokenizedLine.tokenAtIndex(index) - break unless token.matchesScopeSelector(selector) - lastToken = token + startColumn = 0 + for tag, tokenIndex in tags + if tag < 0 + if tag % 2 is -1 + scopes.push(atom.grammars.scopeForId(tag)) + else + scopes.pop() + else + endColumn = startColumn + tag + if endColumn > position.column + break + else + startColumn = endColumn - return unless firstToken? and lastToken? 
+ return unless selector.matches(scopes) - startColumn = tokenizedLine.bufferColumnForToken(firstToken) - endColumn = tokenizedLine.bufferColumnForToken(lastToken) + lastToken.bufferDelta - new Range([position.row, startColumn], [position.row, endColumn]) + startScopes = scopes.slice() + for startTokenIndex in [(tokenIndex - 1)..0] by -1 + tag = tags[startTokenIndex] + if tag < 0 + if tag % 2 is -1 + startScopes.pop() + else + startScopes.push(atom.grammars.scopeForId(tag)) + else + break unless selector.matches(startScopes) + startColumn -= tag - iterateTokensInBufferRange: (bufferRange, iterator) -> - bufferRange = Range.fromObject(bufferRange) - {start, end} = bufferRange + endScopes = scopes.slice() + for endTokenIndex in [(tokenIndex + 1)...tags.length] by 1 + tag = tags[endTokenIndex] + if tag < 0 + if tag % 2 is -1 + endScopes.push(atom.grammars.scopeForId(tag)) + else + endScopes.pop() + else + break unless selector.matches(endScopes) + endColumn += tag - keepLooping = true - stop = -> keepLooping = false - - for bufferRow in [start.row..end.row] - bufferColumn = 0 - for token in @tokenizedLines[bufferRow].tokens - startOfToken = new Point(bufferRow, bufferColumn) - iterator(token, startOfToken, {stop}) if bufferRange.containsPoint(startOfToken) - return unless keepLooping - bufferColumn += token.bufferDelta - - backwardsIterateTokensInBufferRange: (bufferRange, iterator) -> - bufferRange = Range.fromObject(bufferRange) - {start, end} = bufferRange - - keepLooping = true - stop = -> keepLooping = false - - for bufferRow in [end.row..start.row] - bufferColumn = @buffer.lineLengthForRow(bufferRow) - for token in new Array(@tokenizedLines[bufferRow].tokens...).reverse() - bufferColumn -= token.bufferDelta - startOfToken = new Point(bufferRow, bufferColumn) - iterator(token, startOfToken, {stop}) if bufferRange.containsPoint(startOfToken) - return unless keepLooping - - findOpeningBracket: (startBufferPosition) -> - range = [[0,0], startBufferPosition] - position = null - depth = 0 - @backwardsIterateTokensInBufferRange range, (token, startPosition, {stop}) -> - if token.isBracket() - if token.value is '}' - depth++ - else if token.value is '{' - depth-- - if depth is 0 - position = startPosition - stop() - position - - findClosingBracket: (startBufferPosition) -> - range = [startBufferPosition, @buffer.getEndPosition()] - position = null - depth = 0 - @iterateTokensInBufferRange range, (token, startPosition, {stop}) -> - if token.isBracket() - if token.value is '{' - depth++ - else if token.value is '}' - depth-- - if depth is 0 - position = startPosition - stop() - position + new Range(new Point(position.row, startColumn), new Point(position.row, endColumn)) # Gets the row number of the last line. 
# diff --git a/src/tokenized-line.coffee b/src/tokenized-line.coffee index b81d972a0..45af81e57 100644 --- a/src/tokenized-line.coffee +++ b/src/tokenized-line.coffee @@ -1,10 +1,13 @@ _ = require 'underscore-plus' {isPairedCharacter} = require './text-utils' +Token = require './token' +{SoftTab, HardTab, PairedCharacter, SoftWrapIndent} = require './special-token-symbols' NonWhitespaceRegex = /\S/ LeadingWhitespaceRegex = /^\s*/ TrailingWhitespaceRegex = /\s*$/ RepeatedSpaceRegex = /[ ]/g +CommentScopeRegex = /(\b|\.)comment/ idCounter = 1 module.exports = @@ -14,32 +17,181 @@ class TokenizedLine firstNonWhitespaceIndex: 0 foldable: false - constructor: ({tokens, @lineEnding, @ruleStack, @startBufferColumn, @fold, @tabLength, @indentLevel, @invisibles}) -> - @startBufferColumn ?= 0 - @tokens = @breakOutAtomicTokens(tokens) - @text = @buildText() - @bufferDelta = @buildBufferDelta() - @softWrapIndentationTokens = @getSoftWrapIndentationTokens() - @softWrapIndentationDelta = @buildSoftWrapIndentationDelta() - + constructor: (properties) -> @id = idCounter++ - @markLeadingAndTrailingWhitespaceTokens() - if @invisibles - @substituteInvisibleCharacters() - @buildEndOfLineInvisibles() if @lineEnding? - buildText: -> - text = "" - text += token.value for token in @tokens - text + return unless properties? - buildBufferDelta: -> - delta = 0 - delta += token.bufferDelta for token in @tokens - delta + @specialTokens = {} + {@openScopes, @text, @tags, @lineEnding, @ruleStack, @tokenIterator} = properties + {@startBufferColumn, @fold, @tabLength, @indentLevel, @invisibles} = properties + + @startBufferColumn ?= 0 + @bufferDelta = @text.length + + @transformContent() + @buildEndOfLineInvisibles() if @invisibles? and @lineEnding? + + transformContent: -> + text = '' + bufferColumn = 0 + screenColumn = 0 + tokenIndex = 0 + tokenOffset = 0 + firstNonWhitespaceColumn = null + lastNonWhitespaceColumn = null + + while bufferColumn < @text.length + # advance to next token if we've iterated over its length + if tokenOffset is @tags[tokenIndex] + tokenIndex++ + tokenOffset = 0 + + # advance to next token tag + tokenIndex++ while @tags[tokenIndex] < 0 + + character = @text[bufferColumn] + + # split out unicode surrogate pairs + if isPairedCharacter(@text, bufferColumn) + prefix = tokenOffset + suffix = @tags[tokenIndex] - tokenOffset - 2 + splitTokens = [] + splitTokens.push(prefix) if prefix > 0 + splitTokens.push(2) + splitTokens.push(suffix) if suffix > 0 + + @tags.splice(tokenIndex, 1, splitTokens...) + + firstNonWhitespaceColumn ?= screenColumn + lastNonWhitespaceColumn = screenColumn + 1 + + text += @text.substr(bufferColumn, 2) + screenColumn += 2 + bufferColumn += 2 + + tokenIndex++ if prefix > 0 + @specialTokens[tokenIndex] = PairedCharacter + tokenIndex++ + tokenOffset = 0 + + # split out leading soft tabs + else if character is ' ' + if firstNonWhitespaceColumn? + text += ' ' + else + if (screenColumn + 1) % @tabLength is 0 + @specialTokens[tokenIndex] = SoftTab + suffix = @tags[tokenIndex] - @tabLength + @tags.splice(tokenIndex, 1, @tabLength) + @tags.splice(tokenIndex + 1, 0, suffix) if suffix > 0 + text += @invisibles?.space ? 
' ' + + screenColumn++ + bufferColumn++ + tokenOffset++ + + # expand hard tabs to the next tab stop + else if character is '\t' + tabLength = @tabLength - (screenColumn % @tabLength) + if @invisibles?.tab + text += @invisibles.tab + else + text += ' ' + text += ' ' for i in [1...tabLength] by 1 + + prefix = tokenOffset + suffix = @tags[tokenIndex] - tokenOffset - 1 + splitTokens = [] + splitTokens.push(prefix) if prefix > 0 + splitTokens.push(tabLength) + splitTokens.push(suffix) if suffix > 0 + + @tags.splice(tokenIndex, 1, splitTokens...) + + screenColumn += tabLength + bufferColumn++ + + tokenIndex++ if prefix > 0 + @specialTokens[tokenIndex] = HardTab + tokenIndex++ + tokenOffset = 0 + + # continue past any other character + else + firstNonWhitespaceColumn ?= screenColumn + lastNonWhitespaceColumn = screenColumn + + text += character + screenColumn++ + bufferColumn++ + tokenOffset++ + + @text = text + + @firstNonWhitespaceIndex = firstNonWhitespaceColumn + if lastNonWhitespaceColumn? + if lastNonWhitespaceColumn + 1 < @text.length + @firstTrailingWhitespaceIndex = lastNonWhitespaceColumn + 1 + if @invisibles?.space + @text = + @text.substring(0, @firstTrailingWhitespaceIndex) + + @text.substring(@firstTrailingWhitespaceIndex) + .replace(RepeatedSpaceRegex, @invisibles.space) + else + @lineIsWhitespaceOnly = true + @firstTrailingWhitespaceIndex = 0 + + getTokenIterator: -> @tokenIterator.reset(this) + + Object.defineProperty @prototype, 'tokens', get: -> + iterator = @getTokenIterator() + tokens = [] + + while iterator.next() + properties = { + value: iterator.getText() + scopes: iterator.getScopes().slice() + isAtomic: iterator.isAtomic() + isHardTab: iterator.isHardTab() + hasPairedCharacter: iterator.isPairedCharacter() + isSoftWrapIndentation: iterator.isSoftWrapIndentation() + } + + if iterator.isHardTab() + properties.bufferDelta = 1 + properties.hasInvisibleCharacters = true if @invisibles?.tab + + if iterator.getScreenStart() < @firstNonWhitespaceIndex + properties.firstNonWhitespaceIndex = + Math.min(@firstNonWhitespaceIndex, iterator.getScreenEnd()) - iterator.getScreenStart() + properties.hasInvisibleCharacters = true if @invisibles?.space + + if @lineEnding? and iterator.getScreenEnd() > @firstTrailingWhitespaceIndex + properties.firstTrailingWhitespaceIndex = + Math.max(0, @firstTrailingWhitespaceIndex - iterator.getScreenStart()) + properties.hasInvisibleCharacters = true if @invisibles?.space + + tokens.push(new Token(properties)) + + tokens copy: -> - new TokenizedLine({@tokens, @lineEnding, @ruleStack, @startBufferColumn, @fold}) + copy = new TokenizedLine + copy.tokenIterator = @tokenIterator + copy.indentLevel = @indentLevel + copy.openScopes = @openScopes + copy.text = @text + copy.tags = @tags + copy.specialTokens = @specialTokens + copy.firstNonWhitespaceIndex = @firstNonWhitespaceIndex + copy.firstTrailingWhitespaceIndex = @firstTrailingWhitespaceIndex + copy.lineEnding = @lineEnding + copy.endOfLineInvisibles = @endOfLineInvisibles + copy.ruleStack = @ruleStack + copy.startBufferColumn = @startBufferColumn + copy.fold = @fold + copy # This clips a given screen column to a valid column that's within the line # and not in the middle of any atomic tokens. @@ -52,49 +204,58 @@ class TokenizedLine # # Returns a {Number} representing the clipped column. 
clipScreenColumn: (column, options={}) -> - return 0 if @tokens.length is 0 + return 0 if @tags.length is 0 {clip} = options column = Math.min(column, @getMaxScreenColumn()) tokenStartColumn = 0 - for token in @tokens - break if tokenStartColumn + token.screenDelta > column - tokenStartColumn += token.screenDelta - if @isColumnInsideSoftWrapIndentation(tokenStartColumn) - @softWrapIndentationDelta - else if token.isAtomic and tokenStartColumn < column + iterator = @getTokenIterator() + while iterator.next() + break if iterator.getScreenEnd() > column + + if iterator.isSoftWrapIndentation() + iterator.next() while iterator.isSoftWrapIndentation() + iterator.getScreenStart() + else if iterator.isAtomic() and iterator.getScreenStart() < column if clip is 'forward' - tokenStartColumn + token.screenDelta + iterator.getScreenEnd() else if clip is 'backward' - tokenStartColumn + iterator.getScreenStart() else #'closest' - if column > tokenStartColumn + (token.screenDelta / 2) - tokenStartColumn + token.screenDelta + if column > ((iterator.getScreenStart() + iterator.getScreenEnd()) / 2) + iterator.getScreenEnd() else - tokenStartColumn + iterator.getScreenStart() else column - screenColumnForBufferColumn: (bufferColumn, options) -> - bufferColumn = bufferColumn - @startBufferColumn - screenColumn = 0 - currentBufferColumn = 0 - for token in @tokens - break if currentBufferColumn + token.bufferDelta > bufferColumn - screenColumn += token.screenDelta - currentBufferColumn += token.bufferDelta - @clipScreenColumn(screenColumn + (bufferColumn - currentBufferColumn)) + screenColumnForBufferColumn: (targetBufferColumn, options) -> + iterator = @getTokenIterator() + while iterator.next() + tokenBufferStart = iterator.getBufferStart() + tokenBufferEnd = iterator.getBufferEnd() + if tokenBufferStart <= targetBufferColumn < tokenBufferEnd + overshoot = targetBufferColumn - tokenBufferStart + return Math.min( + iterator.getScreenStart() + overshoot, + iterator.getScreenEnd() + ) + iterator.getScreenEnd() - bufferColumnForScreenColumn: (screenColumn, options) -> - bufferColumn = @startBufferColumn - currentScreenColumn = 0 - for token in @tokens - break if currentScreenColumn + token.screenDelta > screenColumn - bufferColumn += token.bufferDelta - currentScreenColumn += token.screenDelta - bufferColumn + (screenColumn - currentScreenColumn) + bufferColumnForScreenColumn: (targetScreenColumn) -> + iterator = @getTokenIterator() + while iterator.next() + tokenScreenStart = iterator.getScreenStart() + tokenScreenEnd = iterator.getScreenEnd() + if tokenScreenStart <= targetScreenColumn < tokenScreenEnd + overshoot = targetScreenColumn - tokenScreenStart + return Math.min( + iterator.getBufferStart() + overshoot, + iterator.getBufferEnd() + ) + iterator.getBufferEnd() getMaxScreenColumn: -> if @fold @@ -128,69 +289,128 @@ class TokenizedLine return maxColumn - buildSoftWrapIndentationTokens: (token, hangingIndent) -> - totalIndentSpaces = (@indentLevel * @tabLength) + hangingIndent - indentTokens = [] - while totalIndentSpaces > 0 - tokenLength = Math.min(@tabLength, totalIndentSpaces) - indentToken = token.buildSoftWrapIndentationToken(tokenLength) - indentTokens.push(indentToken) - totalIndentSpaces -= tokenLength - - indentTokens - softWrapAt: (column, hangingIndent) -> - return [new TokenizedLine([], '', [0, 0], [0, 0]), this] if column is 0 + return [null, this] if column is 0 - rightTokens = new Array(@tokens...) 
- leftTokens = [] - leftScreenColumn = 0 + leftText = @text.substring(0, column) + rightText = @text.substring(column) - while leftScreenColumn < column - if leftScreenColumn + rightTokens[0].screenDelta > column - rightTokens[0..0] = rightTokens[0].splitAt(column - leftScreenColumn) - nextToken = rightTokens.shift() - leftScreenColumn += nextToken.screenDelta - leftTokens.push nextToken + leftTags = [] + rightTags = [] - indentationTokens = @buildSoftWrapIndentationTokens(leftTokens[0], hangingIndent) + leftSpecialTokens = {} + rightSpecialTokens = {} + + rightOpenScopes = @openScopes.slice() + + screenColumn = 0 + + for tag, index in @tags + # tag represents a token + if tag >= 0 + # token ends before the soft wrap column + if screenColumn + tag <= column + if specialToken = @specialTokens[index] + leftSpecialTokens[index] = specialToken + leftTags.push(tag) + screenColumn += tag + + # token starts before and ends after the split column + else if screenColumn <= column + leftSuffix = column - screenColumn + rightPrefix = screenColumn + tag - column + + leftTags.push(leftSuffix) if leftSuffix > 0 + + softWrapIndent = @indentLevel * @tabLength + (hangingIndent ? 0) + for i in [0...softWrapIndent] by 1 + rightText = ' ' + rightText + remainingSoftWrapIndent = softWrapIndent + while remainingSoftWrapIndent > 0 + indentToken = Math.min(remainingSoftWrapIndent, @tabLength) + rightSpecialTokens[rightTags.length] = SoftWrapIndent + rightTags.push(indentToken) + remainingSoftWrapIndent -= indentToken + + rightTags.push(rightPrefix) if rightPrefix > 0 + + screenColumn += tag + + # token is after split column + else + if specialToken = @specialTokens[index] + rightSpecialTokens[rightTags.length] = specialToken + rightTags.push(tag) + + # tag represents the start or end of a scop + else if (tag % 2) is -1 + if screenColumn < column + leftTags.push(tag) + rightOpenScopes.push(tag) + else + rightTags.push(tag) + else + if screenColumn < column + leftTags.push(tag) + rightOpenScopes.pop() + else + rightTags.push(tag) + + splitBufferColumn = @bufferColumnForScreenColumn(column) + + leftFragment = new TokenizedLine + leftFragment.tokenIterator = @tokenIterator + leftFragment.openScopes = @openScopes + leftFragment.text = leftText + leftFragment.tags = leftTags + leftFragment.specialTokens = leftSpecialTokens + leftFragment.startBufferColumn = @startBufferColumn + leftFragment.bufferDelta = splitBufferColumn - @startBufferColumn + leftFragment.ruleStack = @ruleStack + leftFragment.invisibles = @invisibles + leftFragment.lineEnding = null + leftFragment.indentLevel = @indentLevel + leftFragment.tabLength = @tabLength + leftFragment.firstNonWhitespaceIndex = Math.min(column, @firstNonWhitespaceIndex) + leftFragment.firstTrailingWhitespaceIndex = Math.min(column, @firstTrailingWhitespaceIndex) + + rightFragment = new TokenizedLine + rightFragment.tokenIterator = @tokenIterator + rightFragment.openScopes = rightOpenScopes + rightFragment.text = rightText + rightFragment.tags = rightTags + rightFragment.specialTokens = rightSpecialTokens + rightFragment.startBufferColumn = splitBufferColumn + rightFragment.bufferDelta = @bufferDelta - splitBufferColumn + rightFragment.ruleStack = @ruleStack + rightFragment.invisibles = @invisibles + rightFragment.lineEnding = @lineEnding + rightFragment.indentLevel = @indentLevel + rightFragment.tabLength = @tabLength + rightFragment.endOfLineInvisibles = @endOfLineInvisibles + rightFragment.firstNonWhitespaceIndex = Math.max(softWrapIndent, @firstNonWhitespaceIndex - column + 
+    rightFragment.firstTrailingWhitespaceIndex = Math.max(softWrapIndent, @firstTrailingWhitespaceIndex - column + softWrapIndent)
 
-    leftFragment = new TokenizedLine(
-      tokens: leftTokens
-      startBufferColumn: @startBufferColumn
-      ruleStack: @ruleStack
-      invisibles: @invisibles
-      lineEnding: null,
-      indentLevel: @indentLevel,
-      tabLength: @tabLength
-    )
-    rightFragment = new TokenizedLine(
-      tokens: indentationTokens.concat(rightTokens)
-      startBufferColumn: @bufferColumnForScreenColumn(column)
-      ruleStack: @ruleStack
-      invisibles: @invisibles
-      lineEnding: @lineEnding,
-      indentLevel: @indentLevel,
-      tabLength: @tabLength
-    )
 
     [leftFragment, rightFragment]
 
   isSoftWrapped: ->
     @lineEnding is null
 
-  isColumnInsideSoftWrapIndentation: (column) ->
-    return false if @softWrapIndentationTokens.length is 0
+  isColumnInsideSoftWrapIndentation: (targetColumn) ->
+    targetColumn < @getSoftWrapIndentationDelta()
 
-    column < @softWrapIndentationDelta
-
-  getSoftWrapIndentationTokens: ->
-    _.select(@tokens, (token) -> token.isSoftWrapIndentation)
-
-  buildSoftWrapIndentationDelta: ->
-    _.reduce @softWrapIndentationTokens, ((acc, token) -> acc + token.screenDelta), 0
+  getSoftWrapIndentationDelta: ->
+    delta = 0
+    for tag, index in @tags
+      if tag >= 0
+        if @specialTokens[index] is SoftWrapIndent
+          delta += tag
+        else
+          break
+    delta
 
   hasOnlySoftWrapIndentation: ->
-    @tokens.length is @softWrapIndentationTokens.length
+    @getSoftWrapIndentationDelta() is @text.length
 
   tokenAtBufferColumn: (bufferColumn) ->
     @tokens[@tokenIndexAtBufferColumn(bufferColumn)]
 
@@ -210,58 +430,6 @@ class TokenizedLine
       delta = nextDelta
     delta
 
-  breakOutAtomicTokens: (inputTokens) ->
-    outputTokens = []
-    breakOutLeadingSoftTabs = true
-    column = @startBufferColumn
-    for token in inputTokens
-      newTokens = token.breakOutAtomicTokens(@tabLength, breakOutLeadingSoftTabs, column)
-      column += newToken.value.length for newToken in newTokens
-      outputTokens.push(newTokens...)
-      breakOutLeadingSoftTabs = token.isOnlyWhitespace() if breakOutLeadingSoftTabs
-    outputTokens
-
-  markLeadingAndTrailingWhitespaceTokens: ->
-    @firstNonWhitespaceIndex = @text.search(NonWhitespaceRegex)
-    if @firstNonWhitespaceIndex > 0 and isPairedCharacter(@text, @firstNonWhitespaceIndex - 1)
-      @firstNonWhitespaceIndex--
-    firstTrailingWhitespaceIndex = @text.search(TrailingWhitespaceRegex)
-    @lineIsWhitespaceOnly = firstTrailingWhitespaceIndex is 0
-    index = 0
-    for token in @tokens
-      if index < @firstNonWhitespaceIndex
-        token.firstNonWhitespaceIndex = Math.min(index + token.value.length, @firstNonWhitespaceIndex - index)
-      # Only the *last* segment of a soft-wrapped line can have trailing whitespace
-      if @lineEnding? and (index + token.value.length > firstTrailingWhitespaceIndex)
-        token.firstTrailingWhitespaceIndex = Math.max(0, firstTrailingWhitespaceIndex - index)
-      index += token.value.length
-    return
-
-  substituteInvisibleCharacters: ->
-    invisibles = @invisibles
-    changedText = false
-
-    for token, i in @tokens
-      if token.isHardTab
-        if invisibles.tab
-          token.value = invisibles.tab + token.value.substring(invisibles.tab.length)
-          token.hasInvisibleCharacters = true
-          changedText = true
-      else
-        if invisibles.space
-          if token.hasLeadingWhitespace() and not token.isSoftWrapIndentation
-            token.value = token.value.replace LeadingWhitespaceRegex, (leadingWhitespace) ->
-              leadingWhitespace.replace RepeatedSpaceRegex, invisibles.space
-            token.hasInvisibleCharacters = true
-            changedText = true
-          if token.hasTrailingWhitespace()
-            token.value = token.value.replace TrailingWhitespaceRegex, (leadingWhitespace) ->
-              leadingWhitespace.replace RepeatedSpaceRegex, invisibles.space
-            token.hasInvisibleCharacters = true
-            changedText = true
-
-    @text = @buildText() if changedText
-
   buildEndOfLineInvisibles: ->
     @endOfLineInvisibles = []
     {cr, eol} = @invisibles
@@ -274,11 +442,13 @@ class TokenizedLine
     @endOfLineInvisibles.push(eol) if eol
 
   isComment: ->
-    for token in @tokens
-      continue if token.scopes.length is 1
-      continue if token.isOnlyWhitespace()
-      for scope in token.scopes
-        return true if _.contains(scope.split('.'), 'comment')
+    iterator = @getTokenIterator()
+    while iterator.next()
+      scopes = iterator.getScopes()
+      continue if scopes.length is 1
+      continue unless NonWhitespaceRegex.test(iterator.getText())
+      for scope in scopes
+        return true if CommentScopeRegex.test(scope)
       break
     false
 
@@ -289,42 +459,6 @@ class TokenizedLine
     @tokens[index]
 
   getTokenCount: ->
-    @tokens.length
-
-  bufferColumnForToken: (targetToken) ->
-    column = 0
-    for token in @tokens
-      return column if token is targetToken
-      column += token.bufferDelta
-
-  getScopeTree: ->
-    return @scopeTree if @scopeTree?
-
-    scopeStack = []
-    for token in @tokens
-      @updateScopeStack(scopeStack, token.scopes)
-      _.last(scopeStack).children.push(token)
-
-    @scopeTree = scopeStack[0]
-    @updateScopeStack(scopeStack, [])
-    @scopeTree
-
-  updateScopeStack: (scopeStack, desiredScopeDescriptor) ->
-    # Find a common prefix
-    for scope, i in desiredScopeDescriptor
-      break unless scopeStack[i]?.scope is desiredScopeDescriptor[i]
-
-    # Pop scopeDescriptor until we're at the common prefx
-    until scopeStack.length is i
-      poppedScope = scopeStack.pop()
-      _.last(scopeStack)?.children.push(poppedScope)
-
-    # Push onto common prefix until scopeStack equals desiredScopeDescriptor
-    for j in [i...desiredScopeDescriptor.length]
-      scopeStack.push(new Scope(desiredScopeDescriptor[j]))
-
-    return
-
-class Scope
-  constructor: (@scope) ->
-    @children = []
+    count = 0
+    count++ for tag in @tags when tag >= 0
+    count
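(Illustration only; not part of any patch in this series.) The soft-wrap split logic rewritten above walks a compact tags array instead of an array of Token objects. Inferred from the diff itself: a non-negative tag is a token length, an odd negative tag opens a scope and an even negative tag closes one (that is what the branch on (tag % 2) suggests), and specialTokens marks entries that are soft tabs, hard tabs, or soft-wrap indentation. A minimal CoffeeScript sketch of that encoding, with made-up tag values:

    # 'var x' as <scope -1>'var'</scope -2> ' ' <scope -3>'x'</scope -4>
    line =
      text: 'var x'
      tags: [-1, 3, -2, 1, -3, 1, -4]
      specialTokens: {}

    # Mirrors the new getTokenCount above: only non-negative tags are tokens.
    tokenCount = (tags) ->
      count = 0
      count++ for tag in tags when tag >= 0
      count

    console.log tokenCount(line.tags)   # => 3
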
From b4444df442790cc14f8d0e211e2402bc0fb0d19f Mon Sep 17 00:00:00 2001
From: Nathan Sobo
Date: Thu, 21 May 2015 01:13:33 +0200
Subject: [PATCH 05/12] Minimize substring calls and concatenation in
 transformContent

---
 src/tokenized-line.coffee | 31 ++++++++++++++++++++++++++-----
 1 file changed, 26 insertions(+), 5 deletions(-)

diff --git a/src/tokenized-line.coffee b/src/tokenized-line.coffee
index 45af81e57..8b4c40ff1 100644
--- a/src/tokenized-line.coffee
+++ b/src/tokenized-line.coffee
@@ -41,6 +41,9 @@ class TokenizedLine
     firstNonWhitespaceColumn = null
     lastNonWhitespaceColumn = null
 
+    substringStart = 0
+    substringEnd = 0
+
     while bufferColumn < @text.length
       # advance to next token if we've iterated over its length
       if tokenOffset is @tags[tokenIndex]
@@ -66,7 +69,7 @@ class TokenizedLine
         firstNonWhitespaceColumn ?= screenColumn
         lastNonWhitespaceColumn = screenColumn + 1
 
-        text += @text.substr(bufferColumn, 2)
+        substringEnd += 2
         screenColumn += 2
         bufferColumn += 2
 
@@ -78,14 +81,21 @@ class TokenizedLine
 
       # split out leading soft tabs
       else if character is ' '
         if firstNonWhitespaceColumn?
-          text += ' '
+          substringEnd += 1
         else
           if (screenColumn + 1) % @tabLength is 0
             @specialTokens[tokenIndex] = SoftTab
             suffix = @tags[tokenIndex] - @tabLength
             @tags.splice(tokenIndex, 1, @tabLength)
             @tags.splice(tokenIndex + 1, 0, suffix) if suffix > 0
-          text += @invisibles?.space ? ' '
+
+          if @invisibles?.space
+            text += @text.substring(substringStart, substringEnd) if substringEnd > substringStart
+            substringStart = substringEnd
+            text += @invisibles.space
+            substringStart += 1
+
+          substringEnd += 1
 
         screenColumn++
         bufferColumn++
@@ -93,6 +103,9 @@ class TokenizedLine
 
       # expand hard tabs to the next tab stop
       else if character is '\t'
+        text += @text.substring(substringStart, substringEnd) if substringEnd > substringStart
+        substringStart = substringEnd
+
         tabLength = @tabLength - (screenColumn % @tabLength)
         if @invisibles?.tab
           text += @invisibles.tab
@@ -100,6 +113,9 @@ class TokenizedLine
           text += ' '
         text += ' ' for i in [1...tabLength] by 1
+        substringStart += 1
+        substringEnd += 1
+
         prefix = tokenOffset
         suffix = @tags[tokenIndex] - tokenOffset - 1
         splitTokens = []
@@ -122,12 +138,17 @@ class TokenizedLine
         firstNonWhitespaceColumn ?= screenColumn
         lastNonWhitespaceColumn = screenColumn
 
-        text += character
+        substringEnd += 1
         screenColumn++
         bufferColumn++
         tokenOffset++
 
-    @text = text
+    if substringEnd > substringStart
+      unless substringStart is 0 and substringEnd is @text.length
+        text += @text.substring(substringStart, substringEnd)
+        @text = text
+    else
+      @text = text
 
     @firstNonWhitespaceIndex = firstNonWhitespaceColumn
     if lastNonWhitespaceColumn?
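(Illustration only; not part of any patch.) Patch 05 above batches output into substring ranges: untouched runs of the source line are appended with a single substring call only when a replacement forces a flush, instead of concatenating one character at a time, and a line that needed no changes is left untouched. A standalone sketch of the same pattern, using a hypothetical expandTabs helper:

    # Hypothetical helper showing the flush-on-divergence idea from the patch:
    # plain characters accumulate as an index range, and text is only copied
    # when a tab actually has to be replaced.
    expandTabs = (input, tabString = '  ') ->
      output = ''
      runStart = 0
      for i in [0...input.length] by 1
        if input[i] is '\t'
          output += input.substring(runStart, i) if i > runStart
          output += tabString
          runStart = i + 1
      if runStart is 0
        input                                  # no tabs: reuse the original string
      else
        output + input.substring(runStart)

    console.log expandTabs('a\tb')   # => 'a  b'
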
From 321d310e8a1650397d272ba84d076cd12c201a7a Mon Sep 17 00:00:00 2001
From: Nathan Sobo
Date: Thu, 21 May 2015 02:22:22 +0200
Subject: [PATCH 06/12] Avoid more allocations in transformContent

---
 src/tokenized-line.coffee | 54 +++++++++++++++++++++++++++------------
 1 file changed, 37 insertions(+), 17 deletions(-)

diff --git a/src/tokenized-line.coffee b/src/tokenized-line.coffee
index 8b4c40ff1..b7812b1f6 100644
--- a/src/tokenized-line.coffee
+++ b/src/tokenized-line.coffee
@@ -8,8 +8,26 @@ LeadingWhitespaceRegex = /^\s*/
 TrailingWhitespaceRegex = /\s*$/
 RepeatedSpaceRegex = /[ ]/g
 CommentScopeRegex = /(\b|\.)comment/
+TabCharCode = 9
+SpaceCharCode = 32
+SpaceString = ' '
+TabStringsByLength = {
+  1: ' '
+  2: '  '
+  3: '   '
+  4: '    '
+}
+
 idCounter = 1
 
+getTabString = (length) ->
+  TabStringsByLength[length] ?= buildTabString(length)
+
+buildTabString = (length) ->
+  string = SpaceString
+  string += SpaceString for i in [1...length] by 1
+  string
+
 module.exports =
 class TokenizedLine
   endOfLineInvisibles: null
@@ -53,18 +71,18 @@ class TokenizedLine
       # advance to next token tag
       tokenIndex++ while @tags[tokenIndex] < 0
 
-      character = @text[bufferColumn]
+      charCode = @text.charCodeAt(bufferColumn)
 
      # split out unicode surrogate pairs
      if isPairedCharacter(@text, bufferColumn)
        prefix = tokenOffset
        suffix = @tags[tokenIndex] - tokenOffset - 2
-        splitTokens = []
-        splitTokens.push(prefix) if prefix > 0
-        splitTokens.push(2)
-        splitTokens.push(suffix) if suffix > 0
-        @tags.splice(tokenIndex, 1, splitTokens...)
+        i = tokenIndex
+        @tags.splice(i, 1)
+        @tags.splice(i++, 0, prefix) if prefix > 0
+        @tags.splice(i++, 0, 2)
+        @tags.splice(i, 0, suffix) if suffix > 0
 
         firstNonWhitespaceColumn ?= screenColumn
         lastNonWhitespaceColumn = screenColumn + 1
@@ -79,7 +97,7 @@ class TokenizedLine
         tokenOffset = 0
 
       # split out leading soft tabs
-      else if character is ' '
+      else if charCode is SpaceCharCode
         if firstNonWhitespaceColumn?
           substringEnd += 1
         else
@@ -90,7 +108,8 @@ class TokenizedLine
           @tags.splice(tokenIndex + 1, 0, suffix) if suffix > 0
 
           if @invisibles?.space
-            text += @text.substring(substringStart, substringEnd) if substringEnd > substringStart
+            if substringEnd > substringStart
+              text += @text.substring(substringStart, substringEnd)
             substringStart = substringEnd
             text += @invisibles.space
             substringStart += 1
@@ -102,28 +121,29 @@ class TokenizedLine
         tokenOffset++
 
       # expand hard tabs to the next tab stop
-      else if character is '\t'
-        text += @text.substring(substringStart, substringEnd) if substringEnd > substringStart
+      else if charCode is TabCharCode
+        if substringEnd > substringStart
+          text += @text.substring(substringStart, substringEnd)
         substringStart = substringEnd
 
         tabLength = @tabLength - (screenColumn % @tabLength)
         if @invisibles?.tab
           text += @invisibles.tab
+          text += getTabString(tabLength - 1) if tabLength > 1
         else
-          text += ' '
-        text += ' ' for i in [1...tabLength] by 1
+          text += getTabString(tabLength)
 
         substringStart += 1
         substringEnd += 1
 
         prefix = tokenOffset
         suffix = @tags[tokenIndex] - tokenOffset - 1
-        splitTokens = []
-        splitTokens.push(prefix) if prefix > 0
-        splitTokens.push(tabLength)
-        splitTokens.push(suffix) if suffix > 0
-        @tags.splice(tokenIndex, 1, splitTokens...)
+        i = tokenIndex
+        @tags.splice(i, 1)
+        @tags.splice(i++, 0, prefix) if prefix > 0
+        @tags.splice(i++, 0, tabLength)
+        @tags.splice(i, 0, suffix) if suffix > 0
 
         screenColumn += tabLength
         bufferColumn++
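(Illustration only; not part of any patch.) Patch 06 above cuts per-character allocations in two ways: it compares character codes instead of allocating one-character strings, and it reuses cached whitespace strings instead of rebuilding them in a loop. A small sketch of both ideas with hypothetical helpers:

    SPACE = 32
    TAB = 9

    # Cache pad strings by length, much as the patch does with TabStringsByLength.
    padCache = {}
    padString = (length) ->
      padCache[length] ?= Array(length + 1).join(' ')

    # Measure leading indentation via charCodeAt rather than slicing strings.
    countIndent = (line, tabLength = 2) ->
      width = 0
      for i in [0...line.length] by 1
        code = line.charCodeAt(i)
        if code is SPACE
          width += 1
        else if code is TAB
          width += tabLength - (width % tabLength)
        else
          break
      width

    console.log padString(4).length         # => 4
    console.log countIndent('\t  foo', 4)   # => 6
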
From 95806dba46b7c2191f6056b259d80cbc2ddb35d2 Mon Sep 17 00:00:00 2001
From: Nathan Sobo
Date: Thu, 21 May 2015 19:28:34 +0200
Subject: [PATCH 07/12] :arrow_up: first-mate

---
 package.json | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/package.json b/package.json
index 70526eb98..c97cdf441 100644
--- a/package.json
+++ b/package.json
@@ -32,7 +32,7 @@
     "delegato": "^1",
     "emissary": "^1.3.3",
     "event-kit": "^1.2.0",
-    "first-mate": "^4.1.4",
+    "first-mate": "^4.1.5",
     "fs-plus": "^2.8.0",
     "fstream": "0.1.24",
     "fuzzaldrin": "^2.1",

From 865015e47dce688b45e84fec4b5ca4d9e1a305f6 Mon Sep 17 00:00:00 2001
From: Nathan Sobo
Date: Thu, 21 May 2015 19:56:24 +0200
Subject: [PATCH 08/12] Correctly compute bufferDelta for last soft wrap line
 segment

Fixes #6885
---
 spec/display-buffer-spec.coffee | 3 +++
 src/tokenized-line.coffee       | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)

diff --git a/spec/display-buffer-spec.coffee b/spec/display-buffer-spec.coffee
index 6389f8105..be89db8ac 100644
--- a/spec/display-buffer-spec.coffee
+++ b/spec/display-buffer-spec.coffee
@@ -106,8 +106,11 @@ describe "DisplayBuffer", ->
         buffer.setTextInRange([[0, 0], [1, 0]], 'abcdefghijklmnopqrstuvwxyz\n')
         displayBuffer.setEditorWidthInChars(10)
         expect(displayBuffer.tokenizedLineForScreenRow(0).text).toBe 'abcdefghij'
+        expect(displayBuffer.tokenizedLineForScreenRow(0).bufferDelta).toBe 'abcdefghij'.length
         expect(displayBuffer.tokenizedLineForScreenRow(1).text).toBe 'klmnopqrst'
+        expect(displayBuffer.tokenizedLineForScreenRow(1).bufferDelta).toBe 'klmnopqrst'.length
         expect(displayBuffer.tokenizedLineForScreenRow(2).text).toBe 'uvwxyz'
+        expect(displayBuffer.tokenizedLineForScreenRow(2).bufferDelta).toBe 'uvwxyz'.length
 
       describe "when there is a whitespace character at the max length boundary", ->
         it "wraps the line at the first non-whitespace character following the boundary", ->
diff --git a/src/tokenized-line.coffee b/src/tokenized-line.coffee
index b7812b1f6..6761eecad 100644
--- a/src/tokenized-line.coffee
+++ b/src/tokenized-line.coffee
@@ -422,7 +422,7 @@ class TokenizedLine
     rightFragment.tags = rightTags
     rightFragment.specialTokens = rightSpecialTokens
     rightFragment.startBufferColumn = splitBufferColumn
-    rightFragment.bufferDelta = @bufferDelta - splitBufferColumn
+    rightFragment.bufferDelta = @startBufferColumn + @bufferDelta - splitBufferColumn
     rightFragment.ruleStack = @ruleStack
     rightFragment.invisibles = @invisibles
     rightFragment.lineEnding = @lineEnding
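(Worked example, not from the patch.) To see why the one-line tokenized-line.coffee change above matters, take the new display-buffer spec it adds: 'abcdefghijklmnopqrstuvwxyz' wrapped at 10 columns. The middle segment has startBufferColumn 10 and bufferDelta 16 ('klmnopqrstuvwxyz'). Splitting that segment again at screen column 10 gives splitBufferColumn 20, an absolute buffer column, so the last segment's length must be startBufferColumn + bufferDelta - splitBufferColumn = 10 + 16 - 20 = 6, matching 'uvwxyz'. The old expression, bufferDelta - splitBufferColumn, only gave the right answer when startBufferColumn was 0, i.e. for the first soft-wrap segment.
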
From ba6e76947302983a0a7ee94db206e073cb90d1e0 Mon Sep 17 00:00:00 2001
From: Ben Ogle
Date: Thu, 21 May 2015 11:00:06 -0700
Subject: [PATCH 09/12] :arrow_up: metrics@0.51.0

---
 package.json | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/package.json b/package.json
index 807a67939..6caa958bf 100644
--- a/package.json
+++ b/package.json
@@ -109,7 +109,7 @@
     "keybinding-resolver": "0.33.0",
     "link": "0.30.0",
     "markdown-preview": "0.149.0",
-    "metrics": "0.50.0",
+    "metrics": "0.51.0",
     "notifications": "0.50.0",
     "open-on-github": "0.37.0",
     "package-generator": "0.39.0",

From 1046c331b23be988022b7f114b3134f3ad832a9b Mon Sep 17 00:00:00 2001
From: Wliu <50Wliu@users.noreply.github.com>
Date: Thu, 21 May 2015 15:56:54 -0400
Subject: [PATCH 10/12] :arrow_up: language-xml@0.30.0

---
 package.json | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/package.json b/package.json
index 6caa958bf..b632f7bb7 100644
--- a/package.json
+++ b/package.json
@@ -157,7 +157,7 @@
     "language-text": "0.6.0",
     "language-todo": "0.21.0",
     "language-toml": "0.16.0",
-    "language-xml": "0.29.0",
+    "language-xml": "0.30.0",
     "language-yaml": "0.22.0"
   },
   "private": true,

From 6adbdee3599f322b86a2eea68abfb47303b12b5a Mon Sep 17 00:00:00 2001
From: Kevin Sawicki
Date: Thu, 21 May 2015 13:37:38 -0700
Subject: [PATCH 11/12] :arrow_up: settings-view@0.205

---
 package.json | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/package.json b/package.json
index b632f7bb7..0271a3ea9 100644
--- a/package.json
+++ b/package.json
@@ -114,7 +114,7 @@
     "open-on-github": "0.37.0",
     "package-generator": "0.39.0",
     "release-notes": "0.52.0",
-    "settings-view": "0.204.0",
+    "settings-view": "0.205.0",
     "snippets": "0.90.0",
     "spell-check": "0.58.0",
     "status-bar": "0.72.0",

From 135603f8dd3f468a86687d35dad70a347da6891e Mon Sep 17 00:00:00 2001
From: Kevin Sawicki
Date: Thu, 21 May 2015 15:21:06 -0700
Subject: [PATCH 12/12] Prepare 0.202

---
 package.json | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/package.json b/package.json
index 0271a3ea9..69a6167b7 100644
--- a/package.json
+++ b/package.json
@@ -1,7 +1,7 @@
 {
   "name": "atom",
   "productName": "Atom",
-  "version": "0.201.0",
+  "version": "0.202.0",
   "description": "A hackable text editor for the 21st Century.",
   "main": "./src/browser/main.js",
   "repository": {