Merge pull request #13898 from atom/mb-set-max-line-length-on-grammars

Use new maxLineLength parameter to GrammarRegistry
Max Brunsfeld
2017-03-01 16:59:59 -08:00
committed by GitHub
3 changed files with 2 additions and 6 deletions
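In short, this PR stops TokenizedBuffer from slicing long lines itself and instead passes a maxLineLength of 1000 to first-mate's GrammarRegistry, letting the grammar layer deal with very long lines. Below is a minimal usage sketch of the option from a first-mate consumer's point of view; the grammar path and the sample line are illustrative only, and the exact truncation behavior is inferred from the commit message rather than shown in this diff.

# Sketch only: how the two per-line limits are handed to first-mate.
FirstMate = require 'first-mate'

registry = new FirstMate.GrammarRegistry(maxTokensPerLine: 100, maxLineLength: 1000)
grammar = registry.loadGrammarSync('/path/to/javascript.cson')  # hypothetical grammar file

# A line well past the 1000-character limit.
longLine = 'x = 1; '.repeat(300)
{tags, ruleStack} = grammar.tokenizeLine(longLine, null, true, false)
# With maxLineLength set, first-mate itself is expected to stop assigning
# scopes past the limit, so callers no longer need to slice the text first.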


@@ -40,7 +40,7 @@
"devtron": "1.3.0",
"event-kit": "^2.1.0",
"find-parent-dir": "^0.3.0",
"first-mate": "6.1.0",
"first-mate": "6.3.0",
"fs-plus": "2.9.2",
"fstream": "0.1.24",
"fuzzaldrin": "^2.1",


@@ -15,7 +15,7 @@ PathSplitRegex = new RegExp("[/.]")
 module.exports =
 class GrammarRegistry extends FirstMate.GrammarRegistry
   constructor: ({@config}={}) ->
-    super(maxTokensPerLine: 100)
+    super(maxTokensPerLine: 100, maxLineLength: 1000)
   createToken: (value, scopes) -> new Token({value, scopes})
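Both per-line limits, maxTokensPerLine and maxLineLength, are now forwarded to first-mate in one place here, which is what allows the buffer-level cap in the next file to be removed.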


@@ -8,8 +8,6 @@ ScopeDescriptor = require './scope-descriptor'
 TokenizedBufferIterator = require './tokenized-buffer-iterator'
 NullGrammar = require './null-grammar'
-MAX_LINE_LENGTH_TO_TOKENIZE = 500
 module.exports =
 class TokenizedBuffer extends Model
   grammar: null
@@ -253,8 +251,6 @@ class TokenizedBuffer extends Model
   buildTokenizedLineForRowWithText: (row, text, ruleStack = @stackForRow(row - 1), openScopes = @openScopesForRow(row)) ->
     lineEnding = @buffer.lineEndingForRow(row)
-    if text.length > MAX_LINE_LENGTH_TO_TOKENIZE
-      text = text.slice(0, MAX_LINE_LENGTH_TO_TOKENIZE)
     {tags, ruleStack} = @grammar.tokenizeLine(text, ruleStack, row is 0, false)
     new TokenizedLine({openScopes, text, tags, ruleStack, lineEnding, @tokenIterator})
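Note the behavioral difference this removal implies: previously any line longer than 500 characters was sliced before tokenization, so the resulting TokenizedLine only carried the truncated text; now the full text reaches both tokenizeLine and TokenizedLine, and capping how far scopes are computed on very long lines is left to the maxLineLength of 1000 set on the GrammarRegistry above.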