mirror of
https://github.com/less/less.js.git
synced 2026-01-22 13:48:03 -05:00
pull the input processing out of the parser class and a few small improvements - the no-js option now applies to all JavaScript, including that within quotes, and the JavaScript node now returns the right index.
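For context, a minimal sketch of what the option change means for callers, assuming a render API along the lines of less.render(input, options, callback); the input string and file are made up, not taken from this commit:

var less = require('less');

// Hypothetical input: a backtick JavaScript expression assigned to a variable.
// With this commit, JavaScript inside quoted values is caught by the same check.
var input = '@a: `1 + 1`;\n.box { width: @a; }';

less.render(input, { javascriptEnabled: false }, function (err, output) {
    if (err) {
        // Expected message from the new check in jsEvalNode:
        // "You are using JavaScript, which has been disabled."
        console.error(err.message);
    } else {
        console.log(output.css);
    }
});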
@@ -24,7 +24,6 @@ var parseCopyProperties = [
     'compress',         // option - whether to compress
     'processImports',   // option - whether to process imports. if false then imports will not be imported
     'syncImport',       // option - whether to import synchronously
-    'javascriptEnabled',// option - whether JavaScript is enabled. if undefined, defaults to true
     'chunkInput',       // option - whether to chunk input. more performant but causes parse issues.
     'mime',             // browser only - mime type for sheet import
     'useFileCache',     // browser only - whether to use the per file session cache
@@ -77,7 +76,8 @@ var evalCopyProperties = [
     'cleancss',         // whether to compress with clean-css
     'sourceMap',        // whether to output a source map
     'importMultiple',   // whether we are currently importing multiple copies
-    'urlArgs'           // whether to add args into url tokens
+    'urlArgs',          // whether to add args into url tokens
+    'javascriptEnabled' // option - whether JavaScript is enabled. if undefined, defaults to true
     ];

 contexts.evalEnv = function(options, frames) {
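These *CopyProperties lists drive a simple copy of whitelisted options onto the parse/eval environments. A rough sketch of that pattern; the helper name and exact signature here are assumptions, not taken from this diff:

function copyFromOriginal(original, destination, propertiesToCopy) {
    if (!original) { return; }
    for (var i = 0; i < propertiesToCopy.length; i++) {
        var name = propertiesToCopy[i];
        if (original.hasOwnProperty(name)) {
            // only whitelisted options make it onto the environment
            destination[name] = original[name];
        }
    }
}

// e.g. something like: copyFromOriginal(options, this, evalCopyProperties);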
285 lib/less/parser/parser-input.js Normal file
@@ -0,0 +1,285 @@
var chunker = require('./chunker.js'),
    LessError = require('../less-error.js');

module.exports = function() {
    var input,       // LeSS input string
        j,           // current chunk index
        saveStack = [],              // holds state for backtracking
        furthest,    // furthest index the parser has gone to
        furthestPossibleErrorMessage,// if this is the furthest we got to, this is the probable cause
        chunks,      // chunkified input
        current,     // current chunk
        currentPos,  // index of current chunk, in `input`
        parserInput = {};

    parserInput.save = function() {
        currentPos = parserInput.i;
        saveStack.push( { current: current, i: parserInput.i, j: j });
    };
    parserInput.restore = function(possibleErrorMessage) {
        if (parserInput.i > furthest) {
            furthest = parserInput.i;
            furthestPossibleErrorMessage = possibleErrorMessage;
        }
        var state = saveStack.pop();
        current = state.current;
        currentPos = parserInput.i = state.i;
        j = state.j;
    };
    parserInput.forget = function() {
        saveStack.pop();
    };
    function sync() {
        if (parserInput.i > currentPos) {
            current = current.slice(parserInput.i - currentPos);
            currentPos = parserInput.i;
        }
    }
    parserInput.isWhitespace = function (offset) {
        var pos = parserInput.i + (offset || 0),
            code = input.charCodeAt(pos);
        return (code === CHARCODE_SPACE || code === CHARCODE_CR || code === CHARCODE_TAB || code === CHARCODE_LF);
    };
    //
    // Parse from a token, regexp or string, and move forward if match
    //
    parserInput.$ = function(tok) {
        var tokType = typeof tok,
            match, length;

        // Either match a single character in the input,
        // or match a regexp in the current chunk (`current`).
        //
        if (tokType === "string") {
            if (input.charAt(parserInput.i) !== tok) {
                return null;
            }
            skipWhitespace(1);
            return tok;
        }

        // regexp
        sync();
        if (! (match = tok.exec(current))) {
            return null;
        }

        length = match[0].length;

        // The match is confirmed, add the match length to `i`,
        // and consume any extra white-space characters (' ' || '\n')
        // which come after that. The reason for this is that LeSS's
        // grammar is mostly white-space insensitive.
        //
        skipWhitespace(length);

        if(typeof(match) === 'string') {
            return match;
        } else {
            return match.length === 1 ? match[0] : match;
        }
    };

    // Specialization of $(tok)
    parserInput.$re = function(tok) {
        if (parserInput.i > currentPos) {
            current = current.slice(parserInput.i - currentPos);
            currentPos = parserInput.i;
        }
        var m = tok.exec(current);
        if (!m) {
            return null;
        }

        skipWhitespace(m[0].length);
        if(typeof m === "string") {
            return m;
        }

        return m.length === 1 ? m[0] : m;
    };

    // Specialization of $(tok)
    parserInput.$char = function(tok) {
        if (input.charAt(parserInput.i) !== tok) {
            return null;
        }
        skipWhitespace(1);
        return tok;
    };

    var CHARCODE_SPACE = 32,
        CHARCODE_TAB = 9,
        CHARCODE_LF = 10,
        CHARCODE_CR = 13,
        CHARCODE_PLUS = 43,
        CHARCODE_COMMA = 44,
        CHARCODE_FORWARD_SLASH = 47,
        CHARCODE_9 = 57;

    parserInput.autoCommentAbsorb = true;
    parserInput.commentStore = [];
    parserInput.finished = false;

    var skipWhitespace = function(length) {
        var oldi = parserInput.i, oldj = j,
            curr = parserInput.i - currentPos,
            endIndex = parserInput.i + current.length - curr,
            mem = (parserInput.i += length),
            inp = input,
            c, nextChar, comment;

        for (; parserInput.i < endIndex; parserInput.i++) {
            c = inp.charCodeAt(parserInput.i);

            if (parserInput.autoCommentAbsorb && c === CHARCODE_FORWARD_SLASH) {
                nextChar = inp[parserInput.i + 1];
                if (nextChar === '/') {
                    comment = {index: parserInput.i, isLineComment: true};
                    var nextNewLine = inp.indexOf("\n", parserInput.i + 1);
                    if (nextNewLine < 0) {
                        nextNewLine = endIndex;
                    }
                    parserInput.i = nextNewLine;
                    comment.text = inp.substr(comment.index, parserInput.i - comment.index);
                    parserInput.commentStore.push(comment);
                    continue;
                } else if (nextChar === '*') {
                    var haystack = inp.substr(parserInput.i);
                    var comment_search_result = haystack.match(/^\/\*(?:[^*]|\*+[^\/*])*\*+\//);
                    if (comment_search_result) {
                        comment = {
                            index: parserInput.i,
                            text: comment_search_result[0],
                            isLineComment: false
                        };
                        parserInput.i += comment.text.length - 1;
                        parserInput.commentStore.push(comment);
                        continue;
                    }
                }
                break;
            }

            if ((c !== CHARCODE_SPACE) && (c !== CHARCODE_LF) && (c !== CHARCODE_TAB) && (c !== CHARCODE_CR)) {
                break;
            }
        }

        current = current.slice(length + parserInput.i - mem + curr);
        currentPos = parserInput.i;

        if (!current.length) {
            if (j < chunks.length - 1)
            {
                current = chunks[++j];
                skipWhitespace(0); // skip space at the beginning of a chunk
                return true; // things changed
            }
            parserInput.finished = true;
        }

        return oldi !== parserInput.i || oldj !== j;
    };

    // Same as $(), but don't change the state of the parser,
    // just return the match.
    parserInput.peek = function(tok) {
        if (typeof(tok) === 'string') {
            return input.charAt(parserInput.i) === tok;
        } else {
            return tok.test(current);
        }
    };

    // Specialization of peek()
    // TODO remove or change some currentChar calls to peekChar
    parserInput.peekChar = function(tok) {
        return input.charAt(parserInput.i) === tok;
    };

    parserInput.currentChar = function() {
        return input.charAt(parserInput.i);
    };

    parserInput.getInput = function() {
        return input;
    };

    parserInput.peekNotNumeric = function() {
        var c = input.charCodeAt(parserInput.i);
        // true if the next char cannot start a numeric dimension (0-9, '.', '+' or '-');
        // '/' and ',' are explicitly treated as non-numeric
        return (c > CHARCODE_9 || c < CHARCODE_PLUS) || c === CHARCODE_FORWARD_SLASH || c === CHARCODE_COMMA;
    };

    parserInput.getLocation = function(index, inputStream) {
        inputStream = inputStream == null ? input : inputStream;

        var n = index + 1,
            line = null,
            column = -1;

        while (--n >= 0 && inputStream.charAt(n) !== '\n') {
            column++;
        }

        if (typeof index === 'number') {
            line = (inputStream.slice(0, index).match(/\n/g) || "").length;
        }

        return {
            line: line,
            column: column
        };
    };

    parserInput.start = function(str, chunkInput, parser, env) {
        input = str;
        parserInput.i = j = currentPos = furthest = 0;

        // Chunking apparently makes things quicker (but my tests indicate
        // it might actually make things slower in node at least).
        // It is also a non-perfect parse - it can't recognise
        // unquoted urls, meaning it can't distinguish comments,
        // so comments with quotes or {}() in them get 'counted'
        // and then lead to parse errors.
        // In addition, if the chunker splits in the wrong place we might
        // not be able to parse a parser statement in one go.
        // Chunking is officially deprecated but can be switched on via an option
        // in case its removal causes too many performance issues.
        if (chunkInput) {
            chunks = chunker(str, function fail(msg, index) {
                throw new(LessError)(parser, {
                    index: index,
                    type: 'Parse',
                    message: msg,
                    filename: env.currentFileInfo.filename
                }, env);
            });
        } else {
            chunks = [str];
        }

        current = chunks[0];

        skipWhitespace(0);
    };

    parserInput.end = function() {
        var message,
            isFinished = parserInput.i >= input.length - 1;

        if (parserInput.i < furthest) {
            message = furthestPossibleErrorMessage;
            parserInput.i = furthest;
        }
        return {
            isFinished: isFinished,
            furthest: parserInput.i,
            furthestPossibleErrorMessage: message,
            furthestReachedEnd: parserInput.i >= input.length - 1,
            furthestChar: input[parserInput.i]
        };
    };

    return parserInput;
};
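To make the API above concrete, a small, hypothetical driver showing how a parser rule might use this module; the toy rule, input string, and variable names are invented and not part of this commit (env only matters when chunking is on):

var getParserInput = require('./parser-input.js');

var parserInput = getParserInput();
parserInput.start('.box { color: red; }', false, null, {
    currentFileInfo: { filename: 'example.less' }
});

// A toy "rule": try `<ident> :`, backtrack if the colon is missing.
function parseProperty() {
    parserInput.save();
    var name = parserInput.$re(/^[\w-]+/);
    if (!name || !parserInput.$char(':')) {
        parserInput.restore("expected 'property: value'");
        return null;
    }
    parserInput.forget();
    return name;
}

parserInput.$char('.');            // consume the leading '.'
parserInput.$re(/^[\w-]+/);        // 'box'
parserInput.$char('{');
console.log(parseProperty());      // -> 'color'
console.log(parserInput.end());    // -> { isFinished: false, furthest: ..., ... }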
File diff suppressed because it is too large
@@ -10,6 +10,11 @@ jsEvalNode.prototype.evaluateJavaScript = function (expression, env) {
         that = this,
         context = {};

+    if (env.javascriptEnabled !== undefined && !env.javascriptEnabled) {
+        throw { message: "You are using JavaScript, which has been disabled." ,
+            index: this.index };
+    }
+
     expression = expression.replace(/@\{([\w-]+)\}/g, function (_, name) {
         return that.jsify(new(Variable)('@' + name, that.index).eval(env));
     });
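As a side note, a standalone sketch of what the @{name} substitution in the context lines above does before the expression reaches the JavaScript engine; the variable lookup is reduced to a plain map here, whereas the real code goes through new(Variable)('@' + name, ...).eval(env) and jsify:

var vars = { '@b': '10px' };

function substitute(expression) {
    return expression.replace(/@\{([\w-]+)\}/g, function (_, name) {
        var value = vars['@' + name];
        if (value === undefined) {
            // roughly what produces the "variable @b is undefined" NameError
            // in the expected test output below
            throw { message: 'variable @' + name + ' is undefined' };
        }
        return JSON.stringify(value);
    });
}

console.log(substitute('"prefix-" + @{b}')); // "prefix-" + "10px"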
@@ -1,4 +1,4 @@
-SyntaxError: JavaScript evaluation error: 'TypeError: Cannot read property 'toJS' of undefined' in {path}javascript-error.less on line 2, column 25:
+SyntaxError: JavaScript evaluation error: 'TypeError: Cannot read property 'toJS' of undefined' in {path}javascript-error.less on line 2, column 10:
 1 .scope {
 2 var: `this.foo.toJS`;
 3 }
@@ -1,4 +1,4 @@
-NameError: variable @b is undefined in {path}javascript-undefined-var.less on line 2, column 15:
+NameError: variable @b is undefined in {path}javascript-undefined-var.less on line 2, column 9:
 1 .scope {
 2 @a: `@{b}`;
 3 }
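The two expectation changes above follow from the JavaScript node now reporting the index of the expression itself; line and column are then derived from that index roughly the way parserInput.getLocation in the new file does (0-based here). A sketch with a made-up input, not the actual test file:

function getLocation(index, inputStream) {
    var n = index + 1,
        line = null,
        column = -1;

    // walk backwards to the previous newline to find the column
    while (--n >= 0 && inputStream.charAt(n) !== '\n') {
        column++;
    }

    if (typeof index === 'number') {
        line = (inputStream.slice(0, index).match(/\n/g) || "").length;
    }

    return { line: line, column: column };
}

var src = '.scope {\n  @a: `@{b}`;\n}';
// index of the backtick expression on the second line
console.log(getLocation(src.indexOf('`@{b}`'), src)); // { line: 1, column: 6 }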