Merge branch 'master' of git://github.com/jashkenas/coffee-script
Cakefile (4 changed lines)

@@ -28,8 +28,8 @@ task 'build', 'build the CoffeeScript language from source', ->
run ['-c', '-o', 'lib'].concat(files)


task 'build:full', 'checkout /lib, rebuild the source twice, and run the tests', ->
exec 'git co lib && bin/cake build && bin/cake build && bin/cake test', (err, stdout, stderr) ->
task 'build:full', 'rebuild the source twice, and run the tests', ->
exec 'bin/cake build && bin/cake build && bin/cake test', (err, stdout, stderr) ->
print stdout if stdout
print stderr if stderr
throw err if err

@@ -105,6 +105,7 @@ div.code {
position: fixed;
z-index: 100;
height: 50px;
min-width: 490px;
left: 40px; right: 40px; top: 25px;
background: #ddd;
padding-left: 235px;

@@ -76,7 +76,7 @@
<div id="repl_source_wrap"><textarea id="repl_source">reverse: (string) ->
string.split('').reverse().join ''

alert reverse '!tpircseeffoC'</textarea></div>
alert reverse '.eeffoC yrT'</textarea></div>
<pre id="repl_results"></pre>
<button class="full_screen">go full screen</button>
<button class="minimize">minimize</button>

@@ -90,8 +90,8 @@ alert reverse '!tpircseeffoC'</textarea></div>
Annotated Source
</div>
<div class="contents">
<a href="documentation/docs/grammar.html">The Grammar — src/grammar</a>
<a href="documentation/docs/lexer.html">The Lexer — src/lexer</a>
<a href="documentation/docs/grammar.html">Grammar Rules — src/grammar</a>
<a href="documentation/docs/lexer.html">Lexing Tokens — src/lexer</a>
<a href="documentation/docs/rewriter.html">The Rewriter — src/rewriter</a>
<a href="documentation/docs/nodes.html">The Syntax Tree — src/nodes</a>
<a href="documentation/docs/scope.html">Lexical Scope — src/scope</a>

@@ -395,6 +395,13 @@ coffee --print app/scripts/*.coffee > concatenation.js</pre>
should not be able to change the value of the external variable of the same name, and
therefore has a declaration of its own.
</p>
<p>
This behavior is effectively identical to Ruby's scope for local variables.
Because you don't have direct access to the <tt>var</tt> keyword,
it's impossible to shadow an outer variable on purpose; you may only refer
to it. So be careful that you're not reusing the name of an external
variable accidentally, if you're writing a deeply nested function.
</p>
<p>
Although suppressed within this documentation for clarity, all
CoffeeScript output is wrapped in an anonymous function:

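The paragraph added here describes CoffeeScript's Ruby-like scoping: since there is no var keyword, assigning to a name that already exists in an outer scope reuses that variable rather than shadowing it. A minimal sketch of the behavior, with illustrative names that are not part of this commit:

    change: 'outer'
    rewrite: ->
      change: 'inner'   # reassigns the outer variable; no shadowing declaration is emitted
    rewrite()
    alert change        # 'inner'
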
@@ -1,5 +1,6 @@
# Contributed by Jason Huggins

process.mixin require 'sys'
http: require 'http'

server: http.createServer (req, res) ->

@@ -15,8 +15,8 @@
^\s*
(.*class
|[a-zA-Z\$_](\w|\$|:|\.)*\s*(?=\:(\s*\(.*\))?\s*((=|-)>\s*$)) # function that is not one line
|[a-zA-Z\$_](\w|\$|\.)*\s*(?=(?!\::)\:(?!(\s*\(.*\))?\s*((=|-)>))):\s*(if(?!.*?then)|while|for|$) # assignment using multiline if/while/for
|if(?!.*?then)|while|for
|[a-zA-Z\$_](\w|\$|\.)*\s*(?=(?!\::)\:(?!(\s*\(.*\))?\s*((=|-)>))):\s*((if|while)(?!.*?then)|for|$) # assignment using multiline if/while/for
|(if|while)(?!.*?then)|for
|.*\{$
|.*\[$)</string>
</dict>

@@ -140,7 +140,7 @@
</dict>
<dict>
<key>begin</key>
<string>(?<=[=(:]|^|return)\s*(/)(?![/*+{}?])</string>
<string>(?<=[=(:\s]|^|return)\s*(/)(?![\s/*+{}?])</string>
<key>beginCaptures</key>
<dict>
<key>1</key>

@@ -150,7 +150,7 @@
</dict>
</dict>
<key>end</key>
<string>(/)[igm]*</string>
<string>(/)[igmy]*</string>
<key>endCaptures</key>
<dict>
<key>1</key>

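These <string> values are the bundle's heuristic for telling a regex literal apart from division: a slash in the right context starts a regex only if it is not immediately followed by whitespace, and the closing pattern now also accepts the sticky 'y' flag. A couple of assumed example lines, not taken from the bundle, showing the distinction:

    half: 10 / 2      # the slash is followed by whitespace, so it reads as division
    digits: /\d+/g    # a slash in regex position with no following whitespace: a regex literal
    sticky: /foo/gy   # flags may now include 'y'
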
index.html (15 changed lines)

@@ -62,7 +62,7 @@
<div id="repl_source_wrap"><textarea id="repl_source">reverse: (string) ->
string.split('').reverse().join ''

alert reverse '!tpircseeffoC'</textarea></div>
alert reverse '.eeffoC yrT'</textarea></div>
<pre id="repl_results"></pre>
<button class="full_screen">go full screen</button>
<button class="minimize">minimize</button>

@@ -76,8 +76,8 @@ alert reverse '!tpircseeffoC'</textarea></div>
Annotated Source
</div>
<div class="contents">
<a href="documentation/docs/grammar.html">The Grammar — src/grammar</a>
<a href="documentation/docs/lexer.html">The Lexer — src/lexer</a>
<a href="documentation/docs/grammar.html">Grammar Rules — src/grammar</a>
<a href="documentation/docs/lexer.html">Lexing Tokens — src/lexer</a>
<a href="documentation/docs/rewriter.html">The Rewriter — src/rewriter</a>
<a href="documentation/docs/nodes.html">The Syntax Tree — src/nodes</a>
<a href="documentation/docs/scope.html">Lexical Scope — src/scope</a>

@@ -566,6 +566,13 @@ new_num = change_numbers();
should not be able to change the value of the external variable of the same name, and
therefore has a declaration of its own.
</p>
<p>
This behavior is effectively identical to Ruby's scope for local variables.
Because you don't have direct access to the <tt>var</tt> keyword,
it's impossible to shadow an outer variable on purpose; you may only refer
to it. So be careful that you're not reusing the name of an external
variable accidentally, if you're writing a deeply nested function.
</p>
<p>
Although suppressed within this documentation for clarity, all
CoffeeScript output is wrapped in an anonymous function:

@@ -995,7 +1002,7 @@ numbers_copy = numbers.slice(0, numbers.length);
</p>
<div class='code'><pre class="idle">numbers<span class="Keyword">:</span> [<span class="Number">0</span>, <span class="Number">1</span>, <span class="Number">2</span>, <span class="Number">3</span>, <span class="Number">4</span>, <span class="Number">5</span>, <span class="Number">6</span>, <span class="Number">7</span>, <span class="Number">8</span>, <span class="Number">9</span>]

numbers<span class="Keyword">[</span><span class="Number">3</span>..<span class="Number">6</span><span class="Keyword">]:</span> [<span class="Keyword">-</span><span class="Number">3</span>, <span class="Keyword">-</span><span class="Number">4</span>, <span class="Keyword">-</span><span class="Number">5</span>, <span class="Keyword">-</span><span class="Number">6</span>]
numbers[<span class="Number">3</span>..<span class="Number">6</span>]<span class="Keyword">:</span> [<span class="Keyword">-</span><span class="Number">3</span>, <span class="Keyword">-</span><span class="Number">4</span>, <span class="Keyword">-</span><span class="Number">5</span>, <span class="Keyword">-</span><span class="Number">6</span>]


</pre><pre class="idle"><span class="Storage">var</span> numbers;

lib/lexer.js (97 changed lines)

@@ -1,5 +1,5 @@
(function(){
var ACCESSORS, ASSIGNMENT, BEFORE_WHEN, CALLABLE, CODE, COFFEE_KEYWORDS, COMMENT, COMMENT_CLEANER, HEREDOC, HEREDOC_INDENT, IDENTIFIER, INTERPOLATION, JS_CLEANER, JS_FORBIDDEN, JS_KEYWORDS, KEYWORDS, LAST_DENT, LAST_DENTS, Lexer, MULTILINER, MULTI_DENT, NOT_REGEX, NO_NEWLINE, NUMBER, OPERATOR, REGEX, RESERVED, Rewriter, STRING_NEWLINES, WHITESPACE, compact, count, include, starts;
var ACCESSORS, ASSIGNMENT, BEFORE_WHEN, CALLABLE, CODE, COFFEE_KEYWORDS, COMMENT, COMMENT_CLEANER, HEREDOC, HEREDOC_INDENT, IDENTIFIER, INTERPOLATION, JS_CLEANER, JS_FORBIDDEN, JS_KEYWORDS, KEYWORDS, LAST_DENT, LAST_DENTS, Lexer, MULTILINER, MULTI_DENT, NOT_REGEX, NO_NEWLINE, NUMBER, OPERATOR, RESERVED, Rewriter, STRING_NEWLINES, WHITESPACE, compact, count, include, starts;
// The CoffeeScript Lexer. Uses a series of token-matching regexes to attempt
// matches against the beginning of the source code. When a match is found,
// a token is produced, we consume the match, and start again. Tokens are in the

@@ -33,7 +33,7 @@
Lexer.prototype.tokenize = function tokenize(code, options) {
var o;
o = options || {};
this.code = code;
this.code = code || '';
// The remainder of the source code.
this.i = 0;
// Current character position we're parsing.

@@ -129,18 +129,14 @@
// Matches strings, including multi-line strings. Ensures that quotation marks
// are balanced within the string's contents, and within nested interpolations.
Lexer.prototype.string_token = function string_token() {
var string;
var merge, string;
if (!(starts(this.chunk, '"') || starts(this.chunk, "'"))) {
return false;
}
string = this.balanced_token(['"', '"'], ['${', '}']);
if (!(string)) {
string = this.balanced_token(["'", "'"]);
}
if (!(string)) {
if (!((string = this.balanced_token(['"', '"'], ['${', '}']) || this.balanced_token(["'", "'"])))) {
return false;
}
this.interpolate_string(string.replace(STRING_NEWLINES, " \\\n"));
this.interpolate_string(string.replace(STRING_NEWLINES, " \\\n"), (merge = true));
this.line += count(string, "\n");
this.i += string.length;
return true;

@@ -175,14 +171,51 @@
// to distinguish from division, so we borrow some basic heuristics from
// JavaScript and Ruby.
Lexer.prototype.regex_token = function regex_token() {
var regex;
if (!((regex = this.match(REGEX, 1)))) {
var _a, _b, _c, _d, _e, each, flags, i, index, interp_tokens, merge, regex, str;
if (!(starts(this.chunk, '/'))) {
return false;
}
if (include(NOT_REGEX, this.tag())) {
return false;
}
this.token('REGEX', regex);
if (!((regex = this.balanced_token(['/', '/'])))) {
return false;
}
if (regex.length < 3 || regex.match(/^\/\s+/m)) {
return false;
}
flags = ['i', 'm', 'g', 'y'];
while (((index = flags.indexOf(this.chunk.substr(regex.length, 1)))) >= 0) {
regex += flags[index];
flags.splice(index, 1);
}
if (((0 < (_e = regex.indexOf('${'))) && (_e < regex.indexOf('}'))) || regex.match(/[^\\]\$[a-zA-Z_@]/)) {
_a = regex.substring(1).split('/');
str = _a[0];
flags = _a[1];
str = str.replace(/\\[^\$]/g, function(escaped) {
return '\\' + escaped;
});
this.tokens = this.tokens.concat([['(', '('], ['NEW', 'new'], ['IDENTIFIER', 'RegExp'], ['CALL_START', '(']]);
interp_tokens = this.interpolate_string("\"" + str + "\"", (merge = false));
_b = interp_tokens;
for (i = 0, _c = _b.length; i < _c; i++) {
each = _b[i];
if ((_d = each[0]) === 'TOKENS') {
this.tokens = this.tokens.concat(each[1]);
} else if (_d === 'STRING') {
this.token(each[0], each[1].substring(0, 1) + each[1].substring(1, each[1].length - 1).replace(/"/g, '\\"') + each[1].substring(0, 1));
} else {
this.token(each[0], each[1]);
}
if (i < interp_tokens.length - 1) {
this.token('+', '+');
}
}
this.tokens = this.tokens.concat([[',', ','], ['STRING', "'" + flags + "'"], [')', ')'], [')', ')']]);
} else {
this.token('REGEX', regex);
}
this.i += regex.length;
return true;
};

@@ -429,6 +462,9 @@
i += 1;
}
if (levels.length) {
if (delimited[0][0] === '/') {
return false;
}
throw new Error("SyntaxError: Unterminated " + (levels.pop()[0]) + " starting on line " + (this.line + 1));
}
if (i === 0) {

@@ -444,8 +480,8 @@
// If it encounters an interpolation, this method will recursively create a
// new Lexer, tokenize the interpolated contents, and merge them into the
// token stream.
Lexer.prototype.interpolate_string = function interpolate_string(str) {
var _a, _b, _c, _d, _e, each, expr, group, i, inner, interp, lexer, match, nested, pi, quote, tokens;
Lexer.prototype.interpolate_string = function interpolate_string(str, merge) {
var _a, _b, _c, _d, _e, _f, _g, each, expr, group, has_string, i, inner, interp, lexer, match, nested, pi, quote, tokens;
if (str.length < 3 || !starts(str, '"')) {
return this.token('STRING', str);
} else {

@@ -466,14 +502,14 @@
interp = "this." + (interp.substring(1));
}
if (pi < i) {
tokens.push(['STRING', quote + (str.substring(pi, i)) + quote]);
tokens.push(['STRING', '' + quote + (str.substring(pi, i)) + quote]);
}
tokens.push(['IDENTIFIER', interp]);
i += group.length - 1;
pi = i + 1;
} else if (((expr = this.balanced_string(str.substring(i), ['${', '}'])))) {
if (pi < i) {
tokens.push(['STRING', quote + (str.substring(pi, i)) + quote]);
tokens.push(['STRING', '' + quote + (str.substring(pi, i)) + quote]);
}
inner = expr.substring(2, expr.length - 1);
if (inner.length) {

@@ -484,7 +520,7 @@
nested.pop();
tokens.push(['TOKENS', nested]);
} else {
tokens.push(['STRING', quote + quote]);
tokens.push(['STRING', '' + quote + quote]);
}
i += expr.length - 1;
pi = i + 1;

@@ -492,19 +528,27 @@
i += 1;
}
if (pi < i && pi < str.length - 1) {
tokens.push(['STRING', quote + (str.substring(pi, i)) + quote]);
tokens.push(['STRING', '' + quote + (str.substring(pi, i)) + quote]);
}
_c = []; _d = tokens;
for (i = 0, _e = _d.length; i < _e; i++) {
each = _d[i];
_c.push((function() {
_c = tokens;
for (_d = 0, _e = _c.length; _d < _e; _d++) {
each = _c[_d];
each[0] === 'STRING' ? ((has_string = true)) : null;
}
if (!has_string) {
tokens.unshift(['STRING', "''"]);
}
if (((typeof merge !== "undefined" && merge !== null) ? merge : true)) {
_f = tokens;
for (i = 0, _g = _f.length; i < _g; i++) {
each = _f[i];
each[0] === 'TOKENS' ? (this.tokens = this.tokens.concat(each[1])) : this.token(each[0], each[1]);
if (i < tokens.length - 1) {
return this.token('+', '+');
this.token('+', '+');
}
}).call(this));
}
}
return _c;
return tokens;
}
};
// Helpers

@@ -568,7 +612,7 @@
// be used as identifiers or properties.
JS_FORBIDDEN = JS_KEYWORDS.concat(RESERVED);
// Token matching regexes.
IDENTIFIER = /^([a-zA-Z$_](\w|\$)*)/;
IDENTIFIER = /^([a-zA-Z\$_](\w|\$)*)/;
NUMBER = /^(\b((0(x|X)[0-9a-fA-F]+)|([0-9]+(\.[0-9]+)?(e[+\-]?[0-9]+)?)))\b/i;
HEREDOC = /^("{6}|'{6}|"{3}\n?([\s\S]*?)\n?([ \t]*)"{3}|'{3}\n?([\s\S]*?)\n?([ \t]*)'{3})/;
INTERPOLATION = /^\$([a-zA-Z_@]\w*(\.\w+)*)/;

@@ -576,7 +620,6 @@
WHITESPACE = /^([ \t]+)/;
COMMENT = /^(((\n?[ \t]*)?#[^\n]*)+)/;
CODE = /^((-|=)>)/;
REGEX = /^(\/(\S.*?)?([^\\]|\\\\)\/[imgy]{0,4})/;
MULTI_DENT = /^((\n([ \t]*))+)(\.)?/;
LAST_DENTS = /\n([ \t]*)/g;
LAST_DENT = /\n([ \t]*)/;

@@ -289,7 +289,7 @@ idt += TAB
var end, idt;
idt = this.is_statement() ? this.idt() : '';
end = this.is_statement() ? ';' : '';
return idt + this.value + end;
return '' + idt + this.value + end;
};
LiteralNode.prototype.toString = function toString(idt) {
return " \"" + this.value + "\"";

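The new leading '' + in the compiled output is a side effect of the interpolation changes in the lexer above: a string built entirely from interpolations now gets an empty string token unshifted in front of it, so the '+' chain still performs string concatenation. A rough sketch with illustrative values, where the comment shows the approximate compiled form:

    idt: '  '
    value: 'x'
    end: ';'
    line: "$idt$value$end"   # compiles to roughly: '' + idt + value + end, yielding '  x;'
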
@@ -762,7 +762,7 @@ idt += TAB
props = props.empty() ? '' : props.compile(o) + '\n';
extension = extension ? this.idt() + extension.compile(o) + ';\n' : '';
returns = ret ? '\n' + this.idt() + 'return ' + this.variable.compile(o) + ';' : '';
return construct + extension + props + returns;
return '' + construct + extension + props + returns;
};
return ClassNode;
}).call(this);

@@ -1540,7 +1540,7 @@ idt += TAB
indent: this.idt(),
chain_child: true
})) : " else {\n" + (Expressions.wrap([this.else_body]).compile(o)) + "\n" + this.tab + "}";
return if_part + else_part;
return '' + if_part + else_part;
};
// Compile the IfNode as a ternary operator.
IfNode.prototype.compile_ternary = function compile_ternary(o) {

@@ -1559,7 +1559,7 @@ idt += TAB
// with Git.
TRAILING_WHITESPACE = /\s+$/gm;
// Keep this identifier regex in sync with the Lexer.
IDENTIFIER = /^[a-zA-Z$_](\w|\$)*$/;
IDENTIFIER = /^[a-zA-Z\$_](\w|\$)*$/;
// Utility Functions
// -----------------
// Merge objects, returning a fresh copy with attributes from both sides.

@@ -81,7 +81,7 @@
Rewriter.prototype.remove_leading_newlines = function remove_leading_newlines() {
var _a;
_a = [];
while (this.tokens[0][0] === 'TERMINATOR') {
while (this.tokens[0] && this.tokens[0][0] === 'TERMINATOR') {
_a.push(this.tokens.shift());
}
return _a;

@@ -36,7 +36,7 @@ exports.Lexer: class Lexer
# unless explicitly asked not to.
tokenize: (code, options) ->
o : options or {}
@code : code # The remainder of the source code.
@code : code or '' # The remainder of the source code.
@i : 0 # Current character position we're parsing.
@line : o.line or 0 # The current line.
@indent : 0 # The current indentation level.

@@ -96,10 +96,10 @@ exports.Lexer: class Lexer
# are balanced within the string's contents, and within nested interpolations.
string_token: ->
return false unless starts(@chunk, '"') or starts(@chunk, "'")
string: @balanced_token ['"', '"'], ['${', '}']
string: @balanced_token ["'", "'"] unless string
return false unless string
@interpolate_string string.replace STRING_NEWLINES, " \\\n"
return false unless string:
@balanced_token(['"', '"'], ['${', '}']) or
@balanced_token ["'", "'"]
@interpolate_string string.replace(STRING_NEWLINES, " \\\n"), merge: true
@line += count string, "\n"
@i += string.length
true

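string_token now always hands double-quoted strings to interpolate_string with merge: true, so interpolated pieces are spliced directly into the token stream. A small usage sketch in the spirit of the existing tests, where ok is the test suite's assertion helper and the values are illustrative:

    hello: 'Hello'
    world: 'World'
    ok "$hello ${world}!" is 'Hello World!'   # double quotes interpolate
    ok '$hello' is '$hello'                   # single quotes are left alone
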
@@ -126,9 +126,28 @@ exports.Lexer: class Lexer
# to distinguish from division, so we borrow some basic heuristics from
# JavaScript and Ruby.
regex_token: ->
return false unless regex: @match REGEX, 1
return false unless starts @chunk, '/'
return false if include NOT_REGEX, @tag()
@token 'REGEX', regex
return false unless regex: @balanced_token ['/', '/']
return false if regex.length < 3 or regex.match /^\/\s+/m
flags: ['i', 'm', 'g', 'y']
while (index: flags.indexOf @chunk.substr regex.length, 1) >= 0
regex += flags[index]
flags.splice index, 1
if (0 < regex.indexOf('${') < regex.indexOf('}')) or regex.match /[^\\]\$[a-zA-Z_@]/
[str, flags]: regex.substring(1).split('/')
str: str.replace /\\[^\$]/g, (escaped) -> '\\' + escaped
@tokens: @tokens.concat [['(', '('], ['NEW', 'new'], ['IDENTIFIER', 'RegExp'], ['CALL_START', '(']]
interp_tokens: @interpolate_string "\"$str\"", merge: false
for each, i in interp_tokens
switch each[0]
when 'TOKENS' then @tokens: @tokens.concat each[1]
when 'STRING' then @token each[0], each[1].substring(0, 1) + each[1].substring(1, each[1].length - 1).replace(/"/g, '\\"') + each[1].substring(0, 1)
else @token each[0], each[1]
@token '+', '+' if i < interp_tokens.length - 1
@tokens: @tokens.concat [[',', ','], ['STRING', "'$flags'"], [')', ')'], [')', ')']]
else
@token 'REGEX', regex
@i += regex.length
true

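With this new regex_token, interpolation also works inside regex literals: when a pattern contains $identifier or ${...}, the lexer rebuilds it as a new RegExp(...) call whose body is assembled by string concatenation. A rough sketch of the idea, with the approximate compiled form shown in a comment:

    name: 'Bob'
    pattern: /^$name$/i
    # lexed roughly as if it had been written:
    # pattern: new RegExp("^" + name + "$", 'i')

The new test/test_regexp_interpolation.coffee below exercises these cases.
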
@@ -317,7 +336,9 @@ exports.Lexer: class Lexer
break
break unless levels.length
i += 1
throw new Error "SyntaxError: Unterminated ${levels.pop()[0]} starting on line ${@line + 1}" if levels.length
if levels.length
return false if delimited[0][0] is '/'
throw new Error "SyntaxError: Unterminated ${levels.pop()[0]} starting on line ${@line + 1}"
return false if i is 0
return str.substring(0, i)

@@ -331,7 +352,7 @@ exports.Lexer: class Lexer
# If it encounters an interpolation, this method will recursively create a
# new Lexer, tokenize the interpolated contents, and merge them into the
# token stream.
interpolate_string: (str) ->
interpolate_string: (str, merge) ->
if str.length < 3 or not starts str, '"'
@token 'STRING', str
else

@@ -362,12 +383,16 @@ exports.Lexer: class Lexer
pi: i + 1
i += 1
tokens.push ['STRING', "$quote${ str.substring(pi, i) }$quote"] if pi < i and pi < str.length - 1
for each, i in tokens
if each[0] is 'TOKENS'
@tokens: @tokens.concat each[1]
else
@token each[0], each[1]
@token '+', '+' if i < tokens.length - 1
(has_string: yes) for each in tokens when each[0] is 'STRING'
tokens.unshift ['STRING', "''"] if not has_string
if (merge ? true)
for each, i in tokens
if each[0] is 'TOKENS'
@tokens: @tokens.concat each[1]
else
@token each[0], each[1]
@token '+', '+' if i < tokens.length - 1
tokens

# Helpers
# -------

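The merge argument defaults to on through the existential operator: merge ? true evaluates to merge when it is defined and non-null, and to true otherwise, so existing callers keep the old merging behavior. A standalone sketch of the idiom, using a hypothetical function that is not part of the lexer:

    with_default: (merge) ->
      merge ? true

    with_default()        # => true
    with_default(false)   # => false
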
@@ -440,7 +465,7 @@ RESERVED: [
JS_FORBIDDEN: JS_KEYWORDS.concat RESERVED

# Token matching regexes.
IDENTIFIER : /^([a-zA-Z$_](\w|\$)*)/
IDENTIFIER : /^([a-zA-Z\$_](\w|\$)*)/
NUMBER : /^(\b((0(x|X)[0-9a-fA-F]+)|([0-9]+(\.[0-9]+)?(e[+\-]?[0-9]+)?)))\b/i
HEREDOC : /^("{6}|'{6}|"{3}\n?([\s\S]*?)\n?([ \t]*)"{3}|'{3}\n?([\s\S]*?)\n?([ \t]*)'{3})/
INTERPOLATION : /^\$([a-zA-Z_@]\w*(\.\w+)*)/

@@ -448,7 +473,6 @@ OPERATOR : /^([+\*&|\/\-%=<>:!?]+)/
WHITESPACE : /^([ \t]+)/
COMMENT : /^(((\n?[ \t]*)?#[^\n]*)+)/
CODE : /^((-|=)>)/
REGEX : /^(\/(\S.*?)?([^\\]|\\\\)\/[imgy]{0,4})/
MULTI_DENT : /^((\n([ \t]*))+)(\.)?/
LAST_DENTS : /\n([ \t]*)/g
LAST_DENT : /\n([ \t]*)/

@@ -1192,7 +1192,7 @@ TAB: ' '
TRAILING_WHITESPACE: /\s+$/gm

# Keep this identifier regex in sync with the Lexer.
IDENTIFIER: /^[a-zA-Z$_](\w|\$)*$/
IDENTIFIER: /^[a-zA-Z\$_](\w|\$)*$/

# Utility Functions
# -----------------

@@ -62,7 +62,7 @@ exports.Rewriter: class Rewriter
# Leading newlines would introduce an ambiguity in the grammar, so we
# dispatch them here.
remove_leading_newlines: ->
@tokens.shift() while @tokens[0][0] is 'TERMINATOR'
@tokens.shift() while @tokens[0] and @tokens[0][0] is 'TERMINATOR'

# Some blocks occur in the middle of expressions -- when we're expecting
# this, remove their trailing newlines.

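The added @tokens[0] check keeps the rewriter from indexing past the end of the token stream, for example when it is handed source that is empty or consists only of newlines. The same defensive loop in isolation, with illustrative data rather than the rewriter's real tokens:

    tokens: [['TERMINATOR', "\n"], ['TERMINATOR', "\n"]]
    tokens.shift() while tokens[0] and tokens[0][0] is 'TERMINATOR'
    # tokens is now empty and the loop stopped cleanly
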
test/test_regexp_interpolation.coffee (new file, 17 lines)

@@ -0,0 +1,17 @@
name: 'Bob'

ok not not '"Bob"'.match(/^"${name}"$/i)
ok '"Bobby"'.match(/^"${name}"$/i) is null

ok not not 'Bob'.match(/^$name$/)
ok 'Bobby'.match(/^$name/)

ok 'Bobby'.match(/${"${"${"$name"}"}"}/imgy)

ok '$a$b$c'.match(/\$A\$B\$C/i)

a: 1
b: 2
c: 3

ok '123'.match(/$a$b$c/i)

@@ -61,3 +61,8 @@ ok "Where is ${"the nested ${obj["name"]}"}?" is 'Where is the nested Joe?'
ok "Hello ${world ? "$hello"}" is 'Hello World'

ok "Hello ${"${"${obj["name"]}" + '!'}"}" is 'Hello Joe!'

a: 1
b: 2
c: 3
ok "$a$b$c" is '123'
