first little piece of the rewriter
@@ -1,6 +1,5 @@
# Executes the `coffee` Ruby program to convert from CoffeeScript to JavaScript.

sys: require('sys')
path: require('path')

# The path to the CoffeeScript executable.

@@ -1,4 +1,5 @@
sys: require 'sys'
Rewriter: require('./rewriter').Rewriter

# The lexer reads a stream of CoffeeScript and divvies it up into tagged
# tokens. A minor bit of the ambiguity in the grammar has been avoided by

@@ -70,8 +71,7 @@ lex::tokenize: (code) ->
this.extract_next_token()
# sys.puts "original stream: " + this.tokens if process.ENV['VERBOSE']
this.close_indentation()
# (new Rewriter()).rewrite(this.tokens)
this.tokens
(new Rewriter()).rewrite this.tokens

# At every position, run through this list of attempted matches,
# short-circuiting if any of them succeed.

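For orientation, each token the lexer emits is a [tag, value] pair, and tokenize now finishes by handing the whole array to the Rewriter instead of returning it raw. A minimal hand-off sketch, assuming only what this diff shows (the './rewriter' export and this snapshot's `:` assignment syntax); the sample token values are illustrative, not taken from the commit:

# Hypothetical usage sketch (not part of this commit).
Rewriter: require('./rewriter').Rewriter
# Illustrative [tag, value] pairs, shaped like the lexer's output.
tokens: [['IDENTIFIER', 'print'], ['STRING', "'hi'"], ["\n", "\n"]]
rewritten: (new Rewriter()).rewrite tokens
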
@@ -105,12 +105,14 @@ lex::identifier_token: ->
this.tag(-1, 'PROPERTY_ACCESS')
this.token(tag, id)
this.i += id.length
true

# Matches numbers, including decimals, hex, and exponential notation.
lex::number_token: ->
return false unless number: this.match NUMBER, 1
this.token 'NUMBER', number
this.i += number.length
true

# Matches strings, including multi-line strings.
lex::string_token: ->

@@ -119,6 +121,7 @@ lex::string_token: ->
this.token 'STRING', escaped
this.line += this.count string, "\n"
this.i += string.length
true

# Matches heredocs, adjusting indentation to the correct level.
lex::heredoc_token: ->

@@ -131,12 +134,14 @@ lex::heredoc_token: ->
this.token 'STRING', '"' + doc + '"'
this.line += this.count match[1], "\n"
this.i += match[1].length
true

# Matches interpolated JavaScript.
lex::js_token: ->
return false unless script: this.match JS, 1
this.token 'JS', script.replace(JS_CLEANER, '')
this.i += script.length
true

# Matches regular expression literals.
lex::regex_token: ->

@@ -144,6 +149,7 @@ lex::regex_token: ->
return false if NOT_REGEX.indexOf(this.tag()) >= 0
this.token 'REGEX', regex
this.i += regex.length
true

# Matches and consumes comments.
lex::comment_token: ->

@@ -152,6 +158,7 @@ lex::comment_token: ->
this.token 'COMMENT', comment.replace(COMMENT_CLEANER, '').split(MULTILINER)
this.token "\n", "\n"
this.i += comment.length
true

# Record tokens for indentation differing from the previous line.
lex::indent_token: ->

@@ -170,6 +177,7 @@ lex::indent_token: ->
else
this.outdent_token this.indent - size
this.indent: size
true

# Record an outdent token or tokens, if we're moving back inwards past
# multiple recorded indents.

@@ -179,12 +187,14 @@ lex::outdent_token: (move_out) ->
this.token 'OUTDENT', last_indent
move_out -= last_indent
this.token "\n", "\n"
true

# Matches and consumes non-meaningful whitespace.
lex::whitespace_token: ->
return false unless space: this.match WHITESPACE, 1
this.value().spaced: true
this.i += space.length
true

# Multiple newlines get merged together.
# Use a trailing \ to escape newlines.

@@ -211,6 +221,7 @@ lex::literal_token: ->
tag: 'INDEX_START' if value is '['
this.token tag, value
this.i += value.length
true

# Helpers =============================================================

src/rewriter.coffee (new file, 120 lines)
@@ -0,0 +1,120 @@
# In order to keep the grammar simple, the stream of tokens that the Lexer
# emits is rewritten by the Rewriter, smoothing out ambiguities, mis-nested
# indentation, and single-line flavors of expressions.
exports.Rewriter: re: ->

# Tokens that must be balanced.
BALANCED_PAIRS: [['(', ')'], ['[', ']'], ['{', '}'], ['INDENT', 'OUTDENT'],
  ['PARAM_START', 'PARAM_END'], ['CALL_START', 'CALL_END'], ['INDEX_START', 'INDEX_END']]

# Tokens that signal the start of a balanced pair.
EXPRESSION_START: pair[0] for pair in BALANCED_PAIRS

# Tokens that signal the end of a balanced pair.
EXPRESSION_TAIL: pair[1] for pair in BALANCED_PAIRS

# Tokens that indicate the close of a clause of an expression.
EXPRESSION_CLOSE: ['CATCH', 'WHEN', 'ELSE', 'FINALLY'].concat(EXPRESSION_TAIL)

# Token pairs that, in immediate succession, indicate an implicit call.
IMPLICIT_FUNC: ['IDENTIFIER', 'SUPER', ')', 'CALL_END', ']', 'INDEX_END']
IMPLICIT_END: ['IF', 'UNLESS', 'FOR', 'WHILE', "\n", 'OUTDENT']
IMPLICIT_CALL: ['IDENTIFIER', 'NUMBER', 'STRING', 'JS', 'REGEX', 'NEW', 'PARAM_START',
  'TRY', 'DELETE', 'TYPEOF', 'SWITCH', 'ARGUMENTS',
  'TRUE', 'FALSE', 'YES', 'NO', 'ON', 'OFF', '!', '!!', 'NOT',
  '->', '=>', '[', '(', '{']

# The inverse mappings of token pairs we're trying to fix up.
INVERSES: {}
for pair in BALANCED_PAIRS
  INVERSES[pair[0]]: pair[1]
  INVERSES[pair[1]]: pair[0]

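A worked example of what those precomputed tables hold once the comprehensions and the loop above have run (written out by hand from BALANCED_PAIRS, not generated by the commit):

# EXPRESSION_START is ['(', '[', '{', 'INDENT', 'PARAM_START', 'CALL_START', 'INDEX_START']
# EXPRESSION_TAIL  is [')', ']', '}', 'OUTDENT', 'PARAM_END', 'CALL_END', 'INDEX_END']
# INVERSES maps each opener to its closer and back, for example:
INVERSES['INDENT']    # => 'OUTDENT'
INVERSES['CALL_END']  # => 'CALL_START'
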
# Single-line flavors of block expressions that have unclosed endings.
# The grammar can't disambiguate them, so we insert the implicit indentation.
SINGLE_LINERS: ['ELSE', "->", "=>", 'TRY', 'FINALLY', 'THEN']
SINGLE_CLOSERS: ["\n", 'CATCH', 'FINALLY', 'ELSE', 'OUTDENT', 'LEADING_WHEN', 'PARAM_START']

# Rewrite the token stream in multiple passes, one logical filter at
# a time. This could certainly be changed into a single pass through the
# stream, with a big ol' efficient switch, but it's much nicer like this.
re::rewrite: (tokens) ->
  this.tokens: tokens
  this.adjust_comments()
  # this.remove_leading_newlines()
  # this.remove_mid_expression_newlines()
  # this.move_commas_outside_outdents()
  # this.close_open_calls_and_indexes()
  # this.add_implicit_parentheses()
  # this.add_implicit_indentation()
  # this.ensure_balance(BALANCED_PAIRS)
  # this.rewrite_closing_parens()
  this.tokens

# Rewrite the token stream, looking one token ahead and behind.
# Allow the return value of the block to tell us how many tokens to move
# forwards (or backwards) in the stream, to make sure we don't miss anything
# as the stream changes length under our feet.
re::scan_tokens: (yield) ->
  i: 0
  while true
    break unless this.tokens[i]
    move: yield(this.tokens[i - 1], this.tokens[i], this.tokens[i + 1], i)
    i += move
  true

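The move-count protocol is easiest to see with a tiny pass. The one below is hypothetical (not part of this commit) and mirrors the shape of adjust_comments further down: it collapses runs of consecutive "\n" tokens, returning 0 after a splice so the scan re-examines the same index, and 1 to step forward otherwise.

# Hypothetical example pass, written in this snapshot's `:` assignment syntax.
re::squeeze_newlines: ->
  this.scan_tokens (prev, token, post, i) ->
    return 1 unless prev and prev[0] is "\n" and token[0] is "\n"
    this.tokens.splice(i, 1)    # drop the duplicate newline token
    return 0                    # stay put; a new token now occupies index i
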
# Massage newlines and indentations so that comments don't have to be
# correctly indented, or appear on their own line.
re::adjust_comments: ->
  this.scan_tokens (prev, token, post, i) ->
    return 1 unless token[0] is 'COMMENT'
    before: this.tokens[i - 2]
    after: this.tokens[i + 2]
    if before and after and
        ((before[0] is 'INDENT' and after[0] is 'OUTDENT') or
        (before[0] is 'OUTDENT' and after[0] is 'INDENT')) and
        before[1] is after[1]
      this.tokens.splice(i + 2, 1)
      this.tokens.splice(i - 2, 1)
      return 0
    else if prev[0] is "\n" and after[0] is 'INDENT'
      this.tokens.splice(i + 2, 1)
      this.tokens[i - 1]: after
      return 1
    else if prev[0] isnt "\n" and prev[0] isnt 'INDENT' and prev[0] isnt 'OUTDENT'
      this.tokens.splice(i, 0, ["\n", "\n"])
      return 2
    else
      return 1
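
A hand-traced example of the first branch (hypothetical stream, values illustrative, not run from the commit): a comment sitting alone between an INDENT and an OUTDENT of the same width loses both, so the comment no longer forces a spurious indentation level.

# Hand-traced through adjust_comments above:
tokens: [['INDENT', 2], ["\n", "\n"], ['COMMENT', ['# note']], ["\n", "\n"], ['OUTDENT', 2]]
# When the scan reaches the COMMENT (i is 2), `before` is the INDENT and
# `after` is the OUTDENT, both carrying width 2, so both are spliced out:
# [["\n", "\n"], ['COMMENT', ['# note']], ["\n", "\n"]]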