Skip to content

Commit

Permalink
clarified errors on implicit tokens (#214)
Browse files Browse the repository at this point in the history
  • Loading branch information
satyr committed Apr 10, 2013
1 parent 263a464 commit 33d6bbc
Show file tree
Hide file tree
Showing 8 changed files with 44 additions and 15 deletions.
5 changes: 5 additions & 0 deletions lib/coco.js
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,11 @@ parser.lexer = {
return '';
}
};
// Replaces Jison's default error reporter with a message that names the
// offending token (via lexer.pp) and its 1-based source line.
parser.parseError = function(){
  var token;
  token = this.lexer.tokens[this.lexer.pos];
  // Guard: past the end of the token stream there is no token to describe;
  // without this, `token[2]` would raise a TypeError instead of SyntaxError.
  if (!token) {
    throw SyntaxError("unexpected end of input");
  }
  // token[2] is the 0-based line number recorded by the lexer.
  throw SyntaxError("unexpected " + lexer.pp(token) + " on line " + (token[2] + 1));
};
exports.VERSION = '0.9.2b';
exports.compile = function(code, options){
var e, that;
Expand Down
21 changes: 16 additions & 5 deletions lib/lexer.js
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,17 @@ exports.rewrite = function(it){
}
return it;
};
exports.pp = function(token){
var tag, val;
tag = token[0], val = token[1];
if (!val) {
return "`" + tag + "` (implicit)";
}
if (/^\s+/.test(val)) {
return tag;
}
return "`" + val + "`";
};
exports.tokenize = function(code, o){
var i, c, that;
this.inter || (code = code.replace(/[\r\u2028\u2029\uFEFF]/g, ''));
Expand Down Expand Up @@ -1395,12 +1406,12 @@ function expandLiterals(tokens){
i += ts.length - 1;
break;
case 'WORDS':
ts = [['[', '[', lno = token[2]]];
ts = [['[', '<[', lno = token[2]]];
for (i$ = 0, len$ = (ref4$ = token[1].slice(2, -2).match(/\S+/g) || '').length; i$ < len$; ++i$) {
word = ref4$[i$];
ts.push(['STRNUM', string('\'', word, lno), lno], [',', ',', lno]);
ts.push(['STRNUM', string('\'', word, lno), lno], [',', '', lno]);
}
tokens.splice.apply(tokens, [i, 1].concat(slice$.call(ts), [[']', ']', lno]]));
tokens.splice.apply(tokens, [i, 1].concat(slice$.call(ts), [[']', ']>', lno]]));
i += ts.length;
break;
case 'INDENT':
Expand Down Expand Up @@ -1437,11 +1448,11 @@ function expandLiterals(tokens){
continue;
}
if (token.spaced && of$(tokens[i + 1][0], ARG)) {
tokens.splice(++i, 0, [',', ',', token[2]]);
tokens.splice(++i, 0, [',', '', token[2]]);
}
}
function fn$(){
if (0x10000 < ts.push(['STRNUM', enc(n), lno], [',', ',', lno])) {
if (0x10000 < ts.push(['STRNUM', enc(n), lno], [',', '', lno])) {
carp('range limit exceeded', lno);
}
}
Expand Down
9 changes: 7 additions & 2 deletions src/coco.co
Original file line number Diff line number Diff line change
Expand Up @@ -8,12 +8,17 @@ lexer = require \./lexer
# the generic stream of tokens our lexer produces.
{parser} = require \./parser
parser import
yy : require \./ast
lexer :
yy: require \./ast

lexer:
lex : -> [tag, @yytext, @yylineno] = @tokens[++@pos] or ['']; tag
setInput : -> @pos = -1; @tokens = it
upcomingInput : -> ''

parseError: ->
token = @lexer.tokens[@lexer.pos]
throw SyntaxError "unexpected #{lexer.pp token} on line #{token.2 + 1}"

exports import
VERSION: \0.9.2b

Expand Down
16 changes: 11 additions & 5 deletions src/lexer.co
Original file line number Diff line number Diff line change
Expand Up @@ -35,6 +35,12 @@ exports import
it.shift! if it.0?0 is \NEWLINE
it

# Pretty-prints a token for errors.
pp: ([tag, val]:token) ->
return "`#tag` (implicit)" unless val
return tag if /^\s+/test val
"`#val`"

#### Main Loop

tokenize: (code, o) ->
Expand Down Expand Up @@ -947,7 +953,7 @@ character = if JSON!? then uxxxx else ->
ts = []
enc = if char then character else String
add = !->
if 0x10000 < ts.push [\STRNUM enc n; lno] [\, \, lno]
if 0x10000 < ts.push [\STRNUM enc n; lno] [\, '' lno]
carp 'range limit exceeded' lno
if token.op is \to
then add! for n from from to to by by
Expand All @@ -956,10 +962,10 @@ character = if JSON!? then uxxxx else ->
tokens.splice i, 2 + 2 * byp, ...ts
i += ts.length - 1
case \WORDS
ts = [[\[ \[ lno = token.2]]
ts = [[\[ \<[ lno = token.2]]
for word of token.1.slice 2 -2 .match /\S+/g or ''
ts.push [\STRNUM; string \', word, lno; lno] [\, \, lno]
tokens.splice i, 1, ...ts, [\] \] lno]
ts.push [\STRNUM; string \', word, lno; lno] [\, '' lno]
tokens.splice i, 1, ...ts, [\] \]> lno]
i += ts.length
case \INDENT
if tokens[i-1]
Expand All @@ -976,7 +982,7 @@ character = if JSON!? then uxxxx else ->
case \CREMENT then continue unless able tokens, i
default continue
if token.spaced and tokens[i+1]0 of ARG
tokens.splice ++i, 0 [\, \, token.2]
tokens.splice ++i, 0 [\, '' token.2]

# Seeks `tokens` from `i`ndex and `go` for a token of the same level
# that's `ok` or an unmatched closer.
Expand Down
2 changes: 1 addition & 1 deletion test/compilation.co
Original file line number Diff line number Diff line change
Expand Up @@ -136,7 +136,7 @@ eq '''
''', Coco.compile '''try for k in o then let then ^@'''


eq 'STRNUM,0,0 ,,,,0 STRNUM,1,1' Coco.tokens('''
eq 'STRNUM,0,0 ,,,0 STRNUM,1,1' Coco.tokens('''
0 \\
1
''').slice(0 3).join ' '
Expand Down
2 changes: 1 addition & 1 deletion test/if.co
Original file line number Diff line number Diff line change
Expand Up @@ -101,7 +101,7 @@ ok if true then -> 1


# [coffee#1026](https://github.com/jashkenas/coffee-script/issues/1026)
throws "Parse error on line 2: Unexpected 'ELSE'", -> Coco.ast '''
throws "unexpected `else` on line 3", -> Coco.ast '''
if a then b
else then c
else then d
Expand Down
2 changes: 2 additions & 0 deletions test/literal.co
Original file line number Diff line number Diff line change
Expand Up @@ -394,6 +394,8 @@ eq o.1, \1
eq o.3, 3
eq o.5, 5

compileThrows 'unexpected `,` (implicit)' 1 '0 1'


### Numeric/Character Ranges
show = -> @@ * ' '
Expand Down
2 changes: 1 addition & 1 deletion test/string.co
Original file line number Diff line number Diff line change
Expand Up @@ -203,7 +203,7 @@ eq 'multiline nested "interpolations" work', """multiline #{

compileThrows 'unterminated interpolation' 2 '"#{\n'

throws "Parse error on line 1: Unexpected ')'" -> Coco.compile '"(#{+})"'
compileThrows "unexpected `)`" 1 '"(#{+})"'

compileThrows 'invalid variable interpolation "if"' 1 '"#if"'

Expand Down

0 comments on commit 33d6bbc

Please sign in to comment.