1 # Based on GardenSnake - a parser generator demonstration program
2 # GardenSnake was released into the Public Domain by Andrew Dalke.
4 # Portions of this work are derived from Python's Grammar definition
5 # and may be covered under the Python copyright and license
7 # Andrew Dalke / Dalke Scientific Software, LLC
8 # 30 August 2006 / Cape Town, South Africa
10 # Modifications for inclusion in PLY distribution
from pprint import pprint

from ply import lex
from ply import yacc
# Token list for the Ply lexer.  Reconstructed from the token names the
# grammar rules below actually reference; the original tuple was lost in
# the mangled paste.
tokens = (
    'DEF',
    'IF',
    'ELSE',
    'WHILE',
    'RETURN',
    'NAME',
    'NUMBER',      # Python decimals
    'STRING',      # single quoted strings only; syntax of raw strings
    'LPAR',
    'RPAR',
    'LBRACK',
    'RBRACK',
    'COLON',
    'EQ',
    'ASSIGN',
    'LT',
    'GT',
    'PLUS',
    'MINUS',
    'MULT',
    'DIV',
    'COMMA',
    'SEMICOLON',
    'WS',
    'NEWLINE',
    'INDENT',
    'DEDENT',
    'ENDMARKER',
)
# Number regex taken from decimal.py but without the leading sign.
def t_NUMBER(t):
    r"""(\d+(\.\d*)?|\.\d+)([eE][-+]? \d+)?"""
    # Convert the lexeme to a Python int.
    # NOTE(review): int() will raise on a float-looking match such as
    # "1.5" even though the regex accepts it; the original demo only
    # ever feeds it integers — confirm before generalizing.
    t.value = int(t.value)
    return t


def t_STRING(t):
    r"'([^\\']+|\\'|\\\\)*'"  # I think this is right ...
    # Strip the surrounding quotes and process backslash escapes.
    # NOTE(review): str.decode("string-escape") is Python 2 only.
    t.value = t.value[1:-1].decode("string-escape")  # .swapcase() # for fun
    return t


# Ply nicely documented how to do this.

def t_NAME(t):
    r'[a-zA-Z_][a-zA-Z0-9_]*'
    # Promote identifiers that are reserved words (see the RESERVED
    # mapping defined elsewhere in this file) to their own token type.
    t.type = RESERVED.get(t.value, "NAME")
    return t


# Putting this before t_WS let it consume lines with only comments in
# them so the latter code never sees the WS part.  Not consuming the
# newline.  Needed for "if 1: #comment"
def t_comment(t):
    r"[ ]*\043[^\n]*"  # \043 is '#'
    # Comments are discarded: returning None drops the token.
    pass
def t_WS(t):
    r'[ ]+'
    # Whitespace is only significant at the start of a line, outside any
    # parentheses/brackets: that is where it encodes indentation depth.
    # Elsewhere the token is dropped (None return).
    if t.lexer.at_line_start and t.lexer.paren_count == 0 and \
            t.lexer.brack_count == 0:
        return t
def t_newline(t):
    r'\n+'
    # Keep the line counter accurate across runs of blank lines.
    t.lexer.lineno += len(t.value)
    t.type = "NEWLINE"
    # Don't generate newline tokens when inside of parenthesis, eg
    #    a = (1,
    #         2, 3)
    if t.lexer.paren_count == 0 and t.lexer.brack_count == 0:
        return t
# Track bracket/paren nesting so NEWLINE and WS tokens can be suppressed
# inside them (see t_WS / t_newline above).

def t_LBRACK(t):
    r'\['
    t.lexer.brack_count += 1
    return t


def t_RBRACK(t):
    r'\]'
    # check for underflow?  should be the job of the parser
    t.lexer.brack_count -= 1
    return t


def t_LPAR(t):
    r'\('
    t.lexer.paren_count += 1
    return t


def t_RPAR(t):
    r'\)'
    # check for underflow?  should be the job of the parser
    t.lexer.paren_count -= 1
    return t


def t_error(t):
    raise SyntaxError("Unknown symbol %r" % (t.value[0],))
    # NOTE(review): unreachable after the raise above; the original
    # best-effort skip-and-continue handling is kept for reference.
    print ("Skipping", repr(t.value[0]))
    t.lexer.skip(1)
149 ## I implemented INDENT / DEDENT generation as a post-processing filter
151 # The original lex token stream contains WS and NEWLINE characters.
152 # WS will only occur before any other tokens on a line.
154 # I have three filters. One tags tokens by adding two attributes.
155 # "must_indent" is True if the token must be indented from the
156 # previous code. The other is "at_line_start" which is True for WS
# and the first non-WS/non-NEWLINE on a line.  It flags the check to
# see if the new line has changed indentation level.
160 # Python's syntax has three INDENT states
161 # 0) no colon hence no need to indent
162 # 1) "if 1: go()" - simple statements have a COLON but no need for an indent
163 # 2) "if 1:\n go()" - complex statements have a COLON NEWLINE and must indent
# Indentation states used while tracking COLON NEWLINE sequences.
# (These constants were lost in the mangled paste; re-declared here.)
NO_INDENT = 0
MAY_INDENT = 1     # colon seen
MUST_INDENT = 2    # colon followed by newline seen


# only care about whitespace at the start of a line
def track_tokens_filter(lexer, tokens):
    """First filter pass: tag every token with ``at_line_start``,
    ``must_indent`` and ``indent`` so indentation_filter() can later
    synthesize INDENT/DEDENT tokens.  Generator over *tokens*."""
    oldignore = lexer.lexignore
    lexer.at_line_start = at_line_start = True
    indent = NO_INDENT
    for token in tokens:
        #print ("token", token)
        token.at_line_start = at_line_start

        if token.type == "COLON":
            at_line_start = False
            indent = MAY_INDENT
            token.must_indent = False

        elif token.type == "NEWLINE":
            at_line_start = True
            # COLON followed by NEWLINE means an indent is required next.
            if indent == MAY_INDENT:
                indent = MUST_INDENT
            token.must_indent = False

        elif token.type == "WS":
            assert token.at_line_start == True
            at_line_start = True
            token.must_indent = False

        else:
            # A real token; only indent after COLON NEWLINE
            if indent == MUST_INDENT:
                token.must_indent = True
            else:
                token.must_indent = False
            at_line_start = False
            indent = NO_INDENT

        # really bad hack that changes ignore lexer state.
        # when "must indent" is seen (basically "real tokens" seen)
        # then ignore whitespace.
        if token.must_indent:
            lexer.lexignore = ('ignore', ' ')
        else:
            lexer.lexignore = oldignore

        token.indent = indent
        yield token
        lexer.at_line_start = at_line_start
def _new_token(type, lineno):
    """Create a synthetic Ply LexToken (value None) of the given type."""
    tok = lex.LexToken()
    tok.type = type
    tok.value = None
    tok.lineno = lineno
    return tok


# Synthesize a DEDENT tag
def DEDENT(lineno):
    return _new_token("DEDENT", lineno)


# Synthesize an INDENT tag
def INDENT(lineno):
    return _new_token("INDENT", lineno)
# Track the indentation level and emit the right INDENT / DEDENT events.
def indentation_filter(tokens):
    """Second filter pass: consume the WS/NEWLINE bookkeeping tokens
    produced by track_tokens_filter() and synthesize INDENT / DEDENT
    tokens, Python-style.  Generator over *tokens*."""
    # A stack of indentation levels; will never pop item 0
    levels = [0]
    token = None
    depth = 0
    prev_was_ws = False
    for token in tokens:
        ## Debug trace (Python 2 print syntax in the original source):
        ##print "Process", depth, token.indent, token,
        ##if token.at_line_start:
        ##    print "at_line_start",
        ##if token.must_indent:
        ##    print "must_indent",
        ##print

        # WS only occurs at the start of the line
        # There may be WS followed by NEWLINE so
        # only track the depth here.  Don't indent/dedent
        # until there's something real.
        if token.type == "WS":
            assert depth == 0
            depth = len(token.value)
            prev_was_ws = True
            # WS tokens are never passed to the parser
            continue

        if token.type == "NEWLINE":
            depth = 0
            if prev_was_ws or token.at_line_start:
                # ignore blank lines
                continue
            # pass the other cases on through
            yield token
            continue

        # then it must be a real token (not WS, not NEWLINE)
        # which can affect the indentation level
        prev_was_ws = False
        if token.must_indent:
            # The current depth must be larger than the previous level
            if not (depth > levels[-1]):
                raise IndentationError("expected an indented block")
            levels.append(depth)
            yield INDENT(token.lineno)

        elif token.at_line_start:
            # Must be on the same level or one of the previous levels
            if depth == levels[-1]:
                # At the same level
                pass
            elif depth > levels[-1]:
                raise IndentationError("indentation increase but not in new block")
            else:
                # Back up; but only if it matches a previous level
                try:
                    i = levels.index(depth)
                except ValueError:
                    raise IndentationError("inconsistent indentation")
                for _ in range(i + 1, len(levels)):
                    yield DEDENT(token.lineno)
                    levels.pop()

        yield token

    ### Finished processing ###

    # Must dedent any remaining levels
    if len(levels) > 1:
        assert token is not None
        for _ in range(1, len(levels)):
            yield DEDENT(token.lineno)
# The top-level filter adds an ENDMARKER, if requested.
# Python's grammar uses it.
def filter(lexer, add_endmarker=True):
    """Chain both filter passes over the raw Ply token stream.

    NOTE(review): shadows the builtin ``filter``; kept for API
    compatibility with the rest of this file.
    """
    token = None
    tokens = iter(lexer.token, None)
    tokens = track_tokens_filter(lexer, tokens)
    for token in indentation_filter(tokens):
        yield token

    if add_endmarker:
        # Reuse the last token's line number when one exists.
        lineno = 1
        if token is not None:
            lineno = token.lineno
        yield _new_token("ENDMARKER", lineno)
# Combine Ply and my filters into a new lexer

class IndentLexer(object):
    """Lexer wrapper that feeds Ply's token stream through the
    indentation filters, yielding INDENT/DEDENT/ENDMARKER tokens."""

    def __init__(self, debug=0, optimize=0, lextab='lextab', reflags=0):
        self.lexer = lex.lex(debug=debug, optimize=optimize,
                             lextab=lextab, reflags=reflags)
        self.token_stream = None

    def input(self, s, add_endmarker=True):
        # Reset bracket-nesting state before lexing a new string.
        self.lexer.paren_count = 0
        self.lexer.brack_count = 0
        self.lexer.input(s)
        self.token_stream = filter(self.lexer, add_endmarker)

    def token(self):
        # Return the next filtered token, or None when exhausted
        # (Ply's convention for end of input).
        try:
            return self.token_stream.next()
        except StopIteration:
            return None
340 ########## Parser (tokens -> AST) ######
345 # I use the Python AST
from compiler import ast  # Python 2 only: the old stdlib 'compiler' package
def Assign(left, right):
    """Build a compiler.ast assignment node for ``left = right``.

    Supports a single Name target or a Tuple whose children are all
    Names; anything else raises SyntaxError.
    """
    names = []
    if isinstance(left, ast.Name):
        # Single assignment on left
        return ast.Assign([ast.AssName(left.name, 'OP_ASSIGN')], right)
    elif isinstance(left, ast.Tuple):
        # List of things - make sure they are Name nodes
        for child in left.getChildren():
            if not isinstance(child, ast.Name):
                raise SyntaxError("that assignment not supported")
            names.append(child.name)
        ass_list = [ast.AssName(name, 'OP_ASSIGN') for name in names]
        return ast.Assign([ast.AssTuple(ass_list)], right)
    else:
        raise SyntaxError("Can't do that yet")
# The grammar comments come from Python's Grammar/Grammar file

## NB: compound_stmt in single_input is followed by extra NEWLINE!
# file_input: (NEWLINE | stmt)* ENDMARKER
def p_file_input_end(p):
    """file_input_end : file_input ENDMARKER"""
    p[0] = ast.Stmt(p[1])


def p_file_input(p):
    """file_input : file_input NEWLINE
                  | file_input stmt
                  | NEWLINE
                  | stmt"""
    if isinstance(p[len(p)-1], basestring):
        # Last symbol was a NEWLINE token (a plain string): it adds
        # nothing to the statement list.
        if len(p) == 3:
            p[0] = p[1]
        else:
            p[0] = [] # p == 2 --> only a blank line
    else:
        if len(p) == 3:
            p[0] = p[1] + p[2]
        else:
            p[0] = p[1]


# funcdef: [decorators] 'def' NAME parameters ':' suite
# ignoring decorators
def p_funcdef(p):
    "funcdef : DEF NAME parameters COLON suite"
    p[0] = ast.Function(None, p[2], list(p[3]), (), 0, None, p[5])
# parameters: '(' [varargslist] ')'
def p_parameters(p):
    """parameters : LPAR RPAR
                  | LPAR varargslist RPAR"""
    if len(p) == 3:
        p[0] = []
    else:
        p[0] = p[2]


# varargslist: (fpdef ['=' test] ',')* ('*' NAME [',' '**' NAME] | '**' NAME) |
# highly simplified
def p_varargslist(p):
    """varargslist : varargslist COMMA NAME
                   | NAME"""
    if len(p) == 4:
        # NOTE(review): wrap p[3] in a list — concatenating the NAME
        # string directly onto the list would splice in its characters.
        p[0] = p[1] + [p[3]]
    else:
        p[0] = [p[1]]


# stmt: simple_stmt | compound_stmt
def p_stmt_simple(p):
    """stmt : simple_stmt"""
    # simple_stmt is a list
    p[0] = p[1]


def p_stmt_compound(p):
    """stmt : compound_stmt"""
    p[0] = [p[1]]


# simple_stmt: small_stmt (';' small_stmt)* [';'] NEWLINE
def p_simple_stmt(p):
    """simple_stmt : small_stmts NEWLINE
                   | small_stmts SEMICOLON NEWLINE"""
    p[0] = p[1]


def p_small_stmts(p):
    """small_stmts : small_stmts SEMICOLON small_stmt
                   | small_stmt"""
    if len(p) == 4:
        p[0] = p[1] + [p[3]]
    else:
        p[0] = [p[1]]


# small_stmt: expr_stmt | print_stmt  | del_stmt | pass_stmt | flow_stmt |
#    import_stmt | global_stmt | exec_stmt | assert_stmt
def p_small_stmt(p):
    """small_stmt : flow_stmt
                  | expr_stmt"""
    p[0] = p[1]
# expr_stmt: testlist (augassign (yield_expr|testlist) |
#                      ('=' (yield_expr|testlist))*)
# augassign: ('+=' | '-=' | '*=' | '/=' | '%=' | '&=' | '|=' | '^=' |
#             '<<=' | '>>=' | '**=' | '//=')
def p_expr_stmt(p):
    """expr_stmt : testlist ASSIGN testlist
                 | testlist """
    if len(p) == 2:
        # a list of expressions
        #p[0] = ast.Discard(p[1])
        p[0] = p[1]
    else:
        p[0] = Assign(p[1], p[3])


def p_flow_stmt(p):
    "flow_stmt : return_stmt"
    p[0] = p[1]


# return_stmt: 'return' [testlist]
def p_return_stmt(p):
    "return_stmt : RETURN testlist"
    p[0] = ast.Return(p[2])
def p_compound_stmt(p):
    """compound_stmt : if_stmt
                     | while_stmt
                     | funcdef"""
    p[0] = p[1]


def p_while_stmt(p):
    """while_stmt : WHILE test COLON suite ELSE COLON suite
                  | WHILE test COLON suite"""
    # len(p) == 5 is the else-less form.
    if len(p) == 5:
        p[0] = ast.While(p[2], p[4], None)
    else:
        p[0] = ast.While(p[2], p[4], p[7])


def p_if_stmt(p):
    """if_stmt : IF test COLON suite ELSE COLON suite
               | IF test COLON suite"""
    # len(p) == 5 is the else-less form.
    if len(p) == 5:
        p[0] = ast.If([(p[2], p[4])], None)
    else:
        p[0] = ast.If([(p[2], p[4])], p[7])


def p_suite(p):
    """suite : simple_stmt
             | NEWLINE INDENT stmts DEDENT"""
    if len(p) == 2:
        p[0] = ast.Stmt(p[1])
    else:
        p[0] = ast.Stmt(p[3])
def p_stmts(p):
    """stmts : stmts stmt
             | stmt"""
    # Both alternatives carry lists of statements; concatenate them.
    if len(p) == 3:
        p[0] = p[1] + p[2]
    else:
        p[0] = p[1]
## No using Python's approach because Ply supports precedence

# comparison: expr (comp_op expr)*
# arith_expr: term (('+'|'-') term)*
# term: factor (('*'|'/'|'%'|'//') factor)*
# factor: ('+'|'-'|'~') factor | power
# comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not'

# Helpers that adapt a (left, right) pair to a compiler.ast Compare node.
def make_lt_compare(arg):
    (left, right) = arg
    return ast.Compare(left, [('<', right), ])


def make_gt_compare(arg):
    (left, right) = arg
    return ast.Compare(left, [('>', right), ])


def make_eq_compare(arg):
    (left, right) = arg
    return ast.Compare(left, [('==', right), ])


# Dispatch tables used by p_comparison below; each value takes a
# (left, right) tuple (binary) or a single operand (unary).
binary_ops = {
    "+": ast.Add,
    "-": ast.Sub,
    "*": ast.Mul,
    "/": ast.Div,
    "<": make_lt_compare,
    ">": make_gt_compare,
    "==": make_eq_compare,
}

unary_ops = {
    "+": ast.UnaryAdd,
    "-": ast.UnarySub,
}

precedence = (
    ("left", "EQ", "GT", "LT"),
    ("left", "PLUS", "MINUS"),
    ("left", "MULT", "DIV"),
)
def p_comparison(p):
    """comparison : comparison PLUS comparison
                  | comparison MINUS comparison
                  | comparison MULT comparison
                  | comparison DIV comparison
                  | comparison LT comparison
                  | comparison EQ comparison
                  | comparison GT comparison
                  | PLUS comparison
                  | MINUS comparison
                  | power"""
    # Dispatch on arity: binary operator, unary operator, or plain power.
    if len(p) == 4:
        p[0] = binary_ops[p[2]]((p[1], p[3]))
    elif len(p) == 3:
        p[0] = unary_ops[p[1]](p[2])
    else:
        p[0] = p[1]
# power: atom trailer* ['**' factor]
# trailers enables function calls (and subscripts).
# I only allow one level of calls
# so this is 'trailer'
def p_power(p):
    """power : atom
             | atom trailer"""
    if len(p) == 2:
        p[0] = p[1]
    else:
        if p[2][0] == "CALL":
            # Special-case print into a Printnl node.
            if p[1].name == 'print':
                p[0] = ast.Printnl(ast.Tuple(p[2][1]), None, None)
            else:
                p[0] = ast.CallFunc(p[1], p[2][1], None, None)
            #raise AssertionError("not implemented %s" % p[2][0])
        else:
            # Subscript / slice trailer: one index -> Subscript,
            # two indices -> Slice.
            subs = p[2][1]
            if len(subs) == 1:
                p[0] = ast.Subscript(p[1], 'OP_APPLY', subs[0])
            else:
                p[0] = ast.Slice(p[1], 'OP_APPLY', subs[0], subs[1])
def p_atom_name(p):
    """atom : NAME"""
    p[0] = ast.Name(p[1])


def p_atom_number(p):
    """atom : NUMBER
            | STRING"""
    p[0] = ast.Const(p[1])


#'[' [listmaker] ']' |

def p_atom_listmaker(p):
    """atom : LBRACK listmaker RBRACK"""
    p[0] = p[2]


def p_listmaker(p):
    """listmaker : test COMMA listmaker
                 | test"""
    if len(p) == 2:
        p[0] = ast.List([p[1]])
    else:
        # Prepend this element to the already-built List node.
        p[0] = ast.List([p[1]] + p[3].nodes)


def p_atom_tuple(p):
    """atom : LPAR testlist RPAR"""
    p[0] = p[2]
# trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME
def p_trailer(p):
    """trailer : trailer_arglist
               | trailer_subscript"""
    p[0] = p[1]


def p_trailer_arglist(p):
    "trailer_arglist : LPAR arglist RPAR"
    # Tagged tuple consumed by p_power.
    p[0] = ("CALL", p[2])


def p_trailer_subscript(p):
    "trailer_subscript : LBRACK subscript RBRACK"
    # Tagged tuple consumed by p_power.
    p[0] = ("SUBS", p[2])


#subscript: '.' '.' '.' | test | [test] ':' [test]
def p_subscript(p):
    """subscript : test COLON test
                 | test"""
    if len(p) == 2:
        p[0] = [p[1]]
    else:
        p[0] = [p[1], p[3]]
# testlist: test (',' test)* [',']
# Contains shift/reduce error
def p_testlist(p):
    """testlist : testlist_multi COMMA
                | testlist_multi """
    if len(p) == 2:
        p[0] = p[1]
    else:
        # May need to promote singleton to tuple
        if isinstance(p[1], list):
            p[0] = p[1]
        else:
            p[0] = [p[1]]
    # Convert into a tuple?
    if isinstance(p[0], list):
        p[0] = ast.Tuple(p[0])


def p_testlist_multi(p):
    """testlist_multi : testlist_multi COMMA test
                      | test"""
    if len(p) == 2:
        # singleton
        p[0] = p[1]
    else:
        if isinstance(p[1], list):
            p[0] = p[1] + [p[3]]
        else:
            # singleton -> list
            p[0] = [p[1], p[3]]


# test: or_test ['if' or_test 'else' test] | lambdef
#  as I don't support 'and', 'or', and 'not' this works down to 'comparison'
def p_test(p):
    "test : comparison"
    p[0] = p[1]


# arglist: (argument ',')* (argument [',']| '*' test [',' '**' test] | '**' test)
# XXX INCOMPLETE: this doesn't allow the trailing comma
def p_arglist(p):
    """arglist : arglist COMMA argument
                | argument"""
    if len(p) == 4:
        p[0] = p[1] + [p[3]]
    else:
        p[0] = [p[1]]


# argument: test [gen_for] | test '=' test  # Really [keyword '='] test
def p_argument(p):
    "argument : test"
    p[0] = p[1]
def p_error(p):
    # Ply error hook: bail out immediately on any syntax error.
    #print "Error!", repr(p)
    raise SyntaxError(p)
class GardenSnakeParser(object):
    """Tie the yacc grammar above to the indentation-aware lexer."""

    def __init__(self, lexer=None):
        # Default to the IndentLexer defined earlier in this file.
        if lexer is None:
            lexer = IndentLexer(debug=1)
        self.lexer = lexer
        self.parser = yacc.yacc(start="file_input_end",
                                debug=False, write_tables=False)

    def parse(self, code):
        """Parse *code* and return a compiler.ast Module node."""
        self.lexer.input(code)
        result = self.parser.parse(lexer=self.lexer, debug=False)
        return ast.Module(None, result)
724 ###### Code generation ######
from compiler import misc, syntax, pycodegen  # Python 2 only
class GardenSnakeCompiler(object):
    """Parse GardenSnake source and generate Python bytecode via the
    Python 2 'compiler' package."""

    def __init__(self):
        self.parser = GardenSnakeParser()

    def compile(self, code, mode="exec", filename="<string>"):
        tree = self.parser.parse(code)
        #print ast.dump(tree)
        misc.set_filename(filename, tree)
        syntax.check(tree)
        gen = pycodegen.ModuleCodeGenerator(tree)
        code = gen.getCode()
        return code
741 ####### Test code #######
745 index <- (RS)[8*i:8*i+7]
750 RA <- [0]*56|| perm[0:7]
754 #index <- (RS)[8*i:8*i+7]
755 RA <- [0]*56 # || perm[0:7]
761 lexer
= IndentLexer(debug
=1)
762 # Give the lexer some input
771 break # No more input
776 # Set up the GardenSnake run-time environment
779 print ("-->", " ".join(map(str,args
)))
788 compile = GardenSnakeCompiler().compile
790 compiled_code
= compile(code
, mode
="single", filename
="string")
792 from compiler
import parse
793 tree
= parse(code
, "exec")
796 print (compiled_code
)
798 exec (compiled_code
, d
)