1 # Based on GardenSnake - a parser generator demonstration program
2 # GardenSnake was released into the Public Domain by Andrew Dalke.
4 # Portions of this work are derived from Python's Grammar definition
5 # and may be covered under the Python copyright and license
7 # Andrew Dalke / Dalke Scientific Software, LLC
8 # 30 August 2006 / Cape Town, South Africa
10 # Modifications for inclusion in PLY distribution
12 from pprint
import pprint
14 from ply
import lex
, yacc
17 from soc
.decoder
.power_fieldsn
import create_sigdecode
20 # I use the Python AST
21 #from compiler import ast
def Assign(left, right):
    """Build an assignment AST node for a single-name or tuple target.

    left  -- an ast.Name (single assignment) or ast.Tuple of Names
    right -- the value expression being assigned
    Raises SyntaxError for any unsupported target shape.

    NOTE(review): getChildren / AssName / AssTuple come from the old
    ``compiler.ast`` API, not the stdlib ``ast`` module — the tuple branch
    presumably predates the py3 port.  TODO confirm against the intended
    AST library.
    """
    if isinstance(left, ast.Name):
        # Single assignment on left
        return ast.Assign([ast.Name(left.id, ast.Store())], right)
    elif isinstance(left, ast.Tuple):
        # List of things - make sure they are Name nodes
        names = []  # fix: accumulator was used without being initialised
        for child in left.getChildren():
            if not isinstance(child, ast.Name):
                raise SyntaxError("that assignment not supported")
            names.append(child.name)
        ass_list = [ast.AssName(name, 'OP_ASSIGN') for name in names]
        return ast.Assign([ast.AssTuple(ass_list)], right)
    raise SyntaxError("Can't do that yet")
43 ## I implemented INDENT / DEDENT generation as a post-processing filter
45 # The original lex token stream contains WS and NEWLINE characters.
46 # WS will only occur before any other tokens on a line.
48 # I have three filters. One tags tokens by adding two attributes.
49 # "must_indent" is True if the token must be indented from the
50 # previous code. The other is "at_line_start" which is True for WS
# and the first non-WS/non-NEWLINE token on a line.  It flags the check
# to see whether the new line has changed indentation level.
54 # Python's syntax has three INDENT states
55 # 0) no colon hence no need to indent
56 # 1) "if 1: go()" - simple statements have a COLON but no need for an indent
57 # 2) "if 1:\n go()" - complex statements have a COLON NEWLINE and must indent
# turn into python-like colon syntax from pseudo-code syntax
def python_colonify(lexer, tokens):
    # NOTE(review): the generator's token loop header, the token-copying
    # branch bodies and the yields were lost in extraction; only the
    # token-type dispatch skeleton survives.  TODO restore from VCS.
    #print ("track colon token", token, token.type)
    if token.type == 'THEN':
        # turn then into colon
    elif token.type == 'ELSE':
    elif token.type in ['WHILE', 'FOR']:
    elif token.type == 'NEWLINE':
# only care about whitespace at the start of a line
def track_tokens_filter(lexer, tokens):
    # Tags each token with "at_line_start" / "must_indent" so the
    # indentation filter can later synthesize INDENT/DEDENT tokens.
    # NOTE(review): the token loop header, the indent-state transitions
    # (MAY_INDENT/MUST_INDENT updates) and the yields were lost in
    # extraction; the surviving statements are kept verbatim.
    oldignore = lexer.lexignore
    lexer.at_line_start = at_line_start = True
    #print ("track token", token, token.type)
    token.at_line_start = at_line_start
    if token.type == "COLON":
        at_line_start = False
        token.must_indent = False
    elif token.type == "NEWLINE":
        if indent == MAY_INDENT:
        token.must_indent = False
    elif token.type == "WS":
        assert token.at_line_start == True
        token.must_indent = False
    # A real token; only indent after COLON NEWLINE
    if indent == MUST_INDENT:
        token.must_indent = True
    token.must_indent = False
    at_line_start = False
    # really bad hack that changes ignore lexer state.
    # when "must indent" is seen (basically "real tokens" seen)
    # then ignore whitespace.
    if token.must_indent:
        lexer.lexignore = ('ignore', ' ')
    lexer.lexignore = oldignore
    token.indent = indent
    lexer.at_line_start = at_line_start
def _new_token(type, lineno):
    # NOTE(review): body lost in extraction — presumably constructs a
    # lex.LexToken with the given type and line number.  TODO restore.

# Synthesize a DEDENT tag
# NOTE(review): the enclosing "def DEDENT(lineno):" line was lost.
    return _new_token("DEDENT", lineno)

# Synthesize an INDENT tag
# NOTE(review): the enclosing "def INDENT(lineno):" line was lost.
    return _new_token("INDENT", lineno)


# Track the indentation level and emit the right INDENT / DEDENT events.
def indentation_filter(tokens):
    # A stack of indentation levels; will never pop item 0
    # NOTE(review): the "levels" stack setup, the token loop header,
    # "prev_was_ws" bookkeeping and several branch bodies were lost in
    # extraction; surviving statements are kept verbatim.
    print ("Process", depth, token.indent, token,)
    if token.at_line_start:
        print ("at_line_start",)
    if token.must_indent:
        print ("must_indent",)
    # WS only occurs at the start of the line
    # There may be WS followed by NEWLINE so
    # only track the depth here. Don't indent/dedent
    # until there's something real.
    if token.type == "WS":
        depth = len(token.value)
    # WS tokens are never passed to the parser
    if token.type == "NEWLINE":
        if prev_was_ws or token.at_line_start:
    # pass the other cases on through
    # then it must be a real token (not WS, not NEWLINE)
    # which can affect the indentation level
    if token.must_indent:
        # The current depth must be larger than the previous level
        if not (depth > levels[-1]):
            raise IndentationError("expected an indented block")
        yield INDENT(token.lineno)
    elif token.at_line_start:
        # Must be on the same level or one of the previous levels
        if depth == levels[-1]:
        elif depth > levels[-1]:
            raise IndentationError("indent increase but not in new block")
        # Back up; but only if it matches a previous level
        i = levels.index(depth)
        # NOTE(review): presumably inside a try/except ValueError —
        # the surrounding structure was lost in extraction.
        raise IndentationError("inconsistent indentation")
        for _ in range(i+1, len(levels)):
            yield DEDENT(token.lineno)

    ### Finished processing ###
    # Must dedent any remaining levels
    assert token is not None
    for _ in range(1, len(levels)):
        yield DEDENT(token.lineno)
# The top-level filter adds an ENDMARKER, if requested.
# Python's grammar uses it.
def filter(lexer, add_endmarker = True):
    # Chains the three token filters over the raw PLY token stream.
    # NOTE(review): the yield inside the for-loop, the "if add_endmarker:"
    # guard and the default lineno binding were lost in extraction.
    tokens = iter(lexer.token, None)
    tokens = python_colonify(lexer, tokens)
    tokens = track_tokens_filter(lexer, tokens)
    for token in indentation_filter(tokens):
    if token is not None:
        lineno = token.lineno
    yield _new_token("ENDMARKER", lineno)
248 ## No using Python's approach because Ply supports precedence
250 # comparison: expr (comp_op expr)*
251 # arith_expr: term (('+'|'-') term)*
252 # term: factor (('*'|'/'|'%'|'//') factor)*
253 # factor: ('+'|'-'|'~') factor | power
254 # comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not'
def make_lt_compare(arg):
    """Build an ast '<' comparison from a (left, right) operand pair."""
    left, right = arg  # fix: unpack was missing — left/right were unbound
    return ast.Compare(left, [ast.Lt()], [right])
def make_gt_compare(arg):
    """Build an ast '>' comparison from a (left, right) operand pair."""
    left, right = arg  # fix: unpack was missing — left/right were unbound
    return ast.Compare(left, [ast.Gt()], [right])
def make_eq_compare(arg):
    """Build an ast '==' comparison from a (left, right) operand pair."""
    left, right = arg  # fix: unpack was missing — left/right were unbound
    return ast.Compare(left, [ast.Eq()], [right])
272 "<": make_lt_compare
,
273 ">": make_gt_compare
,
274 "=": make_eq_compare
,
281 ("left", "EQ", "GT", "LT"),
282 ("left", "PLUS", "MINUS"),
283 ("left", "MULT", "DIV"),
286 def check_concat(node
): # checks if the comparison is already a concat
288 if not isinstance(node
, ast
.Call
):
290 if node
[0].id != 'concat':
309 'NUMBER', # Python decimals
310 'BINARY', # Python binary
311 'STRING', # single quoted strings only; syntax of raw strings
def build(self, **kwargs):
    """Construct the underlying PLY lexer from this class's t_* rules."""
    self.lexer = lex.lex(module=self, **kwargs)
def t_BINARY(self, t):
    # NOTE(review): the regex docstring (the rule's token pattern) and the
    # trailing "return t" were lost in extraction — PLY needs both for
    # this rule to produce tokens.
    t.value = int(t.value, 2)  # parse the matched text as a base-2 integer
# taken from decimal.py but without the leading sign
def t_NUMBER(self, t):
    r"""(\d+(\.\d*)?|\.\d+)([eE][-+]? \d+)?"""
    # Convert the matched text to an int.  PLY discards the token unless
    # the rule returns it, so the (missing) "return t" is restored.
    t.value = int(t.value)
    return t
def t_STRING(self, t):
    r"'([^\\']+|\\'|\\\\)*'" # I think this is right ...
    # Strip the quotes and undo backslash escapes.  The original used
    # t.value[1:-1].decode("string-escape"), which is Python 2 only (py3
    # str has no .decode); the latin-1/unicode_escape round-trip is the
    # py3 equivalent.  # .swapcase() # for fun
    t.value = t.value[1:-1].encode("latin-1").decode("unicode_escape")
    return t  # PLY drops the token unless the rule returns it
# Ply nicely documented how to do this.
# NOTE(review): the enclosing "def t_NAME(self, t):" line and the
# trailing "return t" were lost in extraction.
    r'[a-zA-Z_][a-zA-Z0-9_]*'
    # reserved keywords take priority over plain NAME tokens
    t.type = self.RESERVED.get(t.value, "NAME")
# Putting this before t_WS let it consume lines with only comments in
# them so the latter code never sees the WS part. Not consuming the
# newline. Needed for "if 1: #comment"
def t_comment(self, t):
    r"[ ]*\043[^\n]*" # \043 is '#'
    # no return value: matched comment text is deliberately discarded
# NOTE(review): fragment of the whitespace rule (presumably t_WS) — the
# def line, regex docstring and the token-returning body were lost in
# extraction.
    if t.lexer.at_line_start and t.lexer.paren_count == 0 and \
       t.lexer.brack_count == 0:

# Don't generate newline tokens when inside of parenthesis, eg
def t_newline(self, t):
    # NOTE(review): the regex docstring, the NEWLINE type assignment and
    # "return t" were lost in extraction.
    t.lexer.lineno += len(t.value)
    if t.lexer.paren_count == 0 and t.lexer.brack_count == 0:

def t_LBRACK(self, t):
    # NOTE(review): regex docstring and "return t" lost in extraction.
    t.lexer.brack_count += 1

def t_RBRACK(self, t):
    # check for underflow? should be the job of the parser
    # NOTE(review): regex docstring and "return t" lost in extraction.
    t.lexer.brack_count -= 1

# NOTE(review): fragments of the LPAR/RPAR rules — the def lines, regex
# docstrings and returns were lost in extraction.
    t.lexer.paren_count += 1
    # check for underflow? should be the job of the parser
    t.lexer.paren_count -= 1
def t_error(self, t):
    # Abort lexing on any character no rule matched.  (PLY's t_error is
    # special-cased and needs no regex docstring.)
    raise SyntaxError("Unknown symbol %r" % (t.value[0],))
    # fix: the original skip-and-continue statements after the raise were
    # unreachable dead code and have been removed.
# Combine Ply and my filters into a new lexer
class IndentLexer(PowerLexer):
    """PowerLexer wrapped with the colon/indent post-processing filters."""

    def __init__(self, debug=0, optimize=0, lextab='lextab', reflags=0):
        self.build(debug=debug, optimize=optimize,
                   lextab=lextab, reflags=reflags)
        self.token_stream = None

    def input(self, s, add_endmarker=True):
        """Feed source text to the lexer and (re)build the filter chain."""
        self.lexer.paren_count = 0
        self.lexer.brack_count = 0
        # fix: without this the lexer never receives the source text and
        # the filter chain yields nothing (line lost in extraction)
        self.lexer.input(s)
        self.token_stream = filter(self.lexer, add_endmarker)

    def token(self):
        """Return the next filtered token, or None at end of input.

        NOTE(review): the def/try structure around the surviving
        "return next(...)" / "except StopIteration" fragments was
        reconstructed — confirm against version control.
        """
        try:
            return next(self.token_stream)
        except StopIteration:
            return None
464 ########## Parser (tokens -> AST) ######
# NOTE(review): fragment of the parser class __init__ — the enclosing
# def line was lost in extraction.  Pre-declares the GPR operand slots.
    for rname in ['RA', 'RB', 'RC', 'RT']:
        self.gprs[rname] = None

# The grammar comments come from Python's Grammar/Grammar file

## NB: compound_stmt in single_input is followed by extra NEWLINE!
# file_input: (NEWLINE | stmt)* ENDMARKER
def p_file_input_end(self, p):
    """file_input_end : file_input ENDMARKER"""
    # NOTE(review): the action (presumably "p[0] = p[1]") was lost.

def p_file_input(self, p):
    # NOTE(review): docstring continuation (the "| file_input stmt" etc.
    # productions, with closing quotes) and most of the action body were
    # lost in extraction; fragments kept verbatim.
    """file_input : file_input NEWLINE
    if isinstance(p[len(p)-1], str):
        p[0] = [] # p == 2 --> only a blank line
# funcdef: [decorators] 'def' NAME parameters ':' suite
# ignoring decorators
def p_funcdef(self, p):
    "funcdef : DEF NAME parameters COLON suite"
    # NOTE(review): ast.Function is the old ``compiler.ast`` node — the
    # stdlib ``ast`` module has no such attribute; presumably FunctionDef
    # is intended in a py3 port.  TODO confirm.
    p[0] = ast.Function(None, p[2], list(p[3]), (), 0, None, p[5])
# parameters: '(' [varargslist] ')'
def p_parameters(self, p):
    """parameters : LPAR RPAR
       | LPAR varargslist RPAR"""
    # NOTE(review): the action body was lost in extraction.

# varargslist: (fpdef ['=' test] ',')* ('*' NAME [',' '**' NAME] |
def p_varargslist(self, p):
    # NOTE(review): docstring continuation (closing quotes) and the action
    # were lost in extraction.
    """varargslist : varargslist COMMA NAME

# stmt: simple_stmt | compound_stmt
def p_stmt_simple(self, p):
    """stmt : simple_stmt"""
    # simple_stmt is a list
    # NOTE(review): action (presumably "p[0] = p[1]") lost in extraction.

def p_stmt_compound(self, p):
    """stmt : compound_stmt"""
    # NOTE(review): action lost in extraction.

# simple_stmt: small_stmt (';' small_stmt)* [';'] NEWLINE
def p_simple_stmt(self, p):
    """simple_stmt : small_stmts NEWLINE
       | small_stmts SEMICOLON NEWLINE"""
    # NOTE(review): action lost in extraction.

def p_small_stmts(self, p):
    # NOTE(review): docstring continuation and action lost in extraction.
    """small_stmts : small_stmts SEMICOLON small_stmt

# small_stmt: expr_stmt | print_stmt | del_stmt | pass_stmt | flow_stmt |
# import_stmt | global_stmt | exec_stmt | assert_stmt
def p_small_stmt(self, p):
    # NOTE(review): docstring continuation and action lost in extraction.
    """small_stmt : flow_stmt

# expr_stmt: testlist (augassign (yield_expr|testlist) |
# ('=' (yield_expr|testlist))*)
# augassign: ('+=' | '-=' | '*=' | '/=' | '%=' | '&=' | '|=' | '^=' |
# '<<=' | '>>=' | '**=' | '//=')
def p_expr_stmt(self, p):
    # NOTE(review): docstring continuation and the len(p) dispatch were
    # lost in extraction; the assignment action survives.
    """expr_stmt : testlist ASSIGN testlist
    # a list of expressions
    #p[0] = ast.Discard(p[1])
    p[0] = Assign(p[1], p[3])

def p_flow_stmt(self, p):
    "flow_stmt : return_stmt"
    # NOTE(review): action (presumably "p[0] = p[1]") lost in extraction.
# return_stmt: 'return' [testlist]
def p_return_stmt(self, p):
    "return_stmt : RETURN testlist"
    # wrap the parsed expression list in an AST Return node
    value = p[2]
    p[0] = ast.Return(value)
def p_compound_stmt(self, p):
    # NOTE(review): docstring continuation and action lost in extraction.
    """compound_stmt : if_stmt

def p_break_stmt(self, p):
    # NOTE(review): docstring continuation and action lost in extraction.
    """break_stmt : BREAK

def p_for_stmt(self, p):
    # NOTE(review): docstring continuation, the len(p) dispatch and the
    # "start" binding were lost in extraction; fragments kept verbatim.
    """for_stmt : FOR test EQ test TO test COLON suite
    p[0] = ast.While(p[2], p[4], [])
    # auto-add-one (sigh) due to python range
    end = ast.BinOp(p[6], ast.Add(), ast.Constant(1))
    it = ast.Call(ast.Name("range"), [start, end], [])
    p[0] = ast.For(p[2], it, p[8], [])

def p_while_stmt(self, p):
    # NOTE(review): docstring terminator and the len(p)-based dispatch
    # between the two productions were lost in extraction.
    """while_stmt : DO WHILE test COLON suite ELSE COLON suite
       | DO WHILE test COLON suite
    p[0] = ast.While(p[3], p[5], [])
    p[0] = ast.While(p[3], p[5], p[8])

def p_if_stmt(self, p):
    # NOTE(review): docstring terminator and the len(p)-based dispatch
    # were lost in extraction.
    """if_stmt : IF test COLON suite ELSE COLON suite
       | IF test COLON suite
    p[0] = ast.If(p[2], p[4], [])
    p[0] = ast.If(p[2], p[4], p[7])

def p_suite(self, p):
    """suite : simple_stmt
       | NEWLINE INDENT stmts DEDENT"""
    # NOTE(review): the len(p) dispatch action was lost in extraction.

def p_stmts(self, p):
    # NOTE(review): docstring continuation and action lost in extraction.
    """stmts : stmts stmt
def p_comparison(self, p):
    # NOTE(review): the remaining productions, docstring terminator and
    # the operator-dispatch if/elif headers were lost in extraction;
    # surviving action fragments kept verbatim.
    """comparison : comparison PLUS comparison
       | comparison MINUS comparison
       | comparison MULT comparison
       | comparison DIV comparison
       | comparison LT comparison
       | comparison EQ comparison
       | comparison GT comparison
       | comparison APPEND comparison
    l = check_concat(p[1]) + check_concat(p[3])
    p[0] = ast.Call(ast.Name("concat"), l, [])
    elif p[2] in ['<', '>', '=']:
        p[0] = binary_ops[p[2]]((p[1],p[3]))
    p[0] = ast.BinOp(p[1], binary_ops[p[2]], p[3])
    p[0] = unary_ops[p[1]](p[2])

# power: atom trailer* ['**' factor]
# trailers enables function calls (and subscripts).
# I only allow one level of calls
# so this is 'trailer'
def p_power(self, p):
    # NOTE(review): the grammar docstring, the len(p) dispatch and the
    # "subs" binding were lost in extraction; fragments kept verbatim.
    if p[2][0] == "CALL":
        #p[0] = ast.Expr(ast.Call(p[1], p[2][1], []))
        p[0] = ast.Call(p[1], p[2][1], [])
        #if p[1].id == 'print':
        #    p[0] = ast.Printnl(ast.Tuple(p[2][1]), None, None)
        #    p[0] = ast.CallFunc(p[1], p[2][1], None, None)
    #raise AssertionError("not implemented %s" % p[2][0])
    idx = ast.Slice(subs[0], subs[1], None)
    p[0] = ast.Subscript(p[1], idx)
def p_atom_name(self, p):
    # NOTE(review): the grammar docstring (presumably """atom : NAME""")
    # was lost in extraction — PLY requires it for the rule to register.
    p[0] = ast.Name(p[1], ctx=ast.Load())

def p_atom_number(self, p):
    # NOTE(review): the grammar docstring (NUMBER/BINARY/STRING
    # productions) was lost in extraction.
    p[0] = ast.Constant(p[1])

#'[' [listmaker] ']' |
def p_atom_listmaker(self, p):
    """atom : LBRACK listmaker RBRACK"""
    # NOTE(review): the action was lost in extraction.

def p_listmaker(self, p):
    # NOTE(review): docstring continuation and the len(p) dispatch were
    # lost; the two alternative actions survive verbatim.
    """listmaker : test COMMA listmaker
    p[0] = ast.List([p[1]])
    p[0] = ast.List([p[1]] + p[3].nodes)

def p_atom_tuple(self, p):
    """atom : LPAR testlist RPAR"""
    # NOTE(review): the action was lost in extraction.

# trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME
def p_trailer(self, p):
    # NOTE(review): docstring continuation and action lost in extraction.
    """trailer : trailer_arglist
def p_trailer_arglist(self, p):
    "trailer_arglist : LPAR arglist RPAR"
    # tag the parsed argument list as a function-call trailer
    args = p[2]
    p[0] = ("CALL", args)
def p_trailer_subscript(self, p):
    "trailer_subscript : LBRACK subscript RBRACK"
    # tag the parsed subscript as an indexing trailer
    sub = p[2]
    p[0] = ("SUBS", sub)
#subscript: '.' '.' '.' | test | [test] ':' [test]
def p_subscript(self, p):
    # NOTE(review): docstring continuation and action lost in extraction.
    """subscript : test COLON test

# testlist: test (',' test)* [',']
# Contains shift/reduce error
def p_testlist(self, p):
    # NOTE(review): docstring continuation and parts of the action were
    # lost in extraction; fragments kept verbatim.
    """testlist : testlist_multi COMMA
    # May need to promote singleton to tuple
    if isinstance(p[1], list):
    # Convert into a tuple?
    if isinstance(p[0], list):
        p[0] = ast.Tuple(p[0])

def p_testlist_multi(self, p):
    # NOTE(review): docstring continuation and most of the action lost.
    """testlist_multi : testlist_multi COMMA test
    if isinstance(p[1], list):

# test: or_test ['if' or_test 'else' test] | lambdef
# as I don't support 'and', 'or', and 'not' this works down to 'comparison'

# arglist: (argument ',')* (argument [',']| '*' test [',' '**' test]
# XXX INCOMPLETE: this doesn't allow the trailing comma
def p_arglist(self, p):
    # NOTE(review): docstring continuation and action lost in extraction.
    """arglist : arglist COMMA argument

# argument: test [gen_for] | test '=' test # Really [keyword '='] test
def p_argument(self, p):
    # NOTE(review): grammar docstring and action lost in extraction.

def p_error(self, p):
    #print "Error!", repr(p)
    # NOTE(review): the raise/recovery body was lost in extraction.
class GardenSnakeParser(PowerParser):
    """PLY yacc parser driven by the IndentLexer token stream."""

    def __init__(self, lexer = None):
        # NOTE(review): an "if lexer is None:" guard and a
        # "self.lexer = lexer" assignment appear to have been lost in
        # extraction — parse() below reads self.lexer, which this visible
        # code never sets.  TODO restore from version control.
        lexer = IndentLexer(debug=1)
        self.tokens = lexer.tokens
        self.parser = yacc.yacc(module=self, start="file_input_end",
                                debug=False, write_tables=False)
        self.sd = create_sigdecode()

    def parse(self, code):
        # lex + parse the pseudo-code, wrap the statement list in a Module
        self.lexer.input(code)
        result = self.parser.parse(lexer = self.lexer, debug=False)
        return ast.Module(result)
###### Code generation ######

#from compiler import misc, syntax, pycodegen

class GardenSnakeCompiler(object):
    # NOTE(review): the "def __init__(self):" line was lost in extraction.
        self.parser = GardenSnakeParser()

    def compile(self, code, mode="exec", filename="<string>"):
        # Parse pseudo-code, then compile the resulting AST to a Python
        # code object.
        tree = self.parser.parse(code)
        #misc.set_filename(filename, tree)
        return compile(tree, mode="exec", filename="<string>")
        # NOTE(review): unreachable — legacy compiler/pycodegen path left
        # over from the original GardenSnake demo.
        gen = pycodegen.ModuleCodeGenerator(tree)
####### Test code #######

# NOTE(review): this self-test script was heavily truncated by extraction:
# the triple-quoted pseudo-code samples lost their quotes and variable
# bindings, and the lexing loop / runtime-environment setup lost many
# lines.  Surviving fragments are kept verbatim below.

# duplicate of the top-of-file import
from soc.decoder.power_fieldsn import create_sigdecode

# fragments of the (formerly triple-quoted) pseudo-code test inputs:
    index <- (RS)[8*i:8*i+7]
    RA <- [0]*56 || perm[0:7]
    if index < 64 then index <- 0
    index <- (RS)[8*i:8*i+7]
    RA <- [0]*56|| perm[0:7]
    if (RS)[63-n] = 0b1 then

lexer = IndentLexer(debug=1)
# Give the lexer some input
    break # No more input

sd = create_sigdecode()
print ("forms", sd.df.forms)
for f in sd.df.FormX:

gsc = GardenSnakeCompiler()
_compile = gsc.compile
tree = _compile(code, mode="single", filename="string")
tree = ast.fix_missing_locations(tree)
print ( ast.dump(tree) )
print (astor.dump_tree(tree))
source = astor.to_source(tree)

# Set up the GardenSnake run-time environment
print ("-->", " ".join(map(str,args)))

def listconcat(l1, l2):
d["concat"] = listconcat

getform = getattr(gsc.parser.sd.df, "Form%s" % form)
for k, f in sd.df.instrs[form].items():
    d[k] = getattr(getform, k)
compiled_code = compile(source, mode="exec", filename="<string>")
exec (compiled_code, d)