[soc.git] / src / soc / decoder / power_pseudo.py
# Based on GardenSnake - a parser generator demonstration program
# GardenSnake was released into the Public Domain by Andrew Dalke.

# Portions of this work are derived from Python's Grammar definition
# and may be covered under the Python copyright and license
#
# Andrew Dalke / Dalke Scientific Software, LLC
# 30 August 2006 / Cape Town, South Africa

# Modifications for inclusion in PLY distribution
import sys
from pprint import pprint
from copy import copy
from ply import lex, yacc

##### Lexer ######
#import lex
import decimal

tokens = (
    'DEF',
    'IF',
    'ELSE',
    'FOR',
    'TO',
    'THEN',
    'WHILE',
    'NAME',
    'NUMBER',  # Python decimals
    'STRING',  # single quoted strings only; syntax of raw strings
    'LPAR',
    'RPAR',
    'LBRACK',
    'RBRACK',
    'COLON',
    'EQ',
    'ASSIGN',
    'LT',
    'GT',
    'PLUS',
    'MINUS',
    'MULT',
    'DIV',
    'RETURN',
    'WS',
    'NEWLINE',
    'COMMA',
    'SEMICOLON',
    'INDENT',
    'DEDENT',
    'ENDMARKER',
)
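
# TO and THEN are not python keywords: they come from the Power ISA
# pseudocode syntax this parser targets (see the bpermd example at the
# bottom of this file).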

#t_NUMBER = r'\d+'
# taken from decimal.py but without the leading sign
def t_NUMBER(t):
    r"""(\d+(\.\d*)?|\.\d+)([eE][-+]?\d+)?"""
    t.value = int(t.value)
    return t

def t_STRING(t):
    r"'([^\\']+|\\'|\\\\)*'" # I think this is right ...
    # "string-escape" does not exist in python3: use unicode_escape instead
    t.value = t.value[1:-1].encode().decode("unicode_escape")
    return t

t_COLON = r':'
t_EQ = r'=='
t_ASSIGN = r'<-'
t_LT = r'<'
t_GT = r'>'
t_PLUS = r'\+'
t_MINUS = r'-'
t_MULT = r'\*'
t_DIV = r'/'
t_COMMA = r','
t_SEMICOLON = r';'

# Ply nicely documented how to do this.

RESERVED = {
    "def": "DEF",
    "if": "IF",
    "else": "ELSE",
    "for": "FOR",
    "to": "TO",
    "then": "THEN",   # 'THEN' is declared in tokens and used by the pseudocode
    "while": "WHILE",
    "return": "RETURN",
}

def t_NAME(t):
    r'[a-zA-Z_][a-zA-Z0-9_]*'
    t.type = RESERVED.get(t.value, "NAME")
    return t

# Putting this before t_WS lets it consume lines with only comments in
# them, so the latter code never sees the WS part.  Not consuming the
# newline.  Needed for "if 1: #comment"
def t_comment(t):
    r"[ ]*\043[^\n]*"  # \043 is '#'
    pass


# Whitespace
def t_WS(t):
    r'[ ]+'
    if t.lexer.at_line_start and t.lexer.paren_count == 0 and \
            t.lexer.brack_count == 0:
        return t

# Don't generate newline tokens when inside of parentheses, e.g.
#   a = (1,
#        2, 3)
def t_newline(t):
    r'\n+'
    t.lexer.lineno += len(t.value)
    t.type = "NEWLINE"
    if t.lexer.paren_count == 0 and t.lexer.brack_count == 0:
        return t

def t_LBRACK(t):
    r'\['
    t.lexer.brack_count += 1
    return t

def t_RBRACK(t):
    r'\]'
    # check for underflow?  should be the job of the parser
    t.lexer.brack_count -= 1
    return t

def t_LPAR(t):
    r'\('
    t.lexer.paren_count += 1
    return t

def t_RPAR(t):
    r'\)'
    # check for underflow?  should be the job of the parser
    t.lexer.paren_count -= 1
    return t

#t_ignore = " "

def t_error(t):
    raise SyntaxError("Unknown symbol %r" % (t.value[0],))
    # unreachable after the raise; the original skipped bad characters
    # instead of raising, so these lines are kept commented for reference
    #print ("Skipping", repr(t.value[0]))
    #t.lexer.skip(1)

## I implemented INDENT / DEDENT generation as a post-processing filter

# The original lex token stream contains WS and NEWLINE characters.
# WS will only occur before any other tokens on a line.

# I have three filters.  One tags tokens by adding two attributes.
# "must_indent" is True if the token must be indented from the
# previous code.  The other is "at_line_start" which is True for WS
# and the first non-WS/non-NEWLINE on a line.  It flags the check to
# see whether the new line has changed indentation level.

# Python's syntax has three INDENT states
#  0) no colon hence no need to indent
#  1) "if 1: go()" - simple statements have a COLON but no need for an indent
#  2) "if 1:\n  go()" - complex statements have a COLON NEWLINE and must indent
NO_INDENT = 0
MAY_INDENT = 1
MUST_INDENT = 2
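
# For example (illustrative, following the three states above):
#     x = 1          -> NO_INDENT   (no COLON on the line)
#     if 1: go()     -> MAY_INDENT  (COLON, statement on the same line)
#     if 1:
#         go()       -> MUST_INDENT (COLON then NEWLINE; block must indent)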

# only care about whitespace at the start of a line
def track_tokens_filter(lexer, tokens):
    oldignore = lexer.lexignore
    lexer.at_line_start = at_line_start = True
    indent = NO_INDENT
    saw_colon = False
    for token in tokens:
        #print ("token", token)
        token.at_line_start = at_line_start

        if token.type == "COLON":
            at_line_start = False
            indent = MAY_INDENT
            token.must_indent = False

        elif token.type == "NEWLINE":
            at_line_start = True
            if indent == MAY_INDENT:
                indent = MUST_INDENT
            token.must_indent = False

        elif token.type == "WS":
            assert token.at_line_start == True
            at_line_start = True
            token.must_indent = False

        else:
            # A real token; only indent after COLON NEWLINE
            if indent == MUST_INDENT:
                token.must_indent = True
            else:
                token.must_indent = False
            at_line_start = False
            indent = NO_INDENT

        # really bad hack that changes ignore lexer state.
        # when "must indent" is seen (basically "real tokens" seen)
        # then ignore whitespace.
        if token.must_indent:
            lexer.lexignore = ('ignore', ' ')
        else:
            lexer.lexignore = oldignore

        token.indent = indent
        yield token
        lexer.at_line_start = at_line_start

def _new_token(type, lineno):
    tok = lex.LexToken()
    tok.type = type
    tok.value = None
    tok.lineno = lineno
    tok.lexpos = -1
    return tok

# Synthesize a DEDENT tag
def DEDENT(lineno):
    return _new_token("DEDENT", lineno)

# Synthesize an INDENT tag
def INDENT(lineno):
    return _new_token("INDENT", lineno)


# Track the indentation level and emit the right INDENT / DEDENT events.
def indentation_filter(tokens):
    # A stack of indentation levels; will never pop item 0
    levels = [0]
    token = None
    depth = 0
    prev_was_ws = False
    for token in tokens:
        if 1:
            print ("Process", depth, token.indent, token,)
            if token.at_line_start:
                print ("at_line_start",)
            if token.must_indent:
                print ("must_indent",)
            print ()

        # WS only occurs at the start of the line
        # There may be WS followed by NEWLINE so
        # only track the depth here.  Don't indent/dedent
        # until there's something real.
        if token.type == "WS":
            assert depth == 0
            depth = len(token.value)
            prev_was_ws = True
            # WS tokens are never passed to the parser
            continue

        if token.type == "NEWLINE":
            depth = 0
            if prev_was_ws or token.at_line_start:
                # ignore blank lines
                continue
            # pass the other cases on through
            yield token
            continue

        # then it must be a real token (not WS, not NEWLINE)
        # which can affect the indentation level

        prev_was_ws = False
        if token.must_indent:
            # The current depth must be larger than the previous level
            if not (depth > levels[-1]):
                raise IndentationError("expected an indented block")

            levels.append(depth)
            yield INDENT(token.lineno)

        elif token.at_line_start:
            # Must be on the same level or one of the previous levels
            if depth == levels[-1]:
                # At the same level
                pass
            elif depth > levels[-1]:
                raise IndentationError("indentation increase but not in new block")
            else:
                # Back up; but only if it matches a previous level
                try:
                    i = levels.index(depth)
                except ValueError:
                    raise IndentationError("inconsistent indentation")
                for _ in range(i+1, len(levels)):
                    yield DEDENT(token.lineno)
                    levels.pop()

        yield token

    ### Finished processing ###

    # Must dedent any remaining levels
    if len(levels) > 1:
        assert token is not None
        for _ in range(1, len(levels)):
            yield DEDENT(token.lineno)

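# Roughly (assumed behaviour, following the rules above): for the input
#     if 1:
#         go()
#     x
# the incoming stream  IF NUMBER COLON NEWLINE WS NAME ... NEWLINE NAME
# comes out as         IF NUMBER COLON NEWLINE INDENT NAME ... NEWLINE DEDENT NAME
# mirroring what CPython's own tokenizer emits for indented blocks.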

# The top-level filter adds an ENDMARKER, if requested.
# Python's grammar uses it.
def filter(lexer, add_endmarker = True):
    token = None
    tokens = iter(lexer.token, None)
    tokens = track_tokens_filter(lexer, tokens)
    for token in indentation_filter(tokens):
        yield token

    if add_endmarker:
        lineno = 1
        if token is not None:
            lineno = token.lineno
        yield _new_token("ENDMARKER", lineno)

# Combine Ply and my filters into a new lexer

class IndentLexer(object):
    def __init__(self, debug=0, optimize=0, lextab='lextab', reflags=0):
        self.lexer = lex.lex(debug=debug, optimize=optimize,
                             lextab=lextab, reflags=reflags)
        self.token_stream = None

    def input(self, s, add_endmarker=True):
        self.lexer.paren_count = 0
        self.lexer.brack_count = 0
        self.lexer.input(s)
        self.token_stream = filter(self.lexer, add_endmarker)

    def token(self):
        try:
            return next(self.token_stream)
        except StopIteration:
            return None

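# Minimal usage sketch (assumes the token rules above are in scope):
#
#     lexer = IndentLexer(debug=0)
#     lexer.input("x <- 1\n")
#     while True:
#         tok = lexer.token()   # LexTokens, then None at end of input
#         if tok is None:
#             break
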
########## Parser (tokens -> AST) ######

# also part of Ply
#import yacc

# I use the Python AST
#from compiler import ast
import ast

# Helper function
def Assign(left, right):
    names = []
    if isinstance(left, ast.Name):
        # Single assignment on left
        return ast.Assign([ast.Name(left.id, ast.Store())], right)
    elif isinstance(left, ast.Tuple):
        # List of things - make sure they are Name nodes
        names = []
        for child in left.elts:   # getChildren() was python2 compiler.ast
            if not isinstance(child, ast.Name):
                raise SyntaxError("that assignment not supported")
            names.append(child.id)
        # AssName/AssTuple are gone in python3: use Name/Tuple in Store context
        ass_list = [ast.Name(name, ast.Store()) for name in names]
        return ast.Assign([ast.Tuple(ass_list, ast.Store())], right)
    else:
        raise SyntaxError("Can't do that yet")


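# For example (a sketch, assuming the python3 ast module): calling
#     Assign(ast.Name("RA", ast.Load()), ast.Constant(5))
# returns the AST for "RA = 5", i.e.
#     ast.Assign([ast.Name("RA", ast.Store())], ast.Constant(5))
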
# The grammar comments come from Python's Grammar/Grammar file

## NB: compound_stmt in single_input is followed by extra NEWLINE!
# file_input: (NEWLINE | stmt)* ENDMARKER
def p_file_input_end(p):
    """file_input_end : file_input ENDMARKER"""
    print ("end", p[1])
    p[0] = p[1]

def p_file_input(p):
    """file_input : file_input NEWLINE
                  | file_input stmt
                  | NEWLINE
                  | stmt"""
    if isinstance(p[len(p)-1], str):
        if len(p) == 3:
            p[0] = p[1]
        else:
            p[0] = []     # len(p) == 2 --> only a blank line
    else:
        if len(p) == 3:
            p[0] = p[1] + p[2]
        else:
            p[0] = p[1]


# funcdef: [decorators] 'def' NAME parameters ':' suite
# ignoring decorators
def p_funcdef(p):
    "funcdef : DEF NAME parameters COLON suite"
    # ast.Function is from the (removed) python2 compiler module: build an
    # ast.FunctionDef instead.  NOTE: the ast.arguments signature varies
    # between python3 versions; this matches 3.7 (no posonlyargs).
    args = ast.arguments(args=[ast.arg(a, None) for a in p[3]],
                         vararg=None, kwonlyargs=[], kw_defaults=[],
                         kwarg=None, defaults=[])
    p[0] = ast.FunctionDef(p[2], args, p[5], [])

# parameters: '(' [varargslist] ')'
def p_parameters(p):
    """parameters : LPAR RPAR
                  | LPAR varargslist RPAR"""
    if len(p) == 3:
        p[0] = []
    else:
        p[0] = p[2]


# varargslist: (fpdef ['=' test] ',')* ('*' NAME [',' '**' NAME] | '**' NAME) |
# highly simplified
def p_varargslist(p):
    """varargslist : varargslist COMMA NAME
                   | NAME"""
    if len(p) == 4:
        p[0] = p[1] + [p[3]]
    else:
        p[0] = [p[1]]

# stmt: simple_stmt | compound_stmt
def p_stmt_simple(p):
    """stmt : simple_stmt"""
    # simple_stmt is a list
    p[0] = p[1]

def p_stmt_compound(p):
    """stmt : compound_stmt"""
    p[0] = [p[1]]

# simple_stmt: small_stmt (';' small_stmt)* [';'] NEWLINE
def p_simple_stmt(p):
    """simple_stmt : small_stmts NEWLINE
                   | small_stmts SEMICOLON NEWLINE"""
    p[0] = p[1]

def p_small_stmts(p):
    """small_stmts : small_stmts SEMICOLON small_stmt
                   | small_stmt"""
    if len(p) == 4:
        p[0] = p[1] + [p[3]]
    else:
        p[0] = [p[1]]

# small_stmt: expr_stmt | print_stmt | del_stmt | pass_stmt | flow_stmt |
#    import_stmt | global_stmt | exec_stmt | assert_stmt
def p_small_stmt(p):
    """small_stmt : flow_stmt
                  | expr_stmt"""
    p[0] = p[1]

# expr_stmt: testlist (augassign (yield_expr|testlist) |
#                      ('=' (yield_expr|testlist))*)
# augassign: ('+=' | '-=' | '*=' | '/=' | '%=' | '&=' | '|=' | '^=' |
#             '<<=' | '>>=' | '**=' | '//=')
def p_expr_stmt(p):
    """expr_stmt : testlist ASSIGN testlist
                 | testlist """
    if len(p) == 2:
        # a list of expressions
        #p[0] = ast.Discard(p[1])
        p[0] = p[1]
    else:
        p[0] = Assign(p[1], p[3])

def p_flow_stmt(p):
    "flow_stmt : return_stmt"
    p[0] = p[1]

# return_stmt: 'return' [testlist]
def p_return_stmt(p):
    "return_stmt : RETURN testlist"
    p[0] = ast.Return(p[2])


def p_compound_stmt(p):
    """compound_stmt : if_stmt
                     | while_stmt
                     | funcdef
    """
    p[0] = p[1]

def p_while_stmt(p):
    """while_stmt : WHILE test COLON suite ELSE COLON suite
                  | WHILE test COLON suite
    """
    # python3 ast.While is (test, body, orelse); orelse is a list, not None
    if len(p) == 5:
        p[0] = ast.While(p[2], p[4], [])
    else:
        p[0] = ast.While(p[2], p[4], p[7])

def p_if_stmt(p):
    """if_stmt : IF test COLON suite ELSE COLON suite
               | IF test COLON suite
    """
    # likewise python3 ast.If is (test, body, orelse), not the old
    # compiler.ast list-of-(test, body) form
    if len(p) == 5:
        p[0] = ast.If(p[2], p[4], [])
    else:
        p[0] = ast.If(p[2], p[4], p[7])

def p_suite(p):
    """suite : simple_stmt
             | NEWLINE INDENT stmts DEDENT"""
    if len(p) == 2:
        p[0] = p[1]
    else:
        p[0] = p[3]


def p_stmts(p):
    """stmts : stmts stmt
             | stmt"""
    if len(p) == 3:
        p[0] = p[1] + p[2]
    else:
        p[0] = p[1]

## Not using Python's approach because Ply supports precedence

# comparison: expr (comp_op expr)*
# arith_expr: term (('+'|'-') term)*
# term: factor (('*'|'/'|'%'|'//') factor)*
# factor: ('+'|'-'|'~') factor | power
# comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not'

# python3 ast.Compare takes (left, ops, comparators), not the old
# compiler.ast list-of-pairs form
def make_lt_compare(arg):
    (left, right) = arg
    return ast.Compare(left, [ast.Lt()], [right])
def make_gt_compare(arg):
    (left, right) = arg
    return ast.Compare(left, [ast.Gt()], [right])
def make_eq_compare(arg):
    (left, right) = arg
    return ast.Compare(left, [ast.Eq()], [right])


binary_ops = {
    "+": ast.Add,     # operator classes, instantiated in p_comparison
    "-": ast.Sub,
    "*": ast.Mult,
    "/": ast.Div,
    "<": make_lt_compare,
    ">": make_gt_compare,
    "==": make_eq_compare,
}
unary_ops = {
    "+": ast.UAdd,    # python3 unary operator nodes, wrapped in UnaryOp
    "-": ast.USub,
}
precedence = (
    ("left", "EQ", "GT", "LT"),
    ("left", "PLUS", "MINUS"),
    ("left", "MULT", "DIV"),
)
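
# Sketch (standard Ply precedence behaviour): with the table above,
#     a + b * c   parses as   a + (b * c)
#     a < b + c   parses as   a < (b + c)
# since MULT/DIV bind tighter than PLUS/MINUS, which bind tighter than
# EQ/GT/LT.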

def p_comparison(p):
    """comparison : comparison PLUS comparison
                  | comparison MINUS comparison
                  | comparison MULT comparison
                  | comparison DIV comparison
                  | comparison LT comparison
                  | comparison EQ comparison
                  | comparison GT comparison
                  | PLUS comparison
                  | MINUS comparison
                  | power"""
    if len(p) == 4:
        if p[2] in ('<', '>', '=='):
            # comparisons go through the make_*_compare helpers above
            p[0] = binary_ops[p[2]]((p[1], p[3]))
        else:
            p[0] = ast.BinOp(p[1], binary_ops[p[2]](), p[3])
    elif len(p) == 3:
        p[0] = ast.UnaryOp(unary_ops[p[1]](), p[2])
    else:
        p[0] = p[1]

# power: atom trailer* ['**' factor]
# trailers enables function calls (and subscripts).
# I only allow one level of calls
# so this is 'trailer'
def p_power(p):
    """power : atom
             | atom trailer"""
    if len(p) == 2:
        p[0] = p[1]
    else:
        if p[2][0] == "CALL":
            p[0] = ast.Expr(ast.Call(p[1], p[2][1], []))
            #if p[1].id == 'print':
            #    p[0] = ast.Printnl(ast.Tuple(p[2][1]), None, None)
            #else:
            #    p[0] = ast.CallFunc(p[1], p[2][1], None, None)
        else:
            print (p[2][1])
            #raise AssertionError("not implemented %s" % p[2][0])
            subs = p[2][1]
            # compiler.ast Subscript/Slice ('OP_APPLY') replaced with the
            # python3 forms: a single index becomes Index, a pair a Slice
            if len(subs) == 1:
                idx = ast.Index(subs[0])
            else:
                idx = ast.Slice(subs[0], subs[1], None)
            p[0] = ast.Subscript(p[1], idx, ast.Load())

def p_atom_name(p):
    """atom : NAME"""
    p[0] = ast.Name(p[1], ctx=ast.Load())

def p_atom_number(p):
    """atom : NUMBER
            | STRING"""
    p[0] = ast.Constant(p[1])

#'[' [listmaker] ']' |

def p_atom_listmaker(p):
    """atom : LBRACK listmaker RBRACK"""
    p[0] = p[2]

def p_listmaker(p):
    """listmaker : test COMMA listmaker
                 | test
    """
    if len(p) == 2:
        p[0] = ast.List([p[1]], ast.Load())
    else:
        # .nodes was compiler.ast; python3 ast.List keeps elements in .elts
        p[0] = ast.List([p[1]] + p[3].elts, ast.Load())

def p_atom_tuple(p):
    """atom : LPAR testlist RPAR"""
    p[0] = p[2]

# trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME
def p_trailer(p):
    """trailer : trailer_arglist
               | trailer_subscript
    """
    p[0] = p[1]

def p_trailer_arglist(p):
    "trailer_arglist : LPAR arglist RPAR"
    p[0] = ("CALL", p[2])

def p_trailer_subscript(p):
    "trailer_subscript : LBRACK subscript RBRACK"
    p[0] = ("SUBS", p[2])

#subscript: '.' '.' '.' | test | [test] ':' [test]

def p_subscript(p):
    """subscript : test COLON test
                 | test
    """
    if len(p) == 4:
        p[0] = [p[1], p[3]]
    else:
        p[0] = [p[1]]

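# For example (illustrative, using the bpermd pseudocode below):
#     (RB)[index]        -> subscript returns [index]         -> Index
#     (RS)[8*i:8*i+7]    -> subscript returns [lower, upper]  -> Slice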

# testlist: test (',' test)* [',']
# Contains shift/reduce error
def p_testlist(p):
    """testlist : testlist_multi COMMA
                | testlist_multi """
    if len(p) == 2:
        p[0] = p[1]
    else:
        # May need to promote singleton to tuple
        if isinstance(p[1], list):
            p[0] = p[1]
        else:
            p[0] = [p[1]]
    # Convert into a tuple?
    if isinstance(p[0], list):
        p[0] = ast.Tuple(p[0], ast.Load())   # python3 Tuple needs a ctx

def p_testlist_multi(p):
    """testlist_multi : testlist_multi COMMA test
                      | test"""
    if len(p) == 2:
        # singleton
        p[0] = p[1]
    else:
        if isinstance(p[1], list):
            p[0] = p[1] + [p[3]]
        else:
            # singleton -> tuple
            p[0] = [p[1], p[3]]


# test: or_test ['if' or_test 'else' test] | lambdef
# as I don't support 'and', 'or', and 'not' this works down to 'comparison'
def p_test(p):
    "test : comparison"
    p[0] = p[1]


# arglist: (argument ',')* (argument [',']| '*' test [',' '**' test] | '**' test)
# XXX INCOMPLETE: this doesn't allow the trailing comma
def p_arglist(p):
    """arglist : arglist COMMA argument
               | argument"""
    if len(p) == 4:
        p[0] = p[1] + [p[3]]
    else:
        p[0] = [p[1]]

# argument: test [gen_for] | test '=' test  # Really [keyword '='] test
def p_argument(p):
    "argument : test"
    p[0] = p[1]

def p_error(p):
    #print ("Error!", repr(p))
    raise SyntaxError(p)


class GardenSnakeParser(object):
    def __init__(self, lexer = None):
        if lexer is None:
            lexer = IndentLexer(debug=1)
        self.lexer = lexer
        self.parser = yacc.yacc(start="file_input_end",
                                debug=False, write_tables=False)

    def parse(self, code):
        self.lexer.input(code)
        result = self.parser.parse(lexer=self.lexer, debug=False)
        # NOTE: python3.8+ wants ast.Module(result, []) (type_ignores)
        return ast.Module(result)


###### Code generation ######

#from compiler import misc, syntax, pycodegen

class GardenSnakeCompiler(object):
    def __init__(self):
        self.parser = GardenSnakeParser()

    def compile(self, code, mode="exec", filename="<string>"):
        tree = self.parser.parse(code)
        print ("snake")
        pprint(tree)
        return tree
        # everything below is unreachable: it is the old python2
        # compiler-module code path, kept commented for reference
        #misc.set_filename(filename, tree)
        #return compile(tree, mode="exec", filename="<string>")
        #syntax.check(tree)
        #gen = pycodegen.ModuleCodeGenerator(tree)
        #code = gen.getCode()
        #return code

####### Test code #######

from soc.decoder.power_fieldsn import create_sigdecode

# full bpermd pseudocode. NOTE: immediately overridden by the simplified
# version below, which is what actually gets parsed
bpermd = r"""
for i = 0 to 7
    index <- (RS)[8*i:8*i+7]
    if index < 64 then
        permi <- (RB)[index]
    else
        permi <- 0
RA <- [0]*56|| perm[0:7]
"""

bpermd = r"""
perm <- [0] * 8
#index <- (RS)[8*i:8*i+7]
RA <- [0]*56 # || perm[0:7]
print (RA)
"""

code = bpermd

lexer = IndentLexer(debug=1)
# Give the lexer some input
print ("code")
print (code)
lexer.input(code)

# Tokenize
while True:
    tok = lexer.token()
    if not tok:
        break      # No more input
    print (tok)

#sys.exit(0)

# Set up the GardenSnake run-time environment
def print_(*args):
    print ("args", args)
    print ("-->", " ".join(map(str, args)))

#d = copy(globals())
d = {}
d["print"] = print_

sd = create_sigdecode()
print ("forms", sd.df.forms)
for f in sd.df.FormX:
    print (f)

_compile = GardenSnakeCompiler().compile

tree = _compile(code, mode="single", filename="string")
tree = ast.fix_missing_locations(tree)
print (ast.dump(tree))

import astor
print ("astor dump")
print (astor.dump_tree(tree))
print ("to source")
source = astor.to_source(tree)
print (source)

#from compiler import parse
#tree = parse(code, "exec")

# compile the fixed-up AST down to a code object for the exec below
compiled_code = compile(tree, "<string>", "exec")
print (compiled_code)

exec (compiled_code, d)
print ("Done")

#print d
#print l