get if/else comparisons working with python3
[soc.git] / src / soc / decoder / power_pseudo.py
# Based on GardenSnake - a parser generator demonstration program
# GardenSnake was released into the Public Domain by Andrew Dalke.

# Portions of this work are derived from Python's Grammar definition
# and may be covered under the Python copyright and license
#
# Andrew Dalke / Dalke Scientific Software, LLC
# 30 August 2006 / Cape Town, South Africa

# Modifications for inclusion in PLY distribution
import sys
from pprint import pprint
from copy import copy
from ply import lex, yacc
import astor

##### Lexer ######
#import lex
import decimal

tokens = (
    'DEF',
    'IF',
    'ELSE',
    'FOR',
    'TO',
    'THEN',
    'WHILE',
    'NAME',
    'NUMBER',  # Python decimals
    'STRING',  # single quoted strings only; syntax of raw strings
    'LPAR',
    'RPAR',
    'LBRACK',
    'RBRACK',
    'COLON',
    'EQ',
    'ASSIGN',
    'LT',
    'GT',
    'PLUS',
    'MINUS',
    'MULT',
    'DIV',
    'APPEND',
    'RETURN',
    'WS',
    'NEWLINE',
    'COMMA',
    'SEMICOLON',
    'INDENT',
    'DEDENT',
    'ENDMARKER',
)

#t_NUMBER = r'\d+'
# taken from decimal.py but without the leading sign
def t_NUMBER(t):
    r"""(\d+(\.\d*)?|\.\d+)([eE][-+]? \d+)?"""
    t.value = int(t.value)
    return t

def t_STRING(t):
    r"'([^\\']+|\\'|\\\\)*'"  # I think this is right ...
    # str has no decode() under python3: unescape via unicode_escape instead
    t.value = t.value[1:-1].encode().decode("unicode_escape")  # .swapcase() # for fun
    return t

t_COLON = r':'
t_EQ = r'=='
t_ASSIGN = r'<-'
t_LT = r'<'
t_GT = r'>'
t_PLUS = r'\+'
t_MINUS = r'-'
t_MULT = r'\*'
t_DIV = r'/'
t_COMMA = r','
t_SEMICOLON = r';'
t_APPEND = r'\|\|'

# Ply nicely documented how to do this.

RESERVED = {
    "def": "DEF",
    "if": "IF",
    "else": "ELSE",
    "for": "FOR",
    "to": "TO",
    "while": "WHILE",
    "return": "RETURN",
}

def t_NAME(t):
    r'[a-zA-Z_][a-zA-Z0-9_]*'
    t.type = RESERVED.get(t.value, "NAME")
    return t

# Putting this before t_WS lets it consume lines with only comments in
# them, so the latter code never sees the WS part.  Not consuming the
# newline.  Needed for "if 1: #comment"
def t_comment(t):
    r"[ ]*\043[^\n]*"  # \043 is '#'
    pass


# Whitespace
def t_WS(t):
    r'[ ]+'
    if t.lexer.at_line_start and t.lexer.paren_count == 0 and \
       t.lexer.brack_count == 0:
        return t

# Don't generate newline tokens when inside parentheses, e.g.
#   a = (1,
#        2, 3)
def t_newline(t):
    r'\n+'
    t.lexer.lineno += len(t.value)
    t.type = "NEWLINE"
    if t.lexer.paren_count == 0 and t.lexer.brack_count == 0:
        return t

def t_LBRACK(t):
    r'\['
    t.lexer.brack_count += 1
    return t

def t_RBRACK(t):
    r'\]'
    # check for underflow?  should be the job of the parser
    t.lexer.brack_count -= 1
    return t

def t_LPAR(t):
    r'\('
    t.lexer.paren_count += 1
    return t

def t_RPAR(t):
    r'\)'
    # check for underflow?  should be the job of the parser
    t.lexer.paren_count -= 1
    return t

#t_ignore = " "

def t_error(t):
    raise SyntaxError("Unknown symbol %r" % (t.value[0],))
    # unreachable after the raise above (kept from GardenSnake for reference)
    #print ("Skipping", repr(t.value[0]))
    #t.lexer.skip(1)

## I implemented INDENT / DEDENT generation as a post-processing filter

# The original lex token stream contains WS and NEWLINE characters.
# WS will only occur before any other tokens on a line.

# I have three filters.  One tags tokens by adding two attributes.
# "must_indent" is True if the token must be indented from the
# previous code.  The other is "at_line_start" which is True for WS
# and the first non-WS/non-NEWLINE on a line.  It flags the check to
# see if the new line has changed indentation level.

# Python's syntax has three INDENT states
#  0) no colon hence no need to indent
#  1) "if 1: go()" - simple statements have a COLON but no need for an indent
#  2) "if 1:\n  go()" - complex statements have a COLON NEWLINE and must indent
NO_INDENT = 0
MAY_INDENT = 1
MUST_INDENT = 2
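
# Rough illustration (a sketch, not executed) of how these states drive the
# filters below.  For the input "if 1:\n    go()" the raw lexer emits roughly:
#
#   IF NUMBER COLON NEWLINE WS NAME LPAR RPAR ... ENDMARKER
#
# The COLON sets MAY_INDENT, the NEWLINE that follows promotes it to
# MUST_INDENT, and the first real token on the next line is then tagged
# must_indent=True so that indentation_filter() can emit an INDENT for it.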

# only care about whitespace at the start of a line
def track_tokens_filter(lexer, tokens):
    oldignore = lexer.lexignore
    lexer.at_line_start = at_line_start = True
    indent = NO_INDENT
    saw_colon = False
    for token in tokens:
        #print ("token", token)
        token.at_line_start = at_line_start

        if token.type == "COLON":
            at_line_start = False
            indent = MAY_INDENT
            token.must_indent = False

        elif token.type == "NEWLINE":
            at_line_start = True
            if indent == MAY_INDENT:
                indent = MUST_INDENT
            token.must_indent = False

        elif token.type == "WS":
            assert token.at_line_start == True
            at_line_start = True
            token.must_indent = False

        else:
            # A real token; only indent after COLON NEWLINE
            if indent == MUST_INDENT:
                token.must_indent = True
            else:
                token.must_indent = False
            at_line_start = False
            indent = NO_INDENT

        # really bad hack that changes ignore lexer state.
        # when "must indent" is seen (basically "real tokens" seen)
        # then ignore whitespace.
        if token.must_indent:
            lexer.lexignore = ('ignore', ' ')
        else:
            lexer.lexignore = oldignore

        token.indent = indent
        yield token
        lexer.at_line_start = at_line_start

def _new_token(type, lineno):
    tok = lex.LexToken()
    tok.type = type
    tok.value = None
    tok.lineno = lineno
    tok.lexpos = -1
    return tok

# Synthesize a DEDENT tag
def DEDENT(lineno):
    return _new_token("DEDENT", lineno)

# Synthesize an INDENT tag
def INDENT(lineno):
    return _new_token("INDENT", lineno)


# Track the indentation level and emit the right INDENT / DEDENT events.
def indentation_filter(tokens):
    # A stack of indentation levels; will never pop item 0
    levels = [0]
    token = None
    depth = 0
    prev_was_ws = False
    for token in tokens:
        if 1:
            print ("Process", depth, token.indent, token,)
            if token.at_line_start:
                print ("at_line_start",)
            if token.must_indent:
                print ("must_indent",)
            print ()  # a bare "print" prints nothing under python3

        # WS only occurs at the start of the line
        # There may be WS followed by NEWLINE so
        # only track the depth here.  Don't indent/dedent
        # until there's something real.
        if token.type == "WS":
            assert depth == 0
            depth = len(token.value)
            prev_was_ws = True
            # WS tokens are never passed to the parser
            continue

        if token.type == "NEWLINE":
            depth = 0
            if prev_was_ws or token.at_line_start:
                # ignore blank lines
                continue
            # pass the other cases on through
            yield token
            continue

        # then it must be a real token (not WS, not NEWLINE)
        # which can affect the indentation level

        prev_was_ws = False
        if token.must_indent:
            # The current depth must be larger than the previous level
            if not (depth > levels[-1]):
                raise IndentationError("expected an indented block")

            levels.append(depth)
            yield INDENT(token.lineno)

        elif token.at_line_start:
            # Must be on the same level or one of the previous levels
            if depth == levels[-1]:
                # At the same level
                pass
            elif depth > levels[-1]:
                raise IndentationError("indentation increase but not in new block")
            else:
                # Back up; but only if it matches a previous level
                try:
                    i = levels.index(depth)
                except ValueError:
                    raise IndentationError("inconsistent indentation")
                for _ in range(i+1, len(levels)):
                    yield DEDENT(token.lineno)
                    levels.pop()

        yield token

    ### Finished processing ###

    # Must dedent any remaining levels
    if len(levels) > 1:
        assert token is not None
        for _ in range(1, len(levels)):
            yield DEDENT(token.lineno)
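
# Example (illustrative only): for the nested input
#
#   if a < b:
#       if c < d:
#           x <- 1
#   y <- 2
#
# two INDENTs are emitted on the way in, and both matching DEDENTs are
# emitted before the "y" token, because its depth matches levels[0] again.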


# The top-level filter adds an ENDMARKER, if requested.
# Python's grammar uses it.
def filter(lexer, add_endmarker = True):
    token = None
    tokens = iter(lexer.token, None)
    tokens = track_tokens_filter(lexer, tokens)
    for token in indentation_filter(tokens):
        yield token

    if add_endmarker:
        lineno = 1
        if token is not None:
            lineno = token.lineno
        yield _new_token("ENDMARKER", lineno)

# Combine Ply and my filters into a new lexer

class IndentLexer(object):
    def __init__(self, debug=0, optimize=0, lextab='lextab', reflags=0):
        self.lexer = lex.lex(debug=debug, optimize=optimize, lextab=lextab, reflags=reflags)
        self.token_stream = None
    def input(self, s, add_endmarker=True):
        self.lexer.paren_count = 0
        self.lexer.brack_count = 0
        self.lexer.input(s)
        self.token_stream = filter(self.lexer, add_endmarker)
    def token(self):
        try:
            return next(self.token_stream)
        except StopIteration:
            return None
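
# Minimal usage sketch (illustrative only; the real driver code is at the
# bottom of this file):
#
#   lx = IndentLexer(debug=0)
#   lx.input("x <- 1\nif x < 2:\n    x <- x + 1\n")
#   while True:
#       t = lx.token()
#       if t is None:
#           break
#       print(t.type, t.value)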

########## Parser (tokens -> AST) ######

# also part of Ply
#import yacc

# I use the Python AST
#from compiler import ast
import ast

# Helper function
def Assign(left, right):
    names = []
    if isinstance(left, ast.Name):
        # Single assignment on left
        return ast.Assign([ast.Name(left.id, ast.Store())], right)
    elif isinstance(left, ast.Tuple):
        # List of things - make sure they are Name nodes
        # (ported from the old "compiler" module API: ast.Tuple keeps its
        #  children in .elts, and AssName / AssTuple no longer exist)
        names = []
        for child in left.elts:
            if not isinstance(child, ast.Name):
                raise SyntaxError("that assignment not supported")
            names.append(child.id)
        ass_list = [ast.Name(name, ast.Store()) for name in names]
        return ast.Assign([ast.Tuple(ass_list, ast.Store())], right)
    else:
        raise SyntaxError("Can't do that yet")
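
# For example (illustrative only), Assign(ast.Name('RA'), ast.Constant(5))
# builds the equivalent of the Python statement "RA = 5":
#
#   ast.Assign([ast.Name('RA', ast.Store())], ast.Constant(5))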


# The grammar comments come from Python's Grammar/Grammar file

## NB: compound_stmt in single_input is followed by extra NEWLINE!
# file_input: (NEWLINE | stmt)* ENDMARKER
def p_file_input_end(p):
    """file_input_end : file_input ENDMARKER"""
    print ("end", p[1])
    p[0] = p[1]

def p_file_input(p):
    """file_input : file_input NEWLINE
                  | file_input stmt
                  | NEWLINE
                  | stmt"""
    if isinstance(p[len(p)-1], str):
        if len(p) == 3:
            p[0] = p[1]
        else:
            p[0] = []  # p == 2 --> only a blank line
    else:
        if len(p) == 3:
            p[0] = p[1] + p[2]
        else:
            p[0] = p[1]


# funcdef: [decorators] 'def' NAME parameters ':' suite
# ignoring decorators
def p_funcdef(p):
    "funcdef : DEF NAME parameters COLON suite"
    # XXX left over from the old "compiler" module: ast.Function does not
    # exist in python3's ast (it would need ast.FunctionDef / ast.arguments)
    p[0] = ast.Function(None, p[2], list(p[3]), (), 0, None, p[5])

# parameters: '(' [varargslist] ')'
def p_parameters(p):
    """parameters : LPAR RPAR
                  | LPAR varargslist RPAR"""
    if len(p) == 3:
        p[0] = []
    else:
        p[0] = p[2]


# varargslist: (fpdef ['=' test] ',')* ('*' NAME [',' '**' NAME] | '**' NAME) |
# highly simplified
def p_varargslist(p):
    """varargslist : varargslist COMMA NAME
                   | NAME"""
    if len(p) == 4:
        p[0] = p[1] + [p[3]]  # p[1] is already a list; append the new NAME
    else:
        p[0] = [p[1]]

# stmt: simple_stmt | compound_stmt
def p_stmt_simple(p):
    """stmt : simple_stmt"""
    # simple_stmt is a list
    p[0] = p[1]

def p_stmt_compound(p):
    """stmt : compound_stmt"""
    p[0] = [p[1]]

# simple_stmt: small_stmt (';' small_stmt)* [';'] NEWLINE
def p_simple_stmt(p):
    """simple_stmt : small_stmts NEWLINE
                   | small_stmts SEMICOLON NEWLINE"""
    p[0] = p[1]

def p_small_stmts(p):
    """small_stmts : small_stmts SEMICOLON small_stmt
                   | small_stmt"""
    if len(p) == 4:
        p[0] = p[1] + [p[3]]
    else:
        p[0] = [p[1]]

# small_stmt: expr_stmt | print_stmt  | del_stmt | pass_stmt | flow_stmt |
#    import_stmt | global_stmt | exec_stmt | assert_stmt
def p_small_stmt(p):
    """small_stmt : flow_stmt
                  | expr_stmt"""
    p[0] = p[1]

# expr_stmt: testlist (augassign (yield_expr|testlist) |
#                      ('=' (yield_expr|testlist))*)
# augassign: ('+=' | '-=' | '*=' | '/=' | '%=' | '&=' | '|=' | '^=' |
#             '<<=' | '>>=' | '**=' | '//=')
def p_expr_stmt(p):
    """expr_stmt : testlist ASSIGN testlist
                 | testlist """
    if len(p) == 2:
        # a list of expressions
        #p[0] = ast.Discard(p[1])
        p[0] = p[1]
    else:
        p[0] = Assign(p[1], p[3])

def p_flow_stmt(p):
    "flow_stmt : return_stmt"
    p[0] = p[1]

# return_stmt: 'return' [testlist]
def p_return_stmt(p):
    "return_stmt : RETURN testlist"
    p[0] = ast.Return(p[2])


def p_compound_stmt(p):
    """compound_stmt : if_stmt
                     | while_stmt
                     | funcdef
    """
    p[0] = p[1]

def p_while_stmt(p):
    """while_stmt : WHILE test COLON suite ELSE COLON suite
                  | WHILE test COLON suite
    """
    if len(p) == 5:
        p[0] = ast.While(p[2], p[4], [])
    else:
        p[0] = ast.While(p[2], p[4], p[7])

def p_if_stmt(p):
    """if_stmt : IF test COLON suite ELSE COLON suite
               | IF test COLON suite
    """
    if len(p) == 5:
        p[0] = ast.If(p[2], p[4], [])
    else:
        p[0] = ast.If(p[2], p[4], p[7])
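
# Illustrative only: for the second bpermd fragment at the bottom of this
# file, "if index < 64:" parses through p_comparison/make_lt_compare into
# ast.Compare(Name('index'), [ast.Lt()], [Constant(64)]), and p_if_stmt then
# wraps it as ast.If(<that Compare>, <suite>, []) since there is no "else".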

def p_suite(p):
    """suite : simple_stmt
             | NEWLINE INDENT stmts DEDENT"""
    if len(p) == 2:
        p[0] = p[1]
    else:
        p[0] = p[3]


def p_stmts(p):
    """stmts : stmts stmt
             | stmt"""
    if len(p) == 3:
        p[0] = p[1] + p[2]
    else:
        p[0] = p[1]

## Not using Python's approach because Ply supports precedence

# comparison: expr (comp_op expr)*
# arith_expr: term (('+'|'-') term)*
# term: factor (('*'|'/'|'%'|'//') factor)*
# factor: ('+'|'-'|'~') factor | power
# comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not'

def make_lt_compare(arg):
    (left, right) = arg
    return ast.Compare(left, [ast.Lt()], [right])
def make_gt_compare(arg):
    (left, right) = arg
    return ast.Compare(left, [ast.Gt()], [right])
def make_eq_compare(arg):
    (left, right) = arg
    return ast.Compare(left, [ast.Eq()], [right])


binary_ops = {
    "+": ast.Add(),
    "-": ast.Sub(),
    "*": ast.Mult(),
    "/": ast.Div(),
    "<": make_lt_compare,
    ">": make_gt_compare,
    "==": make_eq_compare,
}
# unary operator *classes* (not instances): p_comparison wraps them in
# ast.UnaryOp, since ast.Add/ast.Sub take no operand in python3's ast
unary_ops = {
    "+": ast.UAdd,
    "-": ast.USub,
}
precedence = (
    ("left", "EQ", "GT", "LT"),
    ("left", "PLUS", "MINUS"),
    ("left", "MULT", "DIV"),
)
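
# Sketch of how these tables are used by p_comparison() below
# (illustrative only):
#
#   binary_ops["+"]          -> ast.Add() instance, wrapped in ast.BinOp
#   binary_ops["<"]((a, b))  -> ast.Compare(a, [ast.Lt()], [b])
#   unary_ops["-"]           -> ast.USub class, wrapped in ast.UnaryOp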

def check_concat(node):  # checks if the comparison is already a concat
    print (node)
    if not isinstance(node, ast.Call):
        return [node]
    if node.func.id != 'concat':  # ast.Call keeps the callee in .func
        return node
    return node.args              # ... and its arguments in .args
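
# so that e.g. a || b || c is flattened into a single concat(a, b, c) call
# rather than nesting concat(concat(a, b), c)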

def p_comparison(p):
    """comparison : comparison PLUS comparison
                  | comparison MINUS comparison
                  | comparison MULT comparison
                  | comparison DIV comparison
                  | comparison LT comparison
                  | comparison EQ comparison
                  | comparison GT comparison
                  | PLUS comparison
                  | MINUS comparison
                  | comparison APPEND comparison
                  | power"""
    if len(p) == 4:
        print (list(p))
        if p[2] == '||':
            l = check_concat(p[1]) + check_concat(p[3])
            p[0] = ast.Call(ast.Name("concat"), l, [])
        elif p[2] in ['<', '>', '==']:
            p[0] = binary_ops[p[2]]((p[1], p[3]))
        else:
            p[0] = ast.BinOp(p[1], binary_ops[p[2]], p[3])
    elif len(p) == 3:
        # unary +/- : wrap the operator class from unary_ops in a UnaryOp node
        p[0] = ast.UnaryOp(unary_ops[p[1]](), p[2])
    else:
        p[0] = p[1]

# power: atom trailer* ['**' factor]
# trailers enable function calls (and subscripts).
# I only allow one level of calls
# so this is 'trailer'
def p_power(p):
    """power : atom
             | atom trailer"""
    if len(p) == 2:
        p[0] = p[1]
    else:
        if p[2][0] == "CALL":
            p[0] = ast.Expr(ast.Call(p[1], p[2][1], []))
            #if p[1].id == 'print':
            #    p[0] = ast.Printnl(ast.Tuple(p[2][1]), None, None)
            #else:
            #    p[0] = ast.CallFunc(p[1], p[2][1], None, None)
        else:
            print (p[2][1])
            #raise AssertionError("not implemented %s" % p[2][0])
            subs = p[2][1]
            if len(subs) == 1:
                idx = subs[0]
            else:
                idx = ast.Slice(subs[0], subs[1], None)
            p[0] = ast.Subscript(p[1], idx)
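
# Illustrative only: "(RS)[8*i:8*i+7]" arrives here as atom=Name('RS') plus
# trailer=("SUBS", [<8*i BinOp>, <8*i+7 BinOp>]), and is turned into
# ast.Subscript(Name('RS'), ast.Slice(<lower>, <upper>, None)).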

def p_atom_name(p):
    """atom : NAME"""
    p[0] = ast.Name(p[1], ctx=ast.Load())

def p_atom_number(p):
    """atom : NUMBER
            | STRING"""
    p[0] = ast.Constant(p[1])

#'[' [listmaker] ']' |

def p_atom_listmaker(p):
    """atom : LBRACK listmaker RBRACK"""
    p[0] = p[2]

def p_listmaker(p):
    """listmaker : test COMMA listmaker
                 | test
    """
    if len(p) == 2:
        p[0] = ast.List([p[1]])
    else:
        # ast.List keeps its children in .elts (the old compiler-module
        # .nodes attribute does not exist in python3's ast)
        p[0] = ast.List([p[1]] + p[3].elts)

def p_atom_tuple(p):
    """atom : LPAR testlist RPAR"""
    p[0] = p[2]

# trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME
def p_trailer(p):
    """trailer : trailer_arglist
               | trailer_subscript
    """
    p[0] = p[1]

def p_trailer_arglist(p):
    "trailer_arglist : LPAR arglist RPAR"
    p[0] = ("CALL", p[2])

def p_trailer_subscript(p):
    "trailer_subscript : LBRACK subscript RBRACK"
    p[0] = ("SUBS", p[2])

#subscript: '.' '.' '.' | test | [test] ':' [test]

def p_subscript(p):
    """subscript : test COLON test
                 | test
    """
    if len(p) == 4:
        p[0] = [p[1], p[3]]
    else:
        p[0] = [p[1]]


# testlist: test (',' test)* [',']
# Contains shift/reduce error
def p_testlist(p):
    """testlist : testlist_multi COMMA
                | testlist_multi """
    if len(p) == 2:
        p[0] = p[1]
    else:
        # May need to promote singleton to tuple
        if isinstance(p[1], list):
            p[0] = p[1]
        else:
            p[0] = [p[1]]
    # Convert into a tuple?
    if isinstance(p[0], list):
        p[0] = ast.Tuple(p[0])

def p_testlist_multi(p):
    """testlist_multi : testlist_multi COMMA test
                      | test"""
    if len(p) == 2:
        # singleton
        p[0] = p[1]
    else:
        if isinstance(p[1], list):
            p[0] = p[1] + [p[3]]
        else:
            # singleton -> tuple
            p[0] = [p[1], p[3]]


# test: or_test ['if' or_test 'else' test] | lambdef
#  as I don't support 'and', 'or', and 'not' this works down to 'comparison'
def p_test(p):
    "test : comparison"
    p[0] = p[1]



# arglist: (argument ',')* (argument [',']| '*' test [',' '**' test] | '**' test)
# XXX INCOMPLETE: this doesn't allow the trailing comma
def p_arglist(p):
    """arglist : arglist COMMA argument
               | argument"""
    if len(p) == 4:
        p[0] = p[1] + [p[3]]
    else:
        p[0] = [p[1]]

# argument: test [gen_for] | test '=' test  # Really [keyword '='] test
def p_argument(p):
    "argument : test"
    p[0] = p[1]

def p_error(p):
    #print "Error!", repr(p)
    raise SyntaxError(p)


class GardenSnakeParser(object):
    def __init__(self, lexer = None):
        if lexer is None:
            lexer = IndentLexer(debug=1)
        self.lexer = lexer
        self.parser = yacc.yacc(start="file_input_end",
                                debug=False, write_tables=False)

    def parse(self, code):
        self.lexer.input(code)
        result = self.parser.parse(lexer = self.lexer, debug=False)
        return ast.Module(result)


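# The overall pipeline is: IndentLexer tokens -> the yacc grammar above ->
# a list of ast statement nodes -> wrapped in a single ast.Module by parse().
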
###### Code generation ######

#from compiler import misc, syntax, pycodegen

class GardenSnakeCompiler(object):
    def __init__(self):
        self.parser = GardenSnakeParser()
    def compile(self, code, mode="exec", filename="<string>"):
        tree = self.parser.parse(code)
        print ("snake")
        pprint(tree)
        # early return: just hand back the AST.  everything below is
        # unreachable, left over from the old compiler-module code path
        return tree
        #misc.set_filename(filename, tree)
        #return compile(tree, mode="exec", filename="<string>")
        #syntax.check(tree)
        #gen = pycodegen.ModuleCodeGenerator(tree)
        #code = gen.getCode()
        #return code

####### Test code #######

from soc.decoder.power_fieldsn import create_sigdecode

bpermd = r"""
for i = 0 to 7
    index <- (RS)[8*i:8*i+7]
    if index < 64 then
        permi <- (RB)[index]
    else
        permi <- 0
RA <- [0]*56|| perm[0:7]
"""

bpermd = r"""
perm <- [0] * 8
if index < 64:
    index <- (RS)[8*i:8*i+7]
RA <- [0]*56 || perm[0:7]
print (RA)
"""
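
# NB: the second assignment above replaces the first: only the simpler
# fragment (an assignment, an "if" with a < comparison, a || concatenation
# and a print) is what actually gets parsed below.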

code = bpermd

lexer = IndentLexer(debug=1)
# Give the lexer some input
print ("code")
print (code)
lexer.input(code)

# Tokenize
while True:
    tok = lexer.token()
    if not tok:
        break  # No more input
    print(tok)

#sys.exit(0)

# Set up the GardenSnake run-time environment
def print_(*args):
    print ("args", args)
    print ("-->", " ".join(map(str, args)))

#d = copy(globals())
d = {}
d["print"] = print_

sd = create_sigdecode()
print ("forms", sd.df.forms)
for f in sd.df.FormX:
    print (f)

_compile = GardenSnakeCompiler().compile

tree = _compile(code, mode="single", filename="string")
import ast
tree = ast.fix_missing_locations(tree)
print ( ast.dump(tree) )

print ("astor dump")
print (astor.dump_tree(tree))
print ("to source")
source = astor.to_source(tree)
print (source)

#from compiler import parse
#tree = parse(code, "exec")

# compile the regenerated source so there is an actual code object to exec
# (compiled_code was previously referenced without ever being assigned)
compiled_code = compile(source, mode="exec", filename="<string>")

print (compiled_code)

exec (compiled_code, d)
print ("Done")

#print d
#print l