add token-preprocessing step to make code look more like python
[soc.git] / src / soc / decoder / power_pseudo.py
# Based on GardenSnake - a parser generator demonstration program
# GardenSnake was released into the Public Domain by Andrew Dalke.

# Portions of this work are derived from Python's Grammar definition
# and may be covered under the Python copyright and license
#
# Andrew Dalke / Dalke Scientific Software, LLC
# 30 August 2006 / Cape Town, South Africa

# Modifications for inclusion in PLY distribution
import sys
from pprint import pprint
from copy import copy
from ply import lex, yacc
import astor

##### Lexer ######
#import lex
import decimal

tokens = (
    'DEF',
    'IF',
    'THEN',
    'ELSE',
    'FOR',
    'TO',
    'DO',
    'WHILE',
    'NAME',
    'NUMBER',  # Python decimals
    'STRING',  # single quoted strings only; syntax of raw strings
    'LPAR',
    'RPAR',
    'LBRACK',
    'RBRACK',
    'COLON',
    'EQ',
    'ASSIGN',
    'LT',
    'GT',
    'PLUS',
    'MINUS',
    'MULT',
    'DIV',
    'APPEND',
    'RETURN',
    'WS',
    'NEWLINE',
    'COMMA',
    'SEMICOLON',
    'INDENT',
    'DEDENT',
    'ENDMARKER',
)

#t_NUMBER = r'\d+'
# taken from decimal.py but without the leading sign
def t_NUMBER(t):
    r"""(\d+(\.\d*)?|\.\d+)([eE][-+]? \d+)?"""
    t.value = int(t.value)
    return t

def t_STRING(t):
    r"'([^\\']+|\\'|\\\\)*'" # I think this is right ...
    # str.decode("string-escape") was Python 2 only: round-trip through
    # bytes so the backslash escapes are still processed under Python 3
    t.value = t.value[1:-1].encode().decode("unicode_escape") # .swapcase() # for fun
    return t

t_COLON = r':'
t_EQ = r'=='
t_ASSIGN = r'<-'
t_LT = r'<'
t_GT = r'>'
t_PLUS = r'\+'
t_MINUS = r'-'
t_MULT = r'\*'
t_DIV = r'/'
t_COMMA = r','
t_SEMICOLON = r';'
t_APPEND = r'\|\|'

# Ply nicely documented how to do this.

RESERVED = {
    "def": "DEF",
    "if": "IF",
    "then": "THEN",
    "else": "ELSE",
    "for": "FOR",
    "to": "TO",
    "while": "WHILE",
    "do": "DO",
    "return": "RETURN",
}

def t_NAME(t):
    r'[a-zA-Z_][a-zA-Z0-9_]*'
    t.type = RESERVED.get(t.value, "NAME")
    return t

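# Illustrative example (not executed): with the RESERVED lookup above, the
# input "for i" lexes as a FOR token followed by a NAME token, while an
# ordinary identifier such as "index" simply stays a NAME.
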
# Putting this before t_WS lets it consume lines with only comments in
# them, so the latter code never sees the WS part. It does not consume
# the newline. Needed for "if 1: #comment"
def t_comment(t):
    r"[ ]*\043[^\n]*" # \043 is '#'
    pass


# Whitespace
def t_WS(t):
    r'[ ]+'
    if t.lexer.at_line_start and t.lexer.paren_count == 0 and \
            t.lexer.brack_count == 0:
        return t

# Don't generate newline tokens when inside of parenthesis, eg
#    a = (1,
#         2, 3)
def t_newline(t):
    r'\n+'
    t.lexer.lineno += len(t.value)
    t.type = "NEWLINE"
    if t.lexer.paren_count == 0 and t.lexer.brack_count == 0:
        return t

def t_LBRACK(t):
    r'\['
    t.lexer.brack_count += 1
    return t

def t_RBRACK(t):
    r'\]'
    # check for underflow?  should be the job of the parser
    t.lexer.brack_count -= 1
    return t

def t_LPAR(t):
    r'\('
    t.lexer.paren_count += 1
    return t

def t_RPAR(t):
    r'\)'
    # check for underflow?  should be the job of the parser
    t.lexer.paren_count -= 1
    return t

#t_ignore = " "

def t_error(t):
    raise SyntaxError("Unknown symbol %r" % (t.value[0],))
    print ("Skipping", repr(t.value[0]))
    t.lexer.skip(1)

## I implemented INDENT / DEDENT generation as a post-processing filter

# The original lex token stream contains WS and NEWLINE characters.
# WS will only occur before any other tokens on a line.

# I have three filters. One tags tokens by adding two attributes.
# "must_indent" is True if the token must be indented from the
# previous code. The other is "at_line_start" which is True for WS
# and the first non-WS/non-NEWLINE on a line. It is used by the check
# to see if the new line has changed indentation level.

# Python's syntax has three INDENT states
#  0) no colon hence no need to indent
#  1) "if 1: go()" - simple statements have a COLON but no need for an indent
#  2) "if 1:\n  go()" - complex statements have a COLON NEWLINE and must indent
NO_INDENT = 0
MAY_INDENT = 1
MUST_INDENT = 2

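# Illustrative example (not executed): after the filters below have run,
# the two-line pseudo-code input
#     if x < 1 then
#         x <- 0
# is seen by the parser as roughly
#     IF NAME LT NUMBER COLON NEWLINE INDENT NAME ASSIGN NUMBER NEWLINE DEDENT
# i.e. "then" has become COLON and the physical indentation has been turned
# into INDENT / DEDENT tokens, much as CPython's tokenizer does for real
# Python source.
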
# turn into python-like colon syntax from pseudo-code syntax
def python_colonify(lexer, tokens):

    while_seen = False
    for token in tokens:
        print ("track colon token", token, token.type)

        if token.type == 'DO':
            continue # skip. do while is redundant
        elif token.type == 'THEN':
            # turn then into colon
            token.type = "COLON"
            yield token
        elif token.type == 'ELSE':
            yield token
            token = copy(token)
            token.type = "COLON"
            yield token
        elif token.type == 'WHILE':
            while_seen = True
            yield token
        elif token.type == 'NEWLINE':
            if while_seen:
                ctok = copy(token)
                ctok.type = "COLON"
                yield ctok
                while_seen = False
            yield token
        else:
            yield token

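# Illustrative example (not executed): python_colonify() rewrites the
# pseudo-code keywords into Python punctuation, so
#     "if index < 64 then"     becomes   IF NAME LT NUMBER COLON
#     "else"                   becomes   ELSE COLON
#     "while index" + NEWLINE  becomes   WHILE NAME COLON NEWLINE
# and a leading "do" token is simply dropped.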

# only care about whitespace at the start of a line
def track_tokens_filter(lexer, tokens):
    oldignore = lexer.lexignore
    lexer.at_line_start = at_line_start = True
    indent = NO_INDENT
    saw_colon = False
    for token in tokens:
        print ("track token", token, token.type)
        token.at_line_start = at_line_start

        if token.type == "COLON":
            at_line_start = False
            indent = MAY_INDENT
            token.must_indent = False

        elif token.type == "NEWLINE":
            at_line_start = True
            if indent == MAY_INDENT:
                indent = MUST_INDENT
            token.must_indent = False

        elif token.type == "WS":
            assert token.at_line_start == True
            at_line_start = True
            token.must_indent = False

        else:
            # A real token; only indent after COLON NEWLINE
            if indent == MUST_INDENT:
                token.must_indent = True
            else:
                token.must_indent = False
            at_line_start = False
            indent = NO_INDENT

        # really bad hack that changes ignore lexer state.
        # when "must indent" is seen (basically "real tokens" seen)
        # then ignore whitespace.
        if token.must_indent:
            lexer.lexignore = ('ignore', ' ')
        else:
            lexer.lexignore = oldignore

        token.indent = indent
        yield token
        lexer.at_line_start = at_line_start

def _new_token(type, lineno):
    tok = lex.LexToken()
    tok.type = type
    tok.value = None
    tok.lineno = lineno
    tok.lexpos = -1
    return tok

# Synthesize a DEDENT tag
def DEDENT(lineno):
    return _new_token("DEDENT", lineno)

# Synthesize an INDENT tag
def INDENT(lineno):
    return _new_token("INDENT", lineno)


# Track the indentation level and emit the right INDENT / DEDENT events.
def indentation_filter(tokens):
    # A stack of indentation levels; will never pop item 0
    levels = [0]
    token = None
    depth = 0
    prev_was_ws = False
    for token in tokens:
        if 1:
            print ("Process", depth, token.indent, token,)
            if token.at_line_start:
                print ("at_line_start",)
            if token.must_indent:
                print ("must_indent",)
            print ()

        # WS only occurs at the start of the line
        # There may be WS followed by NEWLINE so
        # only track the depth here.  Don't indent/dedent
        # until there's something real.
        if token.type == "WS":
            assert depth == 0
            depth = len(token.value)
            prev_was_ws = True
            # WS tokens are never passed to the parser
            continue

        if token.type == "NEWLINE":
            depth = 0
            if prev_was_ws or token.at_line_start:
                # ignore blank lines
                continue
            # pass the other cases on through
            yield token
            continue

        # then it must be a real token (not WS, not NEWLINE)
        # which can affect the indentation level

        prev_was_ws = False
        if token.must_indent:
            # The current depth must be larger than the previous level
            if not (depth > levels[-1]):
                raise IndentationError("expected an indented block")

            levels.append(depth)
            yield INDENT(token.lineno)

        elif token.at_line_start:
            # Must be on the same level or one of the previous levels
            if depth == levels[-1]:
                # At the same level
                pass
            elif depth > levels[-1]:
                raise IndentationError("indentation increase but not in new block")
            else:
                # Back up; but only if it matches a previous level
                try:
                    i = levels.index(depth)
                except ValueError:
                    raise IndentationError("inconsistent indentation")
                for _ in range(i+1, len(levels)):
                    yield DEDENT(token.lineno)
                    levels.pop()

        yield token

    ### Finished processing ###

    # Must dedent any remaining levels
    if len(levels) > 1:
        assert token is not None
        for _ in range(1, len(levels)):
            yield DEDENT(token.lineno)


# The top-level filter adds an ENDMARKER, if requested.
# Python's grammar uses it.
def filter(lexer, add_endmarker = True):
    token = None
    tokens = iter(lexer.token, None)
    tokens = python_colonify(lexer, tokens)
    tokens = track_tokens_filter(lexer, tokens)
    for token in indentation_filter(tokens):
        yield token

    if add_endmarker:
        lineno = 1
        if token is not None:
            lineno = token.lineno
        yield _new_token("ENDMARKER", lineno)

# Combine Ply and my filters into a new lexer

class IndentLexer(object):
    def __init__(self, debug=0, optimize=0, lextab='lextab', reflags=0):
        self.lexer = lex.lex(debug=debug, optimize=optimize,
                             lextab=lextab, reflags=reflags)
        self.token_stream = None
    def input(self, s, add_endmarker=True):
        self.lexer.paren_count = 0
        self.lexer.brack_count = 0
        self.lexer.input(s)
        self.token_stream = filter(self.lexer, add_endmarker)
    def token(self):
        try:
            return next(self.token_stream)
        except StopIteration:
            return None

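# A minimal usage sketch of IndentLexer (hypothetical helper, not called
# anywhere in this file): feed it a pseudo-code string and collect the
# resulting token types, INDENT / DEDENT / ENDMARKER included.
def _demo_token_types(code):
    lx = IndentLexer(debug=0)
    lx.input(code)
    types = []
    while True:
        tok = lx.token()
        if not tok:
            break
        types.append(tok.type)
    return types
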
########## Parser (tokens -> AST) ######

# also part of Ply
#import yacc

# I use the Python AST
#from compiler import ast
import ast

# Helper function
def Assign(left, right):
    names = []
    if isinstance(left, ast.Name):
        # Single assignment on left
        return ast.Assign([ast.Name(left.id, ast.Store())], right)
    elif isinstance(left, ast.Tuple):
        # List of things - make sure they are Name nodes
        # (getChildren/AssName/AssTuple were compiler.ast APIs: use the
        # ast module equivalents instead)
        names = []
        for child in left.elts:
            if not isinstance(child, ast.Name):
                raise SyntaxError("that assignment not supported")
            names.append(child.id)
        ass_list = [ast.Name(name, ast.Store()) for name in names]
        return ast.Assign([ast.Tuple(ass_list, ast.Store())], right)
    else:
        raise SyntaxError("Can't do that yet")

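# Illustrative example (not executed): for the pseudo-code statement
#     RA <- RS
# p_expr_stmt() below calls Assign(Name('RA'), Name('RS')), which returns
#     ast.Assign([ast.Name('RA', ast.Store())], ast.Name('RS', ast.Load()))
# i.e. the ordinary Python assignment "RA = RS".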

# The grammar comments come from Python's Grammar/Grammar file

## NB: compound_stmt in single_input is followed by extra NEWLINE!
# file_input: (NEWLINE | stmt)* ENDMARKER
def p_file_input_end(p):
    """file_input_end : file_input ENDMARKER"""
    print ("end", p[1])
    p[0] = p[1]

def p_file_input(p):
    """file_input : file_input NEWLINE
                  | file_input stmt
                  | NEWLINE
                  | stmt"""
    if isinstance(p[len(p)-1], str):
        if len(p) == 3:
            p[0] = p[1]
        else:
            p[0] = []     # p == 2 --> only a blank line
    else:
        if len(p) == 3:
            p[0] = p[1] + p[2]
        else:
            p[0] = p[1]


# funcdef: [decorators] 'def' NAME parameters ':' suite
# ignoring decorators
def p_funcdef(p):
    "funcdef : DEF NAME parameters COLON suite"
    # note: the ast module has no "Function" node; this work-in-progress
    # rule still uses the old compiler.ast calling convention
    p[0] = ast.Function(None, p[2], list(p[3]), (), 0, None, p[5])

# parameters: '(' [varargslist] ')'
def p_parameters(p):
    """parameters : LPAR RPAR
                  | LPAR varargslist RPAR"""
    if len(p) == 3:
        p[0] = []
    else:
        p[0] = p[2]


# varargslist: (fpdef ['=' test] ',')* ('*' NAME [',' '**' NAME] | '**' NAME) |
# highly simplified
def p_varargslist(p):
    """varargslist : varargslist COMMA NAME
                   | NAME"""
    if len(p) == 4:
        p[0] = p[1] + [p[3]]
    else:
        p[0] = [p[1]]

# stmt: simple_stmt | compound_stmt
def p_stmt_simple(p):
    """stmt : simple_stmt"""
    # simple_stmt is a list
    p[0] = p[1]

def p_stmt_compound(p):
    """stmt : compound_stmt"""
    p[0] = [p[1]]

# simple_stmt: small_stmt (';' small_stmt)* [';'] NEWLINE
def p_simple_stmt(p):
    """simple_stmt : small_stmts NEWLINE
                   | small_stmts SEMICOLON NEWLINE"""
    p[0] = p[1]

def p_small_stmts(p):
    """small_stmts : small_stmts SEMICOLON small_stmt
                   | small_stmt"""
    if len(p) == 4:
        p[0] = p[1] + [p[3]]
    else:
        p[0] = [p[1]]

# small_stmt: expr_stmt | print_stmt | del_stmt | pass_stmt | flow_stmt |
#    import_stmt | global_stmt | exec_stmt | assert_stmt
def p_small_stmt(p):
    """small_stmt : flow_stmt
                  | expr_stmt"""
    p[0] = p[1]

# expr_stmt: testlist (augassign (yield_expr|testlist) |
#                      ('=' (yield_expr|testlist))*)
# augassign: ('+=' | '-=' | '*=' | '/=' | '%=' | '&=' | '|=' | '^=' |
#             '<<=' | '>>=' | '**=' | '//=')
def p_expr_stmt(p):
    """expr_stmt : testlist ASSIGN testlist
                 | testlist """
    if len(p) == 2:
        # a list of expressions
        #p[0] = ast.Discard(p[1])
        p[0] = p[1]
    else:
        p[0] = Assign(p[1], p[3])

def p_flow_stmt(p):
    "flow_stmt : return_stmt"
    p[0] = p[1]

# return_stmt: 'return' [testlist]
def p_return_stmt(p):
    "return_stmt : RETURN testlist"
    p[0] = ast.Return(p[2])


def p_compound_stmt(p):
    """compound_stmt : if_stmt
                     | while_stmt
                     | funcdef
    """
    p[0] = p[1]

def p_while_stmt(p):
    """while_stmt : WHILE test COLON suite ELSE COLON suite
                  | WHILE test COLON suite
    """
    if len(p) == 5:
        p[0] = ast.While(p[2], p[4], [])
    else:
        p[0] = ast.While(p[2], p[4], p[7])

def p_if_stmt(p):
    """if_stmt : IF test COLON suite ELSE COLON suite
               | IF test COLON suite
    """
    if len(p) == 5:
        p[0] = ast.If(p[2], p[4], [])
    else:
        p[0] = ast.If(p[2], p[4], p[7])

def p_suite(p):
    """suite : simple_stmt
             | NEWLINE INDENT stmts DEDENT"""
    if len(p) == 2:
        p[0] = p[1]
    else:
        p[0] = p[3]


def p_stmts(p):
    """stmts : stmts stmt
             | stmt"""
    if len(p) == 3:
        p[0] = p[1] + p[2]
    else:
        p[0] = p[1]

## Not using Python's approach because Ply supports precedence

# comparison: expr (comp_op expr)*
# arith_expr: term (('+'|'-') term)*
# term: factor (('*'|'/'|'%'|'//') factor)*
# factor: ('+'|'-'|'~') factor | power
# comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not'

def make_lt_compare(arg):
    (left, right) = arg
    return ast.Compare(left, [ast.Lt()], [right])
def make_gt_compare(arg):
    (left, right) = arg
    return ast.Compare(left, [ast.Gt()], [right])
def make_eq_compare(arg):
    (left, right) = arg
    return ast.Compare(left, [ast.Eq()], [right])


binary_ops = {
    "+": ast.Add(),
    "-": ast.Sub(),
    "*": ast.Mult(),
    "/": ast.Div(),
    "<": make_lt_compare,
    ">": make_gt_compare,
    "==": make_eq_compare,
}
unary_ops = {
    "+": ast.Add,
    "-": ast.Sub,
}
precedence = (
    ("left", "EQ", "GT", "LT"),
    ("left", "PLUS", "MINUS"),
    ("left", "MULT", "DIV"),
)

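# Illustrative example (not executed): because the grammar below folds all
# binary operators into the single "comparison" rule, it is the precedence
# table above that disambiguates the parse.  With it, the pseudo-code
# expression "a + b * c" groups as "a + (b * c)", since MULT/DIV bind
# tighter than PLUS/MINUS, which in turn bind tighter than EQ/GT/LT.
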
def check_concat(node): # checks if the comparison is already a concat
    print (node)
    if not isinstance(node, ast.Call):
        return [node]
    # ast.Call is not subscriptable: inspect its func / args fields
    if node.func.id != 'concat':
        return [node]
    return node.args

def p_comparison(p):
    """comparison : comparison PLUS comparison
                  | comparison MINUS comparison
                  | comparison MULT comparison
                  | comparison DIV comparison
                  | comparison LT comparison
                  | comparison EQ comparison
                  | comparison GT comparison
                  | PLUS comparison
                  | MINUS comparison
                  | comparison APPEND comparison
                  | power"""
    if len(p) == 4:
        print (list(p))
        if p[2] == '||':
            l = check_concat(p[1]) + check_concat(p[3])
            p[0] = ast.Call(ast.Name("concat"), l, [])
        elif p[2] in ['<', '>', '==']:
            p[0] = binary_ops[p[2]]((p[1], p[3]))
        else:
            p[0] = ast.BinOp(p[1], binary_ops[p[2]], p[3])
    elif len(p) == 3:
        p[0] = unary_ops[p[1]](p[2])
    else:
        p[0] = p[1]

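# Illustrative example (not executed): for the pseudo-code expression
#     [0]*56 || perm[0:7]
# the APPEND ('||') branch above produces a call node equivalent to
#     concat([0]*56, perm[0:7])
# and check_concat() flattens nested '||' chains, so that
#     a || b || c   becomes   concat(a, b, c)
# rather than concat(concat(a, b), c).
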
# power: atom trailer* ['**' factor]
# trailers enable function calls (and subscripts).
# I only allow one level of calls
# so this is 'trailer'
def p_power(p):
    """power : atom
             | atom trailer"""
    if len(p) == 2:
        p[0] = p[1]
    else:
        if p[2][0] == "CALL":
            p[0] = ast.Expr(ast.Call(p[1], p[2][1], []))
            #if p[1].id == 'print':
            #    p[0] = ast.Printnl(ast.Tuple(p[2][1]), None, None)
            #else:
            #    p[0] = ast.CallFunc(p[1], p[2][1], None, None)
        else:
            print (p[2][1])
            #raise AssertionError("not implemented %s" % p[2][0])
            subs = p[2][1]
            if len(subs) == 1:
                idx = subs[0]
            else:
                idx = ast.Slice(subs[0], subs[1], None)
            p[0] = ast.Subscript(p[1], idx)

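# Illustrative example (not executed): the register-field selection
#     (RS)[8*i:8*i+7]
# parses as the atom "(RS)" followed by a subscript trailer, so p_power()
# builds roughly ast.Subscript(Name('RS'), Slice(8*i, 8*i+7)) - the same
# shape Python itself uses for RS[8*i:8*i+7].
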
def p_atom_name(p):
    """atom : NAME"""
    p[0] = ast.Name(p[1], ctx=ast.Load())

def p_atom_number(p):
    """atom : NUMBER
            | STRING"""
    p[0] = ast.Constant(p[1])

#'[' [listmaker] ']' |

def p_atom_listmaker(p):
    """atom : LBRACK listmaker RBRACK"""
    p[0] = p[2]

def p_listmaker(p):
    """listmaker : test COMMA listmaker
                 | test
    """
    if len(p) == 2:
        p[0] = ast.List([p[1]], ast.Load())
    else:
        # ast.List stores its elements in .elts (not .nodes)
        p[0] = ast.List([p[1]] + p[3].elts, ast.Load())

def p_atom_tuple(p):
    """atom : LPAR testlist RPAR"""
    p[0] = p[2]

# trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME
def p_trailer(p):
    """trailer : trailer_arglist
               | trailer_subscript
    """
    p[0] = p[1]

def p_trailer_arglist(p):
    "trailer_arglist : LPAR arglist RPAR"
    p[0] = ("CALL", p[2])

def p_trailer_subscript(p):
    "trailer_subscript : LBRACK subscript RBRACK"
    p[0] = ("SUBS", p[2])

#subscript: '.' '.' '.' | test | [test] ':' [test]

def p_subscript(p):
    """subscript : test COLON test
                 | test
    """
    if len(p) == 4:
        p[0] = [p[1], p[3]]
    else:
        p[0] = [p[1]]


# testlist: test (',' test)* [',']
# Contains shift/reduce error
def p_testlist(p):
    """testlist : testlist_multi COMMA
                | testlist_multi """
    if len(p) == 2:
        p[0] = p[1]
    else:
        # May need to promote singleton to tuple
        if isinstance(p[1], list):
            p[0] = p[1]
        else:
            p[0] = [p[1]]
    # Convert into a tuple?
    if isinstance(p[0], list):
        p[0] = ast.Tuple(p[0])

def p_testlist_multi(p):
    """testlist_multi : testlist_multi COMMA test
                      | test"""
    if len(p) == 2:
        # singleton
        p[0] = p[1]
    else:
        if isinstance(p[1], list):
            p[0] = p[1] + [p[3]]
        else:
            # singleton -> tuple
            p[0] = [p[1], p[3]]


# test: or_test ['if' or_test 'else' test] | lambdef
#  as I don't support 'and', 'or', and 'not' this works down to 'comparison'
def p_test(p):
    "test : comparison"
    p[0] = p[1]



# arglist: (argument ',')* (argument [',']| '*' test [',' '**' test] | '**' test)
# XXX INCOMPLETE: this doesn't allow the trailing comma
def p_arglist(p):
    """arglist : arglist COMMA argument
               | argument"""
    if len(p) == 4:
        p[0] = p[1] + [p[3]]
    else:
        p[0] = [p[1]]

# argument: test [gen_for] | test '=' test  # Really [keyword '='] test
def p_argument(p):
    "argument : test"
    p[0] = p[1]

def p_error(p):
    #print "Error!", repr(p)
    raise SyntaxError(p)


class GardenSnakeParser(object):
    def __init__(self, lexer = None):
        if lexer is None:
            lexer = IndentLexer(debug=1)
        self.lexer = lexer
        self.parser = yacc.yacc(start="file_input_end",
                                debug=False, write_tables=False)

    def parse(self, code):
        self.lexer.input(code)
        result = self.parser.parse(lexer = self.lexer, debug=False)
        return ast.Module(result)

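# A minimal usage sketch of the parser (hypothetical helper, not called
# anywhere in this file): turn a pseudo-code string into a Python
# ast.Module and render it back to Python source with astor.
def _demo_to_python(code):
    tree = GardenSnakeParser().parse(code)
    tree = ast.fix_missing_locations(tree)
    return astor.to_source(tree)
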

###### Code generation ######

#from compiler import misc, syntax, pycodegen

class GardenSnakeCompiler(object):
    def __init__(self):
        self.parser = GardenSnakeParser()
    def compile(self, code, mode="exec", filename="<string>"):
        tree = self.parser.parse(code)
        print ("snake")
        pprint(tree)
        return tree
        # dead code below: the old compiler/pycodegen path (Python 2 only)
        # is kept for reference but never reached
        #misc.set_filename(filename, tree)
        #return compile(tree, mode="exec", filename="<string>")
        #syntax.check(tree)
        #gen = pycodegen.ModuleCodeGenerator(tree)
        #code = gen.getCode()
        #return code

####### Test code #######

from soc.decoder.power_fieldsn import create_sigdecode

bpermd = r"""
for i = 0 to 7
    index <- (RS)[8*i:8*i+7]
    if index < 64 then
        permi <- (RB)[index]
    else
        permi <- 0
RA <- [0]*56|| perm[0:7]
"""

bpermd = r"""
perm <- [0] * 8
if index < 64:
    index <- (RS)[8*i:8*i+7]
RA <- [0]*56 || perm[0:7]
print (RA)
"""

bpermd = r"""
if index < 64 then index <- 0
else index <- 5
while index
    index <- 0
"""
code = bpermd

lexer = IndentLexer(debug=1)
# Give the lexer some input
print ("code")
print (code)
lexer.input(code)

# Tokenize
while True:
    tok = lexer.token()
    if not tok:
        break      # No more input
    print(tok)

#sys.exit(0)

# Set up the GardenSnake run-time environment
def print_(*args):
    print ("args", args)
    print ("-->", " ".join(map(str, args)))

#d = copy(globals())
d = {}
d["print"] = print_

sd = create_sigdecode()
print ("forms", sd.df.forms)
for f in sd.df.FormX:
    print (f)

_compile = GardenSnakeCompiler().compile

tree = _compile(code, mode="single", filename="string")
import ast
tree = ast.fix_missing_locations(tree)
print ( ast.dump(tree) )

print ("astor dump")
print (astor.dump_tree(tree))
print ("to source")
source = astor.to_source(tree)
print (source)

#from compiler import parse
#tree = parse(code, "exec")

# compile the regenerated Python source so it can actually be executed
# (previously "compiled_code" was never defined)
compiled_code = compile(source, mode="exec", filename="<string>")

print (compiled_code)

exec (compiled_code, d)
print ("Done")

#print d
#print l