return hex(reg.value)
-def convert_to_pure_python(pcode, helper=False):
+def convert_to_pure_python(pcode, helper=False, filename="string"):
gsc = GardenSnakeCompiler(form=None, incl_carry=False, helper=helper)
- tree = gsc.compile(pcode, mode="exec", filename="string")
+ tree = gsc.compile(pcode, mode="exec", filename=filename)
tree = ast.fix_missing_locations(tree)
return astor.to_source(tree)
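+# Example (illustrative sketch; the path below is hypothetical):
+#   pycode = convert_to_pure_python(pcode, helper=True,
+#                                   filename="/path/to/somepage.mdwn")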
-def convert_to_python(pcode, form, incl_carry, helper=False):
+def convert_to_python(pcode, form, incl_carry, helper=False, filename="string"):
print("form", form)
gsc = GardenSnakeCompiler(form=form, incl_carry=incl_carry, helper=helper)
- tree = gsc.compile(pcode, mode="exec", filename="string")
+ tree = gsc.compile(pcode, mode="exec", filename=filename)
tree = ast.fix_missing_locations(tree)
regsused = {'read_regs': gsc.parser.read_regs,
'write_regs': gsc.parser.write_regs,
# so please keep each comment, including its closing -->, on one line:
# <!-- line 1 comment -->
# <!-- line 2 comment -->
- if l.startswith('<!--'):
+ if l.strip().startswith('<!--'):
# print ("skipping comment", l)
l = lines.pop(0).rstrip() # get next line
+ prefix_lines += 1
if len(l) == 0:
li.append(l)
continue
- if l.startswith('<!--'):
+ if l.strip().startswith('<!--'):
li.append("")
continue
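+ # (a comment line is replaced by a blank line rather than dropped, so
+ #  the pseudo-code keeps its original line numbering)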
assert l.startswith('    '), ("4spcs not found in line %s" % l)
from ply import lex
from openpower.decoder.selectable_int import SelectableInt
+
+def raise_syntax_error(msg, filename, lineno, lexpos, input_text):
+ # build Python's standard SyntaxError arguments:
+ # (msg, (filename, lineno, offset, text))
+ line_start = input_text.rfind('\n', 0, lexpos) + 1
+ line = input_text[line_start:].split('\n', 1)[0]
+ col = (lexpos - line_start) + 1
+ raise SyntaxError(str(msg), (filename, lineno, col, line))
+
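+# Because the real filename, line and column are passed through, a
+# pseudo-code failure now shows up in the traceback as, roughly,
+# 'File "somepage.mdwn", line N' with a caret under the offending
+# column, instead of pointing at "<string>".  ("somepage.mdwn" is a
+# placeholder, not a real page name.)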
# I implemented INDENT / DEDENT generation as a post-processing filter
# The original lex token stream contains WS and NEWLINE characters.
# Build the lexer
def build(self, **kwargs):
self.lexer = lex.lex(module=self, **kwargs)
+ self.filename = None
def t_HEX(self, t):
r"""0x[0-9a-fA-F_]+"""
#t_ignore = " "
def t_error(self, t):
- raise SyntaxError("Unknown symbol %r" % (t.value[0],))
+ raise_syntax_error("Unknown symbol %r" % (t.value[0],),
+ self.filename, t.lexer.lineno,
+ t.lexer.lexpos, t.lexer.lexdata)
print("Skipping", repr(t.value[0]))
t.lexer.skip(1)
# all sections are mandatory so no need for a full LALR parser.
l = lines.pop(0).rstrip() # get first line
+ prefix_lines = 0
while lines:
if self.verbose:
print(l)
if l.strip().startswith('<!--'):
# print ("skipping comment", l)
l = lines.pop(0).rstrip() # get next line
+ prefix_lines += 1
continue
# Ignore blank lines before the first #
if len(l) == 0:
l = lines.pop(0).rstrip() # get next line
+ prefix_lines += 1
continue
# expect to get the heading
# whitespace expected
l = lines.pop(0).strip()
+ prefix_lines += 1
if self.verbose:
print(repr(l))
assert len(l) == 0, ("blank line not found %s" % l)
# Form expected
l = lines.pop(0).strip()
+ prefix_lines += 1
assert l.endswith('-Form'), ("line with -Form expected %s" % l)
d['form'] = l.split('-')[0]
# whitespace expected
l = lines.pop(0).strip()
+ prefix_lines += 1
assert len(l) == 0, ("blank line not found %s" % l)
# get list of opcodes
opcodes = []
while True:
l = lines.pop(0).strip()
+ prefix_lines += 1
if len(l) == 0:
break
assert l.startswith('*'), ("* not found in line %s" % l)
# "Pseudocode" expected
l = lines.pop(0).rstrip()
+ prefix_lines += 1
assert l.startswith("Pseudo-code:"), ("pseudocode found %s" % l)
# whitespace expected
l = lines.pop(0).strip()
+ prefix_lines += 1
if self.verbose:
print(repr(l))
assert len(l) == 0, ("blank line not found %s" % l)
# get pseudocode
- li = []
+
+ # fix parser line numbers by prepending the right number of
+ # blank lines to the parser input
+ li = [""] * prefix_lines
+ li += [l[4:]] # first pseudo-code line, already identified above by its 4-space indent
while True:
l = lines.pop(0).rstrip()
- if l.strip().startswith('<!--'):
- continue
+ prefix_lines += 1
if len(l) == 0:
+ li.append(l)
break
+ if l.strip().startswith('<!--'):
+ li.append("")
+ continue
assert l.startswith('    '), ("4spcs not found in line %s" % l)
l = l[4:] # lose 4 spaces
li.append(l)
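+ # li now holds prefix_lines leading blank lines followed by the
+ # pseudo-code, so a "line N" reported by the parser should correspond
+ # to line N of the .mdwn page (the intent of the padding above)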
# "Special Registers Altered" expected
l = lines.pop(0).rstrip()
+ prefix_lines += 1
assert l.startswith("Special"), ("special not found %s" % l)
# whitespace expected
l = lines.pop(0).strip()
+ prefix_lines += 1
assert len(l) == 0, ("blank line not found %s" % l)
# get special regs
li = []
while lines:
l = lines.pop(0).rstrip()
+ prefix_lines += 1
if len(l) == 0:
break
assert l.startswith('    '), ("4spcs not found in line %s" % l)
# expect and drop whitespace and comments
while lines:
l = lines.pop(0).rstrip()
+ prefix_lines += 1
if len(l) != 0 and not l.strip().startswith('<!--'):
break
from copy import deepcopy
from openpower.decoder.power_decoder import create_pdecode
-from openpower.decoder.pseudo.lexer import IndentLexer
+from openpower.decoder.pseudo.lexer import IndentLexer, raise_syntax_error
from openpower.decoder.orderedset import OrderedSet
# I use the Python AST
self.include_ca_in_write = include_carry_in_write
self.helper = helper
self.form = form
+ self.filename = None
+ self.input_text = None
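+ # (filename and input_text are filled in by GardenSnakeCompiler.compile()
+ #  before parse() is called, so syntax errors can cite the source page)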
self.reset()
# The grammar comments come from Python's Grammar/Grammar file
# documentation for why we need this
copy_fn = ast.Name("copy_assign_rhs", ast.Load())
rhs = ast.Call(copy_fn, (p[3],), [])
- p[0] = self.Assign(autoassign, name, p[1], rhs, iea_mode)
+ p[0] = self.Assign(autoassign, name, p[1], rhs, iea_mode, p[2])
if name:
self.declared_vars.add(name)
def p_error(self, p):
# print "Error!", repr(p)
- raise SyntaxError(p)
+ if p is None: # PLY passes None when the input ends unexpectedly
+ raise SyntaxError("unexpected end of input in %s" % self.filename)
+ raise_syntax_error(str(p), self.filename, p.lineno, p.lexpos,
+ self.input_text)
- def Assign(self, autoassign, assignname, left, right, iea_mode):
+ def Assign(self, autoassign, assignname, left, right, iea_mode, eq_tok):
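+ # eq_tok is the assignment-operator token passed as p[2] from the
+ # assignment rule above; its lineno/lexpos give the syntax errors
+ # raised below an accurate location in the original input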
names = []
print("Assign", autoassign, assignname, left, right)
if isinstance(left, ast.Name):
names = []
for child in left.elts:
if not isinstance(child, ast.Name):
- raise SyntaxError("that assignment not supported")
+ raise_syntax_error(
+ "that assignment not supported", self.filename,
+ eq_tok.lineno, eq_tok.lexpos, self.input_text)
names.append(child.id)
ass_list = [ast.Name(name, ast.Store()) for name in names]
return ast.Assign([ast.Tuple(ass_list)], right)
[left.value, ls, right], [])
else:
print("Assign fail")
- raise SyntaxError("Can't do that yet")
+ raise_syntax_error("Can't do that yet", self.filename,
+ eq_tok.lineno, eq_tok.lexpos, self.input_text)
_CACHE_DECODER = True
def parse(self, code):
self.reset()
+ self.lexer.filename = self.filename
result = self.parser.parse(code, lexer=self.lexer, debug=self.debug)
if self.helper:
result = [ast.ClassDef("ISACallerFnHelper", [
incl_carry=incl_carry, helper=helper)
def compile(self, code, mode="exec", filename="<string>"):
+ if filename in ["string", "<string>"]:
+ # refuse the old placeholder defaults: errors must point at a real file
+ raise ValueError("missing filename")
+ self.parser.filename = filename
+ self.parser.input_text = code
tree = self.parser.parse(code)
print("snake")
pprint(tree)
import sys
import shutil
import subprocess
-from openpower.decoder.pseudo.functionreader import ISAFunctions
+from openpower.decoder.pseudo.functionreader import ISAFunctions, get_isafn_dir
from openpower.decoder.power_pseudo import convert_to_pure_python
if phash in sourcecache:
pycode = sourcecache[phash]
else:
- pycode = convert_to_pure_python(pcode, True)
+ filename = os.path.join(get_isafn_dir(), pagename + ".mdwn")
+ pycode = convert_to_pure_python(pcode, True, filename=filename)
sourcecache[phash] = pycode
f.write(pycode)
import sys
import shutil
import subprocess
-from openpower.decoder.pseudo.pagereader import ISA
+from openpower.decoder.pseudo.pagereader import ISA, get_isa_dir
from openpower.decoder.power_pseudo import convert_to_python
from openpower.decoder.orderedset import OrderedSet
from openpower.decoder.isa.caller import create_args
pcode = '\n'.join(d.pcode) + '\n'
print(pcode)
incl_carry = pagename == 'fixedshift'
- pycode, rused = convert_to_python(pcode, d.form, incl_carry)
+ filename = os.path.join(get_isa_dir(), pagename + ".mdwn")
+ pycode, rused = convert_to_python(pcode, d.form, incl_carry,
+ filename=filename)
# create list of arguments to call
regs = list(rused['read_regs']) + list(rused['uninit_regs'])
regs += list(rused['special_regs'])
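+ # (rused is the register-usage dict returned by convert_to_python:
+ #  'read_regs', 'write_regs', 'uninit_regs' and 'special_regs' as
+ #  collected by the parser)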