         # all sections are mandatory so no need for a full LALR parser.
         l = lines.pop(0).rstrip()  # get first line
+        prefix_lines = 0
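+        # prefix_lines counts every markdown line consumed before the
+        # pseudocode body; it sizes the blank-line padding added below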
         while lines:
             print(l)
             # look for HTML comment, if starting, skip line.
             if l.startswith('<!--'):
                 # print ("skipping comment", l)
                 l = lines.pop(0).rstrip()  # get next line
+                prefix_lines += 1
                 continue
             # Ignore blank lines before the first #
             if len(l) == 0:
                 l = lines.pop(0).rstrip()  # get next line
+                prefix_lines += 1
                 continue
             # expect to get heading; ignore any
             # lines not starting with a space
             while True:
                 l = lines.pop(0).rstrip()
+                prefix_lines += 1
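+                # lines skipped while scanning for the pseudocode start
+                # are counted too, so the padding stays accurate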
print ("examining", repr(l))
if l.startswith(" "):
break
continue
             # get pseudocode
-            li = [l[4:]]  # first line detected with 4-space
+
+            # fix parser line numbers by prepending the right number of
+            # blank lines to the parser input
+            li = [""] * prefix_lines
+            li += [l[4:]]  # first line detected with 4-space
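+            # e.g. if the pseudocode body began on line 7 of the .mdwn
+            # file, prefix_lines is 6, six blank lines are prepended, and
+            # a lexer that counts newlines reports errors at .mdwn line
+            # numbers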
             while lines:
                 l = lines.pop(0).rstrip()
                 print ("examining", repr(l))

                     li.append(l)
                     continue
                 if l.startswith('<!--'):
+                    li.append("")
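+                    # the comment line is dropped from the pseudocode, but
+                    # an empty placeholder keeps the line count aligned
+                    # with the file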
                     continue
                 assert l.startswith('    '), ("4spcs not found in line %s" % l)
                 l = l[4:]  # lose 4 spaces

         spc_count = count_spaces(l)
         nwhite = l[spc_count:]
         if len(nwhite) == 0:  # skip blank lines
+            res.append('')
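+            # keep an empty placeholder rather than dropping the blank
+            # line, so the filtered code keeps its original line numbering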
             continue
         if nwhite.startswith("case") or nwhite.startswith("default"):
             #print ("case/default", nwhite, spc_count, prev_spc_count)

         s += "\n"
         self.lexer.paren_count = 0
         self.lexer.brack_count = 0
+        self.lexer.lineno = 1
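+        # the lexer retains lineno across successive input() calls;
+        # reset it so each pseudocode block starts at line 1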
         self.lexer.input(s)
         self.token_stream = filter(self.lexer, add_endmarker)
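
For context, a minimal standalone sketch (with hypothetical input, not part
of the patch) of why the blank-line padding works: one leading blank line
per skipped markdown line means any lexer that counts newlines reports
positions in the original file's coordinates.

    # hypothetical .mdwn content: three preamble lines, then pseudocode
    md_lines = ["<!-- comment -->", "", "# heading",
                "    x <- 1", "    y <- oops"]

    prefix_lines = 3  # lines consumed before the pseudocode body
    pcode = [""] * prefix_lines + [l[4:] for l in md_lines[3:]]

    # the bad statement sits on line 5 of the parser input *and* on
    # line 5 of the markdown source (both 1-based)
    assert pcode.index("y <- oops") + 1 == 5
    assert md_lines[5 - 1] == "    y <- oops"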