"""PLY lexer definitions for a Verilog-style language.

Declares the lexer states, operator/number token rules, and the keyword
table consumed by the parser (token name list comes from parse_tokens).
"""
from parse_tokens import tokens
from ply import lex

# Set to 1 to trace keyword/identifier classification in t_LITERAL.
lex_debug = 0

# Exclusive lexer states; only 'timescale' is currently enabled.
states = (  # ('module', 'exclusive'),
    ('timescale', 'exclusive'),
)

tokens += ['timescale', 'LITERAL', 'IDENTIFIER', 'DEC_NUMBER', 'BASED_NUMBER',
           'UNBASED_NUMBER']
def t_ccomment(tok):
    r'/\*(.|\n)*?\*/'
    # C-style block comment: token is discarded (no return), but line
    # numbering must stay in sync with the newlines it swallowed.
    tok.lexer.lineno += tok.value.count('\n')
# Skip C++-style line comments and plain whitespace in the default state.
t_ignore_cppcomment = r'//.*'
t_ignore = ' \t\n'

# Multi-character punctuation / operator tokens.
t_K_STARP = r"\*\)"
t_K_DOTSTAR = r"\.\*"
t_K_LS = r"(<<|<<<)"
t_K_RS = r">>"
t_K_RSS = r">>>"
t_K_POW = r"\*\*"
t_K_LE = r"<="
t_K_LP = r"\'\{"
t_K_SCOPE_RES = r"::"

# Operator token names. NOTE(review): many of these (K_GE, K_EQ, ...) have
# no t_ rule in this file -- presumably produced elsewhere; confirm.
tokens += ['K_PSTAR', 'K_STARP', 'K_DOTSTAR', 'K_LS',
           'K_RS', 'K_RSS', 'K_POW', 'K_LE', 'K_GE', 'K_EG', 'K_SG',
           'K_EQ', 'K_NE', 'K_CEQ', 'K_CNE', 'K_WEQ', 'K_WNE',
           'K_LOR', 'K_LAND', 'K_TAND', 'K_NOR', 'K_NXOR',
           'K_NAND', 'K_TRIGGER', 'K_PO_POS', 'K_PO_NEG', 'K_CONTRIBUTE',
           'K_PLUS_EQ', 'K_MINUS_EQ', 'K_MUL_EQ', 'K_DIV_EQ', 'K_MOD_EQ',
           'K_AND_EQ', 'K_OR_EQ', 'K_XOR_EQ', 'K_LS_EQ', 'K_RS_EQ',
           'K_RSS_EQ', 'K_INCR', 'K_DECR', 'K_LP',
           'K_SCOPE_RES',
           ]

# Maps source keywords to their token codes (table abridged in this view).
lexor_keyword_code = {
    "above": 'K_above',
    "zi_zp": 'K_zi_zp',
}
# Single-character tokens handed to the parser verbatim.
# NOTE(review): '[' appeared twice in the original list; duplicates are
# redundant for PLY literals, so one copy was dropped.
literals = ['[', '}', '{', ';', ':', ']', ',', '(', ')',
            '#', '=', '.', '@', '&', '!', '?', '<', '>', '%',
            '|', '^', '~', '+', '*', '/', '-']

# Disabled 'module' state rule, kept for reference:
"""
t_module_ignore = ' \t'
"""

def t_LITERAL(t):
    r'[a-zA-Z_$][a-zA-Z0-9$_]*'
    # Classify a word as a language keyword or a plain identifier.
    word = t.value
    keyword = lexor_keyword_code.get(t.value, 'IDENTIFIER')
    if lex_debug:
        print("literal", word, keyword)
    # if keyword in ['K_module', 'K_macromodule']:
    #     t.lexer.modulestart = t.lexpos+len(t.value)
    #     t.lexer.begin('module')
    if keyword == 'IDENTIFIER':
        t.type = keyword
    # NOTE(review): keywords keep the default 'LITERAL' token type here;
    # only identifiers are retyped -- confirm the parser expects this.
    return t

def t_dec_number(t):
    r'\'[sS]?[dD][ \t]*[0-9][0-9_]*'
    # Unsized decimal literal with a base prefix, e.g. 'd42.
    t.type = 'BASED_NUMBER'
    # t.value = word # make_unsized_dec(yytext);
    return t

def t_undef_highz_dec(t):
    r'\'[sS]?[dD][ \t]*[xzXZ?]_*'
    # Decimal base prefix followed by an undefined/high-Z digit, e.g. 'dx.
    t.type = 'BASED_NUMBER'
    # t.value = word # make_undef_highz_dec(yytext);
    return t

def t_based_make_unsized_binary(t):
    r'\'[sS]?[bB][ \t]*[0-1xzXZ?][0-1xzXZ?_]*'
    # Unsized binary literal, e.g. 'b1010 (digits may include x/z/?).
    t.type = 'BASED_NUMBER'
    # t.value = word # make_unsized_binary(yytext);
    return t

def t_make_unsized_octal(t):
    r'\'[sS]?[oO][ \t]*[0-7xzXZ?][0-7xzXZ?_]*'
    # Unsized octal literal, e.g. 'o17.
    t.type = 'BASED_NUMBER'
    # t.value = word # make_unsized_octal(yytext);
    return t

def t_make_unsized_hex(t):
    r'\'[sS]?[hH][ \t]*[0-9a-fA-FxzXZ?][0-9a-fA-FxzXZ?_]*'
    # Unsized hexadecimal literal, e.g. 'hfF.
    t.type = 'BASED_NUMBER'
    # t.value = word # make_unsized_hex(yytext);
    return t

def t_unbased_make_unsized_binary(t):
    r'\'[01xzXZ]'
    # Single-digit unbased literal, e.g. '1 or 'z.
    t.type = 'UNBASED_NUMBER'
    # t.value = word # make_unsized_binary(yytext);
    return t

"""
/* Decimal numbers are the usual. But watch out for the UDPTABLE
mode, where there are no decimal numbers. Reject the match if we
are in the UDPTABLE state. */
"""
+
+
def t_make_unsized_dec(t):
    r'[0-9][0-9_]*'
    # Plain decimal literal (no base prefix); tried after the based-number
    # rules above because PLY orders function rules by definition order.
    t.type = 'DEC_NUMBER'
    # t.value = word # make_unsized_dec(yytext);
    # based_size = yylval.number->as_ulong();
    return t

"""
/* Notice and handle the `timescale directive. */
"""
+
def t_timescale(t):
    # Original anchored pattern was r'^{W}?`timescale'; PLY rules use the
    # plain form below.
    r'`timescale'
    # Remember where the directive's argument text starts, then switch to
    # the exclusive 'timescale' state until t_timescale_end fires.
    t.lexer.timestart = t.lexpos + len(t.value)
    t.lexer.push_state('timescale')

# Inside the 'timescale' state: skip spaces/tabs only; t_timescale_end
# then consumes the remainder of the line as the directive body.
#t_timescale_ignore_toeol = r'.+\n'
t_timescale_ignore = ' \t'
#t_timescale_ignore_whitespace = r'\s+'
#t_code_ignore = ""

def t_timescale_end(t):
    r'.+\n'
    # Everything from just after the `timescale keyword up to the current
    # position becomes the token value; then restore the previous state.
    code = t.lexer.lexdata[t.lexer.timestart:t.lexpos]
    t.type = 'timescale'
    t.value = code
    t.lexer.pop_state()
    print("match", code)  # debug trace; consider gating on lex_debug
    return t

def t_timescale_error(t):
    """Report an unlexable character inside a `timescale directive and abort."""
    print("%d: Timescale error '%s'" % (t.lexer.lineno, t.value[0]))
    print(t.value)
    raise RuntimeError

"""
def t_module_error(t):
print("%d: Module error '%s'" % (t.lexer.lineno, t.value[0]))
raise RuntimeError
"""
+
def t_error(t):
    """Report an illegal character in the default state and skip past it."""
    print("%d: Illegal character '%s'" % (t.lexer.lineno, t.value[0]))
    print(t.value)
    t.lexer.skip(1)

# Deduplicate token names (the += blocks above may repeat entries) before
# building the lexer. NOTE: set() loses order, which PLY tolerates for
# the tokens list.
tokens = list(set(tokens))
lex.lex()

if __name__ == '__main__':
    lex.runmain()