+ return extn.sub(r'-g\1.inc', s)
+
+ # Get the file object for emitting code into the specified section
+ # (header, decoder, exec, decode_block).
+ def get_file(self, section):
+ if section == 'decode_block':
+ filename = 'decode-method.cc.inc'
+ else:
+ if section == 'header':
+ file = 'decoder.hh'
+ else:
+ file = '%s.cc' % section
+ filename = self.suffixize(file, section)
+ try:
+ return self.files[filename]
+ except KeyError: pass
+
+ f = self.open(filename)
+ self.files[filename] = f
+
+ # The splittable files are the ones with many independent
+ # per-instruction functions - the decoder's instruction constructors
+ # and the instruction execution (execute()) methods. These both have
+ # the suffix -ns.cc.inc, meaning they are within the namespace part
+ # of the ISA, contain object-emitting C++ source, and are included
+ # into other top-level files. These are the files that need special
+ # #define's to allow parts of them to be compiled separately. Rather
+ # than splitting the emissions into separate files, the monolithic
+ # output of the ISA parser is maintained, but the value (or lack
+ # thereof) of the __SPLIT definition during C preprocessing will
+ # select the different chunks. If no 'split' directives are used,
+ # the emitted preprocessor guards have no effect. (A sketch of the
+ # resulting file layout follows this method.)
+ if re.search(r'-ns\.cc\.inc$', filename):
+ print('#if !defined(__SPLIT) || (__SPLIT == 1)', file=f)
+ self.splits[f] = 1
+ # ensure requisite #include's
+ elif filename == 'decoder-g.hh.inc':
+ print('#include "base/bitfield.hh"', file=f)
+
+ return f
+
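+ # A rough sketch (hypothetical, assuming one 'split decoder'
+ # directive was seen) of how a generated decoder-ns.cc.inc ends up
+ # structured:
+ #
+ #   #if !defined(__SPLIT) || (__SPLIT == 1)
+ #   ... constructors emitted before the split ...
+ #   #endif
+ #   #if __SPLIT == 2
+ #   ... constructors emitted after it ...
+ #   #endif   <- appended when the parse completes
+ #
+ # Each top-level inst-constrs-N.cc then #define's __SPLIT to N before
+ # #include'ing this file, so each compiles just one chunk.
+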
+ # Weave together the parts of the different output sections by
+ # #include'ing them into some very short top-level .cc/.hh files.
+ # These small files make it much clearer how this tool works, since
+ # you directly see the chunks emitted as files that are #include'd.
+ def write_top_level_files(self):
+ # decoder header - everything depends on this
+ file = 'decoder.hh'
+ with self.open(file) as f:
+ f.write('#ifndef __ARCH_%(isa)s_GENERATED_DECODER_HH__\n'
+ '#define __ARCH_%(isa)s_GENERATED_DECODER_HH__\n\n' %
+ {'isa': self.isa_name.upper()})
+ fn = 'decoder-g.hh.inc'
+ assert(fn in self.files)
+ f.write('#include "%s"\n' % fn)
+
+ fn = 'decoder-ns.hh.inc'
+ assert(fn in self.files)
+ f.write('namespace %s {\n#include "%s"\n}\n'
+ % (self.namespace, fn))
+ f.write('\n#endif // __ARCH_%s_GENERATED_DECODER_HH__\n' %
+ self.isa_name.upper())
+
+ # decoder method - cannot be split
+ file = 'decoder.cc'
+ with self.open(file) as f:
+ fn = 'base/compiler.hh'
+ f.write('#include "%s"\n' % fn)
+
+ fn = 'decoder-g.cc.inc'
+ assert(fn in self.files)
+ f.write('#include "%s"\n' % fn)
+
+ fn = 'decoder.hh'
+ f.write('#include "%s"\n' % fn)
+
+ fn = 'decode-method.cc.inc'
+ # guaranteed to have been written, since the parse cannot
+ # complete without emitting the decode block
+ f.write('#include "%s"\n' % fn)
+
+ extn = re.compile(r'(\.[^\.]+)$')
+
+ # instruction constructors
+ splits = self.splits[self.get_file('decoder')]
+ file_ = 'inst-constrs.cc'
+ for i in range(1, splits+1):
+ if splits > 1:
+ file = extn.sub(r'-%d\1' % i, file_)
+ else:
+ file = file_
+ with self.open(file) as f:
+ fn = 'decoder-g.cc.inc'
+ assert(fn in self.files)
+ f.write('#include "%s"\n' % fn)
+
+ fn = 'decoder.hh'
+ f.write('#include "%s"\n' % fn)
+
+ fn = 'decoder-ns.cc.inc'
+ assert(fn in self.files)
+ print('namespace %s {' % self.namespace, file=f)
+ if splits > 1:
+ print('#define __SPLIT %u' % i, file=f)
+ print('#include "%s"' % fn, file=f)
+ print('}', file=f)
+
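+ # For illustration (hypothetical ISA namespace 'MyIsaInst', decoder
+ # split into two chunks), the second generated file,
+ # inst-constrs-2.cc, would read roughly:
+ #
+ #   #include "decoder-g.cc.inc"
+ #   #include "decoder.hh"
+ #   namespace MyIsaInst {
+ #   #define __SPLIT 2
+ #   #include "decoder-ns.cc.inc"
+ #   }
+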
+ # instruction execution
+ splits = self.splits[self.get_file('exec')]
+ for i in range(1, splits+1):
+ file = 'generic_cpu_exec.cc'
+ if splits > 1:
+ file = extn.sub(r'_%d\1' % i, file)
+ with self.open(file) as f:
+ fn = 'exec-g.cc.inc'
+ assert(fn in self.files)
+ f.write('#include "%s"\n' % fn)
+ f.write('#include "cpu/exec_context.hh"\n')
+ f.write('#include "decoder.hh"\n')
+
+ fn = 'exec-ns.cc.inc'
+ assert(fn in self.files)
+ print('namespace %s {' % self.namespace, file=f)
+ if splits > 1:
+ print('#define __SPLIT %u' % i, file=f)
+ print('#include "%s"' % fn, file=f)
+ print('}', file=f)
+
+ # max_inst_regs.hh
+ self.update('max_inst_regs.hh',
+ '''namespace %(namespace)s {
+ const int MaxInstSrcRegs = %(maxInstSrcRegs)d;
+ const int MaxInstDestRegs = %(maxInstDestRegs)d;
+ const int MaxMiscDestRegs = %(maxMiscDestRegs)d;\n}\n''' % self)
+
+ scaremonger_template = '''// DO NOT EDIT
+// This file was automatically generated from an ISA description:
+// %(filename)s
+
+'''
+
+ #####################################################################
+ #
+ # Lexer
+ #
+ # The PLY lexer module takes two things as input:
+ # - A list of token names (the string list 'tokens')
+ # - A regular expression describing a match for each token. The
+ # regexp for token FOO can be provided in two ways:
+ # - as a string variable named t_FOO
+ # - as the doc string for a function named t_FOO. In this case,
+ # the function is also executed, allowing an action to be
+ # associated with each token match.
+ #
+ #####################################################################
+
+ # Reserved words. These are listed separately as they are matched
+ # using the same regexp as generic IDs, but distinguished in the
+ # t_ID() function. The PLY documentation suggests this approach.
+ reserved = (
+ 'BITFIELD', 'DECODE', 'DECODER', 'DEFAULT', 'DEF', 'EXEC', 'FORMAT',
+ 'HEADER', 'LET', 'NAMESPACE', 'OPERAND_TYPES', 'OPERANDS',
+ 'OUTPUT', 'SIGNED', 'SPLIT', 'TEMPLATE'
+ )
+
+ # List of tokens. The lex module requires this.
+ tokens = reserved + (
+ # identifier
+ 'ID',
+
+ # integer literal
+ 'INTLIT',
+
+ # string literal
+ 'STRLIT',
+
+ # code literal
+ 'CODELIT',
+
+ # ( ) [ ] { } < > = , ; . : :: *
+ 'LPAREN', 'RPAREN',
+ 'LBRACKET', 'RBRACKET',
+ 'LBRACE', 'RBRACE',
+ 'LESS', 'GREATER', 'EQUALS',
+ 'COMMA', 'SEMI', 'DOT', 'COLON', 'DBLCOLON',
+ 'ASTERISK',
+
+ # C preprocessor directives
+ 'CPPDIRECTIVE'
+
+ # The following are matched but never returned; they are
+ # commented out to suppress a PLY warning.
+ # newfile directive
+ # 'NEWFILE',
+
+ # endfile directive
+ # 'ENDFILE'
+ )
+
+ # Regular expressions for token matching
+ t_LPAREN = r'\('
+ t_RPAREN = r'\)'
+ t_LBRACKET = r'\['
+ t_RBRACKET = r'\]'
+ t_LBRACE = r'\{'
+ t_RBRACE = r'\}'
+ t_LESS = r'\<'
+ t_GREATER = r'\>'
+ t_EQUALS = r'='
+ t_COMMA = r','
+ t_SEMI = r';'
+ t_DOT = r'\.'
+ t_COLON = r':'
+ t_DBLCOLON = r'::'
+ t_ASTERISK = r'\*'
+
+ # Identifiers and reserved words
+ reserved_map = { }
+ for r in reserved:
+ reserved_map[r.lower()] = r
+
+ def t_ID(self, t):
+ r'[A-Za-z_]\w*'
+ t.type = self.reserved_map.get(t.value, 'ID')
+ return t
+
+ # Integer literal
+ def t_INTLIT(self, t):
+ r'-?(0x[\da-fA-F]+|\d+)'
+ try:
+ t.value = int(t.value,0)
+ except ValueError:
+ error(t.lexer.lineno, 'Integer value "%s" too large' % t.value)
+ t.value = 0
+ return t
+
+ # String literal. Note that these use only single quotes, and
+ # can span multiple lines.
+ def t_STRLIT(self, t):
+ r"(?m)'([^'])+'"
+ # strip off quotes
+ t.value = t.value[1:-1]
+ t.lexer.lineno += t.value.count('\n')
+ return t
+
+
+ # "Code literal"... like a string literal, but delimiters are
+ # '{{' and '}}' so they get formatted nicely under emacs c-mode
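+ # (e.g., the input {{ Ra = Rb + imm; }} lexes to a CODELIT token
+ # whose value is ' Ra = Rb + imm; ', with the {{ }} stripped --
+ # hypothetical operand names, for illustration only)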
+ def t_CODELIT(self, t):
+ r"(?m)\{\{([^\}]|}(?!\}))+\}\}"
+ # strip off {{ & }}
+ t.value = t.value[2:-2]
+ t.lexer.lineno += t.value.count('\n')
+ return t
+
+ def t_CPPDIRECTIVE(self, t):
+ r'^\#[^\#].*\n'
+ t.lexer.lineno += t.value.count('\n')
+ return t
+
+ def t_NEWFILE(self, t):
+ r'^\#\#newfile\s+"[^"]*"\n'
+ self.fileNameStack.push(t.lexer.lineno)
+ t.lexer.lineno = LineTracker(t.value[11:-2])
+
+ def t_ENDFILE(self, t):
+ r'^\#\#endfile\n'
+ t.lexer.lineno = self.fileNameStack.pop()
+
+ #
+ # The t_NEWLINE and t_error functions and the t_ignore string
+ # are special to the lex module.
+ #
+
+ # Newlines
+ def t_NEWLINE(self, t):
+ r'\n+'
+ t.lexer.lineno += t.value.count('\n')
+
+ # Comments
+ def t_comment(self, t):
+ r'//.*'
+
+ # Completely ignored characters
+ t_ignore = ' \t\x0c'
+
+ # Error handler
+ def t_error(self, t):
+ error(t.lexer.lineno, "illegal character '%s'" % t.value[0])
+ t.lexer.skip(1)
+
+ #####################################################################
+ #
+ # Parser
+ #
+ # Every function whose name starts with 'p_' defines a grammar
+ # rule. The rule is encoded in the function's doc string, while
+ # the function body provides the action taken when the rule is
+ # matched. The argument to each function is a list of the values
+ # of the rule's symbols: t[0] for the LHS, and t[1..n] for the
+ # symbols on the RHS. For tokens, the value is copied from the
+ # t.value attribute provided by the lexer. For non-terminals, the
+ # value is assigned by the producing rule; i.e., the job of the
+ # grammar rule function is to set the value for the non-terminal
+ # on the LHS (by assigning to t[0]).
+ #####################################################################
+
+ # The LHS of the first grammar rule is used as the start symbol
+ # (in this case, 'specification'). Note that this rule enforces
+ # that there will be exactly one namespace declaration, with 0 or
+ # more global defs/decls before and after it. The defs & decls
+ # before the namespace decl will be outside the namespace; those
+ # after will be inside. The decoder function is always inside the
+ # namespace.
+ def p_specification(self, t):
+ 'specification : opt_defs_and_outputs top_level_decode_block'
+
+ for f in self.splits.iterkeys():
+ f.write('\n#endif\n')
+
+ for f in self.files.itervalues(): # close ALL the files;
+ f.close() # not doing so can cause compilation to fail
+
+ self.write_top_level_files()
+
+ t[0] = True
+
+ # 'opt_defs_and_outputs' is a possibly empty sequence of def and/or
+ # output statements. Its productions do the hard work of eventually
+ # instantiating GenCode objects, which are generally emitted (written
+ # to disk) as soon as possible, except for the decode_block, which
+ # has to be accumulated into one large function of nested
+ # switch/case blocks.
+ def p_opt_defs_and_outputs_0(self, t):
+ 'opt_defs_and_outputs : empty'
+
+ def p_opt_defs_and_outputs_1(self, t):
+ 'opt_defs_and_outputs : defs_and_outputs'
+
+ def p_defs_and_outputs_0(self, t):
+ 'defs_and_outputs : def_or_output'
+
+ def p_defs_and_outputs_1(self, t):
+ 'defs_and_outputs : defs_and_outputs def_or_output'
+
+ # The list of possible definition/output statements.
+ # They are all processed as they are seen.
+ def p_def_or_output(self, t):
+ '''def_or_output : name_decl
+ | def_format
+ | def_bitfield
+ | def_bitfield_struct
+ | def_template
+ | def_operand_types
+ | def_operands
+ | output
+ | global_let
+ | split'''
+
+ # Utility function used by both ways of requesting a split: the
+ # explicit 'split' keyword and the split() function inside
+ # "let {{ }};" blocks.
+ def split(self, sec, write=False):
+ assert sec != 'header', "header cannot be split"
+
+ f = self.get_file(sec)
+ self.splits[f] += 1
+ s = '\n#endif\n#if __SPLIT == %u\n' % self.splits[f]
+ if write:
+ f.write(s)
+ else:
+ return s
+
+ # split output file to reduce compilation time
+ def p_split(self, t):
+ 'split : SPLIT output_type SEMI'
+ assert self.isa_name, "'split' not allowed before namespace decl"
+
+ self.split(t[2], True)
+
+ def p_output_type(self, t):
+ '''output_type : DECODER
+ | HEADER
+ | EXEC'''
+ t[0] = t[1]
+
+ # ISA name declaration looks like "namespace <foo>;"
+ def p_name_decl(self, t):
+ 'name_decl : NAMESPACE ID SEMI'
+ assert self.isa_name is None, "Only 1 namespace decl permitted"
+ self.isa_name = t[2]
+ self.namespace = t[2] + 'Inst'
+
+ # Output blocks 'output <foo> {{...}}' (C++ code blocks) are copied
+ # directly to the appropriate output section.
+
+ # Massage output block by substituting in template definitions and
+ # bit operators. We handle '%'s embedded in the string that don't
+ # indicate template substitutions by doubling them first so that the
+ # format operation will reduce them back to single '%'s.
+ def process_output(self, s):
+ s = self.protectNonSubstPercents(s)
+ return substBitOps(s % self.templateMap)
+
+ def p_output(self, t):
+ 'output : OUTPUT output_type CODELIT SEMI'
+ kwargs = { t[2]+'_output' : self.process_output(t[3]) }
+ GenCode(self, **kwargs).emit()
+
+ # global let blocks 'let {{...}}' (Python code blocks) are
+ # executed directly when seen. Note that these execute in a
+ # special variable context 'exportContext' to prevent the code
+ # from polluting this script's namespace.
+ def p_global_let(self, t):
+ 'global_let : LET CODELIT SEMI'
+ def _split(sec):
+ return self.split(sec)
+ self.updateExportContext()
+ self.exportContext["header_output"] = ''
+ self.exportContext["decoder_output"] = ''
+ self.exportContext["exec_output"] = ''
+ self.exportContext["decode_block"] = ''
+ self.exportContext["split"] = _split
+ split_setup = '''
+def wrap(func):
+ def split(sec):
+ globals()[sec + '_output'] += func(sec)
+ return split
+split = wrap(split)
+del wrap
+'''
+ # This tricky setup (immediately above) allows us to just write
+ # (e.g.) "split('exec')" in the Python code and the split #ifdef's
+ # will automatically be added to the exec_output variable. The inner
+ # Python execution environment doesn't know about the split points,
+ # so we carefully inject and wrap a closure that can retrieve the
+ # next split's #define from the parser and add it to the current
+ # emission-in-progress.
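+ # For example (a sketch): the first "split('exec')" call in a let
+ # block appends "\n#endif\n#if __SPLIT == 2\n" to exec_output;
+ # each later call advances the number.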
+ try:
+ exec split_setup+fixPythonIndentation(t[2]) in self.exportContext
+ except Exception, exc:
+ traceback.print_exc(file=sys.stdout)
+ if debug:
+ raise
+ error(t.lineno(1), 'In global let block: %s' % exc)
+ GenCode(self,
+ header_output=self.exportContext["header_output"],
+ decoder_output=self.exportContext["decoder_output"],
+ exec_output=self.exportContext["exec_output"],
+ decode_block=self.exportContext["decode_block"]).emit()
+
+ # Define the mapping from operand type extensions to C++ types and
+ # bit widths (stored in operandTypeMap).
+ def p_def_operand_types(self, t):
+ 'def_operand_types : DEF OPERAND_TYPES CODELIT SEMI'
+ try:
+ self.operandTypeMap = eval('{' + t[3] + '}')
+ except Exception, exc:
+ if debug:
+ raise
+ error(t.lineno(1),
+ 'In def operand_types: %s' % exc)
+
+ # Define the mapping from operand names to operand classes and
+ # other traits. Stored in operandNameMap.
+ def p_def_operands(self, t):
+ 'def_operands : DEF OPERANDS CODELIT SEMI'
+ if not hasattr(self, 'operandTypeMap'):
+ error(t.lineno(1),
+ 'error: operand types must be defined before operands')
+ try:
+ user_dict = eval('{' + t[3] + '}', self.exportContext)
+ except Exception, exc:
+ if debug:
+ raise
+ error(t.lineno(1), 'In def operands: %s' % exc)
+ self.buildOperandNameMap(user_dict, t.lexer.lineno)
+
+ # A bitfield definition looks like:
+ # 'def [signed] bitfield <ID> [<first>:<last>]'
+ # This generates a preprocessor macro in the output file.
+ def p_def_bitfield_0(self, t):
+ 'def_bitfield : DEF opt_signed BITFIELD ID LESS INTLIT COLON INTLIT GREATER SEMI'
+ expr = 'bits(machInst, %2d, %2d)' % (t[6], t[8])
+ if (t[2] == 'signed'):
+ expr = 'sext<%d>(%s)' % (t[6] - t[8] + 1, expr)
+ hash_define = '#undef %s\n#define %s\t%s\n' % (t[4], t[4], expr)
+ GenCode(self, header_output=hash_define).emit()
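+ # (e.g., "def bitfield OPCODE <31:26>;" emits, roughly,
+ #   #undef OPCODE
+ #   #define OPCODE  bits(machInst, 31, 26)
+ # while the 'signed' variant wraps that in sext<6>(...) --
+ # 'OPCODE' being a hypothetical bitfield name)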
+
+ # alternate form for single bit: 'def [signed] bitfield <ID> [<bit>]'
+ def p_def_bitfield_1(self, t):
+ 'def_bitfield : DEF opt_signed BITFIELD ID LESS INTLIT GREATER SEMI'
+ expr = 'bits(machInst, %2d, %2d)' % (t[6], t[6])
+ if (t[2] == 'signed'):
+ expr = 'sext<%d>(%s)' % (1, expr)
+ hash_define = '#undef %s\n#define %s\t%s\n' % (t[4], t[4], expr)
+ GenCode(self, header_output=hash_define).emit()
+
+ # alternate form for structure member: 'def bitfield <ID> <ID>'
+ def p_def_bitfield_struct(self, t):
+ 'def_bitfield_struct : DEF opt_signed BITFIELD ID id_with_dot SEMI'
+ if (t[2] != ''):
+ error(t.lineno(1),
+ 'error: structure bitfields are always unsigned.')
+ expr = 'machInst.%s' % t[5]
+ hash_define = '#undef %s\n#define %s\t%s\n' % (t[4], t[4], expr)
+ GenCode(self, header_output=hash_define).emit()
+
+ def p_id_with_dot_0(self, t):
+ 'id_with_dot : ID'
+ t[0] = t[1]
+
+ def p_id_with_dot_1(self, t):
+ 'id_with_dot : ID DOT id_with_dot'
+ t[0] = t[1] + t[2] + t[3]
+
+ def p_opt_signed_0(self, t):
+ 'opt_signed : SIGNED'
+ t[0] = t[1]
+
+ def p_opt_signed_1(self, t):
+ 'opt_signed : empty'
+ t[0] = ''
+
+ def p_def_template(self, t):
+ 'def_template : DEF TEMPLATE ID CODELIT SEMI'
+ if t[3] in self.templateMap:
+ print("warning: template %s already defined" % t[3])
+ self.templateMap[t[3]] = Template(self, t[4])
+
+ # An instruction format definition looks like
+ # "def format <fmt>(<params>) {{...}};"
+ def p_def_format(self, t):
+ 'def_format : DEF FORMAT ID LPAREN param_list RPAREN CODELIT SEMI'
+ (id, params, code) = (t[3], t[5], t[7])
+ self.defFormat(id, params, code, t.lexer.lineno)
+
+ # The formal parameter list for an instruction format is a
+ # possibly empty list of comma-separated parameters. Positional
+ # (standard, non-keyword) parameters must come first, followed by
+ # keyword parameters, followed by a '*foo' parameter that gets
+ # excess positional arguments (as in Python). Each of these three
+ # parameter categories is optional.
+ #
+ # Note that we do not support the '**foo' parameter for collecting
+ # otherwise undefined keyword args. Otherwise the parameter list
+ # is (I believe) identical to what is supported in Python.
+ #
+ # The param list generates a flat list of parameter strings, with
+ # keyword params rendered as 'name = <repr of default>' and the
+ # excess-args param as '*name', ready to be joined into a
+ # Python-style formal parameter list.
+ def p_param_list_0(self, t):
+ 'param_list : positional_param_list COMMA nonpositional_param_list'
+ t[0] = t[1] + t[3]
+
+ def p_param_list_1(self, t):
+ '''param_list : positional_param_list
+ | nonpositional_param_list'''
+ t[0] = t[1]
+
+ def p_positional_param_list_0(self, t):
+ 'positional_param_list : empty'
+ t[0] = []
+
+ def p_positional_param_list_1(self, t):
+ 'positional_param_list : ID'
+ t[0] = [t[1]]
+
+ def p_positional_param_list_2(self, t):
+ 'positional_param_list : positional_param_list COMMA ID'
+ t[0] = t[1] + [t[3]]
+
+ def p_nonpositional_param_list_0(self, t):
+ 'nonpositional_param_list : keyword_param_list COMMA excess_args_param'
+ t[0] = t[1] + t[3]
+
+ def p_nonpositional_param_list_1(self, t):
+ '''nonpositional_param_list : keyword_param_list
+ | excess_args_param'''
+ t[0] = t[1]
+
+ def p_keyword_param_list_0(self, t):
+ 'keyword_param_list : keyword_param'
+ t[0] = [t[1]]
+
+ def p_keyword_param_list_1(self, t):
+ 'keyword_param_list : keyword_param_list COMMA keyword_param'
+ t[0] = t[1] + [t[3]]
+
+ def p_keyword_param(self, t):
+ 'keyword_param : ID EQUALS expr'
+ t[0] = t[1] + ' = ' + repr(t[3])
+
+ def p_excess_args_param(self, t):
+ 'excess_args_param : ASTERISK ID'
+ # Just concatenate them: '*ID'. Wrap in list to be consistent
+ # with positional_param_list and keyword_param_list.
+ t[0] = [t[1] + t[2]]
+
+ # End of format definition-related rules.
+ ##############
+
+ #
+ # A decode block looks like:
+ # decode <field1> [, <field2>]* [default <inst>] { ... }
+ #
+ def p_top_level_decode_block(self, t):
+ 'top_level_decode_block : decode_block'
+ codeObj = t[1]
+ codeObj.wrap_decode_block('''
+StaticInstPtr
+%(isa_name)s::Decoder::decodeInst(%(isa_name)s::ExtMachInst machInst)
+{
+ using namespace %(namespace)s;
+''' % self, '}')
+
+ codeObj.emit()
+
+ def p_decode_block(self, t):
+ 'decode_block : DECODE ID opt_default LBRACE decode_stmt_list RBRACE'
+ default_defaults = self.defaultStack.pop()
+ codeObj = t[5]
+ # use the "default defaults" only if there was no explicit
+ # default statement in decode_stmt_list
+ if not codeObj.has_decode_default:
+ codeObj += default_defaults
+ codeObj.wrap_decode_block('switch (%s) {\n' % t[2], '}\n')
+ t[0] = codeObj
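+
+ # For example (with hypothetical format and field names), a block
+ # like "decode OPCODE default Unknown::unknown() { 0x1: add(...); }"
+ # wraps its accumulated cases as, roughly:
+ #   switch (OPCODE) {
+ #   case 0x1: ... break;
+ #   default: ... break;
+ #   }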
+
+ # The opt_default statement serves only to push the "default
+ # defaults" onto defaultStack. This value will be used by nested
+ # decode blocks, and used and popped off when the current
+ # decode_block is processed (in p_decode_block() above).
+ def p_opt_default_0(self, t):
+ 'opt_default : empty'
+ # no default specified: reuse the one currently at the top of
+ # the stack
+ self.defaultStack.push(self.defaultStack.top())
+ # no meaningful value returned
+ t[0] = None
+
+ def p_opt_default_1(self, t):
+ 'opt_default : DEFAULT inst'
+ # push the new default
+ codeObj = t[2]
+ codeObj.wrap_decode_block('\ndefault:\n', 'break;\n')
+ self.defaultStack.push(codeObj)
+ # no meaningful value returned
+ t[0] = None
+
+ def p_decode_stmt_list_0(self, t):
+ 'decode_stmt_list : decode_stmt'
+ t[0] = t[1]
+
+ def p_decode_stmt_list_1(self, t):
+ 'decode_stmt_list : decode_stmt decode_stmt_list'
+ if (t[1].has_decode_default and t[2].has_decode_default):
+ error(t.lineno(1), 'Two default cases in decode block')
+ t[0] = t[1] + t[2]
+
+ #
+ # Decode statement rules
+ #
+ # There are four types of statements allowed in a decode block:
+ # 1. Format blocks 'format <foo> { ... }'
+ # 2. Nested decode blocks
+ # 3. Instruction definitions.
+ # 4. C preprocessor directives.
+
+
+ # Preprocessor directives found in a decode statement list are
+ # passed through to the output, replicated to all of the output
+ # code streams. This works well for ifdefs, so we can ifdef out
+ # both the declarations and the decode cases generated by an
+ # instruction definition. Handling them as part of the grammar
+ # makes it easy to keep them in the right place with respect to
+ # the code generated by the other statements.
+ def p_decode_stmt_cpp(self, t):
+ 'decode_stmt : CPPDIRECTIVE'
+ t[0] = GenCode(self, t[1], t[1], t[1], t[1])
+
+ # A format block 'format <foo> { ... }' sets the default
+ # instruction format used to handle instruction definitions inside
+ # the block. This format can be overridden by using an explicit
+ # format on the instruction definition or with a nested format
+ # block.
+ def p_decode_stmt_format(self, t):
+ 'decode_stmt : FORMAT push_format_id LBRACE decode_stmt_list RBRACE'
+ # The format will be pushed on the stack when 'push_format_id'
+ # is processed (see below). Once the parser has recognized
+ # the full production (through the right brace), we're done
+ # with the format, so now we can pop it.
+ self.formatStack.pop()
+ t[0] = t[4]
+
+ # This rule exists so we can set the current format (& push the
+ # stack) when we recognize the format name part of the format
+ # block.
+ def p_push_format_id(self, t):
+ 'push_format_id : ID'
+ try:
+ self.formatStack.push(self.formatMap[t[1]])
+ t[0] = ('', '// format %s' % t[1])
+ except KeyError:
+ error(t.lineno(1), 'instruction format "%s" not defined.' % t[1])
+
+ # Nested decode block: if the value of the current field matches
+ # the specified constant(s), do a nested decode on some other field.
+ def p_decode_stmt_decode(self, t):
+ 'decode_stmt : case_list COLON decode_block'
+ case_list = t[1]
+ codeObj = t[3]
+ # just wrap the decoding code from the block as a case in the
+ # outer switch statement.
+ codeObj.wrap_decode_block('\n%s\n' % ''.join(case_list),
+ 'M5_UNREACHABLE;\n')
+ codeObj.has_decode_default = (case_list == ['default:'])
+ t[0] = codeObj
+
+ # Instruction definition (finally!).
+ def p_decode_stmt_inst(self, t):
+ 'decode_stmt : case_list COLON inst SEMI'
+ case_list = t[1]
+ codeObj = t[3]
+ codeObj.wrap_decode_block('\n%s' % ''.join(case_list), 'break;\n')
+ codeObj.has_decode_default = (case_list == ['default:'])
+ t[0] = codeObj
+
+ # The constant list for a decode case label must be non-empty, and must
+ # either be the keyword 'default', or made up of one or more
+ # comma-separated integer literals or strings which evaluate to
+ # constants when compiled as C++.
+ def p_case_list_0(self, t):
+ 'case_list : DEFAULT'
+ t[0] = ['default:']
+
+ def prep_int_lit_case_label(self, lit):
+ if lit >= 2**32:
+ return 'case ULL(%#x): ' % lit
+ else:
+ return 'case %#x: ' % lit
+
+ def prep_str_lit_case_label(self, lit):
+ return 'case %s: ' % lit
+
+ def p_case_list_1(self, t):
+ 'case_list : INTLIT'
+ t[0] = [self.prep_int_lit_case_label(t[1])]
+
+ def p_case_list_2(self, t):
+ 'case_list : STRLIT'
+ t[0] = [self.prep_str_lit_case_label(t[1])]
+
+ def p_case_list_3(self, t):
+ 'case_list : case_list COMMA INTLIT'
+ t[0] = t[1]
+ t[0].append(self.prep_int_lit_case_label(t[3]))
+
+ def p_case_list_4(self, t):
+ 'case_list : case_list COMMA STRLIT'
+ t[0] = t[1]
+ t[0].append(self.prep_str_lit_case_label(t[3]))
+
+ # Define an instruction using the current instruction format
+ # (specified by an enclosing format block).
+ # "<mnemonic>(<args>)"
+ def p_inst_0(self, t):
+ 'inst : ID LPAREN arg_list RPAREN'
+ # Pass the ID and arg list to the current format class to deal with.
+ currentFormat = self.formatStack.top()
+ codeObj = currentFormat.defineInst(self, t[1], t[3], t.lexer.lineno)
+ args = ','.join(map(str, t[3]))
+ args = re.sub('(?m)^', '//', args)
+ args = re.sub('^//', '', args)
+ comment = '\n// %s::%s(%s)\n' % (currentFormat.id, t[1], args)
+ codeObj.prepend_all(comment)
+ t[0] = codeObj
+
+ # Define an instruction using an explicitly specified format:
+ # "<fmt>::<mnemonic>(<args>)"
+ def p_inst_1(self, t):
+ 'inst : ID DBLCOLON ID LPAREN arg_list RPAREN'
+ try:
+ format = self.formatMap[t[1]]
+ except KeyError:
+ error(t.lineno(1), 'instruction format "%s" not defined.' % t[1])
+
+ codeObj = format.defineInst(self, t[3], t[5], t.lexer.lineno)
+ comment = '\n// %s::%s(%s)\n' % (t[1], t[3], t[5])
+ codeObj.prepend_all(comment)
+ t[0] = codeObj
+
+ # The arg list generates a tuple, where the first element is a
+ # list of the positional args and the second element is a dict
+ # containing the keyword args.
+ def p_arg_list_0(self, t):
+ 'arg_list : positional_arg_list COMMA keyword_arg_list'
+ t[0] = ( t[1], t[3] )
+
+ def p_arg_list_1(self, t):
+ 'arg_list : positional_arg_list'
+ t[0] = ( t[1], {} )
+
+ def p_arg_list_2(self, t):
+ 'arg_list : keyword_arg_list'
+ t[0] = ( [], t[1] )
+
+ def p_positional_arg_list_0(self, t):
+ 'positional_arg_list : empty'
+ t[0] = []
+
+ def p_positional_arg_list_1(self, t):
+ 'positional_arg_list : expr'
+ t[0] = [t[1]]
+
+ def p_positional_arg_list_2(self, t):
+ 'positional_arg_list : positional_arg_list COMMA expr'
+ t[0] = t[1] + [t[3]]
+
+ def p_keyword_arg_list_0(self, t):
+ 'keyword_arg_list : keyword_arg'
+ t[0] = t[1]
+
+ def p_keyword_arg_list_1(self, t):
+ 'keyword_arg_list : keyword_arg_list COMMA keyword_arg'
+ t[0] = t[1]
+ t[0].update(t[3])
+
+ def p_keyword_arg(self, t):
+ 'keyword_arg : ID EQUALS expr'
+ t[0] = { t[1] : t[3] }
+
+ #
+ # Basic expressions. These constitute the argument values of
+ # "function calls" (i.e. instruction definitions in the decode
+ # block) and default values for formal parameters of format
+ # functions.
+ #
+ # Right now, these are either strings, integers, or (recursively)
+ # lists of exprs (using Python square-bracket list syntax). Note
+ # that bare identifiers are treated as string constants here (since
+ # there isn't really a variable namespace to refer to).
+ #
+ def p_expr_0(self, t):
+ '''expr : ID
+ | INTLIT
+ | STRLIT
+ | CODELIT'''
+ t[0] = t[1]
+
+ def p_expr_1(self, t):
+ '''expr : LBRACKET list_expr RBRACKET'''
+ t[0] = t[2]
+
+ def p_list_expr_0(self, t):
+ 'list_expr : expr'
+ t[0] = [t[1]]
+
+ def p_list_expr_1(self, t):
+ 'list_expr : list_expr COMMA expr'
+ t[0] = t[1] + [t[3]]
+
+ def p_list_expr_2(self, t):
+ 'list_expr : empty'
+ t[0] = []
+
+ #
+ # Empty production... use in other rules for readability.
+ #
+ def p_empty(self, t):
+ 'empty :'
+ pass
+
+ # Parse error handler. Note that the argument here is the
+ # offending *token*, not a grammar symbol (hence the need to use
+ # t.value)
+ def p_error(self, t):
+ if t:
+ error(t.lexer.lineno, "syntax error at '%s'" % t.value)
+ else:
+ error("unknown syntax error")
+
+ # END OF GRAMMAR RULES
+
+ def updateExportContext(self):
+
+ # create a continuation that allows us to grab the current parser
+ def wrapInstObjParams(*args):
+ return InstObjParams(self, *args)
+ self.exportContext['InstObjParams'] = wrapInstObjParams
+ self.exportContext.update(self.templateMap)
+
+ def defFormat(self, id, params, code, lineno):
+ '''Define a new format'''
+
+ # make sure we haven't already defined this one
+ if id in self.formatMap:
+ error(lineno, 'format %s redefined.' % id)
+
+ # create new object and store in global map
+ self.formatMap[id] = Format(id, params, code)
+
+ def protectNonSubstPercents(self, s):
+ '''Protect any non-dict-substitution '%'s in a format string
+ (i.e. those not followed by '(')'''
+
+ return re.sub(r'%(?!\()', '%%', s)
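+
+ # (e.g., 'is 100% done: %(foo)s' -> 'is 100%% done: %(foo)s', so a
+ # later '%' formatting pass reduces '%%' back to a literal '%' while
+ # still substituting %(foo)s -- 'foo' is a placeholder name here)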
+
+ def buildOperandNameMap(self, user_dict, lineno):
+ operand_name = {}
+ for op_name, val in user_dict.iteritems():
+
+ # Check if extra attributes have been specified.
+ if len(val) > 9:
+ error(lineno, 'error: too many attributes for operand "%s"' %
+ op_name)
+
+ # Pad val with None in case optional args are missing
+ val += (None, None, None, None)
+ base_cls_name, dflt_ext, reg_spec, flags, sort_pri, \
+ read_code, write_code, read_predicate, write_predicate = val[:9]
+
+ # Canonical flag structure is a triple of lists, where each list
+ # indicates the set of flags implied by this operand always, when
+ # used as a source, and when used as a dest, respectively.
+ # For simplicity this can be initialized using a variety of fairly
+ # obvious shortcuts; we convert these to canonical form here.
+ if not flags:
+ # no flags specified (e.g., 'None')
+ flags = ( [], [], [] )
+ elif isinstance(flags, str):
+ # a single flag: assumed to be unconditional
+ flags = ( [ flags ], [], [] )
+ elif isinstance(flags, list):
+ # a list of flags: also assumed to be unconditional
+ flags = ( flags, [], [] )
+ elif isinstance(flags, tuple):
+ # it's a tuple: it should be a triple,
+ # but each item could be a single string or a list
+ (uncond_flags, src_flags, dest_flags) = flags
+ flags = (makeList(uncond_flags),
+ makeList(src_flags), makeList(dest_flags))
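+ # (e.g., flags='IsLoad' becomes (['IsLoad'], [], []), while a
+ # triple like (['A'], 'B', 'C') becomes (['A'], ['B'], ['C']) --
+ # flag names here are illustrative only)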
+
+ # Accumulate attributes of new operand class in tmp_dict
+ tmp_dict = {}
+ attrList = ['reg_spec', 'flags', 'sort_pri',
+ 'read_code', 'write_code',
+ 'read_predicate', 'write_predicate']
+ if dflt_ext:
+ dflt_ctype = self.operandTypeMap[dflt_ext]
+ attrList.extend(['dflt_ctype', 'dflt_ext'])
+ # reg_spec is either just a string or a dictionary
+ # (for elems of vector)
+ if isinstance(reg_spec, tuple):
+ (reg_spec, elem_spec) = reg_spec
+ if isinstance(elem_spec, str):
+ attrList.append('elem_spec')
+ else:
+ assert(isinstance(elem_spec, dict))
+ elems = elem_spec
+ attrList.append('elems')
+ for attr in attrList:
+ tmp_dict[attr] = eval(attr)
+ tmp_dict['base_name'] = op_name
+
+ # New class name will be e.g. "IntReg_Ra"
+ cls_name = base_cls_name + '_' + op_name
+ # Evaluate string arg to get class object. Note that the
+ # actual base class for "IntReg" is "IntRegOperand", i.e. we
+ # have to append "Operand".
+ try:
+ base_cls = eval(base_cls_name + 'Operand')
+ except NameError:
+ error(lineno,
+ 'error: unknown operand base class "%s"' % base_cls_name)
+ # The following statement creates a new class called
+ # <cls_name> as a subclass of <base_cls> with the attributes
+ # in tmp_dict, just as if we evaluated a class declaration.
+ operand_name[op_name] = type(cls_name, (base_cls,), tmp_dict)
+
+ self.operandNameMap = operand_name
+
+ # Define operand variables.
+ operands = user_dict.keys()
+ # Add the elems defined in the vector operands and
+ # build a map elem -> vector (used in OperandList)
+ elem_to_vec = {}
+ for op in user_dict.keys():
+ if hasattr(self.operandNameMap[op], 'elems'):
+ for elem in self.operandNameMap[op].elems.keys():
+ operands.append(elem)
+ elem_to_vec[elem] = op
+ self.elemToVector = elem_to_vec
+ extensions = self.operandTypeMap.keys()
+
+ operandsREString = r'''
+ (?<!\w) # neg. lookbehind assertion: prevent partial matches
+ ((%s)(?:_(%s))?) # match: operand with optional '_' then suffix
+ (?!\w) # neg. lookahead assertion: prevent partial matches
+ ''' % ('|'.join(operands), '|'.join(extensions))
+
+ self.operandsRE = re.compile(operandsREString, re.MULTILINE|re.VERBOSE)
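+
+ # (e.g., with operands ['Ra', 'Rb'] and extensions ['sw', 'ud'] --
+ # hypothetical names -- this matches 'Ra' or 'Ra_ud' as whole words
+ # in a code snippet, but not 'xRa' or 'Ra_udx', thanks to the
+ # lookbehind/lookahead assertions.)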
+
+ # Same as operandsREString, but the extension is mandatory, and only
+ # two groups are returned (base and ext, not the full name as above).
+ # Used to strip the '_<ext>' suffix from operand names to form legal
+ # C++ identifiers.
+ operandsWithExtREString = r'(?<!\w)(%s)_(%s)(?!\w)' \
+ % ('|'.join(operands), '|'.join(extensions))
+
+ self.operandsWithExtRE = \
+ re.compile(operandsWithExtREString, re.MULTILINE)
+
+ def substMungedOpNames(self, code):
+ '''Munge operand names in a code string to produce legal C++
+ variable names. This means getting rid of the type extension,
+ if any. The result will match the base_name attribute of the
+ Operand object.'''
+ return self.operandsWithExtRE.sub(r'\1', code)
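+
+ # (e.g., with a hypothetical operand 'Ra' and extension 'ud' defined,
+ # substMungedOpNames('Ra_ud = Rb_ud + 1;') returns 'Ra = Rb + 1;')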
+
+ def mungeSnippet(self, s):
+ '''Fix up code snippets for final substitution in templates.'''
+ if isinstance(s, str):
+ return self.substMungedOpNames(substBitOps(s))
+ else:
+ return s
+
+ def open(self, name, bare=False):
+ '''Open an output file for writing, prepending the scary
+ "DO NOT EDIT" warning unless bare is True.'''
+ filename = os.path.join(self.output_dir, name)
+ f = open(filename, 'w')
+ if not bare:
+ f.write(ISAParser.scaremonger_template % self)
+ return f
+
+ def update(self, file, contents):
+ '''Unconditionally rewrite the output file. SCons should handle
+ the case when the new contents are unchanged, using its built-in
+ hash feature.'''
+ f = self.open(file)