3 # Copyright (c) 2003-2005 The Regents of The University of Michigan
6 # Redistribution and use in source and binary forms, with or without
7 # modification, are permitted provided that the following conditions are
8 # met: redistributions of source code must retain the above copyright
9 # notice, this list of conditions and the following disclaimer;
10 # redistributions in binary form must reproduce the above copyright
11 # notice, this list of conditions and the following disclaimer in the
12 # documentation and/or other materials provided with the distribution;
13 # neither the name of the copyright holders nor the names of its
14 # contributors may be used to endorse or promote products derived from
15 # this software without specific prior written permission.
17 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20 # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21 # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23 # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Prepend the directory where the PLY lex & yacc modules are found
# to the search path. Assumes we're compiling in a subdirectory
# of 'build' in the current tree.
# NOTE(review): requires the M5_EXT environment variable to be set;
# os.environ['M5_EXT'] raises KeyError at import time if it is not.
sys.path[0:0] = [os.environ['M5_EXT'] + '/ply']
45 #####################################################################
49 # The PLY lexer module takes two things as input:
50 # - A list of token names (the string list 'tokens')
51 # - A regular expression describing a match for each token. The
52 # regexp for token FOO can be provided in two ways:
53 # - as a string variable named t_FOO
54 # - as the doc string for a function named t_FOO. In this case,
55 # the function is also executed, allowing an action to be
56 # associated with each token match.
58 #####################################################################
60 # Reserved words. These are listed separately as they are matched
61 # using the same regexp as generic IDs, but distinguished in the
62 # t_ID() function. The PLY documentation suggests this approach.
64 'BITFIELD', 'DECODE', 'DECODER', 'DEFAULT', 'DEF', 'EXEC', 'FORMAT',
65 'HEADER', 'LET', 'NAMESPACE', 'OPERAND_TYPES', 'OPERANDS',
66 'OUTPUT', 'SIGNED', 'TEMPLATE'
69 # List of tokens. The lex module requires this.
83 # ( ) [ ] { } < > , ; : :: *
85 # not used any more... commented out to suppress PLY warning
86 # 'LBRACKET', 'RBRACKET',
89 'COMMA', 'SEMI', 'COLON', 'DBLCOLON',
92 # C preprocessor directives
95 # The following are matched but never returned. commented out to
96 # suppress PLY warning
104 # Regular expressions for token matching
107 # not used any more... commented out to suppress PLY warning
120 # Identifiers and reserved words
123 reserved_map
[r
.lower()] = r
127 t
.type = reserved_map
.get(t
.value
,'ID')
132 r
'(0x[\da-fA-F]+)|\d+'
134 t
.value
= int(t
.value
,0)
136 error(t
.lineno
, 'Integer value "%s" too large' % t
.value
)
140 # String literal. Note that these use only single quotes, and
141 # can span multiple lines.
145 t
.value
= t
.value
[1:-1]
146 t
.lineno
+= t
.value
.count('\n')
150 # "Code literal"... like a string literal, but delimiters are
151 # '{{' and '}}' so they get formatted nicely under emacs c-mode
153 r
"(?m)\{\{([^\}]|}(?!\}))+\}\}"
155 t
.value
= t
.value
[2:-2]
156 t
.lineno
+= t
.value
.count('\n')
159 def t_CPPDIRECTIVE(t
):
161 t
.lineno
+= t
.value
.count('\n')
165 r
'^\#\#newfile\s+"[\w/.-]*"'
167 fileNameStack
.append((t
.value
[11:-1], t
.lineno
))
172 (filename
, t
.lineno
) = fileNameStack
.pop()
175 # The functions t_NEWLINE, t_ignore, and t_error are
176 # special for the lex module.
182 t
.lineno
+= t
.value
.count('\n')
188 # Completely ignored characters
193 error(t
.lineno
, "illegal character '%s'" % t
.value
[0])
199 #####################################################################
203 # Every function whose name starts with 'p_' defines a grammar rule.
204 # The rule is encoded in the function's doc string, while the
205 # function body provides the action taken when the rule is matched.
206 # The argument to each function is a list of the values of the
207 # rule's symbols: t[0] for the LHS, and t[1..n] for the symbols
208 # on the RHS. For tokens, the value is copied from the t.value
209 # attribute provided by the lexer. For non-terminals, the value
210 # is assigned by the producing rule; i.e., the job of the grammar
211 # rule function is to set the value for the non-terminal on the LHS
212 # (by assigning to t[0]).
213 #####################################################################
215 # The LHS of the first grammar rule is used as the start symbol
216 # (in this case, 'specification'). Note that this rule enforces
217 # that there will be exactly one namespace declaration, with 0 or more
218 # global defs/decls before and after it. The defs & decls before
219 # the namespace decl will be outside the namespace; those after
220 # will be inside. The decoder function is always inside the namespace.
221 def p_specification(t
):
222 'specification : opt_defs_and_outputs name_decl opt_defs_and_outputs decode_block'
225 namespace
= isa_name
+ "Inst"
226 # wrap the decode block as a function definition
227 t
[4].wrap_decode_block('''
228 StaticInstPtr<%(isa_name)s>
229 %(isa_name)s::decodeInst(%(isa_name)s::MachInst machInst)
231 using namespace %(namespace)s;
233 # both the latter output blocks and the decode block are in the namespace
234 namespace_code
= t
[3] + t
[4]
235 # pass it all back to the caller of yacc.parse()
236 t
[0] = (isa_name
, namespace
, global_code
, namespace_code
)
238 # ISA name declaration looks like "namespace <foo>;"
240 'name_decl : NAMESPACE ID SEMI'
243 # 'opt_defs_and_outputs' is a possibly empty sequence of
244 # def and/or output statements.
245 def p_opt_defs_and_outputs_0(t
):
246 'opt_defs_and_outputs : empty'
249 def p_opt_defs_and_outputs_1(t
):
250 'opt_defs_and_outputs : defs_and_outputs'
253 def p_defs_and_outputs_0(t
):
254 'defs_and_outputs : def_or_output'
257 def p_defs_and_outputs_1(t
):
258 'defs_and_outputs : defs_and_outputs def_or_output'
261 # The list of possible definition/output statements.
262 def p_def_or_output(t
):
263 '''def_or_output : def_format
274 # Output blocks 'output <foo> {{...}}' (C++ code blocks) are copied
275 # directly to the appropriate output section.
278 # Protect any non-dict-substitution '%'s in a format string
279 # (i.e. those not followed by '(')
def protect_non_subst_percents(s):
    """Escape every '%' in *s* not followed by '(' as '%%'.

    '%(name)s'-style dict substitutions are left untouched so a later
    '%'-format pass still sees them.
    """
    bare_percent = r'%(?!\()'
    return re.sub(bare_percent, '%%', s)
283 # Massage output block by substituting in template definitions and bit
284 # operators. We handle '%'s embedded in the string that don't
285 # indicate template substitutions (or CPU-specific symbols, which get
286 # handled in GenCode) by doubling them first so that the format
287 # operation will reduce them back to single '%'s.
def process_output(s):
    """Massage an output block: substitute templates, expand bit operators.

    '%'s that aren't dict substitutions are doubled first so the '%'
    formatting below reduces them back to single '%'s.
    """
    out = protect_non_subst_percents(s)
    # protects cpu-specific symbols too
    out = protect_cpu_symbols(out)
    # Substitute in template definitions, then rewrite bitfield operators.
    return substBitOps(out % templateMap)
def p_output_header(t):
    'output_header : OUTPUT HEADER CODELIT SEMI'
    # Route the processed code literal to the decoder.hh output stream.
    code = process_output(t[3])
    t[0] = GenCode(header_output = code)
def p_output_decoder(t):
    'output_decoder : OUTPUT DECODER CODELIT SEMI'
    # Route the processed code literal to the decoder.cc output stream.
    code = process_output(t[3])
    t[0] = GenCode(decoder_output = code)
def p_output_exec(t):
    'output_exec : OUTPUT EXEC CODELIT SEMI'
    # Route the processed code literal to the per-CPU exec output.
    code = process_output(t[3])
    t[0] = GenCode(exec_output = code)
306 # global let blocks 'let {{...}}' (Python code blocks) are executed
307 # directly when seen. Note that these execute in a special variable
308 # context 'exportContext' to prevent the code from polluting this
309 # script's namespace.
311 'global_let : LET CODELIT SEMI'
312 updateExportContext()
314 exec fixPythonIndentation(t
[2]) in exportContext
315 except Exception, exc
:
317 'error: %s in global let block "%s".' % (exc
, t
[2]))
318 t
[0] = GenCode() # contributes nothing to the output C++ file
320 # Define the mapping from operand type extensions to C++ types and bit
321 # widths (stored in operandTypeMap).
322 def p_def_operand_types(t
):
323 'def_operand_types : DEF OPERAND_TYPES CODELIT SEMI'
324 s
= 'global operandTypeMap; operandTypeMap = {' + t
[3] + '}'
327 except Exception, exc
:
329 'error: %s in def operand_types block "%s".' % (exc
, t
[3]))
330 t
[0] = GenCode() # contributes nothing to the output C++ file
332 # Define the mapping from operand names to operand classes and other
333 # traits. Stored in operandTraitsMap.
334 def p_def_operands(t
):
335 'def_operands : DEF OPERANDS CODELIT SEMI'
336 s
= 'global operandTraitsMap; operandTraitsMap = {' + t
[3] + '}'
339 except Exception, exc
:
341 'error: %s in def operands block "%s".' % (exc
, t
[3]))
342 defineDerivedOperandVars()
343 t
[0] = GenCode() # contributes nothing to the output C++ file
345 # A bitfield definition looks like:
346 # 'def [signed] bitfield <ID> [<first>:<last>]'
347 # This generates a preprocessor macro in the output file.
def p_def_bitfield_0(t):
    'def_bitfield : DEF opt_signed BITFIELD ID LESS INTLIT COLON INTLIT GREATER SEMI'
    # Emit a preprocessor macro extracting bits <first>:<last> of machInst.
    first, last = t[6], t[8]
    extract = 'bits(machInst, %2d, %2d)' % (first, last)
    if t[2] == 'signed':
        # Sign-extend from the field's width when 'signed' was given.
        extract = 'sext<%d>(%s)' % (first - last + 1, extract)
    macro = '#undef %s\n#define %s\t%s\n' % (t[4], t[4], extract)
    t[0] = GenCode(header_output = macro)
356 # alternate form for single bit: 'def [signed] bitfield <ID> [<bit>]'
def p_def_bitfield_1(t):
    'def_bitfield : DEF opt_signed BITFIELD ID LESS INTLIT GREATER SEMI'
    # Single-bit form: extract bit <bit>:<bit> of machInst.
    bit = t[6]
    extract = 'bits(machInst, %2d, %2d)' % (bit, bit)
    if t[2] == 'signed':
        # A one-bit field sign-extends from width 1.
        extract = 'sext<%d>(%s)' % (1, extract)
    macro = '#undef %s\n#define %s\t%s\n' % (t[4], t[4], extract)
    t[0] = GenCode(header_output = macro)
365 def p_opt_signed_0(t
):
366 'opt_signed : SIGNED'
369 def p_opt_signed_1(t
):
373 # Global map variable to hold templates
def p_def_template(t):
    'def_template : DEF TEMPLATE ID CODELIT SEMI'
    # Register the code literal as a Template object under the given name.
    templateMap[t[3]] = Template(t[4])
381 # An instruction format definition looks like
382 # "def format <fmt>(<params>) {{...}};"
384 'def_format : DEF FORMAT ID LPAREN param_list RPAREN CODELIT SEMI'
385 (id, params
, code
) = (t
[3], t
[5], t
[7])
386 defFormat(id, params
, code
, t
.lineno(1))
389 # The formal parameter list for an instruction format is a possibly
390 # empty list of comma-separated parameters.
391 def p_param_list_0(t
):
395 def p_param_list_1(t
):
399 def p_param_list_2(t
):
400 'param_list : param_list COMMA param'
404 # Each formal parameter is either an identifier or an identifier
405 # preceded by an asterisk. As in Python, the latter (if present) gets
406 # a tuple containing all the excess positional arguments, allowing
413 'param : ASTERISK ID'
414 # just concatenate them: '*ID'
417 # End of format definition-related rules.
421 # A decode block looks like:
422 # decode <field1> [, <field2>]* [default <inst>] { ... }
424 def p_decode_block(t
):
425 'decode_block : DECODE ID opt_default LBRACE decode_stmt_list RBRACE'
426 default_defaults
= defaultStack
.pop()
428 # use the "default defaults" only if there was no explicit
429 # default statement in decode_stmt_list
430 if not codeObj
.has_decode_default
:
431 codeObj
+= default_defaults
432 codeObj
.wrap_decode_block('switch (%s) {\n' % t
[2], '}\n')
435 # The opt_default statement serves only to push the "default defaults"
436 # onto defaultStack. This value will be used by nested decode blocks,
437 # and used and popped off when the current decode_block is processed
438 # (in p_decode_block() above).
439 def p_opt_default_0(t
):
440 'opt_default : empty'
441 # no default specified: reuse the one currently at the top of the stack
442 defaultStack
.push(defaultStack
.top())
443 # no meaningful value returned
446 def p_opt_default_1(t
):
447 'opt_default : DEFAULT inst'
448 # push the new default
450 codeObj
.wrap_decode_block('\ndefault:\n', 'break;\n')
451 defaultStack
.push(codeObj
)
452 # no meaningful value returned
455 def p_decode_stmt_list_0(t
):
456 'decode_stmt_list : decode_stmt'
459 def p_decode_stmt_list_1(t
):
460 'decode_stmt_list : decode_stmt decode_stmt_list'
461 if (t
[1].has_decode_default
and t
[2].has_decode_default
):
462 error(t
.lineno(1), 'Two default cases in decode block')
466 # Decode statement rules
468 # There are four types of statements allowed in a decode block:
469 # 1. Format blocks 'format <foo> { ... }'
470 # 2. Nested decode blocks
471 # 3. Instruction definitions.
472 # 4. C preprocessor directives.
475 # Preprocessor directives found in a decode statement list are passed
476 # through to the output, replicated to all of the output code
477 # streams. This works well for ifdefs, so we can ifdef out both the
478 # declarations and the decode cases generated by an instruction
479 # definition. Handling them as part of the grammar makes it easy to
480 # keep them in the right place with respect to the code generated by
481 # the other statements.
def p_decode_stmt_cpp(t):
    'decode_stmt : CPPDIRECTIVE'
    # Replicate the directive verbatim into all four output streams.
    directive = t[1]
    t[0] = GenCode(directive, directive, directive, directive)
486 # A format block 'format <foo> { ... }' sets the default instruction
487 # format used to handle instruction definitions inside the block.
488 # This format can be overridden by using an explicit format on the
489 # instruction definition or with a nested format block.
490 def p_decode_stmt_format(t
):
491 'decode_stmt : FORMAT push_format_id LBRACE decode_stmt_list RBRACE'
492 # The format will be pushed on the stack when 'push_format_id' is
493 # processed (see below). Once the parser has recognized the full
494 # production (though the right brace), we're done with the format,
495 # so now we can pop it.
499 # This rule exists so we can set the current format (& push the stack)
500 # when we recognize the format name part of the format block.
501 def p_push_format_id(t
):
502 'push_format_id : ID'
504 formatStack
.push(formatMap
[t
[1]])
505 t
[0] = ('', '// format %s' % t
[1])
507 error(t
.lineno(1), 'instruction format "%s" not defined.' % t
[1])
509 # Nested decode block: if the value of the current field matches the
510 # specified constant, do a nested decode on some other field.
511 def p_decode_stmt_decode(t
):
512 'decode_stmt : case_label COLON decode_block'
515 # just wrap the decoding code from the block as a case in the
516 # outer switch statement.
517 codeObj
.wrap_decode_block('\n%s:\n' % label
)
518 codeObj
.has_decode_default
= (label
== 'default')
521 # Instruction definition (finally!).
522 def p_decode_stmt_inst(t
):
523 'decode_stmt : case_label COLON inst SEMI'
526 codeObj
.wrap_decode_block('\n%s:' % label
, 'break;\n')
527 codeObj
.has_decode_default
= (label
== 'default')
530 # The case label is either a list of one or more constants or 'default'
def p_case_label_0(t):
    'case_label : intlit_list'
    # Turn each integer literal into a 'case 0x..' label, ':'-joined so
    # several constants share one decode case.
    t[0] = ': '.join(['case %#x' % a for a in t[1]])
535 def p_case_label_1(t
):
536 'case_label : DEFAULT'
540 # The constant list for a decode case label must be non-empty, but may have
541 # one or more comma-separated integer literals in it.
543 def p_intlit_list_0(t
):
544 'intlit_list : INTLIT'
547 def p_intlit_list_1(t
):
548 'intlit_list : intlit_list COMMA INTLIT'
552 # Define an instruction using the current instruction format (specified
553 # by an enclosing format block).
554 # "<mnemonic>(<args>)"
556 'inst : ID LPAREN arg_list RPAREN'
557 # Pass the ID and arg list to the current format class to deal with.
558 currentFormat
= formatStack
.top()
559 codeObj
= currentFormat
.defineInst(t
[1], t
[3], t
.lineno(1))
560 args
= ','.join(map(str, t
[3]))
561 args
= re
.sub('(?m)^', '//', args
)
562 args
= re
.sub('^//', '', args
)
563 comment
= '\n// %s::%s(%s)\n' % (currentFormat
.id, t
[1], args
)
564 codeObj
.prepend_all(comment
)
567 # Define an instruction using an explicitly specified format:
568 # "<fmt>::<mnemonic>(<args>)"
570 'inst : ID DBLCOLON ID LPAREN arg_list RPAREN'
572 format
= formatMap
[t
[1]]
574 error(t
.lineno(1), 'instruction format "%s" not defined.' % t
[1])
575 codeObj
= format
.defineInst(t
[3], t
[5], t
.lineno(1))
576 comment
= '\n// %s::%s(%s)\n' % (t
[1], t
[3], t
[5])
577 codeObj
.prepend_all(comment
)
589 'arg_list : arg_list COMMA arg'
601 # Empty production... use in other rules for readability.
607 # Parse error handler. Note that the argument here is the offending
608 # *token*, not a grammar symbol (hence the need to use t.value)
611 error(t
.lineno
, "syntax error at '%s'" % t
.value
)
613 error_bt(0, "unknown syntax error")
615 # END OF GRAMMAR RULES
617 # Now build the parser.
621 #####################################################################
625 #####################################################################
630 # The CpuModel class encapsulates everything we need to know about a
631 # particular CPU model.
634 # List of all CPU models. Accessible as CpuModel.list.
637 # Constructor. Automatically adds models to CpuModel.list.
638 def __init__(self
, name
, filename
, includes
, strings
):
640 self
.filename
= filename
# filename for output exec code
641 self
.includes
= includes
# include files needed in exec file
642 # The 'strings' dict holds all the per-CPU symbols we can
643 # substitute into templates etc.
644 self
.strings
= strings
646 CpuModel
.list.append(self
)
# Define CPU models. The following lines should contain the only
# CPU-model-specific information in this file. Note that the ISA
# description itself should have *no* CPU-model-specific content.
# Each entry gives: model name, exec-output filename, the #include
# that exec file needs, and the per-CPU substitution symbols dict.
CpuModel('SimpleCPU', 'simple_cpu_exec.cc',
         '#include "cpu/simple/cpu.hh"',
         { 'CPU_exec_context': 'SimpleCPU' })
CpuModel('FastCPU', 'fast_cpu_exec.cc',
         '#include "cpu/fast/cpu.hh"',
         { 'CPU_exec_context': 'FastCPU' })
CpuModel('FullCPU', 'full_cpu_exec.cc',
         '#include "encumbered/cpu/full/dyn_inst.hh"',
         { 'CPU_exec_context': 'DynInst' })
CpuModel('AlphaFullCPU', 'alpha_o3_exec.cc',
         '#include "cpu/o3/alpha_dyn_inst.hh"',
         { 'CPU_exec_context': 'AlphaDynInst<AlphaSimpleImpl>' })
664 # Expand template with CPU-specific references into a dictionary with
665 # an entry for each CPU model name. The entry key is the model name
666 # and the corresponding value is the template with the CPU-specific
667 # refs substituted for that model.
668 def expand_cpu_symbols_to_dict(template
):
669 # Protect '%'s that don't go with CPU-specific terms
670 t
= re
.sub(r
'%(?!\(CPU_)', '%%', template
)
672 for cpu
in CpuModel
.list:
673 result
[cpu
.name
] = t
% cpu
.strings
676 # *If* the template has CPU-specific references, return a single
677 # string containing a copy of the template for each CPU model with the
678 # corresponding values substituted in. If the template has no
679 # CPU-specific references, it is returned unmodified.
680 def expand_cpu_symbols_to_string(template
):
681 if template
.find('%(CPU_') != -1:
682 return reduce(lambda x
,y
: x
+y
,
683 expand_cpu_symbols_to_dict(template
).values())
687 # Protect CPU-specific references by doubling the corresponding '%'s
688 # (in preparation for substituting a different set of references into
def protect_cpu_symbols(template):
    """Escape CPU-specific refs by doubling the '%' before '(CPU_'.

    Lets a different set of references be substituted into *template*
    first; the CPU-specific ones survive for a later pass.
    """
    cpu_ref = r'%(?=\(CPU_)'
    return re.sub(cpu_ref, '%%', template)
696 # The GenCode class encapsulates generated code destined for various
697 # output files. The header_output and decoder_output attributes are
698 # strings containing code destined for decoder.hh and decoder.cc
699 # respectively. The decode_block attribute contains code to be
700 # incorporated in the decode function itself (that will also end up in
701 # decoder.cc). The exec_output attribute is a dictionary with a key
702 # for each CPU model name; the value associated with a particular key
703 # is the string of code for that CPU model's exec.cc file. The
704 # has_decode_default attribute is used in the decode block to allow
705 # explicit default clauses to override default default clauses.
708 # Constructor. At this point we substitute out all CPU-specific
709 # symbols. For the exec output, these go into the per-model
710 # dictionary. For all other output types they get collapsed into
713 header_output
= '', decoder_output
= '', exec_output
= '',
714 decode_block
= '', has_decode_default
= False):
715 self
.header_output
= expand_cpu_symbols_to_string(header_output
)
716 self
.decoder_output
= expand_cpu_symbols_to_string(decoder_output
)
717 if isinstance(exec_output
, dict):
718 self
.exec_output
= exec_output
719 elif isinstance(exec_output
, str):
720 # If the exec_output arg is a single string, we replicate
721 # it for each of the CPU models, substituting and
722 # %(CPU_foo)s params appropriately.
723 self
.exec_output
= expand_cpu_symbols_to_dict(exec_output
)
724 self
.decode_block
= expand_cpu_symbols_to_string(decode_block
)
725 self
.has_decode_default
= has_decode_default
727 # Override '+' operator: generate a new GenCode object that
728 # concatenates all the individual strings in the operands.
729 def __add__(self
, other
):
731 for cpu
in CpuModel
.list:
733 exec_output
[n
] = self
.exec_output
[n
] + other
.exec_output
[n
]
734 return GenCode(self
.header_output
+ other
.header_output
,
735 self
.decoder_output
+ other
.decoder_output
,
737 self
.decode_block
+ other
.decode_block
,
738 self
.has_decode_default
or other
.has_decode_default
)
740 # Prepend a string (typically a comment) to all the strings.
741 def prepend_all(self
, pre
):
742 self
.header_output
= pre
+ self
.header_output
743 self
.decoder_output
= pre
+ self
.decoder_output
744 self
.decode_block
= pre
+ self
.decode_block
745 for cpu
in CpuModel
.list:
746 self
.exec_output
[cpu
.name
] = pre
+ self
.exec_output
[cpu
.name
]
748 # Wrap the decode block in a pair of strings (e.g., 'case foo:'
749 # and 'break;'). Used to build the big nested switch statement.
750 def wrap_decode_block(self
, pre
, post
= ''):
751 self
.decode_block
= pre
+ indent(self
.decode_block
) + post
756 # A format object encapsulates an instruction format. It must provide
757 # a defineInst() method that generates the code for an instruction
761 def __init__(self
, id, params
, code
):
762 # constructor: just save away arguments
765 label
= 'def format ' + id
766 self
.user_code
= compile(fixPythonIndentation(code
), label
, 'exec')
767 param_list
= string
.join(params
, ", ")
768 f
= '''def defInst(_code, _context, %s):
769 my_locals = vars().copy()
770 exec _code in _context, my_locals
771 return my_locals\n''' % param_list
772 c
= compile(f
, label
+ ' wrapper', 'exec')
776 def defineInst(self
, name
, args
, lineno
):
778 updateExportContext()
779 context
.update(exportContext
)
780 context
.update({ 'name': name
, 'Name': string
.capitalize(name
) })
782 vars = self
.func(self
.user_code
, context
, *args
)
783 except Exception, exc
:
784 error(lineno
, 'error defining "%s": %s.' % (name
, exc
))
785 for k
in vars.keys():
786 if k
not in ('header_output', 'decoder_output',
787 'exec_output', 'decode_block'):
789 return GenCode(**vars)
791 # Special null format to catch an implicit-format instruction
792 # definition outside of any format block.
795 self
.defaultInst
= ''
797 def defineInst(self
, name
, args
, lineno
):
799 'instruction definition "%s" with no active format!' % name
)
801 # This dictionary maps format name strings to Format objects.
804 # Define a new format
# Define a new format
def defFormat(id, params, code, lineno):
    """Create a Format named *id* and register it in formatMap.

    Reports an error (via error(), which exits) if *id* is already
    defined. *params* and *code* are passed through to the Format
    constructor; *lineno* is used only for the error message.
    """
    # make sure we haven't already defined this one
    # (idiomatic membership test; formatMap values are always Format
    # objects, never None, so this matches the old get(...) != None check)
    if id in formatMap:
        error(lineno, 'format %s redefined.' % id)
    # create new object and store in global map
    formatMap[id] = Format(id, params, code)
814 # Stack: a simple stack object. Used for both formats (formatStack)
815 # and default cases (defaultStack). Simply wraps a list to give more
816 # stack-like syntax and enable initialization with an argument list
817 # (as opposed to an argument that's a list).
820 def __init__(self
, *items
):
821 list.__init
__(self
, items
)
823 def push(self
, item
):
# The global format stack. Starts with a NoFormat, which reports an
# error for any instruction defined outside an explicit format block.
formatStack = Stack(NoFormat())

# The global default case stack. Starts with no default (None).
defaultStack = Stack( None )
839 # Indent every line in string 's' by two spaces
840 # (except preprocessor directives).
841 # Used to make nested code blocks look pretty.
844 return re
.sub(r
'(?m)^(?!#)', ' ', s
)
847 # Munge a somewhat arbitrarily formatted piece of Python code
848 # (e.g. from a format 'let' block) into something whose indentation
849 # will get by the Python parser.
851 # The two keys here are that Python will give a syntax error if
852 # there's any whitespace at the beginning of the first line, and that
853 # all lines at the same lexical nesting level must have identical
854 # indentation. Unfortunately the way code literals work, an entire
855 # let block tends to have some initial indentation. Rather than
856 # trying to figure out what that is and strip it off, we prepend 'if
857 # 1:' to make the let code the nested block inside the if (and have
858 # the parser automatically deal with the indentation for us).
860 # We don't want to do this if (1) the code block is empty or (2) the
861 # first line of the block doesn't have any whitespace at the front.
863 def fixPythonIndentation(s
):
864 # get rid of blank lines first
865 s
= re
.sub(r
'(?m)^\s*\n', '', s
);
866 if (s
!= '' and re
.match(r
'[ \t]', s
[0])):
870 # Error handler. Just call exit. Output formatted to work under
871 # Emacs compile-mode. This function should be called when errors due
872 # to user input are detected (as opposed to parser bugs).
873 def error(lineno
, string
):
875 for (filename
, line
) in fileNameStack
[0:-1]:
876 print spaces
+ "In file included from " + filename
878 # Uncomment the following line to get a Python stack backtrace for
879 # these errors too. Can be handy when trying to debug the parser.
880 # traceback.print_exc()
881 sys
.exit(spaces
+ "%s:%d: %s" % (fileNameStack
[-1][0], lineno
, string
))
883 # Like error(), but include a Python stack backtrace (for processing
884 # Python exceptions). This function should be called for errors that
885 # appear to be bugs in the parser itself.
886 def error_bt(lineno
, string
):
887 traceback
.print_exc()
888 print >> sys
.stderr
, "%s:%d: %s" % (input_filename
, lineno
, string
)
892 #####################################################################
894 # Bitfield Operator Support
896 #####################################################################
# Regexes for the bitfield-selector forms that can appear in code
# blocks; used by substBitOps() below.

# One-index selector, rewritten by substBitOps() to two-index '<n:n>'
# form. NOTE(review): as written this requires a trailing ':' inside
# the brackets — confirm against the selector syntax users write.
bitOp1ArgRE = re.compile(r'<\s*(\w+)\s*:\s*>')

# Two-index selector applied directly to a name: 'foo<a:b>'. The
# lookbehind keeps the match from starting inside a larger token.
bitOpWordRE = re.compile(r'(?<![\w\.])([\w\.]+)<\s*(\w+)\s*:\s*(\w+)\s*>')

# Two-index selector applied to a parenthesized expression: ')<a:b>'.
# substBitOps() searches backward from the ')' for the matching '('.
bitOpExprRE = re.compile(r'\)<\s*(\w+)\s*:\s*(\w+)\s*>')
903 def substBitOps(code
):
904 # first convert single-bit selectors to two-index form
905 # i.e., <n> --> <n:n>
906 code
= bitOp1ArgRE
.sub(r
'<\1:\1>', code
)
907 # simple case: selector applied to ID (name)
908 # i.e., foo<a:b> --> bits(foo, a, b)
909 code
= bitOpWordRE
.sub(r
'bits(\1, \2, \3)', code
)
910 # if selector is applied to expression (ending in ')'),
911 # we need to search backward for matching '('
912 match
= bitOpExprRE
.search(code
)
914 exprEnd
= match
.start()
918 if code
[here
] == '(':
920 elif code
[here
] == ')':
924 sys
.exit("Didn't find '('!")
926 newExpr
= r
'bits(%s, %s, %s)' % (code
[exprStart
:exprEnd
+1],
927 match
.group(1), match
.group(2))
928 code
= code
[:exprStart
] + newExpr
+ code
[match
.end():]
929 match
= bitOpExprRE
.search(code
)
936 # Template objects are format strings that allow substitution from
937 # the attribute spaces of other objects (e.g. InstObjParams instances).
940 def __init__(self
, t
):
944 # Start with the template namespace. Make a copy since we're
945 # going to modify it.
946 myDict
= templateMap
.copy()
947 # if the argument is a dictionary, we just use it.
948 if isinstance(d
, dict):
950 # if the argument is an object, we use its attribute map.
951 elif hasattr(d
, '__dict__'):
952 myDict
.update(d
.__dict
__)
954 raise TypeError, "Template.subst() arg must be or have dictionary"
955 # Protect non-Python-dict substitutions (e.g. if there's a printf
956 # in the templated C++ code)
957 template
= protect_non_subst_percents(self
.template
)
958 # CPU-model-specific substitutions are handled later (in GenCode).
959 template
= protect_cpu_symbols(template
)
960 return template
% myDict
962 # Convert to string. This handles the case when a template with a
963 # CPU-specific term gets interpolated into another template or into
966 return expand_cpu_symbols_to_string(self
.template
)
968 #####################################################################
972 # The remaining code is the support for automatically extracting
973 # instruction characteristics from pseudocode.
975 #####################################################################
977 # Force the argument to be a list
978 def makeList(list_or_item
):
981 elif type(list_or_item
) == ListType
:
984 return [ list_or_item
]
986 # generate operandSizeMap based on provided operandTypeMap:
987 # basically generate equiv. C++ type and make is_signed flag
988 def buildOperandSizeMap():
989 global operandSizeMap
991 for ext
in operandTypeMap
.keys():
992 (desc
, size
) = operandTypeMap
[ext
]
993 if desc
== 'signed int':
994 type = 'int%d_t' % size
996 elif desc
== 'unsigned int':
997 type = 'uint%d_t' % size
999 elif desc
== 'float':
1000 is_signed
= 1 # shouldn't really matter
1006 error(0, 'Unrecognized type description "%s" in operandTypeMap')
1007 operandSizeMap
[ext
] = (size
, type, is_signed
)
1010 # Base class for operand traits. An instance of this class (or actually
1011 # a class derived from this one) encapsulates the traits of a particular
1012 # operand type (e.g., "32-bit integer register").
# NOTE(review): this extract is whitespace-mangled and carries embedded
# original line numbers; gaps in that numbering mean source lines are
# elided here (e.g. the leading 'if flags == None:' test, the isReg /
# isFloatReg bodies, and getFlags' conditionals/return are missing).
# Base class for operand traits: one instance (of a subclass below)
# encapsulates a particular operand type, e.g. "32-bit integer register",
# and knows how to emit the C++ snippets for declaring, reading and
# writing that operand.
1014 class OperandTraits
:
# __init__(dflt_ext, reg_spec, flags, sort_pri):
#   dflt_ext - default size/type extension (key into operandSizeMap)
#   reg_spec - C++ expression giving the register number
#   flags    - StaticInst flags implied by this operand (several input
#              shapes accepted; canonicalized to a triple of lists below)
#   sort_pri - sort priority used to order operands deterministically
1015 def __init__(self
, dflt_ext
, reg_spec
, flags
, sort_pri
):
1016 # Force construction of operandSizeMap from operandTypeMap
1017 # if it hasn't happened yet
1018 if not globals().has_key('operandSizeMap'):
1019 buildOperandSizeMap()
1020 self
.dflt_ext
= dflt_ext
# Cache the default (size, C++ type, signedness) for this extension.
1021 (self
.dflt_size
, self
.dflt_type
, self
.dflt_is_signed
) = \
1022 operandSizeMap
[dflt_ext
]
1023 self
.reg_spec
= reg_spec
1024 # Canonical flag structure is a triple of lists, where each list
1025 # indicates the set of flags implied by this operand always, when
1026 # used as a source, and when used as a dest, respectively.
1027 # For simplicity this can be initialized using a variety of fairly
1028 # obvious shortcuts; we convert these to canonical form here.
# (review) the 'if flags == None:' guard preceding this branch is elided
# in this extract.
1030 # no flags specified (e.g., 'None')
1031 self
.flags
= ( [], [], [] )
1032 elif type(flags
) == StringType
:
1033 # a single flag: assumed to be unconditional
1034 self
.flags
= ( [ flags
], [], [] )
1035 elif type(flags
) == ListType
:
1036 # a list of flags: also assumed to be unconditional
1037 self
.flags
= ( flags
, [], [] )
1038 elif type(flags
) == TupleType
:
1039 # it's a tuple: it should be a triple,
1040 # but each item could be a single string or a list
1041 (uncond_flags
, src_flags
, dest_flags
) = flags
1042 self
.flags
= (makeList(uncond_flags
),
1043 makeList(src_flags
), makeList(dest_flags
))
1044 self
.sort_pri
= sort_pri
# Type predicates; bodies (presumably 'return 0' defaults) are elided here.
1052 def isFloatReg(self
):
1058 def isControlReg(self
):
# getFlags(op_desc): combine the unconditional flags with the src/dest
# flags that apply to this particular use of the operand.
# (review) the 'if op_desc.is_src / is_dest' guards and the return
# statement are elided in this extract.
1061 def getFlags(self
, op_desc
):
1062 # note the empty slice '[:]' gives us a copy of self.flags[0]
1063 # instead of a reference to it
1064 my_flags
= self
.flags
[0][:]
1066 my_flags
+= self
.flags
[1]
1068 my_flags
+= self
.flags
[2]
# makeDecl(op_desc): emit a C++ declaration for the operand's local
# variable, typed per the operand's effective extension.
1071 def makeDecl(self
, op_desc
):
1072 (size
, type, is_signed
) = operandSizeMap
[op_desc
.eff_ext
]
1073 # Note that initializations in the declarations are solely
1074 # to avoid 'uninitialized variable' errors from the compiler.
1075 return type + ' ' + op_desc
.base_name
+ ' = 0;\n';
# Traits for integer-register operands. Emits C++ that indexes the
# _srcRegIdx/_destRegIdx arrays and reads/writes via xc->readIntReg /
# xc->setIntReg. NOTE(review): several original lines are elided in this
# mangled extract (isReg/isIntReg predicates, the 'c = ...' initializer
# and the if/else guards in makeConstructor, the 'else:' branches, and
# the opening of the triple-quoted string in makeWrite).
1077 class IntRegOperandTraits(OperandTraits
):
# makeConstructor(op_desc): record this operand's register spec in the
# source and/or destination register index arrays.
1084 def makeConstructor(self
, op_desc
):
1087 c
+= '\n\t_srcRegIdx[%d] = %s;' % \
1088 (op_desc
.src_reg_idx
, self
.reg_spec
)
1090 c
+= '\n\t_destRegIdx[%d] = %s;' % \
1091 (op_desc
.dest_reg_idx
, self
.reg_spec
)
# makeRead(op_desc): emit C++ assigning the register value to the
# operand's local variable, truncating with bits() when the effective
# size is narrower than the default.
1094 def makeRead(self
, op_desc
):
1095 (size
, type, is_signed
) = operandSizeMap
[op_desc
.eff_ext
]
1096 if (type == 'float' or type == 'double'):
1097 error(0, 'Attempt to read integer register as FP')
1098 if (size
== self
.dflt_size
):
1099 return '%s = xc->readIntReg(this, %d);\n' % \
1100 (op_desc
.base_name
, op_desc
.src_reg_idx
)
1102 return '%s = bits(xc->readIntReg(this, %d), %d, 0);\n' % \
1103 (op_desc
.base_name
, op_desc
.src_reg_idx
, size
-1)
# makeWrite(op_desc): emit C++ writing the (possibly sign-extended)
# local variable back to the register, with optional trace logging.
1105 def makeWrite(self
, op_desc
):
1106 (size
, type, is_signed
) = operandSizeMap
[op_desc
.eff_ext
]
1107 if (type == 'float' or type == 'double'):
1108 error(0, 'Attempt to write integer register as FP')
# Narrow signed values are sign-extended to the full register width.
1109 if (size
!= self
.dflt_size
and is_signed
):
1110 final_val
= 'sext<%d>(%s)' % (size
, op_desc
.base_name
)
1112 final_val
= op_desc
.base_name
1116 xc->setIntReg(this, %d, final_val);\n
1117 if (traceData) { traceData->setData(final_val); }
1118 }''' % (self
.dflt_type
, final_val
, op_desc
.dest_reg_idx
)
# Traits for floating-point-register operands. FP register indices are
# offset by FP_Base_DepTag, and the accessor function name is chosen by
# the operand's C++ type (Single/Double/Int variants).
# NOTE(review): mangled extract with elided lines (isReg body, the
# 'c = ...' initializer and guards in makeConstructor, 'else:' branches,
# and the opening of makeWrite's triple-quoted string).
1121 class FloatRegOperandTraits(OperandTraits
):
1125 def isFloatReg(self
):
# makeConstructor(op_desc): record the FP register index (biased by
# FP_Base_DepTag) in the src/dest register index arrays.
1128 def makeConstructor(self
, op_desc
):
1131 c
+= '\n\t_srcRegIdx[%d] = %s + FP_Base_DepTag;' % \
1132 (op_desc
.src_reg_idx
, self
.reg_spec
)
1134 c
+= '\n\t_destRegIdx[%d] = %s + FP_Base_DepTag;' % \
1135 (op_desc
.dest_reg_idx
, self
.reg_spec
)
# makeRead(op_desc): pick the reader matching the operand's C++ type,
# then emit the assignment (with bits() truncation for narrow sizes).
1138 def makeRead(self
, op_desc
):
1139 (size
, type, is_signed
) = operandSizeMap
[op_desc
.eff_ext
]
1141 if (type == 'float'):
1142 func
= 'readFloatRegSingle'
1143 elif (type == 'double'):
1144 func
= 'readFloatRegDouble'
1146 func
= 'readFloatRegInt'
1147 if (size
!= self
.dflt_size
):
1149 base
= 'xc->%s(this, %d)' % \
1150 (func
, op_desc
.src_reg_idx
)
1152 return '%s = bits(%s, %d, 0);\n' % \
1153 (op_desc
.base_name
, base
, size
-1)
1155 return '%s = %s;\n' % (op_desc
.base_name
, base
)
# makeWrite(op_desc): pick the writer matching the C++ type; integer
# writes go through setFloatRegInt with a uint of the default size, and
# narrow signed values are sign-extended first.
1157 def makeWrite(self
, op_desc
):
1158 (size
, type, is_signed
) = operandSizeMap
[op_desc
.eff_ext
]
1159 final_val
= op_desc
.base_name
1160 if (type == 'float'):
1161 func
= 'setFloatRegSingle'
1162 elif (type == 'double'):
1163 func
= 'setFloatRegDouble'
1165 func
= 'setFloatRegInt'
1166 type = 'uint%d_t' % self
.dflt_size
1167 if (size
!= self
.dflt_size
and is_signed
):
1168 final_val
= 'sext<%d>(%s)' % (size
, op_desc
.base_name
)
1172 xc->%s(this, %d, final_val);\n
1173 if (traceData) { traceData->setData(final_val); }
1174 }''' % (type, final_val
, func
, op_desc
.dest_reg_idx
)
# Traits for control-register operands. These use per-register dependence
# tags (%s_DepTag) and dedicated read%s()/set%s() accessors named after
# the register spec. NOTE(review): mangled extract; elided lines include
# isReg, the 'c = ...' initializer and guards in makeConstructor, and the
# tail of makeWrite (its final %-substitution and return).
1177 class ControlRegOperandTraits(OperandTraits
):
1181 def isControlReg(self
):
# makeConstructor(op_desc): record the control register's dependence tag
# in the src/dest register index arrays.
1184 def makeConstructor(self
, op_desc
):
1187 c
+= '\n\t_srcRegIdx[%d] = %s_DepTag;' % \
1188 (op_desc
.src_reg_idx
, self
.reg_spec
)
1190 c
+= '\n\t_destRegIdx[%d] = %s_DepTag;' % \
1191 (op_desc
.dest_reg_idx
, self
.reg_spec
)
# makeRead(op_desc): emit C++ reading via xc->read<RegSpec>(), truncating
# with bits() when the effective size is narrower than the default.
1194 def makeRead(self
, op_desc
):
1195 (size
, type, is_signed
) = operandSizeMap
[op_desc
.eff_ext
]
1197 if (type == 'float' or type == 'double'):
1198 error(0, 'Attempt to read control register as FP')
1199 base
= 'xc->read%s()' % self
.reg_spec
1200 if size
== self
.dflt_size
:
1201 return '%s = %s;\n' % (op_desc
.base_name
, base
)
1203 return '%s = bits(%s, %d, 0);\n' % \
1204 (op_desc
.base_name
, base
, size
-1)
# makeWrite(op_desc): emit C++ writing via xc->set<RegSpec>() plus a
# traceData hook; the returned string is truncated in this extract.
1206 def makeWrite(self
, op_desc
):
1207 (size
, type, is_signed
) = operandSizeMap
[op_desc
.eff_ext
]
1208 if (type == 'float' or type == 'double'):
1209 error(0, 'Attempt to write control register as FP')
1210 wb
= 'xc->set%s(%s);\n' % (self
.reg_spec
, op_desc
.base_name
)
1211 wb
+= 'if (traceData) { traceData->setData(%s); }' % \
# Traits for memory operands (the data transferred by a load/store).
# NOTE(review): mangled extract; the isMem predicate, the bodies of
# makeConstructor/makeRead/makeWrite, and the returns of makeDecl and
# makeAccSize are elided (numbering gaps 1216-1218, 1220-1221, 1228-1229,
# 1231-1232, 1234-1235, 1240-1241).
1215 class MemOperandTraits(OperandTraits
):
1219 def makeConstructor(self
, op_desc
):
# makeDecl(op_desc): declare the local C++ variable holding the memory
# data, typed per the operand's effective extension.
1222 def makeDecl(self
, op_desc
):
1223 (size
, type, is_signed
) = operandSizeMap
[op_desc
.eff_ext
]
1224 # Note that initializations in the declarations are solely
1225 # to avoid 'uninitialized variable' errors from the compiler.
1226 # Declare memory data variable.
1227 c
= '%s %s = 0;\n' % (type, op_desc
.base_name
)
1230 def makeRead(self
, op_desc
):
1233 def makeWrite(self
, op_desc
):
1236 # Return the memory access size *in bits*, suitable for
1237 # forming a type via "uint%d_t". Divide by 8 if you want bytes.
1238 def makeAccSize(self
, op_desc
):
1239 (size
, type, is_signed
) = operandSizeMap
[op_desc
.eff_ext
]
# Traits for the next-PC pseudo-operand: reads compute PC+4, writes set
# the next PC. NOTE(review): makeConstructor's body (orig lines 1244-1245,
# presumably 'return %s') is elided in this mangled extract.
1242 class NPCOperandTraits(OperandTraits
):
1243 def makeConstructor(self
, op_desc
):
# Read: the next sequential PC (current PC + 4; fixed 4-byte encoding).
1246 def makeRead(self
, op_desc
):
1247 return '%s = xc->readPC() + 4;\n' % op_desc
.base_name
# Write: redirect control flow by setting the next PC.
1249 def makeWrite(self
, op_desc
):
1250 return 'xc->setNextPC(%s);\n' % op_desc
.base_name
# Names of module-level symbols exported into the ISA description's
# evaluation context (see updateExportContext/exportDict below).
# NOTE(review): the tuple is truncated in this extract -- its remaining
# entries and closing paren (orig lines 1256+) are elided.
1253 exportContextSymbols
= ('IntRegOperandTraits', 'FloatRegOperandTraits',
1254 'ControlRegOperandTraits', 'MemOperandTraits',
1255 'NPCOperandTraits', 'InstObjParams', 'CodeBlock',
# Refresh the exported evaluation context: re-export the symbols named in
# exportContextSymbols plus the current template map, so 'let' blocks in
# the ISA description see up-to-date bindings.
1260 def updateExportContext():
1261 exportContext
.update(exportDict(*exportContextSymbols
))
1262 exportContext
.update(templateMap
)
def exportDict(*symNames):
    """Map each given symbol name to its current value in this module.

    Used to build the evaluation context handed to the ISA description.
    NOTE: resolves names via eval(); pass only trusted, internally
    defined symbol names, never external input.
    """
    pairs = [(name, eval(name)) for name in symNames]
    return dict(pairs)
1270 # Define operand variables that get derived from the basic declaration
1271 # of ISA-specific operands in operandTraitsMap. This function must be
1272 # called by the ISA description file explicitly after defining
1273 # operandTraitsMap (in a 'let' block).
# NOTE(review): mangled extract; the 'global operandsRE' declaration and
# the closing of the verbose regex string (orig lines 1276, 1283,
# 1285-1286, 1288, 1294) are elided.
1275 def defineDerivedOperandVars():
1277 operands
= operandTraitsMap
.keys()
# Verbose regex matching any declared operand name, with an optional
# '.ext' size suffix; lookaround keeps it from matching inside longer
# identifiers. (Do not insert anything before the string closes.)
1279 operandsREString
= (r
'''
1280 (?<![\w\.]) # neg. lookbehind assertion: prevent partial matches
1281 ((%s)(?:\.(\w+))?) # match: operand with optional '.' then suffix
1282 (?![\w\.]) # neg. lookahead assertion: prevent partial matches
1284 % string
.join(operands
, '|'))
1287 operandsRE
= re
.compile(operandsREString
, re
.MULTILINE|re
.VERBOSE
)
1289 # Same as operandsREString, but extension is mandatory, and only two
1290 # groups are returned (base and ext, not full name as above).
1291 # Used for substituting '_' for '.' to make C++ identifiers.
1292 operandsWithExtREString
= (r
'(?<![\w\.])(%s)\.(\w+)(?![\w\.])'
1293 % string
.join(operands
, '|'))
1295 global operandsWithExtRE
1296 operandsWithExtRE
= re
.compile(operandsWithExtREString
, re
.MULTILINE
)
1300 # Operand descriptor class. An instance of this class represents
1301 # a specific operand for a code block.
# NOTE(review): mangled extract; elided lines include 'self.ext = ext'
# (orig 1307), the 'if ext:' branch choosing an explicit extension
# (orig 1313-1315), and the 'def finalize(self):' header (orig 1327) --
# the assignments from orig 1328 onward belong to that second method,
# not to __init__.
1303 class OperandDescriptor
:
# __init__(full_name, base_name, ext, is_src, is_dest): record how the
# operand was written in the code block and look up its traits object.
1304 def __init__(self
, full_name
, base_name
, ext
, is_src
, is_dest
):
1305 self
.full_name
= full_name
1306 self
.base_name
= base_name
1308 self
.is_src
= is_src
1309 self
.is_dest
= is_dest
1310 self
.traits
= operandTraitsMap
[base_name
]
1311 # The 'effective extension' (eff_ext) is either the actual
1312 # extension, if one was explicitly provided, or the default.
1316 self
.eff_ext
= self
.traits
.dflt_ext
1318 # note that mem_acc_size is undefined for non-mem operands...
1319 # template must be careful not to use it if it doesn't apply.
1320 if self
.traits
.isMem():
1321 self
.mem_acc_size
= self
.traits
.makeAccSize(self
)
1323 # Finalize additional fields (primarily code fields). This step
1324 # is done separately since some of these fields may depend on the
1325 # register index enumeration that hasn't been performed yet at the
1326 # time of __init__().
# (review) these assignments form the body of the elided finalize()
# method: delegate all code generation to the traits object.
1328 self
.flags
= self
.traits
.getFlags(self
)
1329 self
.constructor
= self
.traits
.makeConstructor(self
)
1330 self
.op_decl
= self
.traits
.makeDecl(self
)
1333 self
.op_rd
= self
.traits
.makeRead(self
)
1338 self
.op_wb
= self
.traits
.makeWrite(self
)
# Container for the operand descriptors found in one code block: parses
# the code for operand references, merges duplicates, enumerates register
# indices, and offers concatenation helpers over per-operand attributes.
# NOTE(review): mangled extract; elided lines include self.items/bases
# initialization, the scan loop header and termination test, the 'else'
# branches, the '__len__'/'sort' def headers, and the filter guard inside
# __internalConcatAttrs.
1343 class OperandDescriptorList
:
1345 # Find all the operands in the given code block. Returns an operand
1346 # descriptor list (instance of class OperandDescriptorList).
1347 def __init__(self
, code
):
1350 # delete comments so we don't match on reg specifiers inside
1351 code
= commentRE
.sub('', code
)
1352 # search for operands
1355 match
= operandsRE
.search(code
, next_pos
)
1357 # no more matches: we're done
1360 # regexp groups are operand full name, base, and extension
1361 (op_full
, op_base
, op_ext
) = op
1362 # if the token following the operand is an assignment, this is
1363 # a destination (LHS), else it's a source (RHS)
1364 is_dest
= (assignRE
.match(code
, match
.end()) != None)
1365 is_src
= not is_dest
1366 # see if we've already seen this one
1367 op_desc
= self
.find_base(op_base
)
# A repeated operand must use a consistent size extension; merge its
# src/dest roles into the existing descriptor.
1369 if op_desc
.ext
!= op_ext
:
1370 error(0, 'Inconsistent extensions for operand %s' % \
1372 op_desc
.is_src
= op_desc
.is_src
or is_src
1373 op_desc
.is_dest
= op_desc
.is_dest
or is_dest
1375 # new operand: create new descriptor
1376 op_desc
= OperandDescriptor(op_full
, op_base
, op_ext
,
1378 self
.append(op_desc
)
1379 # start next search after end of current match
1380 next_pos
= match
.end()
1382 # enumerate source & dest register operands... used in building
# Assign dense src/dest register indices and count FP/int dest
# registers; at most one memory operand is allowed per code block.
1385 self
.numDestRegs
= 0
1386 self
.numFPDestRegs
= 0
1387 self
.numIntDestRegs
= 0
1388 self
.memOperand
= None
1389 for op_desc
in self
.items
:
1390 if op_desc
.traits
.isReg():
1392 op_desc
.src_reg_idx
= self
.numSrcRegs
1393 self
.numSrcRegs
+= 1
1395 op_desc
.dest_reg_idx
= self
.numDestRegs
1396 self
.numDestRegs
+= 1
1397 if op_desc
.traits
.isFloatReg():
1398 self
.numFPDestRegs
+= 1
1399 elif op_desc
.traits
.isIntReg():
1400 self
.numIntDestRegs
+= 1
1401 elif op_desc
.traits
.isMem():
1403 error(0, "Code block has more than one memory operand.")
1404 self
.memOperand
= op_desc
1405 # now make a final pass to finalize op_desc fields that may depend
1406 # on the register enumeration
1407 for op_desc
in self
.items
:
# (review) this return belongs to the elided __len__ definition.
1411 return len(self
.items
)
1413 def __getitem__(self
, index
):
1414 return self
.items
[index
]
# append: add a descriptor and index it by base name for find_base().
1416 def append(self
, op_desc
):
1417 self
.items
.append(op_desc
)
1418 self
.bases
[op_desc
.base_name
] = op_desc
1420 def find_base(self
, base_name
):
1421 # like self.bases[base_name], but returns None if not found
1422 # (rather than raising exception)
1423 return self
.bases
.get(base_name
)
1425 # internal helper function for concat[Some]Attr{Strings|Lists}
# Accumulates getattr(op_desc, attr_name) into 'result' for each operand
# accepted by 'filter'; 'result' seeds the type ('' or []).
1426 def __internalConcatAttrs(self
, attr_name
, filter, result
):
1427 for op_desc
in self
.items
:
1429 result
+= getattr(op_desc
, attr_name
)
1432 # return a single string that is the concatenation of the (string)
1433 # values of the specified attribute for all operands
1434 def concatAttrStrings(self
, attr_name
):
1435 return self
.__internalConcatAttrs
(attr_name
, lambda x
: 1, '')
1437 # like concatAttrStrings, but only include the values for the operands
1438 # for which the provided filter function returns true
1439 def concatSomeAttrStrings(self
, filter, attr_name
):
1440 return self
.__internalConcatAttrs
(attr_name
, filter, '')
1442 # return a single list that is the concatenation of the (list)
1443 # values of the specified attribute for all operands
1444 def concatAttrLists(self
, attr_name
):
1445 return self
.__internalConcatAttrs
(attr_name
, lambda x
: 1, [])
1447 # like concatAttrLists, but only include the values for the operands
1448 # for which the provided filter function returns true
1449 def concatSomeAttrLists(self
, filter, attr_name
):
1450 return self
.__internalConcatAttrs
(attr_name
, filter, [])
# (review) body of the elided sort() definition: order operands by their
# traits' sort priority.
1453 self
.items
.sort(lambda a
, b
: a
.traits
.sort_pri
- b
.traits
.sort_pri
)
1455 # Regular expression object to match C++ comments
1456 # (used in findOperands())
# (review) only '//' line comments are stripped; /* */ block comments
# are not handled -- presumably they don't occur in ISA descriptions.
# TODO confirm.
1457 commentRE
= re
.compile(r
'//.*\n')
1459 # Regular expression object to match assignment statements
1460 # (used in findOperands())
# Matches '=' but not '==', so comparisons aren't mistaken for writes.
1461 assignRE
= re
.compile(r
'\s*=(?!=)', re
.MULTILINE
)
1463 # Munge operand names in code string to make legal C++ variable names.
1464 # This means getting rid of the type extension if any.
1465 # (Will match base_name attribute of OperandDescriptor object.)
# Relies on module-global operandsWithExtRE, which is built by
# defineDerivedOperandVars(); group 1 is the base name, so the '.ext'
# suffix is dropped.
1466 def substMungedOpNames(code
):
1467 return operandsWithExtRE
.sub(r
'\1', code
)
# (review) orphaned fragment: the enclosing 'def' (orig line 1469) is
# elided in this extract. Appears to join each sub-sequence of t into a
# single string via string.join.
1470 return map(string
.join
, t
)
# Build the C++ constructor snippet that sets each StaticInst flag bit in
# flag_list. NOTE(review): mangled extract; the early return for an empty
# list, the sort + index init, the duplicate-deletion/else/increment
# lines, the pre/post string definitions, and the final return (orig
# lines 1474, 1476-1477, 1480-1484, 1486) are elided.
1472 def makeFlagConstructor(flag_list
):
1473 if len(flag_list
) == 0:
1475 # filter out repeated flags
# (review) assumes flag_list was sorted first (elided), so duplicates
# are adjacent.
1478 while i
< len(flag_list
):
1479 if flag_list
[i
] == flag_list
[i
-1]:
1485 code
= pre
+ string
.join(flag_list
, post
+ pre
) + post
# CodeBlock.__init__ (enclosing 'class CodeBlock' header, orig 1488, is
# elided in this extract). Parses one block of ISA-description code:
# finds its operands, munges operand names into legal C++ identifiers,
# and precomputes the constructor / declaration / read / write snippets
# plus the flag list and a guessed operand class.
1489 def __init__(self
, code
):
1490 self
.orig_code
= code
1491 self
.operands
= OperandDescriptorList(code
)
# Rewrite bit-slice operators and strip operand '.ext' suffixes.
1492 self
.code
= substMungedOpNames(substBitOps(code
))
# Constructor snippet: per-operand pieces plus the register counts
# computed by OperandDescriptorList.
1493 self
.constructor
= self
.operands
.concatAttrStrings('constructor')
1494 self
.constructor
+= \
1495 '\n\t_numSrcRegs = %d;' % self
.operands
.numSrcRegs
1496 self
.constructor
+= \
1497 '\n\t_numDestRegs = %d;' % self
.operands
.numDestRegs
1498 self
.constructor
+= \
1499 '\n\t_numFPDestRegs = %d;' % self
.operands
.numFPDestRegs
1500 self
.constructor
+= \
1501 '\n\t_numIntDestRegs = %d;' % self
.operands
.numIntDestRegs
1503 self
.op_decl
= self
.operands
.concatAttrStrings('op_decl')
1505 self
.op_rd
= self
.operands
.concatAttrStrings('op_rd')
1506 self
.op_wb
= self
.operands
.concatAttrStrings('op_wb')
1508 self
.flags
= self
.operands
.concatAttrLists('flags')
# mem_acc_size is only defined when the block has a memory operand.
1510 if self
.operands
.memOperand
:
1511 self
.mem_acc_size
= self
.operands
.memOperand
.mem_acc_size
1513 # Make a basic guess on the operand class (function unit type).
1514 # These are good enough for most cases, and will be overridden
1516 if 'IsStore' in self
.flags
:
1517 self
.op_class
= 'MemWriteOp'
1518 elif 'IsLoad' in self
.flags
or 'IsPrefetch' in self
.flags
:
1519 self
.op_class
= 'MemReadOp'
1520 elif 'IsFloating' in self
.flags
:
1521 self
.op_class
= 'FloatAddOp'
1523 self
.op_class
= 'IntAluOp'
1525 # Assume all instruction flags are of the form 'IsFoo'
1526 instFlagRE
= re
.compile(r
'Is.*')
1528 # OpClass constants end in 'Op' except No_OpClass
# (review) both patterns are used with .match(), so they anchor at the
# start of the candidate string only.
1529 opClassRE
= re
.compile(r
'.*Op|No_OpClass')
# Parameter bundle handed to instruction templates: mnemonic, class
# names, plus every attribute of the associated CodeBlock, with optional
# extra StaticInst flags / an OpClass passed via opt_args.
# NOTE(review): mangled extract; elided lines include the
# 'if code_block:'/'else:' guards, the 'for oa in opt_args:' loop header,
# the op_class assignment branch, and several else branches.
# NOTE(review): opt_args defaults to a mutable [] -- a shared-default
# hazard if any caller mutated it; cannot be fixed here since the block
# is incomplete.
1531 class InstObjParams
:
1532 def __init__(self
, mnem
, class_name
, base_class
= '',
1533 code_block
= None, opt_args
= []):
1534 self
.mnemonic
= mnem
1535 self
.class_name
= class_name
1536 self
.base_class
= base_class
# Copy every CodeBlock attribute (code, constructor, flags, ...) onto
# this object so templates can reference them directly.
1538 for code_attr
in code_block
.__dict
__.keys():
1539 setattr(self
, code_attr
, getattr(code_block
, code_attr
))
1541 self
.constructor
= ''
1543 # Optional arguments are assumed to be either StaticInst flags
1544 # or an OpClass value. To avoid having to import a complete
1545 # list of these values to match against, we do it ad-hoc
1548 if instFlagRE
.match(oa
):
1549 self
.flags
.append(oa
)
1550 elif opClassRE
.match(oa
):
1553 error(0, 'InstObjParams: optional arg "%s" not recognized '
1554 'as StaticInst::Flag or OpClass.' % oa
)
1556 # add flag initialization to constructor here to include
1557 # any flags added via opt_args
1558 self
.constructor
+= makeFlagConstructor(self
.flags
)
1560 # if 'IsFloating' is set, add call to the FP enable check
1561 # function (which should be provided by isa_desc via a declare)
1562 if 'IsFloating' in self
.flags
:
1563 self
.fp_enable_check
= 'fault = checkFpEnableFault(xc);'
1565 self
.fp_enable_check
= ''
1567 #######################
1569 # Output file template
1574 * DO NOT EDIT THIS FILE!!!
1576 * It was automatically generated from the ISA description in %(filename)s
1583 namespace %(namespace)s {
1585 %(namespace_output)s
1587 } // namespace %(namespace)s
1591 # Update the output file only if the new contents are different from
1592 # the current contents. Minimizes the files that need to be rebuilt
1593 # after minor changes.
# NOTE(review): mangled extract; the open()/close()/write() calls and
# else/update branches (orig lines 1595, 1597, 1599, 1603-1604, 1606,
# 1608+) are elided. Uses Python 2 print statements; 'file' shadows the
# builtin (pre-existing style, left as-is).
1594 def update_if_needed(file, contents
):
# Only compare against the old contents if the file is readable.
1596 if os
.access(file, os
.R_OK
):
1598 old_contents
= f
.read()
1600 if contents
!= old_contents
:
1601 print 'Updating', file
# Remove before rewriting in case the generated file is read-only.
1602 os
.remove(file) # in case it's write-protected
1605 print 'File', file, 'is unchanged'
1607 print 'Generating', file
1614 # This regular expression matches include directives
# Captures the quoted path as named group 'filename'.
# (review) the compile flags argument (orig line 1616, presumably
# re.MULTILINE) is elided in this extract.
1615 includeRE
= re
.compile(r
'^\s*##include\s+"(?P<filename>[\w/.-]*)".*$',
# Expand '##include' directives in the ISA description by splicing in the
# included file's text, bracketed by ##newfile/##endfile markers so error
# messages can report the right file. NOTE(review): mangled extract; the
# scan-loop header, pos bookkeeping, the ##endfile suffix and the return
# statement (orig lines 1620-1621, 1623-1624, 1627, 1631-1633, 1635+)
# are elided.
1618 def preprocess_isa_desc(isa_desc
):
1619 # Find any includes and include them
1622 m
= includeRE
.search(isa_desc
, pos
)
1625 filename
= m
.group('filename')
1626 print 'Including file "%s"' % filename
# Splice: text before the directive + ##newfile marker + included text.
1628 isa_desc
= isa_desc
[:m
.start()] + \
1629 '##newfile "' + filename
+ '"\n' + \
1630 open(filename
).read() + \
1634 error(0, 'Error including file "%s"' % (filename
))
1639 # Read in and parse the ISA description.
# Top-level driver: preprocess the description, run the PLY yacc parser,
# then emit decoder.hh, decoder.cc and one exec file per CPU model via
# update_if_needed(). NOTE(review): mangled extract with elided lines
# (orig 1647, 1651-1652, 1655-1656, 1658, 1660, 1662, 1668, 1675);
# 'input' shadows the Python builtin (pre-existing style, left as-is).
1641 def parse_isa_desc(isa_desc_file
, output_dir
, include_path
):
1642 # set a global var for the input filename... used in error messages
1643 global input_filename
1644 input_filename
= isa_desc_file
1645 global fileNameStack
1646 fileNameStack
= [(input_filename
, 1)]
1648 # Suck the ISA description file in.
1649 input = open(isa_desc_file
)
1650 isa_desc
= input.read()
1653 # Perform Preprocessing
1654 isa_desc
= preprocess_isa_desc(isa_desc
)
# Parse; the grammar's start symbol yields the ISA name, namespace, and
# the global/namespace code sections.
1657 (isa_name
, namespace
, global_code
, namespace_code
) = yacc
.parse(isa_desc
)
1659 # grab the last three path components of isa_desc_file to put in
1661 filename
= '/'.join(isa_desc_file
.split('/')[-3:])
1663 # generate decoder.hh
1664 includes
= '#include "base/bitfield.hh" // for bitfield support'
1665 global_output
= global_code
.header_output
1666 namespace_output
= namespace_code
.header_output
# file_template pulls includes/global_output/namespace_output/filename
# from the local namespace via vars().
1667 update_if_needed(output_dir
+ '/decoder.hh', file_template
% vars())
1669 # generate decoder.cc
1670 includes
= '#include "%s/decoder.hh"' % include_path
1671 global_output
= global_code
.decoder_output
1672 namespace_output
= namespace_code
.decoder_output
1673 namespace_output
+= namespace_code
.decode_block
1674 update_if_needed(output_dir
+ '/decoder.cc', file_template
% vars())
1676 # generate per-cpu exec files
1677 for cpu
in CpuModel
.list:
1678 includes
= '#include "%s/decoder.hh"\n' % include_path
1679 includes
+= cpu
.includes
1680 global_output
= global_code
.exec_output
[cpu
.name
]
1681 namespace_output
= namespace_code
.exec_output
[cpu
.name
]
1682 update_if_needed(output_dir
+ '/' + cpu
.filename
,
1683 file_template
% vars())
1685 # Called as script: get args from command line.
# Usage: isa_parser.py <isa_desc_file> <output_dir> <include_path>
# (no argument-count validation; an IndexError is raised if any are
# missing).
1686 if __name__
== '__main__':
1687 parse_isa_desc(sys
.argv
[1], sys
.argv
[2], sys
.argv
[3])