Merge zizzer.eecs.umich.edu:/z/m5/Bitkeeper/m5
[gem5.git] / arch / isa_parser.py
1 #! /usr/bin/env python
2
3 # Copyright (c) 2003-2005 The Regents of The University of Michigan
4 # All rights reserved.
5 #
6 # Redistribution and use in source and binary forms, with or without
7 # modification, are permitted provided that the following conditions are
8 # met: redistributions of source code must retain the above copyright
9 # notice, this list of conditions and the following disclaimer;
10 # redistributions in binary form must reproduce the above copyright
11 # notice, this list of conditions and the following disclaimer in the
12 # documentation and/or other materials provided with the distribution;
13 # neither the name of the copyright holders nor the names of its
14 # contributors may be used to endorse or promote products derived from
15 # this software without specific prior written permission.
16 #
17 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20 # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21 # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23 # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28
29 import os
30 import sys
31 import re
32 import string
33 import traceback
34 # get type names
35 from types import *
36
37 # Prepend the directory where the PLY lex & yacc modules are found
38 # to the search path. Assumes we're compiling in a subdirectory
39 # of 'build' in the current tree.
40 sys.path[0:0] = [os.environ['M5_EXT'] + '/ply']
41
42 import lex
43 import yacc
44
45 #####################################################################
46 #
47 # Lexer
48 #
49 # The PLY lexer module takes two things as input:
50 # - A list of token names (the string list 'tokens')
51 # - A regular expression describing a match for each token. The
52 # regexp for token FOO can be provided in two ways:
53 # - as a string variable named t_FOO
54 # - as the doc string for a function named t_FOO. In this case,
55 # the function is also executed, allowing an action to be
56 # associated with each token match.
57 #
58 #####################################################################
59
# Reserved words. These are listed separately as they are matched
# using the same regexp as generic IDs, but distinguished in the
# t_ID() function. The PLY documentation suggests this approach.
reserved = (
    'BITFIELD', 'DECODE', 'DECODER', 'DEFAULT', 'DEF', 'EXEC', 'FORMAT',
    'HEADER', 'LET', 'NAMESPACE', 'OPERAND_TYPES', 'OPERANDS',
    'OUTPUT', 'SIGNED', 'TEMPLATE'
    )

# List of tokens. The lex module requires this.
tokens = reserved + (
    # identifier
    'ID',

    # integer literal
    'INTLIT',

    # string literal
    'STRLIT',

    # code literal
    'CODELIT',

    # ( ) [ ] { } < > , ; : :: *
    'LPAREN', 'RPAREN',
    # not used any more... commented out to suppress PLY warning
    # 'LBRACKET', 'RBRACKET',
    'LBRACE', 'RBRACE',
    'LESS', 'GREATER',
    'COMMA', 'SEMI', 'COLON', 'DBLCOLON',
    'ASTERISK',

    # C preprocessor directives
    'CPPDIRECTIVE'

    # The following are matched but never returned. commented out to
    # suppress PLY warning
    # newfile directive
    # 'NEWFILE',

    # endfile directive
    # 'ENDFILE'
    )

# Regular expressions for token matching.  PLY associates the regexp
# in a module-level 't_<TOKEN>' string with that token.
t_LPAREN = r'\('
t_RPAREN = r'\)'
# not used any more... commented out to suppress PLY warning
# t_LBRACKET = r'\['
# t_RBRACKET = r'\]'
t_LBRACE = r'\{'
t_RBRACE = r'\}'
t_LESS = r'\<'
t_GREATER = r'\>'
t_COMMA = r','
t_SEMI = r';'
t_COLON = r':'
t_DBLCOLON = r'::'
t_ASTERISK = r'\*'

# Identifiers and reserved words.  Map the lowercase spelling of each
# keyword to its token name so t_ID() can reclassify keyword matches.
reserved_map = { }
for r in reserved:
    reserved_map[r.lower()] = r
124
# Identifiers and reserved words share one regexp; reserved_map (built
# above) reclassifies keyword spellings, everything else stays 'ID'.
def t_ID(t):
    r'[A-Za-z_]\w*'
    t.type = reserved_map.get(t.value,'ID')
    return t
129
# Integer literal: hex ('0x...') or plain decimal.
def t_INTLIT(t):
    r'(0x[\da-fA-F]+)|\d+'
    # Base 0 lets int() honor the 0x prefix automatically.
    value = 0
    try:
        value = int(t.value, 0)
    except ValueError:
        error(t.lineno, 'Integer value "%s" too large' % t.value)
    t.value = value
    return t
139
# String literal. Note that these use only single quotes, and
# can span multiple lines.
def t_STRLIT(t):
    r"(?m)'([^'])+'"
    # Strip off the surrounding quotes.
    stripped = t.value[1:-1]
    t.value = stripped
    # Multi-line literals advance the current line number.
    t.lineno += stripped.count('\n')
    return t
148
149
150 # "Code literal"... like a string literal, but delimiters are
151 # '{{' and '}}' so they get formatted nicely under emacs c-mode
152 def t_CODELIT(t):
153 r"(?m)\{\{([^\}]|}(?!\}))+\}\}"
154 # strip off {{ & }}
155 t.value = t.value[2:-2]
156 t.lineno += t.value.count('\n')
157 return t
158
# C preprocessor directive: a line starting with a single '#'
# ('##' is reserved for the newfile/endfile markers below).
def t_CPPDIRECTIVE(t):
    r'^\#[^\#].*\n'
    # The match swallows the trailing newline; bump the line counter.
    t.lineno = t.lineno + t.value.count('\n')
    return t
163
# '##newfile "<filename>"' directive: remember where we were in the
# including file and restart line numbering for the new one.
def t_NEWFILE(t):
    r'^\#\#newfile[ \t]*"[^"]*"'
    global fileNameStack
    # Pull the filename out of the quotes.  Indexing for the opening
    # quote keeps this correct for any amount of whitespace after the
    # directive keyword (the old fixed [11:-1] slice assumed exactly one
    # space, and the old regexp's '[ /t]' matched '/' and 't' rather
    # than space/tab, and its filename class had a stray '/-_' range).
    filename = t.value[t.value.index('"') + 1 : -1]
    fileNameStack.append((filename, t.lineno))
    t.lineno = 0
169
# '##endfile' directive: return to the including file, restoring its
# saved line number from the stack pushed by t_NEWFILE.
def t_ENDFILE(t):
    r'^\#\#endfile'
    (filename, t.lineno) = fileNameStack.pop()

#
# The functions t_NEWLINE, t_ignore, and t_error are
# special for the lex module.
#

# Newlines: no token is returned; just keep the line count accurate.
def t_NEWLINE(t):
    r'\n+'
    t.lineno += t.value.count('\n')

# Comments (C++-style '//' to end of line) are discarded.
def t_comment(t):
    r'//.*'

# Completely ignored characters (space, tab, formfeed).
t_ignore = ' \t\x0c'

# Error handler: report the offending character and skip past it.
def t_error(t):
    error(t.lineno, "illegal character '%s'" % t.value[0])
    t.skip(1)

# Build the lexer
lex.lex()
198
199 #####################################################################
200 #
201 # Parser
202 #
203 # Every function whose name starts with 'p_' defines a grammar rule.
204 # The rule is encoded in the function's doc string, while the
205 # function body provides the action taken when the rule is matched.
206 # The argument to each function is a list of the values of the
207 # rule's symbols: t[0] for the LHS, and t[1..n] for the symbols
208 # on the RHS. For tokens, the value is copied from the t.value
209 # attribute provided by the lexer. For non-terminals, the value
210 # is assigned by the producing rule; i.e., the job of the grammar
211 # rule function is to set the value for the non-terminal on the LHS
212 # (by assigning to t[0]).
213 #####################################################################
214
# The LHS of the first grammar rule is used as the start symbol
# (in this case, 'specification'). Note that this rule enforces
# that there will be exactly one namespace declaration, with 0 or more
# global defs/decls before and after it. The defs & decls before
# the namespace decl will be outside the namespace; those after
# will be inside. The decoder function is always inside the namespace.
def p_specification(t):
    'specification : opt_defs_and_outputs name_decl opt_defs_and_outputs decode_block'
    global_code = t[1]
    isa_name = t[2]
    namespace = isa_name + "Inst"
    # wrap the decode block as a function definition
    t[4].wrap_decode_block('''
StaticInstPtr<%(isa_name)s>
%(isa_name)s::decodeInst(%(isa_name)s::MachInst machInst)
{
    using namespace %(namespace)s;
''' % vars(), '}')
    # both the latter output blocks and the decode block are in the namespace
    namespace_code = t[3] + t[4]
    # pass it all back to the caller of yacc.parse()
    t[0] = (isa_name, namespace, global_code, namespace_code)
237
# ISA name declaration looks like "namespace <foo>;"
def p_name_decl(t):
    'name_decl : NAMESPACE ID SEMI'
    # The value of this production is just the bare ISA name.
    isa_name = t[2]
    t[0] = isa_name
242
# 'opt_defs_and_outputs' is a possibly empty sequence of
# def and/or output statements.
def p_opt_defs_and_outputs_0(t):
    'opt_defs_and_outputs : empty'
    # Nothing there: contribute an empty chunk of generated code.
    t[0] = GenCode()

def p_opt_defs_and_outputs_1(t):
    'opt_defs_and_outputs : defs_and_outputs'
    t[0] = t[1]

def p_defs_and_outputs_0(t):
    'defs_and_outputs : def_or_output'
    t[0] = t[1]

def p_defs_and_outputs_1(t):
    'defs_and_outputs : defs_and_outputs def_or_output'
    # GenCode.__add__ concatenates all the output sections.
    t[0] = t[1] + t[2]

# The list of possible definition/output statements.
def p_def_or_output(t):
    '''def_or_output : def_format
    | def_bitfield
    | def_template
    | def_operand_types
    | def_operands
    | output_header
    | output_decoder
    | output_exec
    | global_let'''
    t[0] = t[1]
273
274 # Output blocks 'output <foo> {{...}}' (C++ code blocks) are copied
275 # directly to the appropriate output section.
276
277
# Protect any non-dict-substitution '%'s in a format string
# (i.e. those not followed by '(') by doubling them, so a later
# '%' formatting pass leaves them alone.
def protect_non_subst_percents(s):
    plain_percent = re.compile(r'%(?!\()')
    return plain_percent.sub('%%', s)
282
# Massage output block by substituting in template definitions and bit
# operators. We handle '%'s embedded in the string that don't
# indicate template substitutions (or CPU-specific symbols, which get
# handled in GenCode) by doubling them first so that the format
# operation will reduce them back to single '%'s.
def process_output(s):
    s = protect_non_subst_percents(s)
    # protects cpu-specific symbols too
    s = protect_cpu_symbols(s)
    # expand templates, then rewrite bitfield selectors into bits() calls
    return substBitOps(s % templateMap)
293
# Output blocks route a processed code literal into one of the three
# output streams carried by GenCode.
def p_output_header(t):
    'output_header : OUTPUT HEADER CODELIT SEMI'
    t[0] = GenCode(header_output = process_output(t[3]))

def p_output_decoder(t):
    'output_decoder : OUTPUT DECODER CODELIT SEMI'
    t[0] = GenCode(decoder_output = process_output(t[3]))

def p_output_exec(t):
    'output_exec : OUTPUT EXEC CODELIT SEMI'
    t[0] = GenCode(exec_output = process_output(t[3]))

# global let blocks 'let {{...}}' (Python code blocks) are executed
# directly when seen. Note that these execute in a special variable
# context 'exportContext' to prevent the code from polluting this
# script's namespace.
def p_global_let(t):
    'global_let : LET CODELIT SEMI'
    updateExportContext()
    try:
        exec fixPythonIndentation(t[2]) in exportContext
    except Exception, exc:
        error(t.lineno(1),
              'error: %s in global let block "%s".' % (exc, t[2]))
    t[0] = GenCode() # contributes nothing to the output C++ file

# Define the mapping from operand type extensions to C++ types and bit
# widths (stored in operandTypeMap).
def p_def_operand_types(t):
    'def_operand_types : DEF OPERAND_TYPES CODELIT SEMI'
    # The code literal is the body of a Python dict literal.
    s = 'global operandTypeMap; operandTypeMap = {' + t[3] + '}'
    try:
        exec s
    except Exception, exc:
        error(t.lineno(1),
              'error: %s in def operand_types block "%s".' % (exc, t[3]))
    t[0] = GenCode() # contributes nothing to the output C++ file

# Define the mapping from operand names to operand classes and other
# traits. Stored in operandTraitsMap.
def p_def_operands(t):
    'def_operands : DEF OPERANDS CODELIT SEMI'
    s = 'global operandTraitsMap; operandTraitsMap = {' + t[3] + '}'
    try:
        exec s
    except Exception, exc:
        error(t.lineno(1),
              'error: %s in def operands block "%s".' % (exc, t[3]))
    # Derived per-operand variables depend on the traits map, so
    # recompute them now that it has changed.
    defineDerivedOperandVars()
    t[0] = GenCode() # contributes nothing to the output C++ file
344
# A bitfield definition looks like:
# 'def [signed] bitfield <ID> [<first>:<last>]'
# This generates a preprocessor macro in the output file.
def p_def_bitfield_0(t):
    'def_bitfield : DEF opt_signed BITFIELD ID LESS INTLIT COLON INTLIT GREATER SEMI'
    expr = 'bits(machInst, %2d, %2d)' % (t[6], t[8])
    if (t[2] == 'signed'):
        # sign-extend from the field's width
        expr = 'sext<%d>(%s)' % (t[6] - t[8] + 1, expr)
    hash_define = '#undef %s\n#define %s\t%s\n' % (t[4], t[4], expr)
    t[0] = GenCode(header_output = hash_define)

# alternate form for single bit: 'def [signed] bitfield <ID> [<bit>]'
def p_def_bitfield_1(t):
    'def_bitfield : DEF opt_signed BITFIELD ID LESS INTLIT GREATER SEMI'
    expr = 'bits(machInst, %2d, %2d)' % (t[6], t[6])
    if (t[2] == 'signed'):
        # single-bit field: sign-extend from width 1
        expr = 'sext<%d>(%s)' % (1, expr)
    hash_define = '#undef %s\n#define %s\t%s\n' % (t[4], t[4], expr)
    t[0] = GenCode(header_output = hash_define)

# Optional 'signed' keyword: yields the keyword text or ''.
def p_opt_signed_0(t):
    'opt_signed : SIGNED'
    t[0] = t[1]

def p_opt_signed_1(t):
    'opt_signed : empty'
    t[0] = ''

# Global map variable to hold templates
templateMap = {}

def p_def_template(t):
    'def_template : DEF TEMPLATE ID CODELIT SEMI'
    templateMap[t[3]] = Template(t[4])
    t[0] = GenCode()

# An instruction format definition looks like
# "def format <fmt>(<params>) {{...}};"
def p_def_format(t):
    'def_format : DEF FORMAT ID LPAREN param_list RPAREN CODELIT SEMI'
    (id, params, code) = (t[3], t[5], t[7])
    defFormat(id, params, code, t.lineno(1))
    t[0] = GenCode()

# The formal parameter list for an instruction format is a possibly
# empty list of comma-separated parameters.
def p_param_list_0(t):
    'param_list : empty'
    t[0] = [ ]

def p_param_list_1(t):
    'param_list : param'
    t[0] = [t[1]]

def p_param_list_2(t):
    'param_list : param_list COMMA param'
    t[0] = t[1]
    t[0].append(t[3])

# Each formal parameter is either an identifier or an identifier
# preceded by an asterisk. As in Python, the latter (if present) gets
# a tuple containing all the excess positional arguments, allowing
# varargs functions.
def p_param_0(t):
    'param : ID'
    t[0] = t[1]

def p_param_1(t):
    'param : ASTERISK ID'
    # just concatenate them: '*ID'
    t[0] = t[1] + t[2]
416
417 # End of format definition-related rules.
418 ##############
419
420 #
421 # A decode block looks like:
422 # decode <field1> [, <field2>]* [default <inst>] { ... }
423 #
def p_decode_block(t):
    'decode_block : DECODE ID opt_default LBRACE decode_stmt_list RBRACE'
    # opt_default pushed this block's "default defaults"; pop them now.
    default_defaults = defaultStack.pop()
    codeObj = t[5]
    # use the "default defaults" only if there was no explicit
    # default statement in decode_stmt_list
    if not codeObj.has_decode_default:
        codeObj += default_defaults
    codeObj.wrap_decode_block('switch (%s) {\n' % t[2], '}\n')
    t[0] = codeObj

# The opt_default statement serves only to push the "default defaults"
# onto defaultStack. This value will be used by nested decode blocks,
# and used and popped off when the current decode_block is processed
# (in p_decode_block() above).
def p_opt_default_0(t):
    'opt_default : empty'
    # no default specified: reuse the one currently at the top of the stack
    defaultStack.push(defaultStack.top())
    # no meaningful value returned
    t[0] = None

def p_opt_default_1(t):
    'opt_default : DEFAULT inst'
    # push the new default
    codeObj = t[2]
    codeObj.wrap_decode_block('\ndefault:\n', 'break;\n')
    defaultStack.push(codeObj)
    # no meaningful value returned
    t[0] = None

def p_decode_stmt_list_0(t):
    'decode_stmt_list : decode_stmt'
    t[0] = t[1]

def p_decode_stmt_list_1(t):
    'decode_stmt_list : decode_stmt decode_stmt_list'
    # at most one 'default' case is legal per decode block
    if (t[1].has_decode_default and t[2].has_decode_default):
        error(t.lineno(1), 'Two default cases in decode block')
    t[0] = t[1] + t[2]
464
465 #
466 # Decode statement rules
467 #
468 # There are four types of statements allowed in a decode block:
469 # 1. Format blocks 'format <foo> { ... }'
470 # 2. Nested decode blocks
471 # 3. Instruction definitions.
472 # 4. C preprocessor directives.
473
474
475 # Preprocessor directives found in a decode statement list are passed
476 # through to the output, replicated to all of the output code
477 # streams. This works well for ifdefs, so we can ifdef out both the
478 # declarations and the decode cases generated by an instruction
479 # definition. Handling them as part of the grammar makes it easy to
480 # keep them in the right place with respect to the code generated by
481 # the other statements.
def p_decode_stmt_cpp(t):
    'decode_stmt : CPPDIRECTIVE'
    # Replicate the directive into all four output streams so e.g. an
    # #ifdef guards both the declarations and the decode cases.
    t[0] = GenCode(t[1], t[1], t[1], t[1])

# A format block 'format <foo> { ... }' sets the default instruction
# format used to handle instruction definitions inside the block.
# This format can be overridden by using an explicit format on the
# instruction definition or with a nested format block.
def p_decode_stmt_format(t):
    'decode_stmt : FORMAT push_format_id LBRACE decode_stmt_list RBRACE'
    # The format will be pushed on the stack when 'push_format_id' is
    # processed (see below). Once the parser has recognized the full
    # production (though the right brace), we're done with the format,
    # so now we can pop it.
    formatStack.pop()
    t[0] = t[4]

# This rule exists so we can set the current format (& push the stack)
# when we recognize the format name part of the format block.
def p_push_format_id(t):
    'push_format_id : ID'
    try:
        formatStack.push(formatMap[t[1]])
        t[0] = ('', '// format %s' % t[1])
    except KeyError:
        error(t.lineno(1), 'instruction format "%s" not defined.' % t[1])

# Nested decode block: if the value of the current field matches the
# specified constant, do a nested decode on some other field.
def p_decode_stmt_decode(t):
    'decode_stmt : case_label COLON decode_block'
    label = t[1]
    codeObj = t[3]
    # just wrap the decoding code from the block as a case in the
    # outer switch statement.
    codeObj.wrap_decode_block('\n%s:\n' % label)
    # this statement is the default case iff its label is 'default'
    codeObj.has_decode_default = (label == 'default')
    t[0] = codeObj

# Instruction definition (finally!).
def p_decode_stmt_inst(t):
    'decode_stmt : case_label COLON inst SEMI'
    label = t[1]
    codeObj = t[3]
    codeObj.wrap_decode_block('\n%s:' % label, 'break;\n')
    codeObj.has_decode_default = (label == 'default')
    t[0] = codeObj
529
# The case label is either a list of one or more constants or 'default'
def p_case_label_0(t):
    'case_label : intlit_list'
    # Emit e.g. 'case 0x0: case 0x1f' for a multi-constant label.
    t[0] = ': '.join(['case %#x' % val for val in t[1]])

def p_case_label_1(t):
    'case_label : DEFAULT'
    t[0] = 'default'
538
#
# The constant list for a decode case label must be non-empty, but may have
# one or more comma-separated integer literals in it.
#
def p_intlit_list_0(t):
    'intlit_list : INTLIT'
    # base case: singleton list
    t[0] = [t[1]]

def p_intlit_list_1(t):
    'intlit_list : intlit_list COMMA INTLIT'
    # extend the already-built list in place
    values = t[1]
    values.append(t[3])
    t[0] = values
551
# Define an instruction using the current instruction format (specified
# by an enclosing format block).
# "<mnemonic>(<args>)"
def p_inst_0(t):
    'inst : ID LPAREN arg_list RPAREN'
    # Pass the ID and arg list to the current format class to deal with.
    currentFormat = formatStack.top()
    codeObj = currentFormat.defineInst(t[1], t[3], t.lineno(1))
    args = ','.join(map(str, t[3]))
    # comment-prefix every line of a multi-line arg string, then strip
    # the prefix from the first line (it sits inside an existing comment)
    args = re.sub('(?m)^', '//', args)
    args = re.sub('^//', '', args)
    comment = '\n// %s::%s(%s)\n' % (currentFormat.id, t[1], args)
    codeObj.prepend_all(comment)
    t[0] = codeObj
566
# Define an instruction using an explicitly specified format:
# "<fmt>::<mnemonic>(<args>)"
def p_inst_1(t):
    'inst : ID DBLCOLON ID LPAREN arg_list RPAREN'
    try:
        format = formatMap[t[1]]
    except KeyError:
        error(t.lineno(1), 'instruction format "%s" not defined.' % t[1])
    codeObj = format.defineInst(t[3], t[5], t.lineno(1))
    # Render the arg list the same way p_inst_0 does (comma-joined),
    # rather than embedding the raw Python list repr in the generated
    # C++ comment.
    args = ','.join(map(str, t[5]))
    comment = '\n// %s::%s(%s)\n' % (t[1], t[3], args)
    codeObj.prepend_all(comment)
    t[0] = codeObj
579
def p_arg_list_0(t):
    'arg_list : empty'
    # no arguments at all
    t[0] = []

def p_arg_list_1(t):
    'arg_list : arg'
    t[0] = [t[1]]

def p_arg_list_2(t):
    'arg_list : arg_list COMMA arg'
    # extend the already-built list in place
    args = t[1]
    args.append(t[3])
    t[0] = args

# An argument is a single token of any literal-ish flavor; its lexer
# value passes through unchanged.
def p_arg(t):
    '''arg : ID
    | INTLIT
    | STRLIT
    | CODELIT'''
    t[0] = t[1]
599
#
# Empty production... use in other rules for readability.
#
def p_empty(t):
    'empty :'
    pass

# Parse error handler. Note that the argument here is the offending
# *token*, not a grammar symbol (hence the need to use t.value)
def p_error(t):
    if t:
        error(t.lineno, "syntax error at '%s'" % t.value)
    else:
        # no offending token available (e.g. unexpected end of input)
        error_bt(0, "unknown syntax error")

# END OF GRAMMAR RULES
#
# Now build the parser.
yacc.yacc()
619
620
621 #####################################################################
622 #
623 # Support Classes
624 #
625 #####################################################################
626
627 ################
628 # CpuModel class
629 #
630 # The CpuModel class encapsulates everything we need to know about a
631 # particular CPU model.
632
class CpuModel:
    # Registry of every CPU model defined; accessible as CpuModel.list.
    list = []

    # Constructor. Automatically adds the new model to CpuModel.list.
    def __init__(self, name, filename, includes, strings):
        self.name = name
        # Output file that receives this model's exec code.
        self.filename = filename
        # Include files needed at the top of that exec file.
        self.includes = includes
        # The 'strings' dict holds all the per-CPU symbols we can
        # substitute into templates etc.
        self.strings = strings
        # Register this model globally.
        CpuModel.list.append(self)
647
# Define CPU models. The following lines should contain the only
# CPU-model-specific information in this file. Note that the ISA
# description itself should have *no* CPU-model-specific content.
CpuModel('SimpleCPU', 'simple_cpu_exec.cc',
         '#include "cpu/simple/cpu.hh"',
         { 'CPU_exec_context': 'SimpleCPU' })
CpuModel('FastCPU', 'fast_cpu_exec.cc',
         '#include "cpu/fast/cpu.hh"',
         { 'CPU_exec_context': 'FastCPU' })
CpuModel('FullCPU', 'full_cpu_exec.cc',
         '#include "encumbered/cpu/full/dyn_inst.hh"',
         { 'CPU_exec_context': 'DynInst' })
CpuModel('AlphaFullCPU', 'alpha_o3_exec.cc',
         '#include "cpu/o3/alpha_dyn_inst.hh"',
         { 'CPU_exec_context': 'AlphaDynInst<AlphaSimpleImpl>' })
663
# Expand template with CPU-specific references into a dictionary with
# an entry for each CPU model name. The entry key is the model name
# and the corresponding value is the template with the CPU-specific
# refs substituted for that model.
def expand_cpu_symbols_to_dict(template):
    # Protect '%'s that don't go with CPU-specific terms:
    # only '%(CPU_...)' survives as a substitution point.
    t = re.sub(r'%(?!\(CPU_)', '%%', template)
    result = {}
    for cpu in CpuModel.list:
        result[cpu.name] = t % cpu.strings
    return result
675
# *If* the template has CPU-specific references, return a single
# string containing a copy of the template for each CPU model with the
# corresponding values substituted in. If the template has no
# CPU-specific references, it is returned unmodified.
def expand_cpu_symbols_to_string(template):
    if template.find('%(CPU_') != -1:
        # Concatenate the per-model expansions into one string.
        # (''.join replaces the old quadratic reduce(lambda x,y: x+y, ...)
        # spelling and works identically.)
        return ''.join(expand_cpu_symbols_to_dict(template).values())
    else:
        return template
686
# Protect CPU-specific references by doubling the corresponding '%'s
# (in preparation for substituting a different set of references into
# the template).
def protect_cpu_symbols(template):
    cpu_ref = re.compile(r'%(?=\(CPU_)')
    return cpu_ref.sub('%%', template)
692
693 ###############
694 # GenCode class
695 #
696 # The GenCode class encapsulates generated code destined for various
697 # output files. The header_output and decoder_output attributes are
698 # strings containing code destined for decoder.hh and decoder.cc
699 # respectively. The decode_block attribute contains code to be
700 # incorporated in the decode function itself (that will also end up in
701 # decoder.cc). The exec_output attribute is a dictionary with a key
702 # for each CPU model name; the value associated with a particular key
703 # is the string of code for that CPU model's exec.cc file. The
704 # has_decode_default attribute is used in the decode block to allow
705 # explicit default clauses to override default default clauses.
706
class GenCode:
    # Constructor. At this point we substitute out all CPU-specific
    # symbols. For the exec output, these go into the per-model
    # dictionary. For all other output types they get collapsed into
    # a single string.
    def __init__(self,
                 header_output = '', decoder_output = '', exec_output = '',
                 decode_block = '', has_decode_default = False):
        self.header_output = expand_cpu_symbols_to_string(header_output)
        self.decoder_output = expand_cpu_symbols_to_string(decoder_output)
        if isinstance(exec_output, dict):
            # already a per-CPU-model dictionary: use as-is
            self.exec_output = exec_output
        elif isinstance(exec_output, str):
            # If the exec_output arg is a single string, we replicate
            # it for each of the CPU models, substituting and
            # %(CPU_foo)s params appropriately.
            self.exec_output = expand_cpu_symbols_to_dict(exec_output)
        self.decode_block = expand_cpu_symbols_to_string(decode_block)
        self.has_decode_default = has_decode_default

    # Override '+' operator: generate a new GenCode object that
    # concatenates all the individual strings in the operands.
    def __add__(self, other):
        exec_output = {}
        for cpu in CpuModel.list:
            n = cpu.name
            exec_output[n] = self.exec_output[n] + other.exec_output[n]
        return GenCode(self.header_output + other.header_output,
                       self.decoder_output + other.decoder_output,
                       exec_output,
                       self.decode_block + other.decode_block,
                       self.has_decode_default or other.has_decode_default)

    # Prepend a string (typically a comment) to all the strings.
    def prepend_all(self, pre):
        self.header_output = pre + self.header_output
        self.decoder_output = pre + self.decoder_output
        self.decode_block = pre + self.decode_block
        for cpu in CpuModel.list:
            self.exec_output[cpu.name] = pre + self.exec_output[cpu.name]

    # Wrap the decode block in a pair of strings (e.g., 'case foo:'
    # and 'break;'). Used to build the big nested switch statement.
    def wrap_decode_block(self, pre, post = ''):
        self.decode_block = pre + indent(self.decode_block) + post
752
753 ################
754 # Format object.
755 #
756 # A format object encapsulates an instruction format. It must provide
757 # a defineInst() method that generates the code for an instruction
758 # definition.
759
class Format:
    def __init__(self, id, params, code):
        # constructor: just save away arguments
        self.id = id
        self.params = params
        label = 'def format ' + id
        # compile the user's format body once, for reuse on every inst
        self.user_code = compile(fixPythonIndentation(code), label, 'exec')
        param_list = string.join(params, ", ")
        # Build a wrapper function whose signature carries the format's
        # declared parameters; it runs the user code in the supplied
        # context and hands back the locals it produced (which include
        # header_output, decode_block, etc.).
        f = '''def defInst(_code, _context, %s):
    my_locals = vars().copy()
    exec _code in _context, my_locals
    return my_locals\n''' % param_list
        c = compile(f, label + ' wrapper', 'exec')
        exec c
        self.func = defInst

    def defineInst(self, name, args, lineno):
        context = {}
        updateExportContext()
        context.update(exportContext)
        # 'name'/'Name' expose the mnemonic in plain and capitalized form
        context.update({ 'name': name, 'Name': string.capitalize(name) })
        try:
            vars = self.func(self.user_code, context, *args)
        except Exception, exc:
            error(lineno, 'error defining "%s": %s.' % (name, exc))
        for k in vars.keys():
            # keep only the four keyword args GenCode's constructor takes
            if k not in ('header_output', 'decoder_output',
                         'exec_output', 'decode_block'):
                del vars[k]
        return GenCode(**vars)
790
# Special null format to catch an implicit-format instruction
# definition outside of any format block.
class NoFormat:
    def __init__(self):
        # nothing is ever defined under the null format
        self.defaultInst = ''

    def defineInst(self, name, args, lineno):
        # any attempt to define an instruction here is a user error
        error(lineno,
              'instruction definition "%s" with no active format!' % name)
800
# This dictionary maps format name strings to Format objects.
formatMap = {}

# Define a new format, checking that the name is not already taken.
def defFormat(id, params, code, lineno):
    # make sure we haven't already defined this one
    # (idiomatic membership test instead of .get(id, None) != None)
    if id in formatMap:
        error(lineno, 'format %s redefined.' % id)
    # create new object and store in global map
    formatMap[id] = Format(id, params, code)
811
812
813 ##############
814 # Stack: a simple stack object. Used for both formats (formatStack)
815 # and default cases (defaultStack). Simply wraps a list to give more
816 # stack-like syntax and enable initialization with an argument list
817 # (as opposed to an argument that's a list).
818
class Stack(list):
    # A list dressed up with stack-style push/top; the constructor
    # takes its initial items as direct arguments rather than a list.
    def __init__(self, *items):
        list.__init__(self, items)

    def push(self, item):
        self.append(item)

    def top(self):
        return self[-1]
828
# The global format stack.  Seeded with a NoFormat sentinel so an
# instruction defined outside any format block yields a clean error.
formatStack = Stack(NoFormat())

# The global default case stack.
defaultStack = Stack( None )
834
835 ###################
836 # Utility functions
837
#
# Indent every line in string 's' by two spaces
# (except preprocessor directives).
# Used to make nested code blocks look pretty.
#
def indent(s):
    # Work line-by-line instead of with a multiline regexp: prefix each
    # line with two spaces unless it starts with '#' (a preprocessor
    # directive, which must stay at column zero).
    shifted = []
    for piece in s.split('\n'):
        if piece.startswith('#'):
            shifted.append(piece)
        else:
            shifted.append('  ' + piece)
    return '\n'.join(shifted)
845
#
# Munge a somewhat arbitrarily formatted piece of Python code
# (e.g. from a format 'let' block) into something whose indentation
# will get by the Python parser.
#
# Python rejects leading whitespace on the first line, and all lines
# at the same lexical nesting level must share one indentation.  Code
# literals tend to arrive with some uniform initial indent; rather
# than measuring and stripping it, we prepend 'if 1:' so the whole
# block becomes the nested body of the if (and the parser handles the
# indentation for us).  Skipped when the block is empty or its first
# line is already flush left.
#
def fixPythonIndentation(s):
    # discard blank (whitespace-only) lines first
    s = re.sub(r'(?m)^\s*\n', '', s)
    # if the remaining code starts with whitespace, nest it under a
    # trivial 'if' so the uniform indent becomes legal
    if s and re.match(r'[ \t]', s[0]):
        s = 'if 1:\n' + s
    return s
869
# Error handler. Just call exit. Output formatted to work under
# Emacs compile-mode.
def error(lineno, string):
    global fileNameStack
    # Print the '##newfile' include chain (outermost first) so the
    # message points at the right source file.
    spaces = ""
    for (filename, line) in fileNameStack[0:-1]:
        print spaces + "In file included from " + filename
        spaces += " "
    sys.exit(spaces + "%s:%d: %s" % (fileNameStack[-1][0], lineno, string))

# Like error(), but include a Python stack backtrace (for processing
# Python exceptions).
def error_bt(lineno, string):
    traceback.print_exc()
    print >> sys.stderr, "%s:%d: %s" % (input_filename, lineno, string)
    sys.exit(1)
886
887
888 #####################################################################
889 #
890 # Bitfield Operator Support
891 #
892 #####################################################################
893
# Single-index bitfield selector.  NOTE(review): this regexp matches
# the '<n:>' spelling (colon required), not the bare '<n>' form the
# comment in substBitOps() describes -- confirm which is intended.
bitOp1ArgRE = re.compile(r'<\s*(\w+)\s*:\s*>')

# Two-index selector applied directly to a (possibly dotted) name.
bitOpWordRE = re.compile(r'(?<![\w\.])([\w\.]+)<\s*(\w+)\s*:\s*(\w+)\s*>')
# Two-index selector applied to a parenthesized expression.
bitOpExprRE = re.compile(r'\)<\s*(\w+)\s*:\s*(\w+)\s*>')

def substBitOps(code):
    # first convert single-bit selectors to two-index form
    # i.e., <n> --> <n:n>
    code = bitOp1ArgRE.sub(r'<\1:\1>', code)
    # simple case: selector applied to ID (name)
    # i.e., foo<a:b> --> bits(foo, a, b)
    code = bitOpWordRE.sub(r'bits(\1, \2, \3)', code)
    # if selector is applied to expression (ending in ')'),
    # we need to search backward for matching '('
    match = bitOpExprRE.search(code)
    while match:
        exprEnd = match.start()
        here = exprEnd - 1
        nestLevel = 1
        # scan leftward until the parentheses balance
        while nestLevel > 0:
            if code[here] == '(':
                nestLevel -= 1
            elif code[here] == ')':
                nestLevel += 1
            here -= 1
            if here < 0:
                sys.exit("Didn't find '('!")
        exprStart = here+1
        newExpr = r'bits(%s, %s, %s)' % (code[exprStart:exprEnd+1],
                                         match.group(1), match.group(2))
        code = code[:exprStart] + newExpr + code[match.end():]
        # rescan: the rewrite may expose further expression selectors
        match = bitOpExprRE.search(code)
    return code
927
928
929 ####################
930 # Template objects.
931 #
932 # Template objects are format strings that allow substitution from
933 # the attribute spaces of other objects (e.g. InstObjParams instances).
934
935 class Template:
936 def __init__(self, t):
937 self.template = t
938
939 def subst(self, d):
940 # Start with the template namespace. Make a copy since we're
941 # going to modify it.
942 myDict = templateMap.copy()
943 # if the argument is a dictionary, we just use it.
944 if isinstance(d, dict):
945 myDict.update(d)
946 # if the argument is an object, we use its attribute map.
947 elif hasattr(d, '__dict__'):
948 myDict.update(d.__dict__)
949 else:
950 raise TypeError, "Template.subst() arg must be or have dictionary"
951 # Protect non-Python-dict substitutions (e.g. if there's a printf
952 # in the templated C++ code)
953 template = protect_non_subst_percents(self.template)
954 # CPU-model-specific substitutions are handled later (in GenCode).
955 template = protect_cpu_symbols(template)
956 return template % myDict
957
958 # Convert to string. This handles the case when a template with a
959 # CPU-specific term gets interpolated into another template or into
960 # an output block.
961 def __str__(self):
962 return expand_cpu_symbols_to_string(self.template)
963
964 #####################################################################
965 #
966 # Code Parser
967 #
968 # The remaining code is the support for automatically extracting
969 # instruction characteristics from pseudocode.
970 #
971 #####################################################################
972
973 # Force the argument to be a list
def makeList(list_or_item):
    """Force the argument into list form.

    Falsy values (None, '', 0, []) yield the empty list; an existing
    list is returned unchanged; anything else is wrapped in a
    one-element list.
    """
    if not list_or_item:
        return []
    elif isinstance(list_or_item, list):
        # isinstance() is preferred over 'type() == ListType' and also
        # accepts list subclasses; works on both old and new Pythons
        return list_or_item
    else:
        return [ list_or_item ]
981
982 # generate operandSizeMap based on provided operandTypeMap:
983 # basically generate equiv. C++ type and make is_signed flag
def buildOperandSizeMap():
    """Derive global operandSizeMap from the ISA's operandTypeMap.

    For each operand type extension, records a (size, C++ type,
    is_signed) triple, e.g. ('signed int', 32) -> (32, 'int32_t', 1).
    Calls error() on an unrecognized type description.
    """
    global operandSizeMap
    operandSizeMap = {}
    for ext in operandTypeMap.keys():
        (desc, size) = operandTypeMap[ext]
        # BUG FIX: 'type' must be initialized, otherwise the sanity
        # check below referenced an unbound local on an unrecognized
        # description (NameError instead of the intended error message)
        type = ''
        is_signed = 0
        if desc == 'signed int':
            type = 'int%d_t' % size
            is_signed = 1
        elif desc == 'unsigned int':
            type = 'uint%d_t' % size
            is_signed = 0
        elif desc == 'float':
            is_signed = 1	# shouldn't really matter
            if size == 32:
                type = 'float'
            elif size == 64:
                type = 'double'
        if type == '':
            # BUG FIX: the old message never interpolated '%s'
            error(0, 'Unrecognized type description "%s" in operandTypeMap'
                  % desc)
        operandSizeMap[ext] = (size, type, is_signed)
1004
1005 #
1006 # Base class for operand traits. An instance of this class (or actually
1007 # a class derived from this one) encapsulates the traits of a particular
1008 # operand type (e.g., "32-bit integer register").
1009 #
class OperandTraits:
    """Encapsulates the traits of a particular operand type
    (e.g., "32-bit integer register").

    Attributes:
      dflt_ext -- default size/type extension (key into operandSizeMap)
      reg_spec -- expression giving the register specifier
      flags    -- canonical (always, source-only, dest-only) flag lists
      sort_pri -- sort priority used to order operands
    """
    def __init__(self, dflt_ext, reg_spec, flags, sort_pri):
        # Force construction of operandSizeMap from operandTypeMap
        # if it hasn't happened yet ('in' instead of deprecated has_key)
        if 'operandSizeMap' not in globals():
            buildOperandSizeMap()
        self.dflt_ext = dflt_ext
        (self.dflt_size, self.dflt_type, self.dflt_is_signed) = \
                         operandSizeMap[dflt_ext]
        self.reg_spec = reg_spec
        # Canonical flag structure is a triple of lists, where each list
        # indicates the set of flags implied by this operand always, when
        # used as a source, and when used as a dest, respectively.
        # For simplicity this can be initialized using a variety of fairly
        # obvious shortcuts; we convert these to canonical form here.
        if not flags:
            # no flags specified (e.g., 'None')
            self.flags = ( [], [], [] )
        elif isinstance(flags, str):
            # a single flag: assumed to be unconditional
            self.flags = ( [ flags ], [], [] )
        elif isinstance(flags, list):
            # a list of flags: also assumed to be unconditional
            self.flags = ( flags, [], [] )
        elif isinstance(flags, tuple):
            # it's a tuple: it should be a triple,
            # but each item could be a single string or a list
            (uncond_flags, src_flags, dest_flags) = flags
            self.flags = (makeList(uncond_flags),
                          makeList(src_flags), makeList(dest_flags))
        else:
            # anything else is a coding error in the ISA description;
            # the old code silently left self.flags unset here and
            # failed obscurely later
            error(0, 'OperandTraits: invalid flags spec "%s"' % str(flags))
        self.sort_pri = sort_pri

    def isMem(self):
        return 0

    def isReg(self):
        return 0

    def isFloatReg(self):
        return 0

    def isIntReg(self):
        return 0

    def isControlReg(self):
        return 0

    def getFlags(self, op_desc):
        """Return the flags implied by operand 'op_desc', taking its
        source/dest usage into account."""
        # note the empty slice '[:]' gives us a copy of self.flags[0]
        # instead of a reference to it
        my_flags = self.flags[0][:]
        if op_desc.is_src:
            my_flags += self.flags[1]
        if op_desc.is_dest:
            my_flags += self.flags[2]
        return my_flags

    def makeDecl(self, op_desc):
        """Generate the C++ declaration for this operand's variable."""
        (size, type, is_signed) = operandSizeMap[op_desc.eff_ext]
        # Note that initializations in the declarations are solely
        # to avoid 'uninitialized variable' errors from the compiler.
        return type + ' ' + op_desc.munged_name + ' = 0;\n';
1072
class IntRegOperandTraits(OperandTraits):
    """Traits for integer register operands."""

    def isReg(self):
        return 1

    def isIntReg(self):
        return 1

    def makeConstructor(self, op_desc):
        """Emit constructor code recording src/dest register indices."""
        code = ''
        if op_desc.is_src:
            code += '\n\t_srcRegIdx[%d] = %s;' % \
                    (op_desc.src_reg_idx, self.reg_spec)
        if op_desc.is_dest:
            code += '\n\t_destRegIdx[%d] = %s;' % \
                    (op_desc.dest_reg_idx, self.reg_spec)
        return code

    def makeRead(self, op_desc):
        """Emit C++ reading the register into the operand variable."""
        (size, type, is_signed) = operandSizeMap[op_desc.eff_ext]
        if type in ('float', 'double'):
            error(0, 'Attempt to read integer register as FP')
        if size == self.dflt_size:
            return '%s = xc->readIntReg(this, %d);\n' % \
                   (op_desc.munged_name, op_desc.src_reg_idx)
        # narrower than the full register: mask down to 'size' bits
        return '%s = bits(xc->readIntReg(this, %d), %d, 0);\n' % \
               (op_desc.munged_name, op_desc.src_reg_idx, size-1)

    def makeWrite(self, op_desc):
        """Emit C++ writing the operand variable back to the register."""
        (size, type, is_signed) = operandSizeMap[op_desc.eff_ext]
        if type in ('float', 'double'):
            error(0, 'Attempt to write integer register as FP')
        if size != self.dflt_size and is_signed:
            # sign-extend narrow signed values to full register width
            final_val = 'sext<%d>(%s)' % (size, op_desc.munged_name)
        else:
            final_val = op_desc.munged_name
        return '''
{
%s final_val = %s;
xc->setIntReg(this, %d, final_val);\n
if (traceData) { traceData->setData(final_val); }
}''' % (self.dflt_type, final_val, op_desc.dest_reg_idx)
1116
class FloatRegOperandTraits(OperandTraits):
    """Traits for floating-point register operands."""

    def isReg(self):
        return 1

    def isFloatReg(self):
        return 1

    def makeConstructor(self, op_desc):
        """Emit constructor code recording src/dest register indices
        (offset by FP_Base_DepTag into the unified register space)."""
        code = ''
        if op_desc.is_src:
            code += '\n\t_srcRegIdx[%d] = %s + FP_Base_DepTag;' % \
                    (op_desc.src_reg_idx, self.reg_spec)
        if op_desc.is_dest:
            code += '\n\t_destRegIdx[%d] = %s + FP_Base_DepTag;' % \
                    (op_desc.dest_reg_idx, self.reg_spec)
        return code

    def makeRead(self, op_desc):
        """Emit C++ reading the FP register via the accessor matching
        the operand's effective type."""
        (size, type, is_signed) = operandSizeMap[op_desc.eff_ext]
        if type == 'float':
            accessor = 'readFloatRegSingle'
        elif type == 'double':
            accessor = 'readFloatRegDouble'
        else:
            accessor = 'readFloatRegInt'
        base = 'xc->%s(this, %d)' % \
               (accessor, op_desc.src_reg_idx)
        # only narrow *integer* views get masked down to 'size' bits
        if type not in ('float', 'double') and size != self.dflt_size:
            return '%s = bits(%s, %d, 0);\n' % \
                   (op_desc.munged_name, base, size-1)
        return '%s = %s;\n' % (op_desc.munged_name, base)

    def makeWrite(self, op_desc):
        """Emit C++ writing the operand variable back to the FP register."""
        (size, type, is_signed) = operandSizeMap[op_desc.eff_ext]
        final_val = op_desc.munged_name
        if type == 'float':
            accessor = 'setFloatRegSingle'
        elif type == 'double':
            accessor = 'setFloatRegDouble'
        else:
            accessor = 'setFloatRegInt'
            # integer view writes at full register width
            type = 'uint%d_t' % self.dflt_size
            if size != self.dflt_size and is_signed:
                # sign-extend narrow signed values first
                final_val = 'sext<%d>(%s)' % (size, op_desc.munged_name)
        return '''
{
%s final_val = %s;
xc->%s(this, %d, final_val);\n
if (traceData) { traceData->setData(final_val); }
}''' % (type, final_val, accessor, op_desc.dest_reg_idx)
1172
class ControlRegOperandTraits(OperandTraits):
    """Traits for control (special-purpose) register operands."""

    def isReg(self):
        return 1

    def isControlReg(self):
        return 1

    def makeConstructor(self, op_desc):
        """Emit constructor code recording src/dest register indices
        (control registers live at <reg_spec>_DepTag)."""
        code = ''
        if op_desc.is_src:
            code += '\n\t_srcRegIdx[%d] = %s_DepTag;' % \
                    (op_desc.src_reg_idx, self.reg_spec)
        if op_desc.is_dest:
            code += '\n\t_destRegIdx[%d] = %s_DepTag;' % \
                    (op_desc.dest_reg_idx, self.reg_spec)
        return code

    def makeRead(self, op_desc):
        """Emit C++ reading the control register via xc->read<Reg>()."""
        (size, type, is_signed) = operandSizeMap[op_desc.eff_ext]
        if type in ('float', 'double'):
            error(0, 'Attempt to read control register as FP')
        base = 'xc->read%s()' % self.reg_spec
        if size != self.dflt_size:
            # narrower than the register: mask down to 'size' bits
            return '%s = bits(%s, %d, 0);\n' % \
                   (op_desc.munged_name, base, size-1)
        return '%s = %s;\n' % (op_desc.munged_name, base)

    def makeWrite(self, op_desc):
        """Emit C++ writing the control register via xc->set<Reg>()."""
        (size, type, is_signed) = operandSizeMap[op_desc.eff_ext]
        if type in ('float', 'double'):
            error(0, 'Attempt to write control register as FP')
        code = 'xc->set%s(%s);\n' % (self.reg_spec, op_desc.munged_name)
        code += 'if (traceData) { traceData->setData(%s); }' % \
                op_desc.munged_name
        return code
1210
class MemOperandTraits(OperandTraits):
    """Traits for the memory operand of load/store instructions."""

    def isMem(self):
        return 1

    def makeConstructor(self, op_desc):
        # memory operands occupy no register indices
        return ''

    def makeDecl(self, op_desc):
        """Emit declarations for the data variable, the access-flags
        variable, and (for stores) the write-result variable."""
        (size, type, is_signed) = operandSizeMap[op_desc.eff_ext]
        # Note that initializations in the declarations are solely
        # to avoid 'uninitialized variable' errors from the compiler.
        decls = '%s %s = 0;\n' % (type, op_desc.munged_name)
        # per-access memory flags variable
        decls += 'unsigned %s_flags = memAccessFlags;\n' % op_desc.base_name
        if op_desc.is_dest:
            # a store also needs somewhere to put the write result code
            decls += 'uint64_t %s_write_result = 0;\n' % op_desc.base_name
        return decls

    def makeRead(self, op_desc):
        """Emit the xc->read() call for a load."""
        (size, type, is_signed) = operandSizeMap[op_desc.eff_ext]
        eff_type = 'uint%d_t' % size
        return 'fault = xc->read(EA, (%s&)%s, %s_flags);\n' \
               % (eff_type, op_desc.munged_name, op_desc.base_name)

    def makeWrite(self, op_desc):
        """Emit the xc->write() call for a store, plus trace hook."""
        (size, type, is_signed) = operandSizeMap[op_desc.eff_ext]
        eff_type = 'uint%d_t' % size
        code = 'fault = xc->write((%s&)%s, EA, %s_flags, &%s_write_result);\n' \
               % (eff_type, op_desc.munged_name, op_desc.base_name,
                  op_desc.base_name)
        code += 'if (traceData) { traceData->setData(%s); }' % \
                op_desc.munged_name
        return code
1248
class NPCOperandTraits(OperandTraits):
    """Traits for the next-PC (NPC) pseudo-operand."""
    def makeConstructor(self, op_desc):
        # pseudo-operand: no register indices to record
        return ''

    def makeRead(self, op_desc):
        # NPC reads as PC + 4 (presumably fixed 4-byte instruction
        # words -- confirm for the target ISA)
        return '%s = xc->readPC() + 4;\n' % op_desc.munged_name

    def makeWrite(self, op_desc):
        # writing NPC redirects control flow via the execution context
        return 'xc->setNextPC(%s);\n' % op_desc.munged_name
1258
1259
# Names exported into the namespace in which embedded code from the
# ISA description (e.g. 'let' blocks) is executed.
exportContextSymbols = ('IntRegOperandTraits', 'FloatRegOperandTraits',
                        'ControlRegOperandTraits', 'MemOperandTraits',
                        'NPCOperandTraits', 'InstObjParams', 'CodeBlock',
                        're', 'string')

# Execution context (globals dict) for that embedded code;
# (re)populated by updateExportContext().
exportContext = {}
1266
def updateExportContext():
    """Refresh exportContext with the exported symbols and all
    currently defined templates."""
    exportContext.update(exportDict(*exportContextSymbols))
    exportContext.update(templateMap)
1270
1271
def exportDict(*symNames):
    """Return a dict mapping each given name to its value, evaluated
    in this module's namespace."""
    result = {}
    for sym in symNames:
        result[sym] = eval(sym)
    return result
1274
1275
1276 #
1277 # Define operand variables that get derived from the basic declaration
1278 # of ISA-specific operands in operandTraitsMap. This function must be
1279 # called by the ISA description file explicitly after defining
1280 # operandTraitsMap (in a 'let' block).
1281 #
def defineDerivedOperandVars():
    """Build the global operand-matching regexps from operandTraitsMap.

    Must be called explicitly by the ISA description (in a 'let'
    block) after it defines operandTraitsMap.
    """
    global operands
    operands = operandTraitsMap.keys()

    # '|'.join() replaces the deprecated string.join(list, sep) form
    # (same result, but also works on modern Pythons)
    operandsREString = (r'''
(?<![\w\.])		# neg. lookbehind assertion: prevent partial matches
((%s)(?:\.(\w+))?)	# match: operand with optional '.' then suffix
(?![\w\.])		# neg. lookahead assertion: prevent partial matches
'''
                        % '|'.join(operands))

    global operandsRE
    operandsRE = re.compile(operandsREString, re.MULTILINE|re.VERBOSE)

    # Same as operandsREString, but extension is mandatory, and only two
    # groups are returned (base and ext, not full name as above).
    # Used for substituting '_' for '.' to make C++ identifiers.
    operandsWithExtREString = (r'(?<![\w\.])(%s)\.(\w+)(?![\w\.])'
                               % '|'.join(operands))

    global operandsWithExtRE
    operandsWithExtRE = re.compile(operandsWithExtREString, re.MULTILINE)
1304
1305
1306 #
1307 # Operand descriptor class. An instance of this class represents
1308 # a specific operand for a code block.
1309 #
class OperandDescriptor:
    """Represents one specific operand occurrence within a code block."""

    def __init__(self, full_name, base_name, ext, is_src, is_dest):
        self.full_name = full_name
        self.base_name = base_name
        self.ext = ext
        self.is_src = is_src
        self.is_dest = is_dest
        self.traits = operandTraitsMap[base_name]
        # The 'effective extension' (eff_ext) is either the actual
        # extension, if one was explicitly provided, or the default.
        # The 'munged name' replaces the '.' between the base and
        # extension (if any) with a '_' to make a legal C++ variable name.
        if not ext:
            self.eff_ext = self.traits.dflt_ext
            self.munged_name = base_name
        else:
            self.eff_ext = ext
            self.munged_name = base_name + '_' + ext

    def finalize(self):
        """Fill in the code fields (flags, constructor, declaration,
        read/write snippets).

        Done separately from __init__() because some of these fields
        depend on the register-index enumeration that hasn't been
        performed yet at construction time.
        """
        self.flags = self.traits.getFlags(self)
        self.constructor = self.traits.makeConstructor(self)
        self.op_decl = self.traits.makeDecl(self)

        # read code only for sources, writeback code only for dests
        self.op_rd = ''
        self.op_wb = ''
        if self.is_src:
            self.op_rd = self.traits.makeRead(self)
        if self.is_dest:
            self.op_wb = self.traits.makeWrite(self)
1347
class OperandDescriptorList:
    """Ordered collection of OperandDescriptor objects, indexable both
    by position and by operand base name."""

    def __init__(self):
        self.items = []		# descriptors in append/sort order
        self.bases = {}		# base_name -> descriptor

    def __len__(self):
        return len(self.items)

    def __getitem__(self, index):
        return self.items[index]

    def append(self, op_desc):
        self.items.append(op_desc)
        self.bases[op_desc.base_name] = op_desc

    def find_base(self, base_name):
        # like self.bases[base_name], but returns None if not found
        # (rather than raising exception)
        return self.bases.get(base_name)

    # internal helper function for concat[Some]Attr{Strings|Lists}
    def __internalConcatAttrs(self, attr_name, pred, result):
        for op_desc in self.items:
            if pred(op_desc):
                result += getattr(op_desc, attr_name)
        return result

    # return a single string that is the concatenation of the (string)
    # values of the specified attribute for all operands
    def concatAttrStrings(self, attr_name):
        return self.__internalConcatAttrs(attr_name, lambda x: 1, '')

    # like concatAttrStrings, but only include the values for the operands
    # for which the provided filter function returns true
    def concatSomeAttrStrings(self, filter, attr_name):
        return self.__internalConcatAttrs(attr_name, filter, '')

    # return a single list that is the concatenation of the (list)
    # values of the specified attribute for all operands
    def concatAttrLists(self, attr_name):
        return self.__internalConcatAttrs(attr_name, lambda x: 1, [])

    # like concatAttrLists, but only include the values for the operands
    # for which the provided filter function returns true
    def concatSomeAttrLists(self, filter, attr_name):
        return self.__internalConcatAttrs(attr_name, filter, [])

    def sort(self):
        # Sort ascending by operand priority.  'key=' is equivalent to
        # the old Python-2-only cmp-function form
        # (lambda a, b: a.traits.sort_pri - b.traits.sort_pri)
        # and also works on Python 3.
        self.items.sort(key=lambda op: op.traits.sort_pri)
1397
# Regular expression object to match C++ line comments
# (used in findOperands() to strip them before scanning)
commentRE = re.compile(r'//.*\n')

# Regular expression object to match assignment statements (used in
# findOperands()).  The (?!=) lookahead keeps '==' comparisons from
# being mistaken for assignments.
assignRE = re.compile(r'\s*=(?!=)', re.MULTILINE)
1405
1406 #
1407 # Find all the operands in the given code block. Returns an operand
1408 # descriptor list (instance of class OperandDescriptorList).
1409 #
def findOperands(code):
    """Find all ISA operands referenced in 'code' and return a
    populated OperandDescriptorList.

    An operand occurrence followed by an assignment operator is
    classified as a destination; any other occurrence is a source.
    Also numbers the source/dest register operands and records the
    totals (numSrcRegs, numDestRegs, numFPDestRegs, numIntDestRegs)
    as attributes on the returned list.
    """
    operands = OperandDescriptorList()
    # delete comments so we don't accidentally match on reg specifiers inside
    code = commentRE.sub('', code)
    # search for operands
    next_pos = 0
    while 1:
        match = operandsRE.search(code, next_pos)
        if not match:
            # no more matches: we're done
            break
        op = match.groups()
        # regexp groups are operand full name, base, and extension
        (op_full, op_base, op_ext) = op
        # if the token following the operand is an assignment, this is
        # a destination (LHS), else it's a source (RHS)
        is_dest = (assignRE.match(code, match.end()) != None)
        is_src = not is_dest
        # see if we've already seen this one
        op_desc = operands.find_base(op_base)
        if op_desc:
            # repeated reference: extensions must agree; merge usage
            if op_desc.ext != op_ext:
                error(0, 'Inconsistent extensions for operand %s' % op_base)
            op_desc.is_src = op_desc.is_src or is_src
            op_desc.is_dest = op_desc.is_dest or is_dest
        else:
            # new operand: create new descriptor
            op_desc = OperandDescriptor(op_full, op_base, op_ext,
                                        is_src, is_dest)
            operands.append(op_desc)
        # start next search after end of current match
        next_pos = match.end()
    operands.sort()
    # enumerate source & dest register operands... used in building
    # constructor later
    srcRegs = 0
    destRegs = 0
    operands.numFPDestRegs = 0
    operands.numIntDestRegs = 0
    for op_desc in operands:
        if op_desc.traits.isReg():
            if op_desc.is_src:
                op_desc.src_reg_idx = srcRegs
                srcRegs += 1
            if op_desc.is_dest:
                op_desc.dest_reg_idx = destRegs
                destRegs += 1
                # FP/int dest counts feed _numFPDestRegs etc.
                if op_desc.traits.isFloatReg():
                    operands.numFPDestRegs += 1
                elif op_desc.traits.isIntReg():
                    operands.numIntDestRegs += 1
    operands.numSrcRegs = srcRegs
    operands.numDestRegs = destRegs
    # now make a final pass to finalize op_desc fields that may depend
    # on the register enumeration
    for op_desc in operands:
        op_desc.finalize()
    return operands
1468
1469 # Munge operand names in code string to make legal C++ variable names.
1470 # (Will match munged_name attribute of OperandDescriptor object.)
def substMungedOpNames(code):
    """Replace 'Base.ext' operand references in 'code' with their
    munged 'Base_ext' names so the result is a legal C++ identifier
    (matches the munged_name attribute of OperandDescriptor)."""
    return operandsWithExtRE.sub(r'\1_\2', code)
1473
def joinLists(t):
    """Join each sequence of strings in 't' with single spaces,
    returning a list of strings.

    Equivalent to the old map(string.join, t), but avoids the
    deprecated string-module function so it also works on modern
    Pythons (and always returns a real list).
    """
    return [' '.join(x) for x in t]
1476
def makeFlagConstructor(flag_list):
    """Generate constructor code setting each StaticInst flag in
    'flag_list', one "flags[F] = true;" statement per flag.

    Sorts and de-duplicates flag_list *in place* (the slice
    assignment preserves the caller's list object, matching the old
    behavior of mutating the argument).
    """
    if not flag_list:
        return ''
    # filter out repeated flags; sorted(set(...)) is equivalent to the
    # old sort-then-delete-adjacent-duplicates loop
    flag_list[:] = sorted(set(flag_list))
    pre = '\n\tflags['
    post = '] = true;'
    return pre + (post + pre).join(flag_list) + post
1492
class CodeBlock:
    """Precomputed code fragments for one pseudocode block.

    Parses the pseudocode for operand references and derives the
    constructor snippet, operand declarations, operand read/write
    code (split by memory vs. non-memory operands), the implied
    flags, and a first guess at the operand class.
    """

    def __init__(self, code):
        self.orig_code = code
        self.operands = findOperands(code)
        self.code = substMungedOpNames(substBitOps(code))

        # constructor: per-operand code plus the register counts
        ctor = self.operands.concatAttrStrings('constructor')
        ctor += '\n\t_numSrcRegs = %d;' % self.operands.numSrcRegs
        ctor += '\n\t_numDestRegs = %d;' % self.operands.numDestRegs
        ctor += '\n\t_numFPDestRegs = %d;' % self.operands.numFPDestRegs
        ctor += '\n\t_numIntDestRegs = %d;' % self.operands.numIntDestRegs
        self.constructor = ctor

        self.op_decl = self.operands.concatAttrStrings('op_decl')

        is_mem = lambda op: op.traits.isMem()
        not_mem = lambda op: not op.traits.isMem()

        self.op_rd = self.operands.concatAttrStrings('op_rd')
        self.op_wb = self.operands.concatAttrStrings('op_wb')
        self.op_mem_rd = self.operands.concatSomeAttrStrings(is_mem, 'op_rd')
        self.op_mem_wb = self.operands.concatSomeAttrStrings(is_mem, 'op_wb')
        self.op_nonmem_rd = self.operands.concatSomeAttrStrings(not_mem,
                                                                'op_rd')
        self.op_nonmem_wb = self.operands.concatSomeAttrStrings(not_mem,
                                                                'op_wb')

        self.flags = self.operands.concatAttrLists('flags')

        # Make a basic guess on the operand class (function unit type).
        # These are good enough for most cases, and will be overridden
        # later otherwise.
        if 'IsStore' in self.flags:
            self.op_class = 'MemWriteOp'
        elif 'IsLoad' in self.flags or 'IsPrefetch' in self.flags:
            self.op_class = 'MemReadOp'
        elif 'IsFloating' in self.flags:
            self.op_class = 'FloatAddOp'
        else:
            self.op_class = 'IntAluOp'
1537
# Assume all instruction flags are of the form 'IsFoo'
# (used to classify InstObjParams optional arguments)
instFlagRE = re.compile(r'Is.*')

# OpClass constants end in 'Op' except No_OpClass
opClassRE = re.compile(r'.*Op|No_OpClass')
1543
class InstObjParams:
    """Parameter bundle handed to templates when generating one
    instruction class.

    Copies all attributes from the given CodeBlock (if any) onto
    itself, then folds in the optional arguments, which may be
    StaticInst flags ('IsFoo') or an OpClass value ('...Op' or
    'No_OpClass').
    """
    def __init__(self, mnem, class_name, base_class = '',
                 code_block = None, opt_args = None):
        # FIX: default was a shared mutable list (opt_args = []);
        # build a fresh one per call instead
        if opt_args is None:
            opt_args = []
        self.mnemonic = mnem
        self.class_name = class_name
        self.base_class = base_class
        if code_block:
            for code_attr in code_block.__dict__.keys():
                setattr(self, code_attr, getattr(code_block, code_attr))
        else:
            self.constructor = ''
            self.flags = []
        # Optional arguments are assumed to be either StaticInst flags
        # or an OpClass value.  To avoid having to import a complete
        # list of these values to match against, we do it ad-hoc
        # with regexps.
        # NOTE(review): when code_block is given, self.flags aliases
        # code_block.flags, so appends here also mutate the CodeBlock;
        # confirm this sharing is intended before changing it.
        for oa in opt_args:
            if instFlagRE.match(oa):
                self.flags.append(oa)
            elif opClassRE.match(oa):
                self.op_class = oa
            else:
                error(0, 'InstObjParams: optional arg "%s" not recognized '
                      'as StaticInst::Flag or OpClass.' % oa)

        # add flag initialization to constructor here to include
        # any flags added via opt_args
        self.constructor += makeFlagConstructor(self.flags)

        # if 'IsFloating' is set, add call to the FP enable check
        # function (which should be provided by isa_desc via a declare)
        if 'IsFloating' in self.flags:
            self.fp_enable_check = 'fault = checkFpEnableFault(xc);'
        else:
            self.fp_enable_check = ''
1579
1580 #######################
1581 #
1582 # Output file template
1583 #
1584
1585 file_template = '''
1586 /*
1587 * DO NOT EDIT THIS FILE!!!
1588 *
1589 * It was automatically generated from the ISA description in %(filename)s
1590 */
1591
1592 %(includes)s
1593
1594 %(global_output)s
1595
1596 namespace %(namespace)s {
1597
1598 %(namespace_output)s
1599
1600 } // namespace %(namespace)s
1601 '''
1602
1603
1604 # Update the output file only if the new contents are different from
1605 # the current contents. Minimizes the files that need to be rebuilt
1606 # after minor changes.
1607 def update_if_needed(file, contents):
1608 update = False
1609 if os.access(file, os.R_OK):
1610 f = open(file, 'r')
1611 old_contents = f.read()
1612 f.close()
1613 if contents != old_contents:
1614 print 'Updating', file
1615 os.remove(file) # in case it's write-protected
1616 update = True
1617 else:
1618 print 'File', file, 'is unchanged'
1619 else:
1620 print 'Generating', file
1621 update = True
1622 if update:
1623 f = open(file, 'w')
1624 f.write(contents)
1625 f.close()
1626
1627 # This regular expression matches include directives
1628 regExp = re.compile('(?P<include>^[ \t]*##include[ \t]*\"[ \t]*(?P<filename>[A-Za-z0-9\\/-_.]*)[ \t]*\"[ \t]*\n)', re.MULTILINE)
1629
1630 def preprocess_isa_desc(isa_desc):
1631 # Find any includes and include them
1632
1633 # Look for an include
1634 m = re.search(regExp, isa_desc)
1635 while m:
1636 filename = m.group('filename')
1637 print 'Including file "%s"' % filename
1638 includeFile = open(filename)
1639 includecontents = includeFile.read()
1640 isa_desc = isa_desc[:m.start('include')] + '##newfile "' + filename + '"\n' + includecontents + '##endfile\n' + isa_desc[m.end('include'):]
1641 # Look for the next include
1642 m = re.search(regExp, isa_desc)
1643 return isa_desc
1644
1645
1646 #
1647 # Read in and parse the ISA description.
1648 #
def parse_isa_desc(isa_desc_file, output_dir, include_path):
    """Read, preprocess, and parse an ISA description, then generate
    decoder.hh, decoder.cc, and one exec file per CPU model into
    'output_dir'.

    'include_path' is the #include prefix used inside the generated
    .cc files to locate decoder.hh.
    """
    # set a global var for the input filename... used in error messages
    global input_filename
    input_filename = isa_desc_file
    global fileNameStack
    fileNameStack = [(input_filename, 1)]

    # Suck the ISA description file in.
    input = open(isa_desc_file)
    isa_desc = input.read()
    input.close()

    # Perform Preprocessing (expand ##include directives)
    isa_desc = preprocess_isa_desc(isa_desc)

    # Parse it.  The grammar actions yield the ISA name, the C++
    # namespace, and code objects carrying the generated output pieces.
    (isa_name, namespace, global_code, namespace_code) = yacc.parse(isa_desc)

    # grab the last three path components of isa_desc_file to put in
    # the output
    filename = '/'.join(isa_desc_file.split('/')[-3:])

    # generate decoder.hh
    includes = '#include "base/bitfield.hh" // for bitfield support'
    global_output = global_code.header_output
    namespace_output = namespace_code.header_output
    update_if_needed(output_dir + '/decoder.hh', file_template % vars())

    # generate decoder.cc
    includes = '#include "%s/decoder.hh"' % include_path
    global_output = global_code.decoder_output
    namespace_output = namespace_code.decoder_output
    namespace_output += namespace_code.decode_block
    update_if_needed(output_dir + '/decoder.cc', file_template % vars())

    # generate per-cpu exec files
    for cpu in CpuModel.list:
        includes = '#include "%s/decoder.hh"\n' % include_path
        includes += cpu.includes
        global_output = global_code.exec_output[cpu.name]
        namespace_output = namespace_code.exec_output[cpu.name]
        update_if_needed(output_dir + '/' + cpu.filename,
                         file_template % vars())
1692
# Called as script: get args from command line.
# Usage: isa_parser.py <isa_desc_file> <output_dir> <include_path>
if __name__ == '__main__':
    parse_isa_desc(sys.argv[1], sys.argv[2], sys.argv[3])