1 #! /usr/bin/env python
2
3 # Copyright (c) 2003-2005 The Regents of The University of Michigan
4 # All rights reserved.
5 #
6 # Redistribution and use in source and binary forms, with or without
7 # modification, are permitted provided that the following conditions are
8 # met: redistributions of source code must retain the above copyright
9 # notice, this list of conditions and the following disclaimer;
10 # redistributions in binary form must reproduce the above copyright
11 # notice, this list of conditions and the following disclaimer in the
12 # documentation and/or other materials provided with the distribution;
13 # neither the name of the copyright holders nor the names of its
14 # contributors may be used to endorse or promote products derived from
15 # this software without specific prior written permission.
16 #
17 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20 # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21 # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23 # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28
29 import os
30 import sys
31 import re
32 import string
33 import traceback
34 # get type names
35 from types import *
36
37 # Prepend the directory where the PLY lex & yacc modules are found
38 # to the search path.  The location is taken from the M5_EXT
39 # environment variable, which is expected to be set by the build.
40 sys.path[0:0] = [os.environ['M5_EXT'] + '/ply']
41
42 import lex
43 import yacc
44
45 #####################################################################
46 #
47 # Lexer
48 #
49 # The PLY lexer module takes two things as input:
50 # - A list of token names (the string list 'tokens')
51 # - A regular expression describing a match for each token. The
52 # regexp for token FOO can be provided in two ways:
53 # - as a string variable named t_FOO
54 # - as the doc string for a function named t_FOO. In this case,
55 # the function is also executed, allowing an action to be
56 # associated with each token match.
57 #
58 #####################################################################
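# For example (illustrative only), the two forms look like:
#
#     t_FOO = r'foo'            # regexp supplied as a string variable
#
#     def t_BAR(t):
#         r'bar'                # regexp supplied as the doc string
#         ...                   # action code runs on each match
#         return t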
59
60 # Reserved words. These are listed separately as they are matched
61 # using the same regexp as generic IDs, but distinguished in the
62 # t_ID() function. The PLY documentation suggests this approach.
63 reserved = (
64 'BITFIELD', 'DECODE', 'DECODER', 'DEFAULT', 'DEF', 'EXEC', 'FORMAT',
65 'HEADER', 'LET', 'NAMESPACE', 'OPERAND_TYPES', 'OPERANDS',
66 'OUTPUT', 'SIGNED', 'TEMPLATE'
67 )
68
69 # List of tokens. The lex module requires this.
70 tokens = reserved + (
71 # identifier
72 'ID',
73
74 # integer literal
75 'INTLIT',
76
77 # string literal
78 'STRLIT',
79
80 # code literal
81 'CODELIT',
82
83 # ( ) [ ] { } < > , ; : :: *
84 'LPAREN', 'RPAREN',
85 'LBRACKET', 'RBRACKET',
86 'LBRACE', 'RBRACE',
87 'LESS', 'GREATER', 'EQUALS',
88 'COMMA', 'SEMI', 'COLON', 'DBLCOLON',
89 'ASTERISK',
90
91 # C preprocessor directives
92 'CPPDIRECTIVE'
93
94 # The following are matched but never returned.  Commented out to
95 # suppress the PLY warning.
96 # newfile directive
97 # 'NEWFILE',
98
99 # endfile directive
100 # 'ENDFILE'
101 )
102
103 # Regular expressions for token matching
104 t_LPAREN = r'\('
105 t_RPAREN = r'\)'
106 t_LBRACKET = r'\['
107 t_RBRACKET = r'\]'
108 t_LBRACE = r'\{'
109 t_RBRACE = r'\}'
110 t_LESS = r'\<'
111 t_GREATER = r'\>'
112 t_EQUALS = r'='
113 t_COMMA = r','
114 t_SEMI = r';'
115 t_COLON = r':'
116 t_DBLCOLON = r'::'
117 t_ASTERISK = r'\*'
118
119 # Identifiers and reserved words
120 reserved_map = { }
121 for r in reserved:
122 reserved_map[r.lower()] = r
123
124 def t_ID(t):
125 r'[A-Za-z_]\w*'
126 t.type = reserved_map.get(t.value,'ID')
127 return t
128
129 # Integer literal
130 def t_INTLIT(t):
131 r'(0x[\da-fA-F]+)|\d+'
132 try:
133 t.value = int(t.value,0)
134 except ValueError:
135 error(t.lineno, 'Integer value "%s" too large' % t.value)
136 t.value = 0
137 return t
138
139 # String literal. Note that these use only single quotes, and
140 # can span multiple lines.
141 def t_STRLIT(t):
142 r"(?m)'([^'])+'"
143 # strip off quotes
144 t.value = t.value[1:-1]
145 t.lineno += t.value.count('\n')
146 return t
147
148
149 # "Code literal"... like a string literal, but delimiters are
150 # '{{' and '}}' so they get formatted nicely under emacs c-mode
151 def t_CODELIT(t):
152 r"(?m)\{\{([^\}]|}(?!\}))+\}\}"
153 # strip off {{ & }}
154 t.value = t.value[2:-2]
155 t.lineno += t.value.count('\n')
156 return t
157
158 def t_CPPDIRECTIVE(t):
159 r'^\#[^\#].*\n'
160 t.lineno += t.value.count('\n')
161 return t
162
163 def t_NEWFILE(t):
164 r'^\#\#newfile\s+"[\w/.-]*"'
165 global fileNameStack
166 fileNameStack.append((t.value[11:-1], t.lineno))
167 t.lineno = 0
168
169 def t_ENDFILE(t):
170 r'^\#\#endfile'
171 (filename, t.lineno) = fileNameStack.pop()
172
173 #
174 # The functions t_NEWLINE, t_ignore, and t_error are
175 # special for the lex module.
176 #
177
178 # Newlines
179 def t_NEWLINE(t):
180 r'\n+'
181 t.lineno += t.value.count('\n')
182
183 # Comments
184 def t_comment(t):
185 r'//.*'
186
187 # Completely ignored characters
188 t_ignore = ' \t\x0c'
189
190 # Error handler
191 def t_error(t):
192 error(t.lineno, "illegal character '%s'" % t.value[0])
193 t.skip(1)
194
195 # Build the lexer
196 lex.lex()
197
198 #####################################################################
199 #
200 # Parser
201 #
202 # Every function whose name starts with 'p_' defines a grammar rule.
203 # The rule is encoded in the function's doc string, while the
204 # function body provides the action taken when the rule is matched.
205 # The argument to each function is a list of the values of the
206 # rule's symbols: t[0] for the LHS, and t[1..n] for the symbols
207 # on the RHS. For tokens, the value is copied from the t.value
208 # attribute provided by the lexer. For non-terminals, the value
209 # is assigned by the producing rule; i.e., the job of the grammar
210 # rule function is to set the value for the non-terminal on the LHS
211 # (by assigning to t[0]).
212 #####################################################################
213
214 # The LHS of the first grammar rule is used as the start symbol
215 # (in this case, 'specification'). Note that this rule enforces
216 # that there will be exactly one namespace declaration, with 0 or more
217 # global defs/decls before and after it. The defs & decls before
218 # the namespace decl will be outside the namespace; those after
219 # will be inside. The decoder function is always inside the namespace.
220 def p_specification(t):
221 'specification : opt_defs_and_outputs name_decl opt_defs_and_outputs decode_block'
222 global_code = t[1]
223 isa_name = t[2]
224 namespace = isa_name + "Inst"
225 # wrap the decode block as a function definition
226 t[4].wrap_decode_block('''
227 StaticInstPtr
228 %(isa_name)s::decodeInst(%(isa_name)s::MachInst machInst)
229 {
230 using namespace %(namespace)s;
231 ''' % vars(), '}')
232 # both the latter output blocks and the decode block are in the namespace
233 namespace_code = t[3] + t[4]
234 # pass it all back to the caller of yacc.parse()
235 t[0] = (isa_name, namespace, global_code, namespace_code)
236
237 # ISA name declaration looks like "namespace <foo>;"
238 def p_name_decl(t):
239 'name_decl : NAMESPACE ID SEMI'
240 t[0] = t[2]
241
242 # 'opt_defs_and_outputs' is a possibly empty sequence of
243 # def and/or output statements.
244 def p_opt_defs_and_outputs_0(t):
245 'opt_defs_and_outputs : empty'
246 t[0] = GenCode()
247
248 def p_opt_defs_and_outputs_1(t):
249 'opt_defs_and_outputs : defs_and_outputs'
250 t[0] = t[1]
251
252 def p_defs_and_outputs_0(t):
253 'defs_and_outputs : def_or_output'
254 t[0] = t[1]
255
256 def p_defs_and_outputs_1(t):
257 'defs_and_outputs : defs_and_outputs def_or_output'
258 t[0] = t[1] + t[2]
259
260 # The list of possible definition/output statements.
261 def p_def_or_output(t):
262 '''def_or_output : def_format
263 | def_bitfield
264 | def_template
265 | def_operand_types
266 | def_operands
267 | output_header
268 | output_decoder
269 | output_exec
270 | global_let'''
271 t[0] = t[1]
272
273 # Output blocks 'output <foo> {{...}}' (C++ code blocks) are copied
274 # directly to the appropriate output section.
275
276
277 # Protect any non-dict-substitution '%'s in a format string
278 # (i.e. those not followed by '(')
279 def protect_non_subst_percents(s):
280 return re.sub(r'%(?!\()', '%%', s)
281
282 # Massage output block by substituting in template definitions and bit
283 # operators. We handle '%'s embedded in the string that don't
284 # indicate template substitutions (or CPU-specific symbols, which get
285 # handled in GenCode) by doubling them first so that the format
286 # operation will reduce them back to single '%'s.
287 def process_output(s):
288 s = protect_non_subst_percents(s)
289 # protects cpu-specific symbols too
290 s = protect_cpu_symbols(s)
291 return substBitOps(s % templateMap)
292
293 def p_output_header(t):
294 'output_header : OUTPUT HEADER CODELIT SEMI'
295 t[0] = GenCode(header_output = process_output(t[3]))
296
297 def p_output_decoder(t):
298 'output_decoder : OUTPUT DECODER CODELIT SEMI'
299 t[0] = GenCode(decoder_output = process_output(t[3]))
300
301 def p_output_exec(t):
302 'output_exec : OUTPUT EXEC CODELIT SEMI'
303 t[0] = GenCode(exec_output = process_output(t[3]))
304
305 # global let blocks 'let {{...}}' (Python code blocks) are executed
306 # directly when seen. Note that these execute in a special variable
307 # context 'exportContext' to prevent the code from polluting this
308 # script's namespace.
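# For example (illustrative; the name is made up), a block such as
#     let {{ default_mem_flags = 'None' }};
# defines 'default_mem_flags' in exportContext, making it visible to
# later let blocks and to format code run by defineInst().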
309 def p_global_let(t):
310 'global_let : LET CODELIT SEMI'
311 updateExportContext()
312 try:
313 exec fixPythonIndentation(t[2]) in exportContext
314 except Exception, exc:
315 error(t.lineno(1),
316 'error: %s in global let block "%s".' % (exc, t[2]))
317 t[0] = GenCode() # contributes nothing to the output C++ file
318
319 # Define the mapping from operand type extensions to C++ types and bit
320 # widths (stored in operandTypeMap).
321 def p_def_operand_types(t):
322 'def_operand_types : DEF OPERAND_TYPES CODELIT SEMI'
323 try:
324 userDict = eval('{' + t[3] + '}')
325 except Exception, exc:
326 error(t.lineno(1),
327 'error: %s in def operand_types block "%s".' % (exc, t[3]))
328 buildOperandTypeMap(userDict, t.lineno(1))
329 t[0] = GenCode() # contributes nothing to the output C++ file
330
331 # Define the mapping from operand names to operand classes and other
332 # traits. Stored in operandNameMap.
333 def p_def_operands(t):
334 'def_operands : DEF OPERANDS CODELIT SEMI'
335 if not globals().has_key('operandTypeMap'):
336 error(t.lineno(1),
337 'error: operand types must be defined before operands')
338 try:
339 userDict = eval('{' + t[3] + '}')
340 except Exception, exc:
341 error(t.lineno(1),
342 'error: %s in def operands block "%s".' % (exc, t[3]))
343 buildOperandNameMap(userDict, t.lineno(1))
344 t[0] = GenCode() # contributes nothing to the output C++ file
345
346 # A bitfield definition looks like:
347 # 'def [signed] bitfield <ID> [<first>:<last>]'
348 # This generates a preprocessor macro in the output file.
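# For example (the bitfield name is illustrative):
#     def bitfield OPCODE <31:26>;
# generates roughly:
#     #undef OPCODE
#     #define OPCODE bits(machInst, 31, 26)
# and the 'signed' variant wraps the expression in sext<N>(...).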
349 def p_def_bitfield_0(t):
350 'def_bitfield : DEF opt_signed BITFIELD ID LESS INTLIT COLON INTLIT GREATER SEMI'
351 expr = 'bits(machInst, %2d, %2d)' % (t[6], t[8])
352 if (t[2] == 'signed'):
353 expr = 'sext<%d>(%s)' % (t[6] - t[8] + 1, expr)
354 hash_define = '#undef %s\n#define %s\t%s\n' % (t[4], t[4], expr)
355 t[0] = GenCode(header_output = hash_define)
356
357 # alternate form for single bit: 'def [signed] bitfield <ID> [<bit>]'
358 def p_def_bitfield_1(t):
359 'def_bitfield : DEF opt_signed BITFIELD ID LESS INTLIT GREATER SEMI'
360 expr = 'bits(machInst, %2d, %2d)' % (t[6], t[6])
361 if (t[2] == 'signed'):
362 expr = 'sext<%d>(%s)' % (1, expr)
363 hash_define = '#undef %s\n#define %s\t%s\n' % (t[4], t[4], expr)
364 t[0] = GenCode(header_output = hash_define)
365
366 def p_opt_signed_0(t):
367 'opt_signed : SIGNED'
368 t[0] = t[1]
369
370 def p_opt_signed_1(t):
371 'opt_signed : empty'
372 t[0] = ''
373
374 # Global map variable to hold templates
375 templateMap = {}
376
377 def p_def_template(t):
378 'def_template : DEF TEMPLATE ID CODELIT SEMI'
379 templateMap[t[3]] = Template(t[4])
380 t[0] = GenCode()
381
382 # An instruction format definition looks like
383 # "def format <fmt>(<params>) {{...}};"
384 def p_def_format(t):
385 'def_format : DEF FORMAT ID LPAREN param_list RPAREN CODELIT SEMI'
386 (id, params, code) = (t[3], t[5], t[7])
387 defFormat(id, params, code, t.lineno(1))
388 t[0] = GenCode()
389
390 # The formal parameter list for an instruction format is a possibly
391 # empty list of comma-separated parameters. Positional (standard,
392 # non-keyword) parameters must come first, followed by keyword
393 # parameters, followed by a '*foo' parameter that gets excess
394 # positional arguments (as in Python). Each of these three parameter
395 # categories is optional.
396 #
397 # Note that we do not support the '**foo' parameter for collecting
398 # otherwise undefined keyword args. Otherwise the parameter list is
399 # (I believe) identical to what is supported in Python.
400 #
401 # The param list generates a flat list of parameter specification
402 # strings: positional parameter names, 'name = default' strings for
403 # keyword parameters, and a '*name' string for the excess-args parameter.
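# For example (illustrative names), a format declared as
#     def format Branch(code, *opt_flags) {{ ... }};
# has positional param 'code' and excess-args param '*opt_flags'.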
404 def p_param_list_0(t):
405 'param_list : positional_param_list COMMA nonpositional_param_list'
406 t[0] = t[1] + t[3]
407
408 def p_param_list_1(t):
409 '''param_list : positional_param_list
410 | nonpositional_param_list'''
411 t[0] = t[1]
412
413 def p_positional_param_list_0(t):
414 'positional_param_list : empty'
415 t[0] = []
416
417 def p_positional_param_list_1(t):
418 'positional_param_list : ID'
419 t[0] = [t[1]]
420
421 def p_positional_param_list_2(t):
422 'positional_param_list : positional_param_list COMMA ID'
423 t[0] = t[1] + [t[3]]
424
425 def p_nonpositional_param_list_0(t):
426 'nonpositional_param_list : keyword_param_list COMMA excess_args_param'
427 t[0] = t[1] + t[3]
428
429 def p_nonpositional_param_list_1(t):
430 '''nonpositional_param_list : keyword_param_list
431 | excess_args_param'''
432 t[0] = t[1]
433
434 def p_keyword_param_list_0(t):
435 'keyword_param_list : keyword_param'
436 t[0] = [t[1]]
437
438 def p_keyword_param_list_1(t):
439 'keyword_param_list : keyword_param_list COMMA keyword_param'
440 t[0] = t[1] + [t[3]]
441
442 def p_keyword_param(t):
443 'keyword_param : ID EQUALS expr'
444 t[0] = t[1] + ' = ' + t[3].__repr__()
445
446 def p_excess_args_param(t):
447 'excess_args_param : ASTERISK ID'
448 # Just concatenate them: '*ID'. Wrap in list to be consistent
449 # with positional_param_list and keyword_param_list.
450 t[0] = [t[1] + t[2]]
451
452 # End of format definition-related rules.
453 ##############
454
455 #
456 # A decode block looks like:
457 # decode <field1> [, <field2>]* [default <inst>] { ... }
458 #
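# For example (field and instruction names are illustrative):
#     decode OPCODE default Unknown::unknown() {
#         0x01: addq({{ Rc = Ra + Rb; }});
#     }
# becomes a C++ switch on OPCODE with a case for 0x01 and a default case.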
459 def p_decode_block(t):
460 'decode_block : DECODE ID opt_default LBRACE decode_stmt_list RBRACE'
461 default_defaults = defaultStack.pop()
462 codeObj = t[5]
463 # use the "default defaults" only if there was no explicit
464 # default statement in decode_stmt_list
465 if not codeObj.has_decode_default:
466 codeObj += default_defaults
467 codeObj.wrap_decode_block('switch (%s) {\n' % t[2], '}\n')
468 t[0] = codeObj
469
470 # The opt_default statement serves only to push the "default defaults"
471 # onto defaultStack. This value will be used by nested decode blocks,
472 # and used and popped off when the current decode_block is processed
473 # (in p_decode_block() above).
474 def p_opt_default_0(t):
475 'opt_default : empty'
476 # no default specified: reuse the one currently at the top of the stack
477 defaultStack.push(defaultStack.top())
478 # no meaningful value returned
479 t[0] = None
480
481 def p_opt_default_1(t):
482 'opt_default : DEFAULT inst'
483 # push the new default
484 codeObj = t[2]
485 codeObj.wrap_decode_block('\ndefault:\n', 'break;\n')
486 defaultStack.push(codeObj)
487 # no meaningful value returned
488 t[0] = None
489
490 def p_decode_stmt_list_0(t):
491 'decode_stmt_list : decode_stmt'
492 t[0] = t[1]
493
494 def p_decode_stmt_list_1(t):
495 'decode_stmt_list : decode_stmt decode_stmt_list'
496 if (t[1].has_decode_default and t[2].has_decode_default):
497 error(t.lineno(1), 'Two default cases in decode block')
498 t[0] = t[1] + t[2]
499
500 #
501 # Decode statement rules
502 #
503 # There are four types of statements allowed in a decode block:
504 # 1. Format blocks 'format <foo> { ... }'
505 # 2. Nested decode blocks
506 # 3. Instruction definitions.
507 # 4. C preprocessor directives.
508
509
510 # Preprocessor directives found in a decode statement list are passed
511 # through to the output, replicated to all of the output code
512 # streams. This works well for ifdefs, so we can ifdef out both the
513 # declarations and the decode cases generated by an instruction
514 # definition. Handling them as part of the grammar makes it easy to
515 # keep them in the right place with respect to the code generated by
516 # the other statements.
517 def p_decode_stmt_cpp(t):
518 'decode_stmt : CPPDIRECTIVE'
519 t[0] = GenCode(t[1], t[1], t[1], t[1])
520
521 # A format block 'format <foo> { ... }' sets the default instruction
522 # format used to handle instruction definitions inside the block.
523 # This format can be overridden by using an explicit format on the
524 # instruction definition or with a nested format block.
525 def p_decode_stmt_format(t):
526 'decode_stmt : FORMAT push_format_id LBRACE decode_stmt_list RBRACE'
527 # The format will be pushed on the stack when 'push_format_id' is
528 # processed (see below). Once the parser has recognized the full
529 # production (through the right brace), we're done with the format,
530 # so now we can pop it.
531 formatStack.pop()
532 t[0] = t[4]
533
534 # This rule exists so we can set the current format (& push the stack)
535 # when we recognize the format name part of the format block.
536 def p_push_format_id(t):
537 'push_format_id : ID'
538 try:
539 formatStack.push(formatMap[t[1]])
540 t[0] = ('', '// format %s' % t[1])
541 except KeyError:
542 error(t.lineno(1), 'instruction format "%s" not defined.' % t[1])
543
544 # Nested decode block: if the value of the current field matches the
545 # specified constant, do a nested decode on some other field.
546 def p_decode_stmt_decode(t):
547 'decode_stmt : case_label COLON decode_block'
548 label = t[1]
549 codeObj = t[3]
550 # just wrap the decoding code from the block as a case in the
551 # outer switch statement.
552 codeObj.wrap_decode_block('\n%s:\n' % label)
553 codeObj.has_decode_default = (label == 'default')
554 t[0] = codeObj
555
556 # Instruction definition (finally!).
557 def p_decode_stmt_inst(t):
558 'decode_stmt : case_label COLON inst SEMI'
559 label = t[1]
560 codeObj = t[3]
561 codeObj.wrap_decode_block('\n%s:' % label, 'break;\n')
562 codeObj.has_decode_default = (label == 'default')
563 t[0] = codeObj
564
565 # The case label is either a list of one or more constants or 'default'
566 def p_case_label_0(t):
567 'case_label : intlit_list'
568 t[0] = ': '.join(map(lambda a: 'case %#x' % a, t[1]))
569
570 def p_case_label_1(t):
571 'case_label : DEFAULT'
572 t[0] = 'default'
573
574 #
575 # The constant list for a decode case label must be non-empty; it consists
576 # of one or more comma-separated integer literals.
577 #
578 def p_intlit_list_0(t):
579 'intlit_list : INTLIT'
580 t[0] = [t[1]]
581
582 def p_intlit_list_1(t):
583 'intlit_list : intlit_list COMMA INTLIT'
584 t[0] = t[1]
585 t[0].append(t[3])
586
587 # Define an instruction using the current instruction format (specified
588 # by an enclosing format block).
589 # "<mnemonic>(<args>)"
590 def p_inst_0(t):
591 'inst : ID LPAREN arg_list RPAREN'
592 # Pass the ID and arg list to the current format class to deal with.
593 currentFormat = formatStack.top()
594 codeObj = currentFormat.defineInst(t[1], t[3], t.lineno(1))
595 args = ','.join(map(str, t[3]))
596 args = re.sub('(?m)^', '//', args)
597 args = re.sub('^//', '', args)
598 comment = '\n// %s::%s(%s)\n' % (currentFormat.id, t[1], args)
599 codeObj.prepend_all(comment)
600 t[0] = codeObj
601
602 # Define an instruction using an explicitly specified format:
603 # "<fmt>::<mnemonic>(<args>)"
604 def p_inst_1(t):
605 'inst : ID DBLCOLON ID LPAREN arg_list RPAREN'
606 try:
607 format = formatMap[t[1]]
608 except KeyError:
609 error(t.lineno(1), 'instruction format "%s" not defined.' % t[1])
610 codeObj = format.defineInst(t[3], t[5], t.lineno(1))
611 comment = '\n// %s::%s(%s)\n' % (t[1], t[3], t[5])
612 codeObj.prepend_all(comment)
613 t[0] = codeObj
614
615 # The arg list generates a tuple, where the first element is a list of
616 # the positional args and the second element is a dict containing the
617 # keyword args.
618 def p_arg_list_0(t):
619 'arg_list : positional_arg_list COMMA keyword_arg_list'
620 t[0] = ( t[1], t[3] )
621
622 def p_arg_list_1(t):
623 'arg_list : positional_arg_list'
624 t[0] = ( t[1], {} )
625
626 def p_arg_list_2(t):
627 'arg_list : keyword_arg_list'
628 t[0] = ( [], t[1] )
629
630 def p_positional_arg_list_0(t):
631 'positional_arg_list : empty'
632 t[0] = []
633
634 def p_positional_arg_list_1(t):
635 'positional_arg_list : expr'
636 t[0] = [t[1]]
637
638 def p_positional_arg_list_2(t):
639 'positional_arg_list : positional_arg_list COMMA expr'
640 t[0] = t[1] + [t[3]]
641
642 def p_keyword_arg_list_0(t):
643 'keyword_arg_list : keyword_arg'
644 t[0] = t[1]
645
646 def p_keyword_arg_list_1(t):
647 'keyword_arg_list : keyword_arg_list COMMA keyword_arg'
648 t[0] = t[1]
649 t[0].update(t[3])
650
651 def p_keyword_arg(t):
652 'keyword_arg : ID EQUALS expr'
653 t[0] = { t[1] : t[3] }
654
655 #
656 # Basic expressions. These constitute the argument values of
657 # "function calls" (i.e. instruction definitions in the decode block)
658 # and default values for formal parameters of format functions.
659 #
660 # Right now, these are either strings, integers, or (recursively)
661 # lists of exprs (using Python square-bracket list syntax). Note that
662 # bare identifiers are treated as string constants here (since there
663 # isn't really a variable namespace to refer to).
664 #
665 def p_expr_0(t):
666 '''expr : ID
667 | INTLIT
668 | STRLIT
669 | CODELIT'''
670 t[0] = t[1]
671
672 def p_expr_1(t):
673 '''expr : LBRACKET list_expr RBRACKET'''
674 t[0] = t[2]
675
676 def p_list_expr_0(t):
677 'list_expr : expr'
678 t[0] = [t[1]]
679
680 def p_list_expr_1(t):
681 'list_expr : list_expr COMMA expr'
682 t[0] = t[1] + [t[3]]
683
684 def p_list_expr_2(t):
685 'list_expr : empty'
686 t[0] = []
687
688 #
689 # Empty production... use in other rules for readability.
690 #
691 def p_empty(t):
692 'empty :'
693 pass
694
695 # Parse error handler. Note that the argument here is the offending
696 # *token*, not a grammar symbol (hence the need to use t.value)
697 def p_error(t):
698 if t:
699 error(t.lineno, "syntax error at '%s'" % t.value)
700 else:
701 error_bt(0, "unknown syntax error")
702
703 # END OF GRAMMAR RULES
704 #
705 # Now build the parser.
706 yacc.yacc()
707
708
709 #####################################################################
710 #
711 # Support Classes
712 #
713 #####################################################################
714
715 ################
716 # CpuModel class
717 #
718 # The CpuModel class encapsulates everything we need to know about a
719 # particular CPU model.
720
721 class CpuModel:
722 # List of all CPU models. Accessible as CpuModel.list.
723 list = []
724
725 # Constructor. Automatically adds models to CpuModel.list.
726 def __init__(self, name, filename, includes, strings):
727 self.name = name
728 self.filename = filename # filename for output exec code
729 self.includes = includes # include files needed in exec file
730 # The 'strings' dict holds all the per-CPU symbols we can
731 # substitute into templates etc.
732 self.strings = strings
733 # Add self to list.
734 CpuModel.list.append(self)
735
736 # Define CPU models. The following lines should contain the only
737 # CPU-model-specific information in this file. Note that the ISA
738 # description itself should have *no* CPU-model-specific content.
739 CpuModel('SimpleCPU', 'simple_cpu_exec.cc',
740 '#include "cpu/simple/cpu.hh"',
741 { 'CPU_exec_context': 'SimpleCPU' })
742 CpuModel('FastCPU', 'fast_cpu_exec.cc',
743 '#include "cpu/fast/cpu.hh"',
744 { 'CPU_exec_context': 'FastCPU' })
745 CpuModel('FullCPU', 'full_cpu_exec.cc',
746 '#include "encumbered/cpu/full/dyn_inst.hh"',
747 { 'CPU_exec_context': 'DynInst' })
748
749 #CpuModel('AlphaFullCPU', 'alpha_o3_exec.cc',
750 # '#include "cpu/o3/alpha_dyn_inst.hh"',
751 # { 'CPU_exec_context': 'AlphaDynInst<AlphaSimpleImpl>' })
752
753 # Expand template with CPU-specific references into a dictionary with
754 # an entry for each CPU model name. The entry key is the model name
755 # and the corresponding value is the template with the CPU-specific
756 # refs substituted for that model.
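# For example, given the models above, a template fragment like
#     '%(CPU_exec_context)s *xc'
# expands to 'SimpleCPU *xc' under the 'SimpleCPU' key, 'FastCPU *xc'
# under 'FastCPU', and so on.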
757 def expand_cpu_symbols_to_dict(template):
758 # Protect '%'s that don't go with CPU-specific terms
759 t = re.sub(r'%(?!\(CPU_)', '%%', template)
760 result = {}
761 for cpu in CpuModel.list:
762 result[cpu.name] = t % cpu.strings
763 return result
764
765 # *If* the template has CPU-specific references, return a single
766 # string containing a copy of the template for each CPU model with the
767 # corresponding values substituted in. If the template has no
768 # CPU-specific references, it is returned unmodified.
769 def expand_cpu_symbols_to_string(template):
770 if template.find('%(CPU_') != -1:
771 return reduce(lambda x,y: x+y,
772 expand_cpu_symbols_to_dict(template).values())
773 else:
774 return template
775
776 # Protect CPU-specific references by doubling the corresponding '%'s
777 # (in preparation for substituting a different set of references into
778 # the template).
779 def protect_cpu_symbols(template):
780 return re.sub(r'%(?=\(CPU_)', '%%', template)
781
782 ###############
783 # GenCode class
784 #
785 # The GenCode class encapsulates generated code destined for various
786 # output files. The header_output and decoder_output attributes are
787 # strings containing code destined for decoder.hh and decoder.cc
788 # respectively. The decode_block attribute contains code to be
789 # incorporated in the decode function itself (that will also end up in
790 # decoder.cc). The exec_output attribute is a dictionary with a key
791 # for each CPU model name; the value associated with a particular key
792 # is the string of code for that CPU model's exec.cc file. The
793 # has_decode_default attribute is used in the decode block to allow
794 # explicit default clauses to override default default clauses.
795
796 class GenCode:
797 # Constructor. At this point we substitute out all CPU-specific
798 # symbols. For the exec output, these go into the per-model
799 # dictionary. For all other output types they get collapsed into
800 # a single string.
801 def __init__(self,
802 header_output = '', decoder_output = '', exec_output = '',
803 decode_block = '', has_decode_default = False):
804 self.header_output = expand_cpu_symbols_to_string(header_output)
805 self.decoder_output = expand_cpu_symbols_to_string(decoder_output)
806 if isinstance(exec_output, dict):
807 self.exec_output = exec_output
808 elif isinstance(exec_output, str):
809 # If the exec_output arg is a single string, we replicate
810 # it for each of the CPU models, substituting any
811 # %(CPU_foo)s params appropriately.
812 self.exec_output = expand_cpu_symbols_to_dict(exec_output)
813 self.decode_block = expand_cpu_symbols_to_string(decode_block)
814 self.has_decode_default = has_decode_default
815
816 # Override '+' operator: generate a new GenCode object that
817 # concatenates all the individual strings in the operands.
818 def __add__(self, other):
819 exec_output = {}
820 for cpu in CpuModel.list:
821 n = cpu.name
822 exec_output[n] = self.exec_output[n] + other.exec_output[n]
823 return GenCode(self.header_output + other.header_output,
824 self.decoder_output + other.decoder_output,
825 exec_output,
826 self.decode_block + other.decode_block,
827 self.has_decode_default or other.has_decode_default)
828
829 # Prepend a string (typically a comment) to all the strings.
830 def prepend_all(self, pre):
831 self.header_output = pre + self.header_output
832 self.decoder_output = pre + self.decoder_output
833 self.decode_block = pre + self.decode_block
834 for cpu in CpuModel.list:
835 self.exec_output[cpu.name] = pre + self.exec_output[cpu.name]
836
837 # Wrap the decode block in a pair of strings (e.g., 'case foo:'
838 # and 'break;'). Used to build the big nested switch statement.
839 def wrap_decode_block(self, pre, post = ''):
840 self.decode_block = pre + indent(self.decode_block) + post
841
842 ################
843 # Format object.
844 #
845 # A format object encapsulates an instruction format. It must provide
846 # a defineInst() method that generates the code for an instruction
847 # definition.
848
849 exportContextSymbols = ('InstObjParams', 'CodeBlock',
850 'makeList', 're', 'string')
851
852 exportContext = {}
853
854 def updateExportContext():
855 exportContext.update(exportDict(*exportContextSymbols))
856 exportContext.update(templateMap)
857
858 def exportDict(*symNames):
859 return dict([(s, eval(s)) for s in symNames])
860
861
862 class Format:
863 def __init__(self, id, params, code):
864 # constructor: just save away arguments
865 self.id = id
866 self.params = params
867 label = 'def format ' + id
868 self.user_code = compile(fixPythonIndentation(code), label, 'exec')
869 param_list = string.join(params, ", ")
870 f = '''def defInst(_code, _context, %s):
871 my_locals = vars().copy()
872 exec _code in _context, my_locals
873 return my_locals\n''' % param_list
874 c = compile(f, label + ' wrapper', 'exec')
875 exec c
876 self.func = defInst
877
878 def defineInst(self, name, args, lineno):
879 context = {}
880 updateExportContext()
881 context.update(exportContext)
882 context.update({ 'name': name, 'Name': string.capitalize(name) })
883 try:
884 vars = self.func(self.user_code, context, *args[0], **args[1])
885 except Exception, exc:
886 error(lineno, 'error defining "%s": %s.' % (name, exc))
887 for k in vars.keys():
888 if k not in ('header_output', 'decoder_output',
889 'exec_output', 'decode_block'):
890 del vars[k]
891 return GenCode(**vars)
892
893 # Special null format to catch an implicit-format instruction
894 # definition outside of any format block.
895 class NoFormat:
896 def __init__(self):
897 self.defaultInst = ''
898
899 def defineInst(self, name, args, lineno):
900 error(lineno,
901 'instruction definition "%s" with no active format!' % name)
902
903 # This dictionary maps format name strings to Format objects.
904 formatMap = {}
905
906 # Define a new format
907 def defFormat(id, params, code, lineno):
908 # make sure we haven't already defined this one
909 if formatMap.get(id, None) != None:
910 error(lineno, 'format %s redefined.' % id)
911 # create new object and store in global map
912 formatMap[id] = Format(id, params, code)
913
914
915 ##############
916 # Stack: a simple stack object. Used for both formats (formatStack)
917 # and default cases (defaultStack). Simply wraps a list to give more
918 # stack-like syntax and enable initialization with an argument list
919 # (as opposed to an argument that's a list).
920
921 class Stack(list):
922 def __init__(self, *items):
923 list.__init__(self, items)
924
925 def push(self, item):
926 self.append(item);
927
928 def top(self):
929 return self[-1]
930
931 # The global format stack.
932 formatStack = Stack(NoFormat())
933
934 # The global default case stack.
935 defaultStack = Stack( None )
936
937 ###################
938 # Utility functions
939
940 #
941 # Indent every line in string 's' by two spaces
942 # (except preprocessor directives).
943 # Used to make nested code blocks look pretty.
944 #
945 def indent(s):
946 return re.sub(r'(?m)^(?!#)', ' ', s)
947
948 #
949 # Munge a somewhat arbitrarily formatted piece of Python code
950 # (e.g. from a format 'let' block) into something whose indentation
951 # will get by the Python parser.
952 #
953 # The two keys here are that Python will give a syntax error if
954 # there's any whitespace at the beginning of the first line, and that
955 # all lines at the same lexical nesting level must have identical
956 # indentation. Unfortunately the way code literals work, an entire
957 # let block tends to have some initial indentation. Rather than
958 # trying to figure out what that is and strip it off, we prepend 'if
959 # 1:' to make the let code the nested block inside the if (and have
960 # the parser automatically deal with the indentation for us).
961 #
962 # We don't want to do this if (1) the code block is empty or (2) the
963 # first line of the block doesn't have any whitespace at the front.
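# For example (illustrative), an indented let-block body such as
#         x = 1
#         y = 2
# becomes
#     if 1:
#             x = 1
#             y = 2
# which parses cleanly regardless of the original indentation depth.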
964
965 def fixPythonIndentation(s):
966 # get rid of blank lines first
967 s = re.sub(r'(?m)^\s*\n', '', s);
968 if (s != '' and re.match(r'[ \t]', s[0])):
969 s = 'if 1:\n' + s
970 return s
971
972 # Error handler. Just call exit. Output formatted to work under
973 # Emacs compile-mode. This function should be called when errors due
974 # to user input are detected (as opposed to parser bugs).
975 def error(lineno, string):
976 spaces = ""
977 for (filename, line) in fileNameStack[0:-1]:
978 print spaces + "In file included from " + filename
979 spaces += " "
980 # Uncomment the following line to get a Python stack backtrace for
981 # these errors too. Can be handy when trying to debug the parser.
982 # traceback.print_exc()
983 sys.exit(spaces + "%s:%d: %s" % (fileNameStack[-1][0], lineno, string))
984
985 # Like error(), but include a Python stack backtrace (for processing
986 # Python exceptions). This function should be called for errors that
987 # appear to be bugs in the parser itself.
988 def error_bt(lineno, string):
989 traceback.print_exc()
990 print >> sys.stderr, "%s:%d: %s" % (input_filename, lineno, string)
991 sys.exit(1)
992
993
994 #####################################################################
995 #
996 # Bitfield Operator Support
997 #
998 #####################################################################
999
1000 bitOp1ArgRE = re.compile(r'<\s*(\w+)\s*:\s*>')
1001
1002 bitOpWordRE = re.compile(r'(?<![\w\.])([\w\.]+)<\s*(\w+)\s*:\s*(\w+)\s*>')
1003 bitOpExprRE = re.compile(r'\)<\s*(\w+)\s*:\s*(\w+)\s*>')
1004
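# For example (operand names are illustrative), substBitOps() rewrites:
#     Ra<7:0>           -->  bits(Ra, 7, 0)
#     (Ra + Rb)<3:0>    -->  bits((Ra + Rb), 3, 0)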
1005 def substBitOps(code):
1006 # first convert single-bit selectors to two-index form
1007 # i.e., <n> --> <n:n>
1008 code = bitOp1ArgRE.sub(r'<\1:\1>', code)
1009 # simple case: selector applied to ID (name)
1010 # i.e., foo<a:b> --> bits(foo, a, b)
1011 code = bitOpWordRE.sub(r'bits(\1, \2, \3)', code)
1012 # if selector is applied to expression (ending in ')'),
1013 # we need to search backward for matching '('
1014 match = bitOpExprRE.search(code)
1015 while match:
1016 exprEnd = match.start()
1017 here = exprEnd - 1
1018 nestLevel = 1
1019 while nestLevel > 0:
1020 if code[here] == '(':
1021 nestLevel -= 1
1022 elif code[here] == ')':
1023 nestLevel += 1
1024 here -= 1
1025 if here < 0:
1026 sys.exit("Didn't find '('!")
1027 exprStart = here+1
1028 newExpr = r'bits(%s, %s, %s)' % (code[exprStart:exprEnd+1],
1029 match.group(1), match.group(2))
1030 code = code[:exprStart] + newExpr + code[match.end():]
1031 match = bitOpExprRE.search(code)
1032 return code
1033
1034
1035 ####################
1036 # Template objects.
1037 #
1038 # Template objects are format strings that allow substitution from
1039 # the attribute spaces of other objects (e.g. InstObjParams instances).
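# A typical use (illustrative; 'BasicDecl' and 'iop' are made-up names) is
#     templateMap['BasicDecl'].subst(iop)
# where 'iop' is an InstObjParams instance whose attribute dict supplies
# the %(...)s values.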
1040
1041 class Template:
1042 def __init__(self, t):
1043 self.template = t
1044
1045 def subst(self, d):
1046 # Start with the template namespace. Make a copy since we're
1047 # going to modify it.
1048 myDict = templateMap.copy()
1049 # if the argument is a dictionary, we just use it.
1050 if isinstance(d, dict):
1051 myDict.update(d)
1052 # if the argument is an object, we use its attribute map.
1053 elif hasattr(d, '__dict__'):
1054 myDict.update(d.__dict__)
1055 else:
1056 raise TypeError, "Template.subst() arg must be or have a dictionary"
1057 # Protect non-Python-dict substitutions (e.g. if there's a printf
1058 # in the templated C++ code)
1059 template = protect_non_subst_percents(self.template)
1060 # CPU-model-specific substitutions are handled later (in GenCode).
1061 template = protect_cpu_symbols(template)
1062 return template % myDict
1063
1064 # Convert to string. This handles the case when a template with a
1065 # CPU-specific term gets interpolated into another template or into
1066 # an output block.
1067 def __str__(self):
1068 return expand_cpu_symbols_to_string(self.template)
1069
1070 #####################################################################
1071 #
1072 # Code Parser
1073 #
1074 # The remaining code is the support for automatically extracting
1075 # instruction characteristics from pseudocode.
1076 #
1077 #####################################################################
1078
1079 # Force the argument to be a list. Useful for flags, where a caller
1080 # can specify a singleton flag or a list of flags. Also useful for
1081 # converting tuples to lists so they can be modified.
1082 def makeList(arg):
1083 if isinstance(arg, list):
1084 return arg
1085 elif isinstance(arg, tuple):
1086 return list(arg)
1087 elif not arg:
1088 return []
1089 else:
1090 return [ arg ]
1091
1092 # Generate operandTypeMap from the user's 'def operand_types'
1093 # statement.
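# For example (extensions and sizes are illustrative), a block might contain
#     'sb' : ('signed int', 8), 'uq' : ('unsigned int', 64),
#     'sf' : ('float', 32)
# which maps to int8_t, uint64_t, and float entries respectively.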
1094 def buildOperandTypeMap(userDict, lineno):
1095 global operandTypeMap
1096 operandTypeMap = {}
1097 for (ext, (desc, size)) in userDict.iteritems():
# reset per entry so unrecognized type descriptions are caught below
ctype = ''
1098 if desc == 'signed int':
1099 ctype = 'int%d_t' % size
1100 is_signed = 1
1101 elif desc == 'unsigned int':
1102 ctype = 'uint%d_t' % size
1103 is_signed = 0
1104 elif desc == 'float':
1105 is_signed = 1 # shouldn't really matter
1106 if size == 32:
1107 ctype = 'float'
1108 elif size == 64:
1109 ctype = 'double'
1110 if ctype == '':
1111 error(0, 'Unrecognized type description "%s" in userDict' % desc)
1112 operandTypeMap[ext] = (size, ctype, is_signed)
1113
1114 #
1115 #
1116 #
1117 # Base class for operand descriptors. An instance of this class (or
1118 # actually a class derived from this one) represents a specific
1119 # operand for a code block (e.g., "Rc.sq" as a dest).  Intermediate
1120 # derived classes encapsulate the traits of a particular operand type
1121 # (e.g., "32-bit integer register").
1122 #
1123 class Operand(object):
1124 def __init__(self, full_name, ext, is_src, is_dest):
1125 self.full_name = full_name
1126 self.ext = ext
1127 self.is_src = is_src
1128 self.is_dest = is_dest
1129 # The 'effective extension' (eff_ext) is either the actual
1130 # extension, if one was explicitly provided, or the default.
1131 if ext:
1132 self.eff_ext = ext
1133 else:
1134 self.eff_ext = self.dflt_ext
1135
1136 (self.size, self.ctype, self.is_signed) = operandTypeMap[self.eff_ext]
1137
1138 # note that mem_acc_size is undefined for non-mem operands...
1139 # template must be careful not to use it if it doesn't apply.
1140 if self.isMem():
1141 self.mem_acc_size = self.makeAccSize()
1142 self.mem_acc_type = self.ctype
1143
1144 # Finalize additional fields (primarily code fields). This step
1145 # is done separately since some of these fields may depend on the
1146 # register index enumeration that hasn't been performed yet at the
1147 # time of __init__().
1148 def finalize(self):
1149 self.flags = self.getFlags()
1150 self.constructor = self.makeConstructor()
1151 self.op_decl = self.makeDecl()
1152
1153 if self.is_src:
1154 self.op_rd = self.makeRead()
1155 self.op_src_decl = self.makeDecl()
1156 else:
1157 self.op_rd = ''
1158 self.op_src_decl = ''
1159
1160 if self.is_dest:
1161 self.op_wb = self.makeWrite()
1162 self.op_dest_decl = self.makeDecl()
1163 else:
1164 self.op_wb = ''
1165 self.op_dest_decl = ''
1166
1167 def isMem(self):
1168 return 0
1169
1170 def isReg(self):
1171 return 0
1172
1173 def isFloatReg(self):
1174 return 0
1175
1176 def isIntReg(self):
1177 return 0
1178
1179 def isControlReg(self):
1180 return 0
1181
1182 def getFlags(self):
1183 # note the empty slice '[:]' gives us a copy of self.flags[0]
1184 # instead of a reference to it
1185 my_flags = self.flags[0][:]
1186 if self.is_src:
1187 my_flags += self.flags[1]
1188 if self.is_dest:
1189 my_flags += self.flags[2]
1190 return my_flags
1191
1192 def makeDecl(self):
1193 # Note that initializations in the declarations are solely
1194 # to avoid 'uninitialized variable' errors from the compiler.
1195 return self.ctype + ' ' + self.base_name + ' = 0;\n';
1196
1197 class IntRegOperand(Operand):
1198 def isReg(self):
1199 return 1
1200
1201 def isIntReg(self):
1202 return 1
1203
1204 def makeConstructor(self):
1205 c = ''
1206 if self.is_src:
1207 c += '\n\t_srcRegIdx[%d] = %s;' % \
1208 (self.src_reg_idx, self.reg_spec)
1209 if self.is_dest:
1210 c += '\n\t_destRegIdx[%d] = %s;' % \
1211 (self.dest_reg_idx, self.reg_spec)
1212 return c
1213
1214 def makeRead(self):
1215 if (self.ctype == 'float' or self.ctype == 'double'):
1216 error(0, 'Attempt to read integer register as FP')
1217 if (self.size == self.dflt_size):
1218 return '%s = xc->readIntReg(this, %d);\n' % \
1219 (self.base_name, self.src_reg_idx)
1220 else:
1221 return '%s = bits(xc->readIntReg(this, %d), %d, 0);\n' % \
1222 (self.base_name, self.src_reg_idx, self.size-1)
1223
1224 def makeWrite(self):
1225 if (self.ctype == 'float' or self.ctype == 'double'):
1226 error(0, 'Attempt to write integer register as FP')
1227 if (self.size != self.dflt_size and self.is_signed):
1228 final_val = 'sext<%d>(%s)' % (self.size, self.base_name)
1229 else:
1230 final_val = self.base_name
1231 wb = '''
1232 {
1233 %s final_val = %s;
1234 xc->setIntReg(this, %d, final_val);\n
1235 if (traceData) { traceData->setData(final_val); }
1236 }''' % (self.dflt_ctype, final_val, self.dest_reg_idx)
1237 return wb
1238
1239 class FloatRegOperand(Operand):
1240 def isReg(self):
1241 return 1
1242
1243 def isFloatReg(self):
1244 return 1
1245
1246 def makeConstructor(self):
1247 c = ''
1248 if self.is_src:
1249 c += '\n\t_srcRegIdx[%d] = %s + FP_Base_DepTag;' % \
1250 (self.src_reg_idx, self.reg_spec)
1251 if self.is_dest:
1252 c += '\n\t_destRegIdx[%d] = %s + FP_Base_DepTag;' % \
1253 (self.dest_reg_idx, self.reg_spec)
1254 return c
1255
1256 def makeRead(self):
1257 bit_select = 0
1258 if (self.ctype == 'float'):
1259 func = 'readFloatRegSingle'
1260 elif (self.ctype == 'double'):
1261 func = 'readFloatRegDouble'
1262 else:
1263 func = 'readFloatRegInt'
1264 if (self.size != self.dflt_size):
1265 bit_select = 1
1266 base = 'xc->%s(this, %d)' % \
1267 (func, self.src_reg_idx)
1268 if bit_select:
1269 return '%s = bits(%s, %d, 0);\n' % \
1270 (self.base_name, base, self.size-1)
1271 else:
1272 return '%s = %s;\n' % (self.base_name, base)
1273
1274 def makeWrite(self):
1275 final_val = self.base_name
1276 final_ctype = self.ctype
1277 if (self.ctype == 'float'):
1278 func = 'setFloatRegSingle'
1279 elif (self.ctype == 'double'):
1280 func = 'setFloatRegDouble'
1281 else:
1282 func = 'setFloatRegInt'
1283 final_ctype = 'uint%d_t' % self.dflt_size
1284 if (self.size != self.dflt_size and self.is_signed):
1285 final_val = 'sext<%d>(%s)' % (self.size, self.base_name)
1286 wb = '''
1287 {
1288 %s final_val = %s;
1289 xc->%s(this, %d, final_val);\n
1290 if (traceData) { traceData->setData(final_val); }
1291 }''' % (final_ctype, final_val, func, self.dest_reg_idx)
1292 return wb
1293
1294 class ControlRegOperand(Operand):
1295 def isReg(self):
1296 return 1
1297
1298 def isControlReg(self):
1299 return 1
1300
1301 def makeConstructor(self):
1302 c = ''
1303 if self.is_src:
1304 c += '\n\t_srcRegIdx[%d] = %s_DepTag;' % \
1305 (self.src_reg_idx, self.reg_spec)
1306 if self.is_dest:
1307 c += '\n\t_destRegIdx[%d] = %s_DepTag;' % \
1308 (self.dest_reg_idx, self.reg_spec)
1309 return c
1310
1311 def makeRead(self):
1312 bit_select = 0
1313 if (self.ctype == 'float' or self.ctype == 'double'):
1314 error(0, 'Attempt to read control register as FP')
1315 base = 'xc->read%s()' % self.reg_spec
1316 if self.size == self.dflt_size:
1317 return '%s = %s;\n' % (self.base_name, base)
1318 else:
1319 return '%s = bits(%s, %d, 0);\n' % \
1320 (self.base_name, base, self.size-1)
1321
1322 def makeWrite(self):
1323 if (self.ctype == 'float' or self.ctype == 'double'):
1324 error(0, 'Attempt to write control register as FP')
1325 wb = 'xc->set%s(%s);\n' % (self.reg_spec, self.base_name)
1326 wb += 'if (traceData) { traceData->setData(%s); }' % \
1327 self.base_name
1328 return wb
1329
1330 class MemOperand(Operand):
1331 def isMem(self):
1332 return 1
1333
1334 def makeConstructor(self):
1335 return ''
1336
1337 def makeDecl(self):
1338 # Note that initializations in the declarations are solely
1339 # to avoid 'uninitialized variable' errors from the compiler.
1340 # Declare memory data variable.
1341 c = '%s %s = 0;\n' % (self.ctype, self.base_name)
1342 return c
1343
1344 def makeRead(self):
1345 return ''
1346
1347 def makeWrite(self):
1348 return ''
1349
1350 # Return the memory access size *in bits*, suitable for
1351 # forming a type via "uint%d_t". Divide by 8 if you want bytes.
1352 def makeAccSize(self):
1353 return self.size
1354
1355
1356 class NPCOperand(Operand):
1357 def makeConstructor(self):
1358 return ''
1359
1360 def makeRead(self):
1361 return '%s = xc->readPC() + 4;\n' % self.base_name
1362
1363 def makeWrite(self):
1364 return 'xc->setNextPC(%s);\n' % self.base_name
1365
1366 class NNPCOperand(Operand):
1367 def makeConstructor(self):
1368 return ''
1369
1370 def makeRead(self):
1371 return '%s = xc->readPC() + 8;\n' % self.base_name
1372
1373 def makeWrite(self):
1374 return 'xc->setNextNPC(%s);\n' % self.base_name
1375
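# Each entry of the user's 'def operands' dict is expected to be a
# 5-tuple; for example (illustrative values):
#     'Ra' : ('IntReg', 'uq', 'RA', 'IsInteger', 1)
# i.e. base class name, default extension, register specifier, flags,
# and sort priority.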
1376 def buildOperandNameMap(userDict, lineno):
1377 global operandNameMap
1378 operandNameMap = {}
1379 for (op_name, val) in userDict.iteritems():
1380 (base_cls_name, dflt_ext, reg_spec, flags, sort_pri) = val
1381 (dflt_size, dflt_ctype, dflt_is_signed) = operandTypeMap[dflt_ext]
1382 # Canonical flag structure is a triple of lists, where each list
1383 # indicates the set of flags implied by this operand always, when
1384 # used as a source, and when used as a dest, respectively.
1385 # For simplicity this can be initialized using a variety of fairly
1386 # obvious shortcuts; we convert these to canonical form here.
1387 if not flags:
1388 # no flags specified (e.g., 'None')
1389 flags = ( [], [], [] )
1390 elif isinstance(flags, str):
1391 # a single flag: assumed to be unconditional
1392 flags = ( [ flags ], [], [] )
1393 elif isinstance(flags, list):
1394 # a list of flags: also assumed to be unconditional
1395 flags = ( flags, [], [] )
1396 elif isinstance(flags, tuple):
1397 # it's a tuple: it should be a triple,
1398 # but each item could be a single string or a list
1399 (uncond_flags, src_flags, dest_flags) = flags
1400 flags = (makeList(uncond_flags),
1401 makeList(src_flags), makeList(dest_flags))
1402 # Accumulate attributes of new operand class in tmp_dict
1403 tmp_dict = {}
1404 for attr in ('dflt_ext', 'reg_spec', 'flags', 'sort_pri',
1405 'dflt_size', 'dflt_ctype', 'dflt_is_signed'):
1406 tmp_dict[attr] = eval(attr)
1407 tmp_dict['base_name'] = op_name
1408 # New class name will be e.g. "IntReg_Ra"
1409 cls_name = base_cls_name + '_' + op_name
1410 # Evaluate string arg to get class object. Note that the
1411 # actual base class for "IntReg" is "IntRegOperand", i.e. we
1412 # have to append "Operand".
1413 try:
1414 base_cls = eval(base_cls_name + 'Operand')
1415 except NameError:
1416 error(lineno,
1417 'error: unknown operand base class "%s"' % base_cls_name)
1418 # The following statement creates a new class called
1419 # <cls_name> as a subclass of <base_cls> with the attributes
1420 # in tmp_dict, just as if we evaluated a class declaration.
1421 operandNameMap[op_name] = type(cls_name, (base_cls,), tmp_dict)
1422
1423 # Define operand variables.
1424 operands = userDict.keys()
1425
1426 operandsREString = (r'''
1427 (?<![\w\.]) # neg. lookbehind assertion: prevent partial matches
1428 ((%s)(?:\.(\w+))?) # match: operand with optional '.' then suffix
1429 (?![\w\.]) # neg. lookahead assertion: prevent partial matches
1430 '''
1431 % string.join(operands, '|'))
1432
1433 global operandsRE
1434 operandsRE = re.compile(operandsREString, re.MULTILINE|re.VERBOSE)
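# For example (illustrative), with operands 'Ra' and 'Mem' defined, matching
# 'Mem.uq' yields groups ('Mem.uq', 'Mem', 'uq') and matching 'Ra' yields
# ('Ra', 'Ra', None).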
1435
1436 # Same as operandsREString, but extension is mandatory, and only two
1437 # groups are returned (base and ext, not full name as above).
1438 # Used (by substMungedOpNames) to strip the '.' and extension, leaving legal C++ identifiers.
1439 operandsWithExtREString = (r'(?<![\w\.])(%s)\.(\w+)(?![\w\.])'
1440 % string.join(operands, '|'))
1441
1442 global operandsWithExtRE
1443 operandsWithExtRE = re.compile(operandsWithExtREString, re.MULTILINE)
1444
1445
1446 class OperandList:
1447
1448 # Find all the operands in the given code block. Returns an operand
1449 # descriptor list (instance of class OperandList).
1450 def __init__(self, code):
1451 self.items = []
1452 self.bases = {}
1453 # delete comments so we don't match on reg specifiers inside
1454 code = commentRE.sub('', code)
1455 # search for operands
1456 next_pos = 0
1457 while 1:
1458 match = operandsRE.search(code, next_pos)
1459 if not match:
1460 # no more matches: we're done
1461 break
1462 op = match.groups()
1463 # regexp groups are operand full name, base, and extension
1464 (op_full, op_base, op_ext) = op
1465 # if the token following the operand is an assignment, this is
1466 # a destination (LHS), else it's a source (RHS)
1467 is_dest = (assignRE.match(code, match.end()) != None)
1468 is_src = not is_dest
1469 # see if we've already seen this one
1470 op_desc = self.find_base(op_base)
1471 if op_desc:
1472 if op_desc.ext != op_ext:
1473 error(0, 'Inconsistent extensions for operand %s' % \
1474 op_base)
1475 op_desc.is_src = op_desc.is_src or is_src
1476 op_desc.is_dest = op_desc.is_dest or is_dest
1477 else:
1478 # new operand: create new descriptor
1479 op_desc = operandNameMap[op_base](op_full, op_ext,
1480 is_src, is_dest)
1481 self.append(op_desc)
1482 # start next search after end of current match
1483 next_pos = match.end()
1484 self.sort()
1485 # enumerate source & dest register operands... used in building
1486 # constructor later
1487 self.numSrcRegs = 0
1488 self.numDestRegs = 0
1489 self.numFPDestRegs = 0
1490 self.numIntDestRegs = 0
1491 self.memOperand = None
1492 for op_desc in self.items:
1493 if op_desc.isReg():
1494 if op_desc.is_src:
1495 op_desc.src_reg_idx = self.numSrcRegs
1496 self.numSrcRegs += 1
1497 if op_desc.is_dest:
1498 op_desc.dest_reg_idx = self.numDestRegs
1499 self.numDestRegs += 1
1500 if op_desc.isFloatReg():
1501 self.numFPDestRegs += 1
1502 elif op_desc.isIntReg():
1503 self.numIntDestRegs += 1
1504 elif op_desc.isMem():
1505 if self.memOperand:
1506 error(0, "Code block has more than one memory operand.")
1507 self.memOperand = op_desc
1508 # now make a final pass to finalize op_desc fields that may depend
1509 # on the register enumeration
1510 for op_desc in self.items:
1511 op_desc.finalize()
1512
1513 def __len__(self):
1514 return len(self.items)
1515
1516 def __getitem__(self, index):
1517 return self.items[index]
1518
1519 def append(self, op_desc):
1520 self.items.append(op_desc)
1521 self.bases[op_desc.base_name] = op_desc
1522
1523 def find_base(self, base_name):
1524 # like self.bases[base_name], but returns None if not found
1525 # (rather than raising exception)
1526 return self.bases.get(base_name)
1527
1528 # internal helper function for concat[Some]Attr{Strings|Lists}
1529 def __internalConcatAttrs(self, attr_name, filter, result):
1530 for op_desc in self.items:
1531 if filter(op_desc):
1532 result += getattr(op_desc, attr_name)
1533 return result
1534
1535 # return a single string that is the concatenation of the (string)
1536 # values of the specified attribute for all operands
1537 def concatAttrStrings(self, attr_name):
1538 return self.__internalConcatAttrs(attr_name, lambda x: 1, '')
1539
1540 # like concatAttrStrings, but only include the values for the operands
1541 # for which the provided filter function returns true
1542 def concatSomeAttrStrings(self, filter, attr_name):
1543 return self.__internalConcatAttrs(attr_name, filter, '')
1544
1545 # return a single list that is the concatenation of the (list)
1546 # values of the specified attribute for all operands
1547 def concatAttrLists(self, attr_name):
1548 return self.__internalConcatAttrs(attr_name, lambda x: 1, [])
1549
1550 # like concatAttrLists, but only include the values for the operands
1551 # for which the provided filter function returns true
1552 def concatSomeAttrLists(self, filter, attr_name):
1553 return self.__internalConcatAttrs(attr_name, filter, [])
1554
1555 def sort(self):
1556 self.items.sort(lambda a, b: a.sort_pri - b.sort_pri)
1557
1558 # Regular expression object to match C++ comments
1559 # (used in findOperands())
1560 commentRE = re.compile(r'//.*\n')
1561
1562 # Regular expression object to match assignment statements
1563 # (used in findOperands())
1564 assignRE = re.compile(r'\s*=(?!=)', re.MULTILINE)
1565
1566 # Munge operand names in code string to make legal C++ variable names.
1567 # This means getting rid of the type extension if any.
1568 # (Will match base_name attribute of Operand object.)
1569 def substMungedOpNames(code):
1570 return operandsWithExtRE.sub(r'\1', code)
1571
1572 def joinLists(t):
1573 return map(string.join, t)
1574
1575 def makeFlagConstructor(flag_list):
1576 if len(flag_list) == 0:
1577 return ''
1578 # filter out repeated flags
1579 flag_list.sort()
1580 i = 1
1581 while i < len(flag_list):
1582 if flag_list[i] == flag_list[i-1]:
1583 del flag_list[i]
1584 else:
1585 i += 1
1586 pre = '\n\tflags['
1587 post = '] = true;'
1588 code = pre + string.join(flag_list, post + pre) + post
1589 return code
1590
1591 class CodeBlock:
1592 def __init__(self, code):
1593 self.orig_code = code
1594 self.operands = OperandList(code)
1595 self.code = substMungedOpNames(substBitOps(code))
1596 self.constructor = self.operands.concatAttrStrings('constructor')
1597 self.constructor += \
1598 '\n\t_numSrcRegs = %d;' % self.operands.numSrcRegs
1599 self.constructor += \
1600 '\n\t_numDestRegs = %d;' % self.operands.numDestRegs
1601 self.constructor += \
1602 '\n\t_numFPDestRegs = %d;' % self.operands.numFPDestRegs
1603 self.constructor += \
1604 '\n\t_numIntDestRegs = %d;' % self.operands.numIntDestRegs
1605
1606 self.op_decl = self.operands.concatAttrStrings('op_decl')
1607
1608 is_src = lambda op: op.is_src
1609 is_dest = lambda op: op.is_dest
1610
1611 self.op_src_decl = \
1612 self.operands.concatSomeAttrStrings(is_src, 'op_src_decl')
1613 self.op_dest_decl = \
1614 self.operands.concatSomeAttrStrings(is_dest, 'op_dest_decl')
1615
1616 self.op_rd = self.operands.concatAttrStrings('op_rd')
1617 self.op_wb = self.operands.concatAttrStrings('op_wb')
1618
1619 self.flags = self.operands.concatAttrLists('flags')
1620
1621 if self.operands.memOperand:
1622 self.mem_acc_size = self.operands.memOperand.mem_acc_size
1623 self.mem_acc_type = self.operands.memOperand.mem_acc_type
1624
1625 # Make a basic guess on the operand class (function unit type).
1626 # These are good enough for most cases, and will be overridden
1627 # later otherwise.
1628 if 'IsStore' in self.flags:
1629 self.op_class = 'MemWriteOp'
1630 elif 'IsLoad' in self.flags or 'IsPrefetch' in self.flags:
1631 self.op_class = 'MemReadOp'
1632 elif 'IsFloating' in self.flags:
1633 self.op_class = 'FloatAddOp'
1634 else:
1635 self.op_class = 'IntAluOp'
1636
1637 # Assume all instruction flags are of the form 'IsFoo'
1638 instFlagRE = re.compile(r'Is.*')
1639
1640 # OpClass constants end in 'Op' except No_OpClass
1641 opClassRE = re.compile(r'.*Op|No_OpClass')
1642
1643 class InstObjParams:
1644 def __init__(self, mnem, class_name, base_class = '',
1645 code_block = None, opt_args = []):
1646 self.mnemonic = mnem
1647 self.class_name = class_name
1648 self.base_class = base_class
1649 if code_block:
1650 for code_attr in code_block.__dict__.keys():
1651 setattr(self, code_attr, getattr(code_block, code_attr))
1652 else:
1653 self.constructor = ''
1654 self.flags = []
1655 # Optional arguments are assumed to be either StaticInst flags
1656 # or an OpClass value. To avoid having to import a complete
1657 # list of these values to match against, we do it ad-hoc
1658 # with regexps.
1659 for oa in opt_args:
1660 if instFlagRE.match(oa):
1661 self.flags.append(oa)
1662 elif opClassRE.match(oa):
1663 self.op_class = oa
1664 else:
1665 error(0, 'InstObjParams: optional arg "%s" not recognized '
1666 'as StaticInst::Flag or OpClass.' % oa)
1667
1668 # add flag initialization to constructor here to include
1669 # any flags added via opt_args
1670 self.constructor += makeFlagConstructor(self.flags)
1671
1672 # if 'IsFloating' is set, add call to the FP enable check
1673 # function (which should be provided by isa_desc via a declare)
1674 if 'IsFloating' in self.flags:
1675 self.fp_enable_check = 'fault = checkFpEnableFault(xc);'
1676 else:
1677 self.fp_enable_check = ''
1678
1679 #######################
1680 #
1681 # Output file template
1682 #
1683
1684 file_template = '''
1685 /*
1686 * DO NOT EDIT THIS FILE!!!
1687 *
1688 * It was automatically generated from the ISA description in %(filename)s
1689 */
1690
1691 %(includes)s
1692
1693 %(global_output)s
1694
1695 namespace %(namespace)s {
1696
1697 %(namespace_output)s
1698
1699 } // namespace %(namespace)s
1700
1701 %(decode_function)s
1702 '''
1703
1704
1705 # Update the output file only if the new contents are different from
1706 # the current contents. Minimizes the files that need to be rebuilt
1707 # after minor changes.
1708 def update_if_needed(file, contents):
1709 update = False
1710 if os.access(file, os.R_OK):
1711 f = open(file, 'r')
1712 old_contents = f.read()
1713 f.close()
1714 if contents != old_contents:
1715 print 'Updating', file
1716 os.remove(file) # in case it's write-protected
1717 update = True
1718 else:
1719 print 'File', file, 'is unchanged'
1720 else:
1721 print 'Generating', file
1722 update = True
1723 if update:
1724 f = open(file, 'w')
1725 f.write(contents)
1726 f.close()
1727
1728 # This regular expression matches include directives
1729 includeRE = re.compile(r'^\s*##include\s+"(?P<filename>[\w/.-]*)".*$',
1730 re.MULTILINE)
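# For example (the file name is illustrative), a line such as
#     ##include "decoder.isa"
# is replaced below with the contents of decoder.isa, bracketed by
# ##newfile/##endfile markers so error() can report the correct file.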
1731
1732 def preprocess_isa_desc(isa_desc):
1733 # Find any includes and include them
1734 pos = 0
1735 while 1:
1736 m = includeRE.search(isa_desc, pos)
1737 if not m:
1738 break
1739 filename = m.group('filename')
1740 print 'Including file "%s"' % filename
1741 try:
1742 isa_desc = isa_desc[:m.start()] + \
1743 '##newfile "' + filename + '"\n' + \
1744 open(filename).read() + \
1745 '##endfile\n' + \
1746 isa_desc[m.end():]
1747 except IOError:
1748 error(0, 'Error including file "%s"' % (filename))
1749 pos = m.start()
1750 return isa_desc
1751
1752 #
1753 # Read in and parse the ISA description.
1754 #
1755 def parse_isa_desc(isa_desc_file, output_dir, include_path):
1756 # set a global var for the input filename... used in error messages
1757 global input_filename
1758 input_filename = isa_desc_file
1759 global fileNameStack
1760 fileNameStack = [(input_filename, 1)]
1761
1762 # Suck the ISA description file in.
1763 input = open(isa_desc_file)
1764 isa_desc = input.read()
1765 input.close()
1766
1767 # Perform Preprocessing
1768 isa_desc = preprocess_isa_desc(isa_desc)
1769
1770 # Parse it.
1771 (isa_name, namespace, global_code, namespace_code) = yacc.parse(isa_desc)
1772
1773 # grab the last three path components of isa_desc_file to put in
1774 # the output
1775 filename = '/'.join(isa_desc_file.split('/')[-3:])
1776
1777 # generate decoder.hh
1778 includes = '#include "base/bitfield.hh" // for bitfield support'
1779 global_output = global_code.header_output
1780 namespace_output = namespace_code.header_output
1781 decode_function = ''
1782 update_if_needed(output_dir + '/decoder.hh', file_template % vars())
1783
1784 # generate decoder.cc
1785 includes = '#include "%s/decoder.hh"' % include_path
1786 global_output = global_code.decoder_output
1787 namespace_output = namespace_code.decoder_output
1788 # namespace_output += namespace_code.decode_block
1789 decode_function = namespace_code.decode_block
1790 update_if_needed(output_dir + '/decoder.cc', file_template % vars())
1791
1792 # generate per-cpu exec files
1793 for cpu in CpuModel.list:
1794 includes = '#include "%s/decoder.hh"\n' % include_path
1795 includes += cpu.includes
1796 global_output = global_code.exec_output[cpu.name]
1797 namespace_output = namespace_code.exec_output[cpu.name]
1798 decode_function = ''
1799 update_if_needed(output_dir + '/' + cpu.filename,
1800 file_template % vars())
1801
1802 # Called as script: get args from command line.
1803 if __name__ == '__main__':
1804 parse_isa_desc(sys.argv[1], sys.argv[2], sys.argv[3])