[gem5.git] / src / arch / isa_parser.py
1 # Copyright (c) 2014, 2016 ARM Limited
2 # All rights reserved
3 #
4 # The license below extends only to copyright in the software and shall
5 # not be construed as granting a license to any other intellectual
6 # property including but not limited to intellectual property relating
7 # to a hardware implementation of the functionality of the software
8 # licensed hereunder. You may use the software subject to the license
9 # terms below provided that you ensure that this notice is replicated
10 # unmodified and in its entirety in all distributions of the software,
11 # modified or unmodified, in source code or in binary form.
12 #
13 # Copyright (c) 2003-2005 The Regents of The University of Michigan
14 # Copyright (c) 2013,2015 Advanced Micro Devices, Inc.
15 # All rights reserved.
16 #
17 # Redistribution and use in source and binary forms, with or without
18 # modification, are permitted provided that the following conditions are
19 # met: redistributions of source code must retain the above copyright
20 # notice, this list of conditions and the following disclaimer;
21 # redistributions in binary form must reproduce the above copyright
22 # notice, this list of conditions and the following disclaimer in the
23 # documentation and/or other materials provided with the distribution;
24 # neither the name of the copyright holders nor the names of its
25 # contributors may be used to endorse or promote products derived from
26 # this software without specific prior written permission.
27 #
28 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29 # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30 # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31 # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32 # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33 # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34 # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 #
40 # Authors: Steve Reinhardt
41
42 from __future__ import with_statement, print_function
43 import os
44 import sys
45 import re
46 import string
47 import inspect, traceback
48 # get type names
49 from types import *
50
51 from m5.util.grammar import Grammar
52
53 debug=False
54
55 ###################
56 # Utility functions
57
58 #
59 # Indent every line in string 's' by two spaces
60 # (except preprocessor directives).
61 # Used to make nested code blocks look pretty.
62 #
63 def indent(s):
64 return re.sub(r'(?m)^(?!#)', ' ', s)
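# Illustrative example: every line start that is not a preprocessor
# directive receives the indent prefix, e.g.
#   indent('x = 1;\n#if FOO\ny = 2;')
# leaves '#if FOO' at column 0 and indents the other two lines.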
65
66 #
67 # Munge a somewhat arbitrarily formatted piece of Python code
68 # (e.g. from a format 'let' block) into something whose indentation
69 # will get by the Python parser.
70 #
71 # The two keys here are that Python will give a syntax error if
72 # there's any whitespace at the beginning of the first line, and that
73 # all lines at the same lexical nesting level must have identical
74 # indentation. Unfortunately the way code literals work, an entire
75 # let block tends to have some initial indentation. Rather than
76 # trying to figure out what that is and strip it off, we prepend 'if
77 # 1:' to make the let code the nested block inside the if (and have
78 # the parser automatically deal with the indentation for us).
79 #
80 # We don't want to do this if (1) the code block is empty or (2) the
81 # first line of the block doesn't have any whitespace at the front.
82
83 def fixPythonIndentation(s):
84 # get rid of blank lines first
85 s = re.sub(r'(?m)^\s*\n', '', s);
86 if (s != '' and re.match(r'[ \t]', s[0])):
87 s = 'if 1:\n' + s
88 return s
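# Illustrative example: a 'let' block whose code arrives uniformly indented
# is wrapped so the Python parser accepts it, e.g.
#   fixPythonIndentation('    x = 1\n    y = 2\n')
#   == 'if 1:\n    x = 1\n    y = 2\n'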
89
90 class ISAParserError(Exception):
91 """Exception class for parser errors"""
92 def __init__(self, first, second=None):
93 if second is None:
94 self.lineno = 0
95 self.string = first
96 else:
97 self.lineno = first
98 self.string = second
99
100 def __str__(self):
101 return self.string
102
103 def error(*args):
104 raise ISAParserError(*args)
105
106 ####################
107 # Template objects.
108 #
109 # Template objects are format strings that allow substitution from
110 # the attribute spaces of other objects (e.g. InstObjParams instances).
111
112 labelRE = re.compile(r'(?<!%)%\(([^\)]+)\)[sd]')
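# Illustrative example: labelRE picks out the substitution labels in a
# template while ignoring escaped '%%', e.g.
#   labelRE.findall('%(op_decl)s %(code)s %%(not_a_label)s')
#   == ['op_decl', 'code']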
113
114 class Template(object):
115 def __init__(self, parser, t):
116 self.parser = parser
117 self.template = t
118
119 def subst(self, d):
120 myDict = None
121
122 # Protect non-Python-dict substitutions (e.g. if there's a printf
123 # in the templated C++ code)
124 template = self.parser.protectNonSubstPercents(self.template)
125
126 # Build a dict ('myDict') to use for the template substitution.
127 # Start with the template namespace. Make a copy since we're
128 # going to modify it.
129 myDict = self.parser.templateMap.copy()
130
131 if isinstance(d, InstObjParams):
132 # If we're dealing with an InstObjParams object, we need
133 # to be a little more sophisticated. The instruction-wide
134 # parameters are already formed, but the parameters which
135 # are only function wide still need to be generated.
136 compositeCode = ''
137
138 myDict.update(d.__dict__)
139 # The "operands" and "snippets" attributes of the InstObjParams
140 # objects are for internal use and not substitution.
141 del myDict['operands']
142 del myDict['snippets']
143
144 snippetLabels = [l for l in labelRE.findall(template)
145 if d.snippets.has_key(l)]
146
147 snippets = dict([(s, self.parser.mungeSnippet(d.snippets[s]))
148 for s in snippetLabels])
149
150 myDict.update(snippets)
151
152 compositeCode = ' '.join(map(str, snippets.values()))
153
154 # Add in template itself in case it references any
155 # operands explicitly (like Mem)
156 compositeCode += ' ' + template
157
158 operands = SubOperandList(self.parser, compositeCode, d.operands)
159
160 myDict['op_decl'] = operands.concatAttrStrings('op_decl')
161 if operands.readPC or operands.setPC:
162 myDict['op_decl'] += 'TheISA::PCState __parserAutoPCState;\n'
163
164                 # In case there are predicated register reads and writes, declare
165                 # the variables for register indices. It is assumed that
166 # all the operands in the OperandList are also in the
167 # SubOperandList and in the same order. Otherwise, it is
168 # expected that predication would not be used for the operands.
169 if operands.predRead:
170 myDict['op_decl'] += 'uint8_t _sourceIndex = 0;\n'
171 if operands.predWrite:
172 myDict['op_decl'] += 'uint8_t M5_VAR_USED _destIndex = 0;\n'
173
174 is_src = lambda op: op.is_src
175 is_dest = lambda op: op.is_dest
176
177 myDict['op_src_decl'] = \
178 operands.concatSomeAttrStrings(is_src, 'op_src_decl')
179 myDict['op_dest_decl'] = \
180 operands.concatSomeAttrStrings(is_dest, 'op_dest_decl')
181 if operands.readPC:
182 myDict['op_src_decl'] += \
183 'TheISA::PCState __parserAutoPCState;\n'
184 if operands.setPC:
185 myDict['op_dest_decl'] += \
186 'TheISA::PCState __parserAutoPCState;\n'
187
188 myDict['op_rd'] = operands.concatAttrStrings('op_rd')
189 if operands.readPC:
190 myDict['op_rd'] = '__parserAutoPCState = xc->pcState();\n' + \
191 myDict['op_rd']
192
193 # Compose the op_wb string. If we're going to write back the
194 # PC state because we changed some of its elements, we'll need to
195 # do that as early as possible. That allows later uncoordinated
196 # modifications to the PC to layer appropriately.
197 reordered = list(operands.items)
198 reordered.reverse()
199 op_wb_str = ''
200 pcWbStr = 'xc->pcState(__parserAutoPCState);\n'
201 for op_desc in reordered:
202 if op_desc.isPCPart() and op_desc.is_dest:
203 op_wb_str = op_desc.op_wb + pcWbStr + op_wb_str
204 pcWbStr = ''
205 else:
206 op_wb_str = op_desc.op_wb + op_wb_str
207 myDict['op_wb'] = op_wb_str
208
209 elif isinstance(d, dict):
210 # if the argument is a dictionary, we just use it.
211 myDict.update(d)
212 elif hasattr(d, '__dict__'):
213 # if the argument is an object, we use its attribute map.
214 myDict.update(d.__dict__)
215 else:
216             raise TypeError, "Template.subst() arg must be or have a dictionary"
217 return template % myDict
218
219 # Convert to string.
220 def __str__(self):
221 return self.template
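# Illustrative example (assumes 'parser' is an ISAParser whose templateMap is
# empty and whose protectNonSubstPercents() leaves this template unchanged,
# since every '%' below introduces a substitution):
#   Template(parser, 'int %(name)s = %(val)s;').subst(
#       {'name': 'x', 'val': '3'}) == 'int x = 3;'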
222
223 ################
224 # Format object.
225 #
226 # A format object encapsulates an instruction format. It must provide
227 # a defineInst() method that generates the code for an instruction
228 # definition.
229
230 class Format(object):
231 def __init__(self, id, params, code):
232 self.id = id
233 self.params = params
234 label = 'def format ' + id
235 self.user_code = compile(fixPythonIndentation(code), label, 'exec')
236 param_list = string.join(params, ", ")
237 f = '''def defInst(_code, _context, %s):
238 my_locals = vars().copy()
239 exec _code in _context, my_locals
240 return my_locals\n''' % param_list
241 c = compile(f, label + ' wrapper', 'exec')
242 exec c
243 self.func = defInst
244
245 def defineInst(self, parser, name, args, lineno):
246 parser.updateExportContext()
247 context = parser.exportContext.copy()
248 if len(name):
249 Name = name[0].upper()
250 if len(name) > 1:
251 Name += name[1:]
252 context.update({ 'name' : name, 'Name' : Name })
253 try:
254 vars = self.func(self.user_code, context, *args[0], **args[1])
255 except Exception, exc:
256 if debug:
257 raise
258 error(lineno, 'error defining "%s": %s.' % (name, exc))
259 for k in vars.keys():
260 if k not in ('header_output', 'decoder_output',
261 'exec_output', 'decode_block'):
262 del vars[k]
263 return GenCode(parser, **vars)
264
265 # Special null format to catch an implicit-format instruction
266 # definition outside of any format block.
267 class NoFormat(object):
268 def __init__(self):
269 self.defaultInst = ''
270
271 def defineInst(self, parser, name, args, lineno):
272 error(lineno,
273 'instruction definition "%s" with no active format!' % name)
274
275 ###############
276 # GenCode class
277 #
278 # The GenCode class encapsulates generated code destined for various
279 # output files. The header_output and decoder_output attributes are
280 # strings containing code destined for decoder.hh and decoder.cc
281 # respectively. The decode_block attribute contains code to be
282 # incorporated in the decode function itself (that will also end up in
283 # decoder.cc). The exec_output attribute is the string of code for the
284 # exec.cc file. The has_decode_default attribute is used in the decode block
285 # to allow explicit default clauses to override default default clauses.
286
287 class GenCode(object):
288 # Constructor.
289 def __init__(self, parser,
290 header_output = '', decoder_output = '', exec_output = '',
291 decode_block = '', has_decode_default = False):
292 self.parser = parser
293 self.header_output = header_output
294 self.decoder_output = decoder_output
295 self.exec_output = exec_output
296 self.decode_block = decode_block
297 self.has_decode_default = has_decode_default
298
299 # Write these code chunks out to the filesystem. They will be properly
300     # interwoven by write_top_level_files().
301 def emit(self):
302 if self.header_output:
303 self.parser.get_file('header').write(self.header_output)
304 if self.decoder_output:
305 self.parser.get_file('decoder').write(self.decoder_output)
306 if self.exec_output:
307 self.parser.get_file('exec').write(self.exec_output)
308 if self.decode_block:
309 self.parser.get_file('decode_block').write(self.decode_block)
310
311 # Override '+' operator: generate a new GenCode object that
312 # concatenates all the individual strings in the operands.
313 def __add__(self, other):
314 return GenCode(self.parser,
315 self.header_output + other.header_output,
316 self.decoder_output + other.decoder_output,
317 self.exec_output + other.exec_output,
318 self.decode_block + other.decode_block,
319 self.has_decode_default or other.has_decode_default)
320
321 # Prepend a string (typically a comment) to all the strings.
322 def prepend_all(self, pre):
323 self.header_output = pre + self.header_output
324 self.decoder_output = pre + self.decoder_output
325 self.decode_block = pre + self.decode_block
326 self.exec_output = pre + self.exec_output
327
328 # Wrap the decode block in a pair of strings (e.g., 'case foo:'
329 # and 'break;'). Used to build the big nested switch statement.
330 def wrap_decode_block(self, pre, post = ''):
331 self.decode_block = pre + indent(self.decode_block) + post
332
333 #####################################################################
334 #
335 # Bitfield Operator Support
336 #
337 #####################################################################
338
339 bitOp1ArgRE = re.compile(r'<\s*(\w+)\s*:\s*>')
340
341 bitOpWordRE = re.compile(r'(?<![\w\.])([\w\.]+)<\s*(\w+)\s*:\s*(\w+)\s*>')
342 bitOpExprRE = re.compile(r'\)<\s*(\w+)\s*:\s*(\w+)\s*>')
343
344 def substBitOps(code):
345 # first convert single-bit selectors to two-index form
346 # i.e., <n> --> <n:n>
347 code = bitOp1ArgRE.sub(r'<\1:\1>', code)
348 # simple case: selector applied to ID (name)
349 # i.e., foo<a:b> --> bits(foo, a, b)
350 code = bitOpWordRE.sub(r'bits(\1, \2, \3)', code)
351 # if selector is applied to expression (ending in ')'),
352 # we need to search backward for matching '('
353 match = bitOpExprRE.search(code)
354 while match:
355 exprEnd = match.start()
356 here = exprEnd - 1
357 nestLevel = 1
358 while nestLevel > 0:
359 if code[here] == '(':
360 nestLevel -= 1
361 elif code[here] == ')':
362 nestLevel += 1
363 here -= 1
364 if here < 0:
365 sys.exit("Didn't find '('!")
366 exprStart = here+1
367 newExpr = r'bits(%s, %s, %s)' % (code[exprStart:exprEnd+1],
368 match.group(1), match.group(2))
369 code = code[:exprStart] + newExpr + code[match.end():]
370 match = bitOpExprRE.search(code)
371 return code
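# Illustrative example: selectors on plain identifiers and on parenthesized
# expressions are both rewritten into bits() calls, e.g.
#   substBitOps('Rc = Ra<7:4> + (Ra + Rb)<3:1>;')
#   == 'Rc = bits(Ra, 7, 4) + bits((Ra + Rb), 3, 1);'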
372
373
374 #####################################################################
375 #
376 # Code Parser
377 #
378 # The remaining code is the support for automatically extracting
379 # instruction characteristics from pseudocode.
380 #
381 #####################################################################
382
383 # Force the argument to be a list. Useful for flags, where a caller
384 # can specify a singleton flag or a list of flags. Also useful for
385 # converting tuples to lists so they can be modified.
386 def makeList(arg):
387 if isinstance(arg, list):
388 return arg
389 elif isinstance(arg, tuple):
390 return list(arg)
391 elif not arg:
392 return []
393 else:
394 return [ arg ]
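# Illustrative example:
#   makeList('IsLoad')              == ['IsLoad']
#   makeList(('IsLoad', 'IsStore')) == ['IsLoad', 'IsStore']
#   makeList(None)                  == []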
395
396 class Operand(object):
397 '''Base class for operand descriptors. An instance of this class
398 (or actually a class derived from this one) represents a specific
399 operand for a code block (e.g, "Rc.sq" as a dest). Intermediate
400 derived classes encapsulates the traits of a particular operand
401 type (e.g., "32-bit integer register").'''
402
403 def buildReadCode(self, func = None):
404 subst_dict = {"name": self.base_name,
405 "func": func,
406 "reg_idx": self.reg_spec,
407 "ctype": self.ctype}
408 if hasattr(self, 'src_reg_idx'):
409 subst_dict['op_idx'] = self.src_reg_idx
410 code = self.read_code % subst_dict
411 return '%s = %s;\n' % (self.base_name, code)
412
413 def buildWriteCode(self, func = None):
414 subst_dict = {"name": self.base_name,
415 "func": func,
416 "reg_idx": self.reg_spec,
417 "ctype": self.ctype,
418 "final_val": self.base_name}
419 if hasattr(self, 'dest_reg_idx'):
420 subst_dict['op_idx'] = self.dest_reg_idx
421 code = self.write_code % subst_dict
422 return '''
423 {
424 %s final_val = %s;
425 %s;
426 if (traceData) { traceData->setData(final_val); }
427 }''' % (self.dflt_ctype, self.base_name, code)
428
429 def __init__(self, parser, full_name, ext, is_src, is_dest):
430 self.full_name = full_name
431 self.ext = ext
432 self.is_src = is_src
433 self.is_dest = is_dest
434 # The 'effective extension' (eff_ext) is either the actual
435 # extension, if one was explicitly provided, or the default.
436 if ext:
437 self.eff_ext = ext
438 elif hasattr(self, 'dflt_ext'):
439 self.eff_ext = self.dflt_ext
440
441 if hasattr(self, 'eff_ext'):
442 self.ctype = parser.operandTypeMap[self.eff_ext]
443
444 # Finalize additional fields (primarily code fields). This step
445 # is done separately since some of these fields may depend on the
446 # register index enumeration that hasn't been performed yet at the
447 # time of __init__(). The register index enumeration is affected
448 # by predicated register reads/writes. Hence, we forward the flags
449 # that indicate whether or not predication is in use.
450 def finalize(self, predRead, predWrite):
451 self.flags = self.getFlags()
452 self.constructor = self.makeConstructor(predRead, predWrite)
453 self.op_decl = self.makeDecl()
454
455 if self.is_src:
456 self.op_rd = self.makeRead(predRead)
457 self.op_src_decl = self.makeDecl()
458 else:
459 self.op_rd = ''
460 self.op_src_decl = ''
461
462 if self.is_dest:
463 self.op_wb = self.makeWrite(predWrite)
464 self.op_dest_decl = self.makeDecl()
465 else:
466 self.op_wb = ''
467 self.op_dest_decl = ''
468
469 def isMem(self):
470 return 0
471
472 def isReg(self):
473 return 0
474
475 def isFloatReg(self):
476 return 0
477
478 def isIntReg(self):
479 return 0
480
481 def isCCReg(self):
482 return 0
483
484 def isControlReg(self):
485 return 0
486
487 def isVecReg(self):
488 return 0
489
490 def isVecElem(self):
491 return 0
492
493 def isPCState(self):
494 return 0
495
496 def isPCPart(self):
497 return self.isPCState() and self.reg_spec
498
499 def hasReadPred(self):
500 return self.read_predicate != None
501
502 def hasWritePred(self):
503 return self.write_predicate != None
504
505 def getFlags(self):
506 # note the empty slice '[:]' gives us a copy of self.flags[0]
507 # instead of a reference to it
508 my_flags = self.flags[0][:]
509 if self.is_src:
510 my_flags += self.flags[1]
511 if self.is_dest:
512 my_flags += self.flags[2]
513 return my_flags
514
515 def makeDecl(self):
516 # Note that initializations in the declarations are solely
517 # to avoid 'uninitialized variable' errors from the compiler.
518 return self.ctype + ' ' + self.base_name + ' = 0;\n';
519
520
521 src_reg_constructor = '\n\t_srcRegIdx[_numSrcRegs++] = RegId(%s, %s);'
522 dst_reg_constructor = '\n\t_destRegIdx[_numDestRegs++] = RegId(%s, %s);'
523
524
525 class IntRegOperand(Operand):
526 reg_class = 'IntRegClass'
527
528 def isReg(self):
529 return 1
530
531 def isIntReg(self):
532 return 1
533
534 def makeConstructor(self, predRead, predWrite):
535 c_src = ''
536 c_dest = ''
537
538 if self.is_src:
539 c_src = src_reg_constructor % (self.reg_class, self.reg_spec)
540 if self.hasReadPred():
541 c_src = '\n\tif (%s) {%s\n\t}' % \
542 (self.read_predicate, c_src)
543
544 if self.is_dest:
545 c_dest = dst_reg_constructor % (self.reg_class, self.reg_spec)
546 c_dest += '\n\t_numIntDestRegs++;'
547 if self.hasWritePred():
548 c_dest = '\n\tif (%s) {%s\n\t}' % \
549 (self.write_predicate, c_dest)
550
551 return c_src + c_dest
552
553 def makeRead(self, predRead):
554 if (self.ctype == 'float' or self.ctype == 'double'):
555 error('Attempt to read integer register as FP')
556 if self.read_code != None:
557 return self.buildReadCode('readIntRegOperand')
558
559 int_reg_val = ''
560 if predRead:
561 int_reg_val = 'xc->readIntRegOperand(this, _sourceIndex++)'
562 if self.hasReadPred():
563 int_reg_val = '(%s) ? %s : 0' % \
564 (self.read_predicate, int_reg_val)
565 else:
566 int_reg_val = 'xc->readIntRegOperand(this, %d)' % self.src_reg_idx
567
568 return '%s = %s;\n' % (self.base_name, int_reg_val)
569
570 def makeWrite(self, predWrite):
571 if (self.ctype == 'float' or self.ctype == 'double'):
572 error('Attempt to write integer register as FP')
573 if self.write_code != None:
574 return self.buildWriteCode('setIntRegOperand')
575
576 if predWrite:
577 wp = 'true'
578 if self.hasWritePred():
579 wp = self.write_predicate
580
581 wcond = 'if (%s)' % (wp)
582 windex = '_destIndex++'
583 else:
584 wcond = ''
585 windex = '%d' % self.dest_reg_idx
586
587 wb = '''
588 %s
589 {
590 %s final_val = %s;
591 xc->setIntRegOperand(this, %s, final_val);\n
592 if (traceData) { traceData->setData(final_val); }
593 }''' % (wcond, self.ctype, self.base_name, windex)
594
595 return wb
596
597 class FloatRegOperand(Operand):
598 reg_class = 'FloatRegClass'
599
600 def isReg(self):
601 return 1
602
603 def isFloatReg(self):
604 return 1
605
606 def makeConstructor(self, predRead, predWrite):
607 c_src = ''
608 c_dest = ''
609
610 if self.is_src:
611 c_src = src_reg_constructor % (self.reg_class, self.reg_spec)
612
613 if self.is_dest:
614 c_dest = dst_reg_constructor % (self.reg_class, self.reg_spec)
615 c_dest += '\n\t_numFPDestRegs++;'
616
617 return c_src + c_dest
618
619 def makeRead(self, predRead):
620 bit_select = 0
621 if (self.ctype == 'float' or self.ctype == 'double'):
622 func = 'readFloatRegOperand'
623 else:
624 func = 'readFloatRegOperandBits'
625 if self.read_code != None:
626 return self.buildReadCode(func)
627
628 if predRead:
629 rindex = '_sourceIndex++'
630 else:
631 rindex = '%d' % self.src_reg_idx
632
633 return '%s = xc->%s(this, %s);\n' % \
634 (self.base_name, func, rindex)
635
636 def makeWrite(self, predWrite):
637 if (self.ctype == 'float' or self.ctype == 'double'):
638 func = 'setFloatRegOperand'
639 else:
640 func = 'setFloatRegOperandBits'
641 if self.write_code != None:
642 return self.buildWriteCode(func)
643
644 if predWrite:
645 wp = '_destIndex++'
646 else:
647 wp = '%d' % self.dest_reg_idx
648 wp = 'xc->%s(this, %s, final_val);' % (func, wp)
649
650 wb = '''
651 {
652 %s final_val = %s;
653 %s\n
654 if (traceData) { traceData->setData(final_val); }
655 }''' % (self.ctype, self.base_name, wp)
656 return wb
657
658 class VecRegOperand(Operand):
659 reg_class = 'VecRegClass'
660
661 def __init__(self, parser, full_name, ext, is_src, is_dest):
662 Operand.__init__(self, parser, full_name, ext, is_src, is_dest)
663 self.elemExt = None
664 self.parser = parser
665
666 def isReg(self):
667 return 1
668
669 def isVecReg(self):
670 return 1
671
672 def makeDeclElem(self, elem_op):
673 (elem_name, elem_ext) = elem_op
674 (elem_spec, dflt_elem_ext, zeroing) = self.elems[elem_name]
675 if elem_ext:
676 ext = elem_ext
677 else:
678 ext = dflt_elem_ext
679 ctype = self.parser.operandTypeMap[ext]
680 return '\n\t%s %s = 0;' % (ctype, elem_name)
681
682 def makeDecl(self):
683 if not self.is_dest and self.is_src:
684 c_decl = '\t/* Vars for %s*/' % (self.base_name)
685 if hasattr(self, 'active_elems'):
686 if self.active_elems:
687 for elem in self.active_elems:
688 c_decl += self.makeDeclElem(elem)
689 return c_decl + '\t/* End vars for %s */\n' % (self.base_name)
690 else:
691 return ''
692
693 def makeConstructor(self, predRead, predWrite):
694 c_src = ''
695 c_dest = ''
696
697 numAccessNeeded = 1
698
699 if self.is_src:
700 c_src = src_reg_constructor % (self.reg_class, self.reg_spec)
701
702 if self.is_dest:
703 c_dest = dst_reg_constructor % (self.reg_class, self.reg_spec)
704 c_dest += '\n\t_numVecDestRegs++;'
705
706 return c_src + c_dest
707
708 # Read destination register to write
709 def makeReadWElem(self, elem_op):
710 (elem_name, elem_ext) = elem_op
711 (elem_spec, dflt_elem_ext, zeroing) = self.elems[elem_name]
712 if elem_ext:
713 ext = elem_ext
714 else:
715 ext = dflt_elem_ext
716 ctype = self.parser.operandTypeMap[ext]
717 c_read = '\t\t%s& %s = %s[%s];\n' % \
718 (ctype, elem_name, self.base_name, elem_spec)
719 return c_read
720
721 def makeReadW(self, predWrite):
722 func = 'getWritableVecRegOperand'
723 if self.read_code != None:
724 return self.buildReadCode(func)
725
726 if predWrite:
727 rindex = '_destIndex++'
728 else:
729 rindex = '%d' % self.dest_reg_idx
730
731 c_readw = '\t\t%s& tmp_d%s = xc->%s(this, %s);\n'\
732 % ('TheISA::VecRegContainer', rindex, func, rindex)
733 if self.elemExt:
734 c_readw += '\t\tauto %s = tmp_d%s.as<%s>();\n' % (self.base_name,
735 rindex, self.parser.operandTypeMap[self.elemExt])
736 if self.ext:
737 c_readw += '\t\tauto %s = tmp_d%s.as<%s>();\n' % (self.base_name,
738 rindex, self.parser.operandTypeMap[self.ext])
739 if hasattr(self, 'active_elems'):
740 if self.active_elems:
741 for elem in self.active_elems:
742 c_readw += self.makeReadWElem(elem)
743 return c_readw
744
745 # Normal source operand read
746 def makeReadElem(self, elem_op, name):
747 (elem_name, elem_ext) = elem_op
748 (elem_spec, dflt_elem_ext, zeroing) = self.elems[elem_name]
749
750 if elem_ext:
751 ext = elem_ext
752 else:
753 ext = dflt_elem_ext
754 ctype = self.parser.operandTypeMap[ext]
755 c_read = '\t\t%s = %s[%s];\n' % \
756 (elem_name, name, elem_spec)
757 return c_read
758
759 def makeRead(self, predRead):
760 func = 'readVecRegOperand'
761 if self.read_code != None:
762 return self.buildReadCode(func)
763
764 if predRead:
765 rindex = '_sourceIndex++'
766 else:
767 rindex = '%d' % self.src_reg_idx
768
769 name = self.base_name
770 if self.is_dest and self.is_src:
771 name += '_merger'
772
773 c_read = '\t\t%s& tmp_s%s = xc->%s(this, %s);\n' \
774 % ('const TheISA::VecRegContainer', rindex, func, rindex)
775         # If the parser has detected that elements are being accessed, create
776 # the appropriate view
777 if self.elemExt:
778 c_read += '\t\tauto %s = tmp_s%s.as<%s>();\n' % \
779 (name, rindex, self.parser.operandTypeMap[self.elemExt])
780 if self.ext:
781 c_read += '\t\tauto %s = tmp_s%s.as<%s>();\n' % \
782 (name, rindex, self.parser.operandTypeMap[self.ext])
783 if hasattr(self, 'active_elems'):
784 if self.active_elems:
785 for elem in self.active_elems:
786 c_read += self.makeReadElem(elem, name)
787 return c_read
788
789 def makeWrite(self, predWrite):
790 func = 'setVecRegOperand'
791 if self.write_code != None:
792 return self.buildWriteCode(func)
793
794 wb = '''
795 if (traceData) {
796 warn_once("Vectors not supported yet in tracedata");
797 /*traceData->setData(final_val);*/
798 }
799 '''
800 return wb
801
802 def finalize(self, predRead, predWrite):
803 super(VecRegOperand, self).finalize(predRead, predWrite)
804 if self.is_dest:
805 self.op_rd = self.makeReadW(predWrite) + self.op_rd
806
807 class VecElemOperand(Operand):
808 reg_class = 'VectorElemClass'
809
810 def isReg(self):
811 return 1
812
813 def isVecElem(self):
814 return 1
815
816 def makeDecl(self):
817 if self.is_dest and not self.is_src:
818 return '\n\t%s %s;' % (self.ctype, self.base_name)
819 else:
820 return ''
821
822 def makeConstructor(self, predRead, predWrite):
823 c_src = ''
824 c_dest = ''
825
826 numAccessNeeded = 1
827 regId = 'RegId(%s, %s * numVecElemPerVecReg + elemIdx, %s)' % \
828             (self.reg_class, self.reg_spec, self.elem_spec)
829
830 if self.is_src:
831 c_src = ('\n\t_srcRegIdx[_numSrcRegs++] = RegId(%s, %s, %s);' %
832 (self.reg_class, self.reg_spec, self.elem_spec))
833
834 if self.is_dest:
835 c_dest = ('\n\t_destRegIdx[_numDestRegs++] = RegId(%s, %s, %s);' %
836 (self.reg_class, self.reg_spec, self.elem_spec))
837 c_dest += '\n\t_numVecElemDestRegs++;'
838 return c_src + c_dest
839
840 def makeRead(self, predRead):
841 c_read = ('\n/* Elem is kept inside the operand description */' +
842 '\n\tVecElem %s = xc->readVecElemOperand(this, %d);' %
843 (self.base_name, self.src_reg_idx))
844 return c_read
845
846 def makeWrite(self, predWrite):
847 c_write = ('\n/* Elem is kept inside the operand description */' +
848 '\n\txc->setVecElemOperand(this, %d, %s);' %
849 (self.dest_reg_idx, self.base_name))
850 return c_write
851
852 class CCRegOperand(Operand):
853 reg_class = 'CCRegClass'
854
855 def isReg(self):
856 return 1
857
858 def isCCReg(self):
859 return 1
860
861 def makeConstructor(self, predRead, predWrite):
862 c_src = ''
863 c_dest = ''
864
865 if self.is_src:
866 c_src = src_reg_constructor % (self.reg_class, self.reg_spec)
867 if self.hasReadPred():
868 c_src = '\n\tif (%s) {%s\n\t}' % \
869 (self.read_predicate, c_src)
870
871 if self.is_dest:
872 c_dest = dst_reg_constructor % (self.reg_class, self.reg_spec)
873 c_dest += '\n\t_numCCDestRegs++;'
874 if self.hasWritePred():
875 c_dest = '\n\tif (%s) {%s\n\t}' % \
876 (self.write_predicate, c_dest)
877
878 return c_src + c_dest
879
880 def makeRead(self, predRead):
881 if (self.ctype == 'float' or self.ctype == 'double'):
882 error('Attempt to read condition-code register as FP')
883 if self.read_code != None:
884 return self.buildReadCode('readCCRegOperand')
885
886 int_reg_val = ''
887 if predRead:
888 int_reg_val = 'xc->readCCRegOperand(this, _sourceIndex++)'
889 if self.hasReadPred():
890 int_reg_val = '(%s) ? %s : 0' % \
891 (self.read_predicate, int_reg_val)
892 else:
893 int_reg_val = 'xc->readCCRegOperand(this, %d)' % self.src_reg_idx
894
895 return '%s = %s;\n' % (self.base_name, int_reg_val)
896
897 def makeWrite(self, predWrite):
898 if (self.ctype == 'float' or self.ctype == 'double'):
899 error('Attempt to write condition-code register as FP')
900 if self.write_code != None:
901 return self.buildWriteCode('setCCRegOperand')
902
903 if predWrite:
904 wp = 'true'
905 if self.hasWritePred():
906 wp = self.write_predicate
907
908 wcond = 'if (%s)' % (wp)
909 windex = '_destIndex++'
910 else:
911 wcond = ''
912 windex = '%d' % self.dest_reg_idx
913
914 wb = '''
915 %s
916 {
917 %s final_val = %s;
918 xc->setCCRegOperand(this, %s, final_val);\n
919 if (traceData) { traceData->setData(final_val); }
920 }''' % (wcond, self.ctype, self.base_name, windex)
921
922 return wb
923
924 class ControlRegOperand(Operand):
925 reg_class = 'MiscRegClass'
926
927 def isReg(self):
928 return 1
929
930 def isControlReg(self):
931 return 1
932
933 def makeConstructor(self, predRead, predWrite):
934 c_src = ''
935 c_dest = ''
936
937 if self.is_src:
938 c_src = src_reg_constructor % (self.reg_class, self.reg_spec)
939
940 if self.is_dest:
941 c_dest = dst_reg_constructor % (self.reg_class, self.reg_spec)
942
943 return c_src + c_dest
944
945 def makeRead(self, predRead):
946 bit_select = 0
947 if (self.ctype == 'float' or self.ctype == 'double'):
948 error('Attempt to read control register as FP')
949 if self.read_code != None:
950 return self.buildReadCode('readMiscRegOperand')
951
952 if predRead:
953 rindex = '_sourceIndex++'
954 else:
955 rindex = '%d' % self.src_reg_idx
956
957 return '%s = xc->readMiscRegOperand(this, %s);\n' % \
958 (self.base_name, rindex)
959
960 def makeWrite(self, predWrite):
961 if (self.ctype == 'float' or self.ctype == 'double'):
962 error('Attempt to write control register as FP')
963 if self.write_code != None:
964 return self.buildWriteCode('setMiscRegOperand')
965
966 if predWrite:
967 windex = '_destIndex++'
968 else:
969 windex = '%d' % self.dest_reg_idx
970
971 wb = 'xc->setMiscRegOperand(this, %s, %s);\n' % \
972 (windex, self.base_name)
973 wb += 'if (traceData) { traceData->setData(%s); }' % \
974 self.base_name
975
976 return wb
977
978 class MemOperand(Operand):
979 def isMem(self):
980 return 1
981
982 def makeConstructor(self, predRead, predWrite):
983 return ''
984
985 def makeDecl(self):
986 # Declare memory data variable.
987 return '%s %s;\n' % (self.ctype, self.base_name)
988
989 def makeRead(self, predRead):
990 if self.read_code != None:
991 return self.buildReadCode()
992 return ''
993
994 def makeWrite(self, predWrite):
995 if self.write_code != None:
996 return self.buildWriteCode()
997 return ''
998
999 class PCStateOperand(Operand):
1000 def makeConstructor(self, predRead, predWrite):
1001 return ''
1002
1003 def makeRead(self, predRead):
1004 if self.reg_spec:
1005 # A component of the PC state.
1006 return '%s = __parserAutoPCState.%s();\n' % \
1007 (self.base_name, self.reg_spec)
1008 else:
1009 # The whole PC state itself.
1010 return '%s = xc->pcState();\n' % self.base_name
1011
1012 def makeWrite(self, predWrite):
1013 if self.reg_spec:
1014 # A component of the PC state.
1015 return '__parserAutoPCState.%s(%s);\n' % \
1016 (self.reg_spec, self.base_name)
1017 else:
1018 # The whole PC state itself.
1019 return 'xc->pcState(%s);\n' % self.base_name
1020
1021 def makeDecl(self):
1022 ctype = 'TheISA::PCState'
1023 if self.isPCPart():
1024 ctype = self.ctype
1025 # Note that initializations in the declarations are solely
1026 # to avoid 'uninitialized variable' errors from the compiler.
1027 return '%s %s = 0;\n' % (ctype, self.base_name)
1028
1029 def isPCState(self):
1030 return 1
1031
1032 class OperandList(object):
1033 '''Find all the operands in the given code block. Returns an operand
1034 descriptor list (instance of class OperandList).'''
1035 def __init__(self, parser, code):
1036 self.items = []
1037 self.bases = {}
1038 # delete strings and comments so we don't match on operands inside
1039 for regEx in (stringRE, commentRE):
1040 code = regEx.sub('', code)
1041 # search for operands
1042 next_pos = 0
1043 while 1:
1044 match = parser.operandsRE.search(code, next_pos)
1045 if not match:
1046 # no more matches: we're done
1047 break
1048 op = match.groups()
1049 # regexp groups are operand full name, base, and extension
1050 (op_full, op_base, op_ext) = op
1051             # If it is an elem operand, define or update the corresponding
1052 # vector operand
1053 isElem = False
1054 if op_base in parser.elemToVector:
1055 isElem = True
1056 elem_op = (op_base, op_ext)
1057 op_base = parser.elemToVector[op_base]
1058 op_ext = '' # use the default one
1059 # if the token following the operand is an assignment, this is
1060 # a destination (LHS), else it's a source (RHS)
1061 is_dest = (assignRE.match(code, match.end()) != None)
1062 is_src = not is_dest
1063
1064 # see if we've already seen this one
1065 op_desc = self.find_base(op_base)
1066 if op_desc:
1067 if op_ext and op_ext != '' and op_desc.ext != op_ext:
1068 error ('Inconsistent extensions for operand %s: %s - %s' \
1069 % (op_base, op_desc.ext, op_ext))
1070 op_desc.is_src = op_desc.is_src or is_src
1071 op_desc.is_dest = op_desc.is_dest or is_dest
1072 if isElem:
1073 (elem_base, elem_ext) = elem_op
1074 found = False
1075 for ae in op_desc.active_elems:
1076 (ae_base, ae_ext) = ae
1077 if ae_base == elem_base:
1078 if ae_ext != elem_ext:
1079 error('Inconsistent extensions for elem'
1080 ' operand %s' % elem_base)
1081 else:
1082 found = True
1083 if not found:
1084 op_desc.active_elems.append(elem_op)
1085 else:
1086 # new operand: create new descriptor
1087 op_desc = parser.operandNameMap[op_base](parser,
1088 op_full, op_ext, is_src, is_dest)
1089 # if operand is a vector elem, add the corresponding vector
1090 # operand if not already done
1091 if isElem:
1092 op_desc.elemExt = elem_op[1]
1093 op_desc.active_elems = [elem_op]
1094 self.append(op_desc)
1095 # start next search after end of current match
1096 next_pos = match.end()
1097 self.sort()
1098 # enumerate source & dest register operands... used in building
1099 # constructor later
1100 self.numSrcRegs = 0
1101 self.numDestRegs = 0
1102 self.numFPDestRegs = 0
1103 self.numIntDestRegs = 0
1104 self.numVecDestRegs = 0
1105 self.numCCDestRegs = 0
1106 self.numMiscDestRegs = 0
1107 self.memOperand = None
1108
1109 # Flags to keep track if one or more operands are to be read/written
1110 # conditionally.
1111 self.predRead = False
1112 self.predWrite = False
1113
1114 for op_desc in self.items:
1115 if op_desc.isReg():
1116 if op_desc.is_src:
1117 op_desc.src_reg_idx = self.numSrcRegs
1118 self.numSrcRegs += 1
1119 if op_desc.is_dest:
1120 op_desc.dest_reg_idx = self.numDestRegs
1121 self.numDestRegs += 1
1122 if op_desc.isFloatReg():
1123 self.numFPDestRegs += 1
1124 elif op_desc.isIntReg():
1125 self.numIntDestRegs += 1
1126 elif op_desc.isVecReg():
1127 self.numVecDestRegs += 1
1128 elif op_desc.isCCReg():
1129 self.numCCDestRegs += 1
1130 elif op_desc.isControlReg():
1131 self.numMiscDestRegs += 1
1132 elif op_desc.isMem():
1133 if self.memOperand:
1134 error("Code block has more than one memory operand.")
1135 self.memOperand = op_desc
1136
1137 # Check if this operand has read/write predication. If true, then
1138 # the microop will dynamically index source/dest registers.
1139 self.predRead = self.predRead or op_desc.hasReadPred()
1140 self.predWrite = self.predWrite or op_desc.hasWritePred()
1141
1142 if parser.maxInstSrcRegs < self.numSrcRegs:
1143 parser.maxInstSrcRegs = self.numSrcRegs
1144 if parser.maxInstDestRegs < self.numDestRegs:
1145 parser.maxInstDestRegs = self.numDestRegs
1146 if parser.maxMiscDestRegs < self.numMiscDestRegs:
1147 parser.maxMiscDestRegs = self.numMiscDestRegs
1148
1149 # now make a final pass to finalize op_desc fields that may depend
1150 # on the register enumeration
1151 for op_desc in self.items:
1152 op_desc.finalize(self.predRead, self.predWrite)
1153
1154 def __len__(self):
1155 return len(self.items)
1156
1157 def __getitem__(self, index):
1158 return self.items[index]
1159
1160 def append(self, op_desc):
1161 self.items.append(op_desc)
1162 self.bases[op_desc.base_name] = op_desc
1163
1164 def find_base(self, base_name):
1165 # like self.bases[base_name], but returns None if not found
1166 # (rather than raising exception)
1167 return self.bases.get(base_name)
1168
1169 # internal helper function for concat[Some]Attr{Strings|Lists}
1170 def __internalConcatAttrs(self, attr_name, filter, result):
1171 for op_desc in self.items:
1172 if filter(op_desc):
1173 result += getattr(op_desc, attr_name)
1174 return result
1175
1176 # return a single string that is the concatenation of the (string)
1177 # values of the specified attribute for all operands
1178 def concatAttrStrings(self, attr_name):
1179 return self.__internalConcatAttrs(attr_name, lambda x: 1, '')
1180
1181 # like concatAttrStrings, but only include the values for the operands
1182 # for which the provided filter function returns true
1183 def concatSomeAttrStrings(self, filter, attr_name):
1184 return self.__internalConcatAttrs(attr_name, filter, '')
1185
1186 # return a single list that is the concatenation of the (list)
1187 # values of the specified attribute for all operands
1188 def concatAttrLists(self, attr_name):
1189 return self.__internalConcatAttrs(attr_name, lambda x: 1, [])
1190
1191 # like concatAttrLists, but only include the values for the operands
1192 # for which the provided filter function returns true
1193 def concatSomeAttrLists(self, filter, attr_name):
1194 return self.__internalConcatAttrs(attr_name, filter, [])
1195
1196 def sort(self):
1197 self.items.sort(lambda a, b: a.sort_pri - b.sort_pri)
1198
1199 class SubOperandList(OperandList):
1200 '''Find all the operands in the given code block. Returns an operand
1201 descriptor list (instance of class OperandList).'''
1202 def __init__(self, parser, code, master_list):
1203 self.items = []
1204 self.bases = {}
1205 # delete strings and comments so we don't match on operands inside
1206 for regEx in (stringRE, commentRE):
1207 code = regEx.sub('', code)
1208 # search for operands
1209 next_pos = 0
1210 while 1:
1211 match = parser.operandsRE.search(code, next_pos)
1212 if not match:
1213 # no more matches: we're done
1214 break
1215 op = match.groups()
1216 # regexp groups are operand full name, base, and extension
1217 (op_full, op_base, op_ext) = op
1218             # If it is an elem operand, define or update the corresponding
1219 # vector operand
1220 if op_base in parser.elemToVector:
1221 elem_op = op_base
1222 op_base = parser.elemToVector[elem_op]
1223 # find this op in the master list
1224 op_desc = master_list.find_base(op_base)
1225 if not op_desc:
1226 error('Found operand %s which is not in the master list!'
1227 % op_base)
1228 else:
1229 # See if we've already found this operand
1230 op_desc = self.find_base(op_base)
1231 if not op_desc:
1232 # if not, add a reference to it to this sub list
1233 self.append(master_list.bases[op_base])
1234
1235 # start next search after end of current match
1236 next_pos = match.end()
1237 self.sort()
1238 self.memOperand = None
1239 # Whether the whole PC needs to be read so parts of it can be accessed
1240 self.readPC = False
1241 # Whether the whole PC needs to be written after parts of it were
1242 # changed
1243 self.setPC = False
1244 # Whether this instruction manipulates the whole PC or parts of it.
1245 # Mixing the two is a bad idea and flagged as an error.
1246 self.pcPart = None
1247
1248 # Flags to keep track if one or more operands are to be read/written
1249 # conditionally.
1250 self.predRead = False
1251 self.predWrite = False
1252
1253 for op_desc in self.items:
1254 if op_desc.isPCPart():
1255 self.readPC = True
1256 if op_desc.is_dest:
1257 self.setPC = True
1258
1259 if op_desc.isPCState():
1260 if self.pcPart is not None:
1261 if self.pcPart and not op_desc.isPCPart() or \
1262 not self.pcPart and op_desc.isPCPart():
1263 error("Mixed whole and partial PC state operands.")
1264 self.pcPart = op_desc.isPCPart()
1265
1266 if op_desc.isMem():
1267 if self.memOperand:
1268 error("Code block has more than one memory operand.")
1269 self.memOperand = op_desc
1270
1271 # Check if this operand has read/write predication. If true, then
1272 # the microop will dynamically index source/dest registers.
1273 self.predRead = self.predRead or op_desc.hasReadPred()
1274 self.predWrite = self.predWrite or op_desc.hasWritePred()
1275
1276 # Regular expression object to match C++ strings
1277 stringRE = re.compile(r'"([^"\\]|\\.)*"')
1278
1279 # Regular expression object to match C++ comments
1280 # (used in findOperands())
1281 commentRE = re.compile(r'(^)?[^\S\n]*/(?:\*(.*?)\*/[^\S\n]*|/[^\n]*)($)?',
1282 re.DOTALL | re.MULTILINE)
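# Illustrative example: both comment styles are stripped before operand
# matching, e.g.
#   commentRE.sub('', 'Ra = Rb; // copy\n/* note */ Rc = 0;')
#   == 'Ra = Rb;\nRc = 0;'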
1283
1284 # Regular expression object to match assignment statements (used in
1285 # findOperands()). If the code immediately following the first
1286 # appearance of the operand matches this regex, then the operand
1287 # appears to be on the LHS of an assignment, and is thus a
1288 # destination. Basically we're looking for an '=' that's not '=='.
1289 # The heinous tangle before that handles the case where the operand
1290 # has an array subscript.
1291 assignRE = re.compile(r'(\[[^\]]+\])?\s*=(?!=)', re.MULTILINE)
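# Illustrative example (matched against the code immediately following an
# operand):
#   assignRE.match(' = Rb + 1;')   matches -> destination
#   assignRE.match(' == Rb')       is None -> source
#   assignRE.match('[idx] = Rb;')  matches -> destination with a subscript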
1292
1293 def makeFlagConstructor(flag_list):
1294 if len(flag_list) == 0:
1295 return ''
1296 # filter out repeated flags
1297 flag_list.sort()
1298 i = 1
1299 while i < len(flag_list):
1300 if flag_list[i] == flag_list[i-1]:
1301 del flag_list[i]
1302 else:
1303 i += 1
1304 pre = '\n\tflags['
1305 post = '] = true;'
1306 code = pre + string.join(flag_list, post + pre) + post
1307 return code
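# Illustrative example (duplicates are dropped, flags are sorted):
#   makeFlagConstructor(['IsMemRef', 'IsLoad', 'IsLoad'])
#   == '\n\tflags[IsLoad] = true;\n\tflags[IsMemRef] = true;'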
1308
1309 # Assume all instruction flags are of the form 'IsFoo'
1310 instFlagRE = re.compile(r'Is.*')
1311
1312 # OpClass constants end in 'Op' except No_OpClass
1313 opClassRE = re.compile(r'.*Op|No_OpClass')
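# Illustrative example:
#   instFlagRE.match('IsLoad') matches; instFlagRE.match('MemReadOp') is None
#   opClassRE.match('IntAluOp') and opClassRE.match('No_OpClass') both match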
1314
1315 class InstObjParams(object):
1316 def __init__(self, parser, mnem, class_name, base_class = '',
1317 snippets = {}, opt_args = []):
1318 self.mnemonic = mnem
1319 self.class_name = class_name
1320 self.base_class = base_class
1321 if not isinstance(snippets, dict):
1322 snippets = {'code' : snippets}
1323 compositeCode = ' '.join(map(str, snippets.values()))
1324 self.snippets = snippets
1325
1326 self.operands = OperandList(parser, compositeCode)
1327
1328 # The header of the constructor declares the variables to be used
1329 # in the body of the constructor.
1330 header = ''
1331 header += '\n\t_numSrcRegs = 0;'
1332 header += '\n\t_numDestRegs = 0;'
1333 header += '\n\t_numFPDestRegs = 0;'
1334 header += '\n\t_numVecDestRegs = 0;'
1335 header += '\n\t_numVecElemDestRegs = 0;'
1336 header += '\n\t_numIntDestRegs = 0;'
1337 header += '\n\t_numCCDestRegs = 0;'
1338
1339 self.constructor = header + \
1340 self.operands.concatAttrStrings('constructor')
1341
1342 self.flags = self.operands.concatAttrLists('flags')
1343
1344 self.op_class = None
1345
1346 # Optional arguments are assumed to be either StaticInst flags
1347 # or an OpClass value. To avoid having to import a complete
1348 # list of these values to match against, we do it ad-hoc
1349 # with regexps.
1350 for oa in opt_args:
1351 if instFlagRE.match(oa):
1352 self.flags.append(oa)
1353 elif opClassRE.match(oa):
1354 self.op_class = oa
1355 else:
1356 error('InstObjParams: optional arg "%s" not recognized '
1357 'as StaticInst::Flag or OpClass.' % oa)
1358
1359 # Make a basic guess on the operand class if not set.
1360 # These are good enough for most cases.
1361 if not self.op_class:
1362 if 'IsStore' in self.flags:
1363 # The order matters here: 'IsFloating' and 'IsInteger' are
1364 # usually set in FP instructions because of the base
1365 # register
1366 if 'IsFloating' in self.flags:
1367 self.op_class = 'FloatMemWriteOp'
1368 else:
1369 self.op_class = 'MemWriteOp'
1370 elif 'IsLoad' in self.flags or 'IsPrefetch' in self.flags:
1371 # The order matters here: 'IsFloating' and 'IsInteger' are
1372 # usually set in FP instructions because of the base
1373 # register
1374 if 'IsFloating' in self.flags:
1375 self.op_class = 'FloatMemReadOp'
1376 else:
1377 self.op_class = 'MemReadOp'
1378 elif 'IsFloating' in self.flags:
1379 self.op_class = 'FloatAddOp'
1380 elif 'IsVector' in self.flags:
1381 self.op_class = 'SimdAddOp'
1382 else:
1383 self.op_class = 'IntAluOp'
1384
1385         # add flag initialization to constructor here to include
1386 # any flags added via opt_args
1387 self.constructor += makeFlagConstructor(self.flags)
1388
1389 # if 'IsFloating' is set, add call to the FP enable check
1390 # function (which should be provided by isa_desc via a declare)
1391 # if 'IsVector' is set, add call to the Vector enable check
1392 # function (which should be provided by isa_desc via a declare)
1393 if 'IsFloating' in self.flags:
1394 self.fp_enable_check = 'fault = checkFpEnableFault(xc);'
1395 elif 'IsVector' in self.flags:
1396 self.fp_enable_check = 'fault = checkVecEnableFault(xc);'
1397 else:
1398 self.fp_enable_check = ''
1399
1400 ##############
1401 # Stack: a simple stack object. Used for both formats (formatStack)
1402 # and default cases (defaultStack). Simply wraps a list to give more
1403 # stack-like syntax and enable initialization with an argument list
1404 # (as opposed to an argument that's a list).
1405
1406 class Stack(list):
1407 def __init__(self, *items):
1408 list.__init__(self, items)
1409
1410 def push(self, item):
1411 self.append(item);
1412
1413 def top(self):
1414 return self[-1]
1415
1416 # Format a file include stack backtrace as a string
1417 def backtrace(filename_stack):
1418 fmt = "In file included from %s:"
1419 return "\n".join([fmt % f for f in filename_stack])
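# Illustrative example (entries are typically LineTracker objects, but any
# object with a sensible str() works):
#   backtrace(['isa/main.isa', 'isa/includes.isa'])
#   == 'In file included from isa/main.isa:\nIn file included from isa/includes.isa:'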
1420
1421
1422 #######################
1423 #
1424 # LineTracker: track filenames along with line numbers in PLY lineno fields.
1425 # PLY explicitly doesn't do anything with 'lineno' except propagate
1426 # it. This class lets us tie filenames to line numbers with a
1427 # minimum of disruption to existing increment code.
1428 #
1429
1430 class LineTracker(object):
1431 def __init__(self, filename, lineno=1):
1432 self.filename = filename
1433 self.lineno = lineno
1434
1435 # Overload '+=' for increments. We need to create a new object on
1436 # each update else every token ends up referencing the same
1437 # constantly incrementing instance.
1438 def __iadd__(self, incr):
1439 return LineTracker(self.filename, self.lineno + incr)
1440
1441 def __str__(self):
1442 return "%s:%d" % (self.filename, self.lineno)
1443
1444 # In case there are places where someone really expects a number
1445 def __int__(self):
1446 return self.lineno
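# Illustrative example:
#   lt = LineTracker('decoder.isa')   # lineno defaults to 1
#   lt += 3                           # returns a fresh LineTracker
#   str(lt) == 'decoder.isa:4' and int(lt) == 4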
1447
1448
1449 #######################
1450 #
1451 # ISA Parser
1452 # parses ISA DSL and emits C++ headers and source
1453 #
1454
1455 class ISAParser(Grammar):
1456 def __init__(self, output_dir):
1457 super(ISAParser, self).__init__()
1458 self.output_dir = output_dir
1459
1460 self.filename = None # for output file watermarking/scaremongering
1461
1462 # variable to hold templates
1463 self.templateMap = {}
1464
1465 # This dictionary maps format name strings to Format objects.
1466 self.formatMap = {}
1467
1468         # Track open files and, if applicable, how many chunks each has been
1469 # split into so far.
1470 self.files = {}
1471 self.splits = {}
1472
1473 # isa_name / namespace identifier from namespace declaration.
1474 # before the namespace declaration, None.
1475 self.isa_name = None
1476 self.namespace = None
1477
1478 # The format stack.
1479 self.formatStack = Stack(NoFormat())
1480
1481 # The default case stack.
1482 self.defaultStack = Stack(None)
1483
1484 # Stack that tracks current file and line number. Each
1485 # element is a tuple (filename, lineno) that records the
1486 # *current* filename and the line number in the *previous*
1487 # file where it was included.
1488 self.fileNameStack = Stack()
1489
1490 symbols = ('makeList', 're', 'string')
1491 self.exportContext = dict([(s, eval(s)) for s in symbols])
1492
1493 self.maxInstSrcRegs = 0
1494 self.maxInstDestRegs = 0
1495 self.maxMiscDestRegs = 0
1496
1497 def __getitem__(self, i): # Allow object (self) to be
1498 return getattr(self, i) # passed to %-substitutions
1499
1500 # Change the file suffix of a base filename:
1501 # (e.g.) decoder.cc -> decoder-g.cc.inc for 'global' outputs
1502 def suffixize(self, s, sec):
1503 extn = re.compile('(\.[^\.]+)$') # isolate extension
1504 if self.namespace:
1505 return extn.sub(r'-ns\1.inc', s) # insert some text on either side
1506 else:
1507 return extn.sub(r'-g\1.inc', s)
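# Illustrative example: once the namespace declaration has been seen,
# suffixize('decoder.cc', 'decoder') returns 'decoder-ns.cc.inc';
# before it, the same call returns 'decoder-g.cc.inc'.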
1508
1509 # Get the file object for emitting code into the specified section
1510 # (header, decoder, exec, decode_block).
1511 def get_file(self, section):
1512 if section == 'decode_block':
1513 filename = 'decode-method.cc.inc'
1514 else:
1515 if section == 'header':
1516 file = 'decoder.hh'
1517 else:
1518 file = '%s.cc' % section
1519 filename = self.suffixize(file, section)
1520 try:
1521 return self.files[filename]
1522 except KeyError: pass
1523
1524 f = self.open(filename)
1525 self.files[filename] = f
1526
1527 # The splittable files are the ones with many independent
1528 # per-instruction functions - the decoder's instruction constructors
1529 # and the instruction execution (execute()) methods. These both have
1530 # the suffix -ns.cc.inc, meaning they are within the namespace part
1531 # of the ISA, contain object-emitting C++ source, and are included
1532 # into other top-level files. These are the files that need special
1533 # #define's to allow parts of them to be compiled separately. Rather
1534 # than splitting the emissions into separate files, the monolithic
1535 # output of the ISA parser is maintained, but the value (or lack
1536 # thereof) of the __SPLIT definition during C preprocessing will
1537 # select the different chunks. If no 'split' directives are used,
1538 # the cpp emissions have no effect.
1539 if re.search('-ns.cc.inc$', filename):
1540 print('#if !defined(__SPLIT) || (__SPLIT == 1)', file=f)
1541 self.splits[f] = 1
1542 # ensure requisite #include's
1543 elif filename == 'decoder-g.hh.inc':
1544 print('#include "base/bitfield.hh"', file=f)
1545
1546 return f
1547
1548 # Weave together the parts of the different output sections by
1549 # #include'ing them into some very short top-level .cc/.hh files.
1550 # These small files make it much clearer how this tool works, since
1551 # you directly see the chunks emitted as files that are #include'd.
1552 def write_top_level_files(self):
1553 # decoder header - everything depends on this
1554 file = 'decoder.hh'
1555 with self.open(file) as f:
1556 fn = 'decoder-g.hh.inc'
1557 assert(fn in self.files)
1558 f.write('#include "%s"\n' % fn)
1559
1560 fn = 'decoder-ns.hh.inc'
1561 assert(fn in self.files)
1562 f.write('namespace %s {\n#include "%s"\n}\n'
1563 % (self.namespace, fn))
1564
1565 # decoder method - cannot be split
1566 file = 'decoder.cc'
1567 with self.open(file) as f:
1568 fn = 'base/compiler.hh'
1569 f.write('#include "%s"\n' % fn)
1570
1571 fn = 'decoder-g.cc.inc'
1572 assert(fn in self.files)
1573 f.write('#include "%s"\n' % fn)
1574
1575 fn = 'decoder.hh'
1576 f.write('#include "%s"\n' % fn)
1577
1578 fn = 'decode-method.cc.inc'
1579 # is guaranteed to have been written for parse to complete
1580 f.write('#include "%s"\n' % fn)
1581
1582 extn = re.compile('(\.[^\.]+)$')
1583
1584 # instruction constructors
1585 splits = self.splits[self.get_file('decoder')]
1586 file_ = 'inst-constrs.cc'
1587 for i in range(1, splits+1):
1588 if splits > 1:
1589 file = extn.sub(r'-%d\1' % i, file_)
1590 else:
1591 file = file_
1592 with self.open(file) as f:
1593 fn = 'decoder-g.cc.inc'
1594 assert(fn in self.files)
1595 f.write('#include "%s"\n' % fn)
1596
1597 fn = 'decoder.hh'
1598 f.write('#include "%s"\n' % fn)
1599
1600 fn = 'decoder-ns.cc.inc'
1601 assert(fn in self.files)
1602 print('namespace %s {' % self.namespace, file=f)
1603 if splits > 1:
1604 print('#define __SPLIT %u' % i, file=f)
1605 print('#include "%s"' % fn, file=f)
1606 print('}', file=f)
1607
1608 # instruction execution
1609 splits = self.splits[self.get_file('exec')]
1610 for i in range(1, splits+1):
1611 file = 'generic_cpu_exec.cc'
1612 if splits > 1:
1613 file = extn.sub(r'_%d\1' % i, file)
1614 with self.open(file) as f:
1615 fn = 'exec-g.cc.inc'
1616 assert(fn in self.files)
1617 f.write('#include "%s"\n' % fn)
1618 f.write('#include "cpu/exec_context.hh"\n')
1619 f.write('#include "decoder.hh"\n')
1620
1621 fn = 'exec-ns.cc.inc'
1622 assert(fn in self.files)
1623 print('namespace %s {' % self.namespace, file=f)
1624 if splits > 1:
1625 print('#define __SPLIT %u' % i, file=f)
1626 print('#include "%s"' % fn, file=f)
1627 print('}', file=f)
1628
1629 # max_inst_regs.hh
1630 self.update('max_inst_regs.hh',
1631 '''namespace %(namespace)s {
1632 const int MaxInstSrcRegs = %(maxInstSrcRegs)d;
1633 const int MaxInstDestRegs = %(maxInstDestRegs)d;
1634 const int MaxMiscDestRegs = %(maxMiscDestRegs)d;\n}\n''' % self)
1635
1636 scaremonger_template ='''// DO NOT EDIT
1637 // This file was automatically generated from an ISA description:
1638 // %(filename)s
1639
1640 ''';
1641
1642 #####################################################################
1643 #
1644 # Lexer
1645 #
1646 # The PLY lexer module takes two things as input:
1647 # - A list of token names (the string list 'tokens')
1648 # - A regular expression describing a match for each token. The
1649 # regexp for token FOO can be provided in two ways:
1650 # - as a string variable named t_FOO
1651 # - as the doc string for a function named t_FOO. In this case,
1652 # the function is also executed, allowing an action to be
1653 # associated with each token match.
1654 #
1655 #####################################################################
1656
1657 # Reserved words. These are listed separately as they are matched
1658 # using the same regexp as generic IDs, but distinguished in the
1659 # t_ID() function. The PLY documentation suggests this approach.
1660 reserved = (
1661 'BITFIELD', 'DECODE', 'DECODER', 'DEFAULT', 'DEF', 'EXEC', 'FORMAT',
1662 'HEADER', 'LET', 'NAMESPACE', 'OPERAND_TYPES', 'OPERANDS',
1663 'OUTPUT', 'SIGNED', 'SPLIT', 'TEMPLATE'
1664 )
1665
1666 # List of tokens. The lex module requires this.
1667 tokens = reserved + (
1668 # identifier
1669 'ID',
1670
1671 # integer literal
1672 'INTLIT',
1673
1674 # string literal
1675 'STRLIT',
1676
1677 # code literal
1678 'CODELIT',
1679
1680 # ( ) [ ] { } < > , ; . : :: *
1681 'LPAREN', 'RPAREN',
1682 'LBRACKET', 'RBRACKET',
1683 'LBRACE', 'RBRACE',
1684 'LESS', 'GREATER', 'EQUALS',
1685 'COMMA', 'SEMI', 'DOT', 'COLON', 'DBLCOLON',
1686 'ASTERISK',
1687
1688 # C preprocessor directives
1689 'CPPDIRECTIVE'
1690
1691         # The following are matched but never returned. Commented out to
1692 # suppress PLY warning
1693 # newfile directive
1694 # 'NEWFILE',
1695
1696 # endfile directive
1697 # 'ENDFILE'
1698 )
1699
1700 # Regular expressions for token matching
1701 t_LPAREN = r'\('
1702 t_RPAREN = r'\)'
1703 t_LBRACKET = r'\['
1704 t_RBRACKET = r'\]'
1705 t_LBRACE = r'\{'
1706 t_RBRACE = r'\}'
1707 t_LESS = r'\<'
1708 t_GREATER = r'\>'
1709 t_EQUALS = r'='
1710 t_COMMA = r','
1711 t_SEMI = r';'
1712 t_DOT = r'\.'
1713 t_COLON = r':'
1714 t_DBLCOLON = r'::'
1715 t_ASTERISK = r'\*'
1716
1717 # Identifiers and reserved words
1718 reserved_map = { }
1719 for r in reserved:
1720 reserved_map[r.lower()] = r
1721
1722 def t_ID(self, t):
1723 r'[A-Za-z_]\w*'
1724 t.type = self.reserved_map.get(t.value, 'ID')
1725 return t
1726
1727 # Integer literal
1728 def t_INTLIT(self, t):
1729 r'-?(0x[\da-fA-F]+)|\d+'
1730 try:
1731 t.value = int(t.value,0)
1732 except ValueError:
1733 error(t.lexer.lineno, 'Integer value "%s" too large' % t.value)
1734 t.value = 0
1735 return t
1736
1737 # String literal. Note that these use only single quotes, and
1738 # can span multiple lines.
1739 def t_STRLIT(self, t):
1740 r"(?m)'([^'])+'"
1741 # strip off quotes
1742 t.value = t.value[1:-1]
1743 t.lexer.lineno += t.value.count('\n')
1744 return t
1745
1746
1747 # "Code literal"... like a string literal, but delimiters are
1748 # '{{' and '}}' so they get formatted nicely under emacs c-mode
1749 def t_CODELIT(self, t):
1750 r"(?m)\{\{([^\}]|}(?!\}))+\}\}"
1751 # strip off {{ & }}
1752 t.value = t.value[2:-2]
1753 t.lexer.lineno += t.value.count('\n')
1754 return t
1755
1756 def t_CPPDIRECTIVE(self, t):
1757 r'^\#[^\#].*\n'
1758 t.lexer.lineno += t.value.count('\n')
1759 return t
1760
1761 def t_NEWFILE(self, t):
1762 r'^\#\#newfile\s+"[^"]*"\n'
1763 self.fileNameStack.push(t.lexer.lineno)
1764 t.lexer.lineno = LineTracker(t.value[11:-2])
1765
1766 def t_ENDFILE(self, t):
1767 r'^\#\#endfile\n'
1768 t.lexer.lineno = self.fileNameStack.pop()
1769
1770 #
1771 # The functions t_NEWLINE, t_ignore, and t_error are
1772 # special for the lex module.
1773 #
1774
1775 # Newlines
1776 def t_NEWLINE(self, t):
1777 r'\n+'
1778 t.lexer.lineno += t.value.count('\n')
1779
1780 # Comments
1781 def t_comment(self, t):
1782 r'//.*'
1783
1784 # Completely ignored characters
1785 t_ignore = ' \t\x0c'
1786
1787 # Error handler
1788 def t_error(self, t):
1789 error(t.lexer.lineno, "illegal character '%s'" % t.value[0])
1790 t.skip(1)
1791
1792 #####################################################################
1793 #
1794 # Parser
1795 #
1796 # Every function whose name starts with 'p_' defines a grammar
1797 # rule. The rule is encoded in the function's doc string, while
1798 # the function body provides the action taken when the rule is
1799 # matched. The argument to each function is a list of the values
1800 # of the rule's symbols: t[0] for the LHS, and t[1..n] for the
1801 # symbols on the RHS. For tokens, the value is copied from the
1802 # t.value attribute provided by the lexer. For non-terminals, the
1803 # value is assigned by the producing rule; i.e., the job of the
1804 # grammar rule function is to set the value for the non-terminal
1805 # on the LHS (by assigning to t[0]).
1806 #####################################################################
1807
1808 # The LHS of the first grammar rule is used as the start symbol
1809 # (in this case, 'specification'). Note that this rule enforces
1810 # that there will be exactly one namespace declaration, with 0 or
1811 # more global defs/decls before and after it. The defs & decls
1812 # before the namespace decl will be outside the namespace; those
1813 # after will be inside. The decoder function is always inside the
1814 # namespace.
1815 def p_specification(self, t):
1816 'specification : opt_defs_and_outputs top_level_decode_block'
1817
1818 for f in self.splits.iterkeys():
1819 f.write('\n#endif\n')
1820
1821 for f in self.files.itervalues(): # close ALL the files;
1822 f.close() # not doing so can cause compilation to fail
1823
1824 self.write_top_level_files()
1825
1826 t[0] = True
1827
1828 # 'opt_defs_and_outputs' is a possibly empty sequence of def and/or
1829 # output statements. Its productions do the hard work of eventually
1830 # instantiating a GenCode object, which is generally emitted (written to disk)
1831 # as soon as possible, except for the decode_block, which has to be
1832 # accumulated into one large function of nested switch/case blocks.
1833 def p_opt_defs_and_outputs_0(self, t):
1834 'opt_defs_and_outputs : empty'
1835
1836 def p_opt_defs_and_outputs_1(self, t):
1837 'opt_defs_and_outputs : defs_and_outputs'
1838
1839 def p_defs_and_outputs_0(self, t):
1840 'defs_and_outputs : def_or_output'
1841
1842 def p_defs_and_outputs_1(self, t):
1843 'defs_and_outputs : defs_and_outputs def_or_output'
1844
1845 # The list of possible definition/output statements.
1846 # They are all processed as they are seen.
1847 def p_def_or_output(self, t):
1848 '''def_or_output : name_decl
1849 | def_format
1850 | def_bitfield
1851 | def_bitfield_struct
1852 | def_template
1853 | def_operand_types
1854 | def_operands
1855 | output
1856 | global_let
1857 | split'''
1858
1859 # Utility function used by both ways of requesting a split: the explicit
1860 # 'split' keyword and the split() function inside "let {{ }};" blocks.
1861 def split(self, sec, write=False):
1862 assert sec != 'header', "header cannot be split"
1863
1864 f = self.get_file(sec)
1865 self.splits[f] += 1
1866 s = '\n#endif\n#if __SPLIT == %u\n' % self.splits[f]
1867 if write:
1868 f.write(s)
1869 else:
1870 return s
1871
1872 # split output file to reduce compilation time
1873 def p_split(self, t):
1874 'split : SPLIT output_type SEMI'
1875 assert self.isa_name, "'split' not allowed before namespace decl"
1876
1877 self.split(t[2], True)
1878
1879 def p_output_type(self, t):
1880 '''output_type : DECODER
1881 | HEADER
1882 | EXEC'''
1883 t[0] = t[1]
1884
1885 # ISA name declaration looks like "namespace <foo>;"
1886 def p_name_decl(self, t):
1887 'name_decl : NAMESPACE ID SEMI'
1888 assert self.isa_name is None, "Only 1 namespace decl permitted"
1889 self.isa_name = t[2]
1890 self.namespace = t[2] + 'Inst'
1891
1892 # Output blocks 'output <foo> {{...}}' (C++ code blocks) are copied
1893 # directly to the appropriate output section.
1894
1895 # Massage output block by substituting in template definitions and
1896 # bit operators. We handle '%'s embedded in the string that don't
1897 # indicate template substitutions by doubling them first so that the
1898 # format operation will reduce them back to single '%'s.
1899 def process_output(self, s):
1900 s = self.protectNonSubstPercents(s)
1901 return substBitOps(s % self.templateMap)
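# For illustration: given an output block containing the C++ statement
#     x = a % b;   // modulo, not a template reference
# protectNonSubstPercents() rewrites the '%' to '%%', so the subsequent
# '% self.templateMap' substitution restores it to a single '%', while
# references of the form '%(SomeTemplate)s' (name is a placeholder)
# are still expanded from templateMap.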
1902
1903 def p_output(self, t):
1904 'output : OUTPUT output_type CODELIT SEMI'
1905 kwargs = { t[2]+'_output' : self.process_output(t[3]) }
1906 GenCode(self, **kwargs).emit()
1907
1908 # global let blocks 'let {{...}}' (Python code blocks) are
1909 # executed directly when seen. Note that these execute in a
1910 # special variable context 'exportContext' to prevent the code
1911 # from polluting this script's namespace.
1912 def p_global_let(self, t):
1913 'global_let : LET CODELIT SEMI'
1914 def _split(sec):
1915 return self.split(sec)
1916 self.updateExportContext()
1917 self.exportContext["header_output"] = ''
1918 self.exportContext["decoder_output"] = ''
1919 self.exportContext["exec_output"] = ''
1920 self.exportContext["decode_block"] = ''
1921 self.exportContext["split"] = _split
1922 split_setup = '''
1923 def wrap(func):
1924 def split(sec):
1925 globals()[sec + '_output'] += func(sec)
1926 return split
1927 split = wrap(split)
1928 del wrap
1929 '''
1930 # This tricky setup (immediately above) allows us to just write
1931 # (e.g.) "split('exec')" in the Python code and the split #ifdef's
1932 # will automatically be added to the exec_output variable. The inner
1933 # Python execution environment doesn't know about the split points,
1934 # so we carefully inject and wrap a closure that can retrieve the
1935 # next split's #define from the parser and add it to the current
1936 # emission-in-progress.
1937 try:
1938 exec split_setup+fixPythonIndentation(t[2]) in self.exportContext
1939 except Exception, exc:
1940 if debug:
1941 raise
1942 error(t.lineno(1), 'In global let block: %s' % exc)
1943 GenCode(self,
1944 header_output=self.exportContext["header_output"],
1945 decoder_output=self.exportContext["decoder_output"],
1946 exec_output=self.exportContext["exec_output"],
1947 decode_block=self.exportContext["decode_block"]).emit()
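# A hedged sketch of what a global let block might look like in an ISA
# description (the C++ string is a placeholder):
#     let {{
#         exec_output = '// some generated C++\n'
#         split('exec')   # appends the next '#endif'/'#if __SPLIT == N'
#                         # marker to exec_output via the wrapper above
#     }};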
1948
1949 # Define the mapping from operand type extensions to C++ types and
1950 # bit widths (stored in operandTypeMap).
1951 def p_def_operand_types(self, t):
1952 'def_operand_types : DEF OPERAND_TYPES CODELIT SEMI'
1953 try:
1954 self.operandTypeMap = eval('{' + t[3] + '}')
1955 except Exception, exc:
1956 if debug:
1957 raise
1958 error(t.lineno(1),
1959 'In def operand_types: %s' % exc)
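# For illustration (the extension names and C++ types below are
# hypothetical), such a definition might read:
#     def operand_types {{
#         'sw' : 'int32_t',
#         'ud' : 'uint64_t'
#     }};
# and is simply eval'd into the operandTypeMap dictionary above.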
1960
1961 # Define the mapping from operand names to operand classes and
1962 # other traits. Stored in operandNameMap.
1963 def p_def_operands(self, t):
1964 'def_operands : DEF OPERANDS CODELIT SEMI'
1965 if not hasattr(self, 'operandTypeMap'):
1966 error(t.lineno(1),
1967 'error: operand types must be defined before operands')
1968 try:
1969 user_dict = eval('{' + t[3] + '}', self.exportContext)
1970 except Exception, exc:
1971 if debug:
1972 raise
1973 error(t.lineno(1), 'In def operands: %s' % exc)
1974 self.buildOperandNameMap(user_dict, t.lexer.lineno)
1975
1976 # A bitfield definition looks like:
1977 # 'def [signed] bitfield <ID> [<first>:<last>]'
1978 # This generates a preprocessor macro in the output file.
1979 def p_def_bitfield_0(self, t):
1980 'def_bitfield : DEF opt_signed BITFIELD ID LESS INTLIT COLON INTLIT GREATER SEMI'
1981 expr = 'bits(machInst, %2d, %2d)' % (t[6], t[8])
1982 if (t[2] == 'signed'):
1983 expr = 'sext<%d>(%s)' % (t[6] - t[8] + 1, expr)
1984 hash_define = '#undef %s\n#define %s\t%s\n' % (t[4], t[4], expr)
1985 GenCode(self, header_output=hash_define).emit()
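# For illustration (OPCODE is a hypothetical field name), the rule
# above turns
#     def bitfield OPCODE <31:26>;
# into a header macro along the lines of
#     #undef OPCODE
#     #define OPCODE bits(machInst, 31, 26)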
1986
1987 # alternate form for single bit: 'def [signed] bitfield <ID> [<bit>]'
1988 def p_def_bitfield_1(self, t):
1989 'def_bitfield : DEF opt_signed BITFIELD ID LESS INTLIT GREATER SEMI'
1990 expr = 'bits(machInst, %2d, %2d)' % (t[6], t[6])
1991 if (t[2] == 'signed'):
1992 expr = 'sext<%d>(%s)' % (1, expr)
1993 hash_define = '#undef %s\n#define %s\t%s\n' % (t[4], t[4], expr)
1994 GenCode(self, header_output=hash_define).emit()
1995
1996 # alternate form for structure member: 'def bitfield <ID> <ID>'
1997 def p_def_bitfield_struct(self, t):
1998 'def_bitfield_struct : DEF opt_signed BITFIELD ID id_with_dot SEMI'
1999 if (t[2] != ''):
2000 error(t.lineno(1),
2001 'error: structure bitfields are always unsigned.')
2002 expr = 'machInst.%s' % t[5]
2003 hash_define = '#undef %s\n#define %s\t%s\n' % (t[4], t[4], expr)
2004 GenCode(self, header_output=hash_define).emit()
2005
2006 def p_id_with_dot_0(self, t):
2007 'id_with_dot : ID'
2008 t[0] = t[1]
2009
2010 def p_id_with_dot_1(self, t):
2011 'id_with_dot : ID DOT id_with_dot'
2012 t[0] = t[1] + t[2] + t[3]
2013
2014 def p_opt_signed_0(self, t):
2015 'opt_signed : SIGNED'
2016 t[0] = t[1]
2017
2018 def p_opt_signed_1(self, t):
2019 'opt_signed : empty'
2020 t[0] = ''
2021
2022 def p_def_template(self, t):
2023 'def_template : DEF TEMPLATE ID CODELIT SEMI'
2024 if t[3] in self.templateMap:
2025 print("warning: template %s already defined" % t[3])
2026 self.templateMap[t[3]] = Template(self, t[4])
2027
2028 # An instruction format definition looks like
2029 # "def format <fmt>(<params>) {{...}};"
2030 def p_def_format(self, t):
2031 'def_format : DEF FORMAT ID LPAREN param_list RPAREN CODELIT SEMI'
2032 (id, params, code) = (t[3], t[5], t[7])
2033 self.defFormat(id, params, code, t.lexer.lineno)
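# For illustration (the format name and body are placeholders), a
# format definition in an ISA description might look like:
#     def format Nop(code, *opt_flags) {{
#         # arbitrary Python that builds header_output, decoder_output,
#         # exec_output and decode_block for each instruction that uses
#         # this format
#     }};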
2034
2035 # The formal parameter list for an instruction format is a
2036 # possibly empty list of comma-separated parameters. Positional
2037 # (standard, non-keyword) parameters must come first, followed by
2038 # keyword parameters, followed by a '*foo' parameter that gets
2039 # excess positional arguments (as in Python). Each of these three
2040 # parameter categories is optional.
2041 #
2042 # Note that we do not support the '**foo' parameter for collecting
2043 # otherwise undefined keyword args. Otherwise the parameter list
2044 # is (I believe) identical to what is supported in Python.
2045 #
2046 # The param list is built up as a flat list of strings: positional
2047 # parameter names, 'name = default' strings for keyword parameters,
2048 # and a '*name' string for the excess-args parameter, if present.
2049 def p_param_list_0(self, t):
2050 'param_list : positional_param_list COMMA nonpositional_param_list'
2051 t[0] = t[1] + t[3]
2052
2053 def p_param_list_1(self, t):
2054 '''param_list : positional_param_list
2055 | nonpositional_param_list'''
2056 t[0] = t[1]
2057
2058 def p_positional_param_list_0(self, t):
2059 'positional_param_list : empty'
2060 t[0] = []
2061
2062 def p_positional_param_list_1(self, t):
2063 'positional_param_list : ID'
2064 t[0] = [t[1]]
2065
2066 def p_positional_param_list_2(self, t):
2067 'positional_param_list : positional_param_list COMMA ID'
2068 t[0] = t[1] + [t[3]]
2069
2070 def p_nonpositional_param_list_0(self, t):
2071 'nonpositional_param_list : keyword_param_list COMMA excess_args_param'
2072 t[0] = t[1] + t[3]
2073
2074 def p_nonpositional_param_list_1(self, t):
2075 '''nonpositional_param_list : keyword_param_list
2076 | excess_args_param'''
2077 t[0] = t[1]
2078
2079 def p_keyword_param_list_0(self, t):
2080 'keyword_param_list : keyword_param'
2081 t[0] = [t[1]]
2082
2083 def p_keyword_param_list_1(self, t):
2084 'keyword_param_list : keyword_param_list COMMA keyword_param'
2085 t[0] = t[1] + [t[3]]
2086
2087 def p_keyword_param(self, t):
2088 'keyword_param : ID EQUALS expr'
2089 t[0] = t[1] + ' = ' + t[3].__repr__()
2090
2091 def p_excess_args_param(self, t):
2092 'excess_args_param : ASTERISK ID'
2093 # Just concatenate them: '*ID'. Wrap in list to be consistent
2094 # with positional_param_list and keyword_param_list.
2095 t[0] = [t[1] + t[2]]
2096
2097 # End of format definition-related rules.
2098 ##############
2099
2100 #
2101 # A decode block looks like:
2102 # decode <field1> [, <field2>]* [default <inst>] { ... }
2103 #
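# For illustration (field, format and mnemonic names are placeholders),
# a block such as
#     decode OPCODE default Unknown::unknown() {
#         0x0: Nop::nop();
#     }
# is wrapped into a C++ 'switch (OPCODE) { case 0x0: ... default: ... }'
# inside the generated decodeInst() function below.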
2104 def p_top_level_decode_block(self, t):
2105 'top_level_decode_block : decode_block'
2106 codeObj = t[1]
2107 codeObj.wrap_decode_block('''
2108 StaticInstPtr
2109 %(isa_name)s::Decoder::decodeInst(%(isa_name)s::ExtMachInst machInst)
2110 {
2111 using namespace %(namespace)s;
2112 ''' % self, '}')
2113
2114 codeObj.emit()
2115
2116 def p_decode_block(self, t):
2117 'decode_block : DECODE ID opt_default LBRACE decode_stmt_list RBRACE'
2118 default_defaults = self.defaultStack.pop()
2119 codeObj = t[5]
2120 # use the "default defaults" only if there was no explicit
2121 # default statement in decode_stmt_list
2122 if not codeObj.has_decode_default:
2123 codeObj += default_defaults
2124 codeObj.wrap_decode_block('switch (%s) {\n' % t[2], '}\n')
2125 t[0] = codeObj
2126
2127 # The opt_default statement serves only to push the "default
2128 # defaults" onto defaultStack. This value will be used by nested
2129 # decode blocks, and used and popped off when the current
2130 # decode_block is processed (in p_decode_block() above).
2131 def p_opt_default_0(self, t):
2132 'opt_default : empty'
2133 # no default specified: reuse the one currently at the top of
2134 # the stack
2135 self.defaultStack.push(self.defaultStack.top())
2136 # no meaningful value returned
2137 t[0] = None
2138
2139 def p_opt_default_1(self, t):
2140 'opt_default : DEFAULT inst'
2141 # push the new default
2142 codeObj = t[2]
2143 codeObj.wrap_decode_block('\ndefault:\n', 'break;\n')
2144 self.defaultStack.push(codeObj)
2145 # no meaningful value returned
2146 t[0] = None
2147
2148 def p_decode_stmt_list_0(self, t):
2149 'decode_stmt_list : decode_stmt'
2150 t[0] = t[1]
2151
2152 def p_decode_stmt_list_1(self, t):
2153 'decode_stmt_list : decode_stmt decode_stmt_list'
2154 if (t[1].has_decode_default and t[2].has_decode_default):
2155 error(t.lineno(1), 'Two default cases in decode block')
2156 t[0] = t[1] + t[2]
2157
2158 #
2159 # Decode statement rules
2160 #
2161 # There are four types of statements allowed in a decode block:
2162 # 1. Format blocks 'format <foo> { ... }'
2163 # 2. Nested decode blocks
2164 # 3. Instruction definitions.
2165 # 4. C preprocessor directives.
2166
2167
2168 # Preprocessor directives found in a decode statement list are
2169 # passed through to the output, replicated to all of the output
2170 # code streams. This works well for ifdefs, so we can ifdef out
2171 # both the declarations and the decode cases generated by an
2172 # instruction definition. Handling them as part of the grammar
2173 # makes it easy to keep them in the right place with respect to
2174 # the code generated by the other statements.
2175 def p_decode_stmt_cpp(self, t):
2176 'decode_stmt : CPPDIRECTIVE'
2177 t[0] = GenCode(self, t[1], t[1], t[1], t[1])
2178
2179 # A format block 'format <foo> { ... }' sets the default
2180 # instruction format used to handle instruction definitions inside
2181 # the block. This format can be overridden by using an explicit
2182 # format on the instruction definition or with a nested format
2183 # block.
2184 def p_decode_stmt_format(self, t):
2185 'decode_stmt : FORMAT push_format_id LBRACE decode_stmt_list RBRACE'
2186 # The format will be pushed on the stack when 'push_format_id'
2187 # is processed (see below). Once the parser has recognized
2188 # the full production (through the right brace), we're done
2189 # with the format, so now we can pop it.
2190 self.formatStack.pop()
2191 t[0] = t[4]
2192
2193 # This rule exists so we can set the current format (& push the
2194 # stack) when we recognize the format name part of the format
2195 # block.
2196 def p_push_format_id(self, t):
2197 'push_format_id : ID'
2198 try:
2199 self.formatStack.push(self.formatMap[t[1]])
2200 t[0] = ('', '// format %s' % t[1])
2201 except KeyError:
2202 error(t.lineno(1), 'instruction format "%s" not defined.' % t[1])
2203
2204 # Nested decode block: if the value of the current field matches
2205 # the specified constant(s), do a nested decode on some other field.
2206 def p_decode_stmt_decode(self, t):
2207 'decode_stmt : case_list COLON decode_block'
2208 case_list = t[1]
2209 codeObj = t[3]
2210 # just wrap the decoding code from the block as a case in the
2211 # outer switch statement.
2212 codeObj.wrap_decode_block('\n%s\n' % ''.join(case_list),
2213 'M5_UNREACHABLE;\n')
2214 codeObj.has_decode_default = (case_list == ['default:'])
2215 t[0] = codeObj
2216
2217 # Instruction definition (finally!).
2218 def p_decode_stmt_inst(self, t):
2219 'decode_stmt : case_list COLON inst SEMI'
2220 case_list = t[1]
2221 codeObj = t[3]
2222 codeObj.wrap_decode_block('\n%s' % ''.join(case_list), 'break;\n')
2223 codeObj.has_decode_default = (case_list == ['default:'])
2224 t[0] = codeObj
2225
2226 # The constant list for a decode case label must be non-empty, and must
2227 # either be the keyword 'default', or made up of one or more
2228 # comma-separated integer literals or strings which evaluate to
2229 # constants when compiled as C++.
2230 def p_case_list_0(self, t):
2231 'case_list : DEFAULT'
2232 t[0] = ['default:']
2233
2234 def prep_int_lit_case_label(self, lit):
2235 if lit >= 2**32:
2236 return 'case ULL(%#x): ' % lit
2237 else:
2238 return 'case %#x: ' % lit
2239
2240 def prep_str_lit_case_label(self, lit):
2241 return 'case %s: ' % lit
2242
2243 def p_case_list_1(self, t):
2244 'case_list : INTLIT'
2245 t[0] = [self.prep_int_lit_case_label(t[1])]
2246
2247 def p_case_list_2(self, t):
2248 'case_list : STRLIT'
2249 t[0] = [self.prep_str_lit_case_label(t[1])]
2250
2251 def p_case_list_3(self, t):
2252 'case_list : case_list COMMA INTLIT'
2253 t[0] = t[1]
2254 t[0].append(self.prep_int_lit_case_label(t[3]))
2255
2256 def p_case_list_4(self, t):
2257 'case_list : case_list COMMA STRLIT'
2258 t[0] = t[1]
2259 t[0].append(self.prep_str_lit_case_label(t[3]))
2260
2261 # Define an instruction using the current instruction format
2262 # (specified by an enclosing format block).
2263 # "<mnemonic>(<args>)"
2264 def p_inst_0(self, t):
2265 'inst : ID LPAREN arg_list RPAREN'
2266 # Pass the ID and arg list to the current format class to deal with.
2267 currentFormat = self.formatStack.top()
2268 codeObj = currentFormat.defineInst(self, t[1], t[3], t.lexer.lineno)
2269 args = ','.join(map(str, t[3]))
2270 args = re.sub('(?m)^', '//', args)
2271 args = re.sub('^//', '', args)
2272 comment = '\n// %s::%s(%s)\n' % (currentFormat.id, t[1], args)
2273 codeObj.prepend_all(comment)
2274 t[0] = codeObj
2275
2276 # Define an instruction using an explicitly specified format:
2277 # "<fmt>::<mnemonic>(<args>)"
2278 def p_inst_1(self, t):
2279 'inst : ID DBLCOLON ID LPAREN arg_list RPAREN'
2280 try:
2281 format = self.formatMap[t[1]]
2282 except KeyError:
2283 error(t.lineno(1), 'instruction format "%s" not defined.' % t[1])
2284
2285 codeObj = format.defineInst(self, t[3], t[5], t.lexer.lineno)
2286 comment = '\n// %s::%s(%s)\n' % (t[1], t[3], t[5])
2287 codeObj.prepend_all(comment)
2288 t[0] = codeObj
2289
2290 # The arg list generates a tuple, where the first element is a
2291 # list of the positional args and the second element is a dict
2292 # containing the keyword args.
2293 def p_arg_list_0(self, t):
2294 'arg_list : positional_arg_list COMMA keyword_arg_list'
2295 t[0] = ( t[1], t[3] )
2296
2297 def p_arg_list_1(self, t):
2298 'arg_list : positional_arg_list'
2299 t[0] = ( t[1], {} )
2300
2301 def p_arg_list_2(self, t):
2302 'arg_list : keyword_arg_list'
2303 t[0] = ( [], t[1] )
2304
2305 def p_positional_arg_list_0(self, t):
2306 'positional_arg_list : empty'
2307 t[0] = []
2308
2309 def p_positional_arg_list_1(self, t):
2310 'positional_arg_list : expr'
2311 t[0] = [t[1]]
2312
2313 def p_positional_arg_list_2(self, t):
2314 'positional_arg_list : positional_arg_list COMMA expr'
2315 t[0] = t[1] + [t[3]]
2316
2317 def p_keyword_arg_list_0(self, t):
2318 'keyword_arg_list : keyword_arg'
2319 t[0] = t[1]
2320
2321 def p_keyword_arg_list_1(self, t):
2322 'keyword_arg_list : keyword_arg_list COMMA keyword_arg'
2323 t[0] = t[1]
2324 t[0].update(t[3])
2325
2326 def p_keyword_arg(self, t):
2327 'keyword_arg : ID EQUALS expr'
2328 t[0] = { t[1] : t[3] }
2329
2330 #
2331 # Basic expressions. These constitute the argument values of
2332 # "function calls" (i.e. instruction definitions in the decode
2333 # block) and default values for formal parameters of format
2334 # functions.
2335 #
2336 # Right now, these are either strings, integers, or (recursively)
2337 # lists of exprs (using Python square-bracket list syntax). Note
2338 # that bare identifiers are treated as string constants here (since
2339 # there isn't really a variable namespace to refer to).
2340 #
2341 def p_expr_0(self, t):
2342 '''expr : ID
2343 | INTLIT
2344 | STRLIT
2345 | CODELIT'''
2346 t[0] = t[1]
2347
2348 def p_expr_1(self, t):
2349 '''expr : LBRACKET list_expr RBRACKET'''
2350 t[0] = t[2]
2351
2352 def p_list_expr_0(self, t):
2353 'list_expr : expr'
2354 t[0] = [t[1]]
2355
2356 def p_list_expr_1(self, t):
2357 'list_expr : list_expr COMMA expr'
2358 t[0] = t[1] + [t[3]]
2359
2360 def p_list_expr_2(self, t):
2361 'list_expr : empty'
2362 t[0] = []
2363
2364 #
2365 # Empty production... use in other rules for readability.
2366 #
2367 def p_empty(self, t):
2368 'empty :'
2369 pass
2370
2371 # Parse error handler. Note that the argument here is the
2372 # offending *token*, not a grammar symbol (hence the need to use
2373 # t.value)
2374 def p_error(self, t):
2375 if t:
2376 error(t.lexer.lineno, "syntax error at '%s'" % t.value)
2377 else:
2378 error("unknown syntax error")
2379
2380 # END OF GRAMMAR RULES
2381
2382 def updateExportContext(self):
2383
2384 # create a continuation that allows us to grab the current parser
2385 def wrapInstObjParams(*args):
2386 return InstObjParams(self, *args)
2387 self.exportContext['InstObjParams'] = wrapInstObjParams
2388 self.exportContext.update(self.templateMap)
2389
2390 def defFormat(self, id, params, code, lineno):
2391 '''Define a new format'''
2392
2393 # make sure we haven't already defined this one
2394 if id in self.formatMap:
2395 error(lineno, 'format %s redefined.' % id)
2396
2397 # create new object and store in global map
2398 self.formatMap[id] = Format(id, params, code)
2399
2400 def protectNonSubstPercents(self, s):
2401 '''Protect any non-dict-substitution '%'s in a format string
2402 (i.e. those not followed by '(')'''
2403
2404 return re.sub(r'%(?!\()', '%%', s)
2405
2406 def buildOperandNameMap(self, user_dict, lineno):
2407 operand_name = {}
2408 for op_name, val in user_dict.iteritems():
2409
2410 # Check if extra attributes have been specified.
2411 if len(val) > 9:
2412 error(lineno, 'error: too many attributes for operand "%s"' %
2413 op_name)
2414
2415 # Pad val with None in case optional args are missing
2416 val += (None, None, None, None)
2417 base_cls_name, dflt_ext, reg_spec, flags, sort_pri, \
2418 read_code, write_code, read_predicate, write_predicate = val[:9]
2419
2420 # Canonical flag structure is a triple of lists, where each list
2421 # indicates the set of flags implied by this operand always, when
2422 # used as a source, and when used as a dest, respectively.
2423 # For simplicity this can be initialized using a variety of fairly
2424 # obvious shortcuts; we convert these to canonical form here.
2425 if not flags:
2426 # no flags specified (e.g., 'None')
2427 flags = ( [], [], [] )
2428 elif isinstance(flags, str):
2429 # a single flag: assumed to be unconditional
2430 flags = ( [ flags ], [], [] )
2431 elif isinstance(flags, list):
2432 # a list of flags: also assumed to be unconditional
2433 flags = ( flags, [], [] )
2434 elif isinstance(flags, tuple):
2435 # it's a tuple: it should be a triple,
2436 # but each item could be a single string or a list
2437 (uncond_flags, src_flags, dest_flags) = flags
2438 flags = (makeList(uncond_flags),
2439 makeList(src_flags), makeList(dest_flags))
2440
2441 # Accumulate attributes of new operand class in tmp_dict
2442 tmp_dict = {}
2443 attrList = ['reg_spec', 'flags', 'sort_pri',
2444 'read_code', 'write_code',
2445 'read_predicate', 'write_predicate']
2446 if dflt_ext:
2447 dflt_ctype = self.operandTypeMap[dflt_ext]
2448 attrList.extend(['dflt_ctype', 'dflt_ext'])
2449 # reg_spec is either just a string or a dictionary
2450 # (for elems of vector)
2451 if isinstance(reg_spec, tuple):
2452 (reg_spec, elem_spec) = reg_spec
2453 if isinstance(elem_spec, str):
2454 attrList.append('elem_spec')
2455 else:
2456 assert(isinstance(elem_spec, dict))
2457 elems = elem_spec
2458 attrList.append('elems')
2459 for attr in attrList:
2460 tmp_dict[attr] = eval(attr)
2461 tmp_dict['base_name'] = op_name
2462
2463 # New class name will be e.g. "IntReg_Ra"
2464 cls_name = base_cls_name + '_' + op_name
2465 # Evaluate string arg to get class object. Note that the
2466 # actual base class for "IntReg" is "IntRegOperand", i.e. we
2467 # have to append "Operand".
2468 try:
2469 base_cls = eval(base_cls_name + 'Operand')
2470 except NameError:
2471 error(lineno,
2472 'error: unknown operand base class "%s"' % base_cls_name)
2473 # The following statement creates a new class called
2474 # <cls_name> as a subclass of <base_cls> with the attributes
2475 # in tmp_dict, just as if we evaluated a class declaration.
2476 operand_name[op_name] = type(cls_name, (base_cls,), tmp_dict)
2477
2478 self.operandNameMap = operand_name
2479
2480 # Define operand variables.
2481 operands = user_dict.keys()
2482 # Add the elems defined in the vector operands and
2483 # build a map elem -> vector (used in OperandList)
2484 elem_to_vec = {}
2485 for op in user_dict.keys():
2486 if hasattr(self.operandNameMap[op], 'elems'):
2487 for elem in self.operandNameMap[op].elems.keys():
2488 operands.append(elem)
2489 elem_to_vec[elem] = op
2490 self.elemToVector = elem_to_vec
2491 extensions = self.operandTypeMap.keys()
2492
2493 operandsREString = r'''
2494 (?<!\w) # neg. lookbehind assertion: prevent partial matches
2495 ((%s)(?:_(%s))?) # match: operand with optional '_' then suffix
2496 (?!\w) # neg. lookahead assertion: prevent partial matches
2497 ''' % (string.join(operands, '|'), string.join(extensions, '|'))
2498
2499 self.operandsRE = re.compile(operandsREString, re.MULTILINE|re.VERBOSE)
2500
2501 # Same as operandsREString, but extension is mandatory, and only two
2502 # groups are returned (base and ext, not full name as above).
2503 # Used for substituting '_' for '.' to make C++ identifiers.
2504 operandsWithExtREString = r'(?<!\w)(%s)_(%s)(?!\w)' \
2505 % (string.join(operands, '|'), string.join(extensions, '|'))
2506
2507 self.operandsWithExtRE = \
2508 re.compile(operandsWithExtREString, re.MULTILINE)
2509
2510 def substMungedOpNames(self, code):
2511 '''Munge operand names in code string to make legal C++
2512 variable names. This means getting rid of the type extension
2513 if any. Will match the base_name attribute of the Operand object.'''
2514 return self.operandsWithExtRE.sub(r'\1', code)
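# For illustration, with a hypothetical operand 'Mem' and type
# extension 'ud', a reference to 'Mem_ud' in a code snippet is munged
# to plain 'Mem'.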
2515
2516 def mungeSnippet(self, s):
2517 '''Fix up code snippets for final substitution in templates.'''
2518 if isinstance(s, str):
2519 return self.substMungedOpNames(substBitOps(s))
2520 else:
2521 return s
2522
2523 def open(self, name, bare=False):
2524 '''Open the output file for writing and include scary warning.'''
2525 filename = os.path.join(self.output_dir, name)
2526 f = open(filename, 'w')
2527 if f:
2528 if not bare:
2529 f.write(ISAParser.scaremonger_template % self)
2530 return f
2531
2532 def update(self, file, contents):
2533 '''Update the output file only. Scons should handle the case when
2534 the new contents are unchanged using its built-in hash feature.'''
2535 f = self.open(file)
2536 f.write(contents)
2537 f.close()
2538
2539 # This regular expression matches '##include' directives
2540 includeRE = re.compile(r'^\s*##include\s+"(?P<filename>[^"]*)".*$',
2541 re.MULTILINE)
2542
2543 def replace_include(self, matchobj, dirname):
2544 """Function to replace a matched '##include' directive with the
2545 contents of the specified file (with nested ##includes
2546 replaced recursively). 'matchobj' is an re match object
2547 (from a match of includeRE) and 'dirname' is the directory
2548 relative to which the file path should be resolved."""
2549
2550 fname = matchobj.group('filename')
2551 full_fname = os.path.normpath(os.path.join(dirname, fname))
2552 contents = '##newfile "%s"\n%s\n##endfile\n' % \
2553 (full_fname, self.read_and_flatten(full_fname))
2554 return contents
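# For illustration, a directive '##include "sub.isa"' appearing in
# dir/main.isa (names are placeholders) is replaced in place with:
#     ##newfile "dir/sub.isa"
#     ...flattened contents of dir/sub.isa...
#     ##endfile
# so the t_NEWFILE/t_ENDFILE lexer rules above can keep line numbers
# pointing back at the original files.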
2555
2556 def read_and_flatten(self, filename):
2557 """Read a file and recursively flatten nested '##include' files."""
2558
2559 current_dir = os.path.dirname(filename)
2560 try:
2561 contents = open(filename).read()
2562 except IOError:
2563 error('Error including file "%s"' % filename)
2564
2565 self.fileNameStack.push(LineTracker(filename))
2566
2567 # Find any includes and include them
2568 def replace(matchobj):
2569 return self.replace_include(matchobj, current_dir)
2570 contents = self.includeRE.sub(replace, contents)
2571
2572 self.fileNameStack.pop()
2573 return contents
2574
2575 AlreadyGenerated = {}
2576
2577 def _parse_isa_desc(self, isa_desc_file):
2578 '''Read in and parse the ISA description.'''
2579
2580 # The build system can end up running the ISA parser twice: once to
2581 # finalize the build dependencies, and then to actually generate
2582 # the files it expects (in src/arch/$ARCH/generated). This code
2583 # doesn't do anything different either time, however; the SCons
2584 # invocations just expect different things. Since this code runs
2585 # within SCons, we can just remember that we've already run and
2586 # not perform a completely unnecessary run, since the ISA parser's
2587 # effect is idempotent.
2588 if isa_desc_file in ISAParser.AlreadyGenerated:
2589 return
2590
2591 # grab the last three path components of isa_desc_file
2592 self.filename = '/'.join(isa_desc_file.split('/')[-3:])
2593
2594 # Read file and (recursively) all included files into a string.
2595 # PLY requires that the input be in a single string so we have to
2596 # do this up front.
2597 isa_desc = self.read_and_flatten(isa_desc_file)
2598
2599 # Initialize lineno tracker
2600 self.lex.lineno = LineTracker(isa_desc_file)
2601
2602 # Parse.
2603 self.parse_string(isa_desc)
2604
2605 ISAParser.AlreadyGenerated[isa_desc_file] = None
2606
2607 def parse_isa_desc(self, *args, **kwargs):
2608 try:
2609 self._parse_isa_desc(*args, **kwargs)
2610 except ISAParserError, e:
2611 print(backtrace(self.fileNameStack))
2612 print("At %s:" % e.lineno)
2613 print(e)
2614 sys.exit(1)
2615
2616 # Called as script: get args from command line.
2617 # Args are: <isa desc file> <output dir>
2618 if __name__ == '__main__':
2619 ISAParser(sys.argv[2]).parse_isa_desc(sys.argv[1])