Merge changes.
[gem5.git] / arch / alpha / isa_desc
1 // -*- mode:c++ -*-
2
3 ////////////////////////////////////////////////////////////////////
4 //
5 // Alpha ISA description file.
6 //
7 ////////////////////////////////////////////////////////////////////
8
9
10 ////////////////////////////////////////////////////////////////////
11 //
12 // Output include file directives.
13 //
14
15 output header {{
16 #include <sstream>
17 #include <iostream>
18 #include <iomanip>
19
20 #include "cpu/static_inst.hh"
21 #include "mem/mem_req.hh" // some constructors use MemReq flags
22 }};
23
24 output decoder {{
25 #include "base/cprintf.hh"
26 #include "base/loader/symtab.hh"
27 #include "cpu/exec_context.hh" // for Jump::branchTarget()
28
29 #include <math.h>
30 #if defined(linux)
31 #include <fenv.h>
32 #endif
33 }};
34
35 output exec {{
36 #include <math.h>
37 #if defined(linux)
38 #include <fenv.h>
39 #endif
40
41 #ifdef FULL_SYSTEM
42 #include "arch/alpha/pseudo_inst.hh"
43 #endif
44 #include "cpu/base_cpu.hh"
45 #include "cpu/exetrace.hh"
46 #include "sim/sim_exit.hh"
47 }};
48
49 ////////////////////////////////////////////////////////////////////
50 //
51 // Namespace statement. Everything below this line will be in the
52 // AlphaISAInst namespace.
53 //
54
55
56 namespace AlphaISA;
57
58 ////////////////////////////////////////////////////////////////////
59 //
60 // Bitfield definitions.
61 //
62
63 // Universal (format-independent) fields
64 def bitfield OPCODE <31:26>;
65 def bitfield RA <25:21>;
66 def bitfield RB <20:16>;
67
68 // Memory format
69 def signed bitfield MEMDISP <15: 0>; // displacement
70 def bitfield MEMFUNC <15: 0>; // function code (same field, unsigned)
71
72 // Memory-format jumps
73 def bitfield JMPFUNC <15:14>; // function code (disp<15:14>)
74 def bitfield JMPHINT <13: 0>; // tgt Icache idx hint (disp<13:0>)
75
76 // Branch format
77 def signed bitfield BRDISP <20: 0>; // displacement
78
79 // Integer operate format(s)
80 def bitfield INTIMM <20:13>; // integer immediate (literal)
81 def bitfield IMM <12:12>; // immediate flag
82 def bitfield INTFUNC <11: 5>; // function code
83 def bitfield RC < 4: 0>; // dest reg
84
85 // Floating-point operate format
86 def bitfield FA <25:21>;
87 def bitfield FB <20:16>;
88 def bitfield FP_FULLFUNC <15: 5>; // complete function code
89 def bitfield FP_TRAPMODE <15:13>; // trapping mode
90 def bitfield FP_ROUNDMODE <12:11>; // rounding mode
91 def bitfield FP_TYPEFUNC <10: 5>; // type+func: handiest for decoding
92 def bitfield FP_SRCTYPE <10: 9>; // source reg type
93 def bitfield FP_SHORTFUNC < 8: 5>; // short function code
94 def bitfield FP_SHORTFUNC_TOP2 <8:7>; // top 2 bits of short func code
95 def bitfield FC < 4: 0>; // dest reg
96
97 // PALcode format
98 def bitfield PALFUNC <25: 0>; // function code
99
100 // EV5 PAL instructions:
101 // HW_LD/HW_ST
102 def bitfield HW_LDST_PHYS <15>; // address is physical
103 def bitfield HW_LDST_ALT <14>; // use ALT_MODE IPR
104 def bitfield HW_LDST_WRTCK <13>; // HW_LD only: fault if no write acc
105 def bitfield HW_LDST_QUAD <12>; // size: 0=32b, 1=64b
106 def bitfield HW_LDST_VPTE <11>; // HW_LD only: is PTE fetch
107 def bitfield HW_LDST_LOCK <10>; // HW_LD only: is load locked
108 def bitfield HW_LDST_COND <10>; // HW_ST only: is store conditional
109 def signed bitfield HW_LDST_DISP <9:0>; // signed displacement
110
111 // HW_REI
112 def bitfield HW_REI_TYP <15:14>; // type: stalling vs. non-stalling
113 def bitfield HW_REI_MBZ <13: 0>; // must be zero
114
115 // HW_MTPR/HW_MFPR
116 def bitfield HW_IPR_IDX <15:0>; // IPR index
117
118 // M5 instructions
119 def bitfield M5FUNC <7:0>;
120
121 def operand_types {{
122 'sb' : ('signed int', 8),
123 'ub' : ('unsigned int', 8),
124 'sw' : ('signed int', 16),
125 'uw' : ('unsigned int', 16),
126 'sl' : ('signed int', 32),
127 'ul' : ('unsigned int', 32),
128 'sq' : ('signed int', 64),
129 'uq' : ('unsigned int', 64),
130 'sf' : ('float', 32),
131 'df' : ('float', 64)
132 }};
133
134 def operands {{
135 # Int regs default to unsigned, but code should not count on this.
136 # For clarity, descriptions that depend on unsigned behavior should
137 # explicitly specify '.uq'.
138 'Ra': IntRegOperandTraits('uq', 'RA', 'IsInteger', 1),
139 'Rb': IntRegOperandTraits('uq', 'RB', 'IsInteger', 2),
140 'Rc': IntRegOperandTraits('uq', 'RC', 'IsInteger', 3),
141 'Fa': FloatRegOperandTraits('df', 'FA', 'IsFloating', 1),
142 'Fb': FloatRegOperandTraits('df', 'FB', 'IsFloating', 2),
143 'Fc': FloatRegOperandTraits('df', 'FC', 'IsFloating', 3),
144 'Mem': MemOperandTraits('uq', None,
145 ('IsMemRef', 'IsLoad', 'IsStore'), 4),
146 'NPC': NPCOperandTraits('uq', None, ( None, None, 'IsControl' ), 4),
147 'Runiq': ControlRegOperandTraits('uq', 'Uniq', None, 1),
148 'FPCR': ControlRegOperandTraits('uq', 'Fpcr', None, 1),
149 # The next two are hacks for non-full-system call-pal emulation
150 'R0': IntRegOperandTraits('uq', '0', None, 1),
151 'R16': IntRegOperandTraits('uq', '16', None, 1)
152 }};
153
154 ////////////////////////////////////////////////////////////////////
155 //
156 // Basic instruction classes/templates/formats etc.
157 //
158
159 output header {{
160 // uncomment the following to get SimpleScalar-compatible disassembly
161 // (useful for diffing output traces).
162 // #define SS_COMPATIBLE_DISASSEMBLY
163
164 /**
165 * Base class for all Alpha static instructions.
166 */
167 class AlphaStaticInst : public StaticInst<AlphaISA>
168 {
169 protected:
170
171 /// Make AlphaISA register dependence tags directly visible in
172 /// this class and derived classes. Maybe these should really
173 /// live here and not in the AlphaISA namespace.
174 enum DependenceTags {
175 FP_Base_DepTag = AlphaISA::FP_Base_DepTag,
176 Fpcr_DepTag = AlphaISA::Fpcr_DepTag,
177 Uniq_DepTag = AlphaISA::Uniq_DepTag,
178 IPR_Base_DepTag = AlphaISA::IPR_Base_DepTag
179 };
180
181 /// Constructor.
182 AlphaStaticInst(const char *mnem, MachInst _machInst,
183 OpClass __opClass)
184 : StaticInst<AlphaISA>(mnem, _machInst, __opClass)
185 {
186 }
187
188 /// Print a register name for disassembly given the unique
189 /// dependence tag number (FP or int).
190 void printReg(std::ostream &os, int reg);
191
192 std::string generateDisassembly(Addr pc, const SymbolTable *symtab);
193 };
194 }};
195
196 output decoder {{
197 void
198 AlphaStaticInst::printReg(std::ostream &os, int reg)
199 {
200 if (reg < FP_Base_DepTag) {
201 ccprintf(os, "r%d", reg);
202 }
203 else {
204 ccprintf(os, "f%d", reg - FP_Base_DepTag);
205 }
206 }
207
208 std::string
209 AlphaStaticInst::generateDisassembly(Addr pc, const SymbolTable *symtab)
210 {
211 std::stringstream ss;
212
213 ccprintf(ss, "%-10s ", mnemonic);
214
215 // just print the first two source regs... if there's
216 // a third one, it's a read-modify-write dest (Rc),
217 // e.g. for CMOVxx
218 if (_numSrcRegs > 0) {
219 printReg(ss, _srcRegIdx[0]);
220 }
221 if (_numSrcRegs > 1) {
222 ss << ",";
223 printReg(ss, _srcRegIdx[1]);
224 }
225
226 // just print the first dest... if there's a second one,
227 // it's generally implicit
228 if (_numDestRegs > 0) {
229 if (_numSrcRegs > 0)
230 ss << ",";
231 printReg(ss, _destRegIdx[0]);
232 }
233
234 return ss.str();
235 }
236 }};
237
238 // Declarations for execute() methods.
239 def template BasicExecDeclare {{
240 Fault execute(%(CPU_exec_context)s *, Trace::InstRecord *);
241 }};
242
243 // Basic instruction class declaration template.
244 def template BasicDeclare {{
245 /**
246 * Static instruction class for "%(mnemonic)s".
247 */
248 class %(class_name)s : public %(base_class)s
249 {
250 public:
251 /// Constructor.
252 %(class_name)s(MachInst machInst);
253
254 %(BasicExecDeclare)s
255 };
256 }};
257
258 // Basic instruction class constructor template.
259 def template BasicConstructor {{
260 inline %(class_name)s::%(class_name)s(MachInst machInst)
261 : %(base_class)s("%(mnemonic)s", machInst, %(op_class)s)
262 {
263 %(constructor)s;
264 }
265 }};
266
267 // Basic instruction class execute method template.
268 def template BasicExecute {{
269 Fault %(class_name)s::execute(%(CPU_exec_context)s *xc,
270 Trace::InstRecord *traceData)
271 {
272 Fault fault = No_Fault;
273
274 %(fp_enable_check)s;
275 %(op_decl)s;
276 %(op_rd)s;
277 %(code)s;
278
279 if (fault == No_Fault) {
280 %(op_wb)s;
281 }
282
283 return fault;
284 }
285 }};
286
287 // Basic decode template.
288 def template BasicDecode {{
289 return new %(class_name)s(machInst);
290 }};
291
292 // Basic decode template, passing mnemonic in as string arg to constructor.
293 def template BasicDecodeWithMnemonic {{
294 return new %(class_name)s("%(mnemonic)s", machInst);
295 }};
296
297 // The most basic instruction format... used only for a few misc. insts
298 def format BasicOperate(code, *flags) {{
299 iop = InstObjParams(name, Name, 'AlphaStaticInst', CodeBlock(code), flags)
300 header_output = BasicDeclare.subst(iop)
301 decoder_output = BasicConstructor.subst(iop)
302 decode_block = BasicDecode.subst(iop)
303 exec_output = BasicExecute.subst(iop)
304 }};
305
306
307
308 ////////////////////////////////////////////////////////////////////
309 //
310 // Nop
311 //
312
313 output header {{
314 /**
315 * Static instruction class for no-ops. This is a leaf class.
316 */
317 class Nop : public AlphaStaticInst
318 {
319 /// Disassembly of original instruction.
320 const std::string originalDisassembly;
321
322 public:
323 /// Constructor
324 Nop(const std::string _originalDisassembly, MachInst _machInst)
325 : AlphaStaticInst("nop", _machInst, No_OpClass),
326 originalDisassembly(_originalDisassembly)
327 {
328 flags[IsNop] = true;
329 }
330
331 ~Nop() { }
332
333 std::string generateDisassembly(Addr pc, const SymbolTable *symtab);
334
335 %(BasicExecDeclare)s
336 };
337 }};
338
339 output decoder {{
340 std::string Nop::generateDisassembly(Addr pc, const SymbolTable *symtab)
341 {
342 #ifdef SS_COMPATIBLE_DISASSEMBLY
343 return originalDisassembly;
344 #else
345 return csprintf("%-10s (%s)", "nop", originalDisassembly);
346 #endif
347 }
348
349 /// Helper function for decoding nops. Substitute Nop object
350 /// for original inst passed in as arg (and delete latter).
351 inline
352 AlphaStaticInst *
353 makeNop(AlphaStaticInst *inst)
354 {
355 AlphaStaticInst *nop = new Nop(inst->disassemble(0), inst->machInst);
356 delete inst;
357 return nop;
358 }
359 }};
360
361 output exec {{
362 Fault
363 Nop::execute(%(CPU_exec_context)s *, Trace::InstRecord *)
364 {
365 return No_Fault;
366 }
367 }};
368
369 // integer & FP operate instructions use Rc as dest, so check for
370 // Rc == 31 to detect nops
371 def template OperateNopCheckDecode {{
372 {
373 AlphaStaticInst *i = new %(class_name)s(machInst);
374 if (RC == 31) {
375 i = makeNop(i);
376 }
377 return i;
378 }
379 }};
380
381 // Like BasicOperate format, but generates NOP if RC/FC == 31
382 def format BasicOperateWithNopCheck(code, *opt_args) {{
383 iop = InstObjParams(name, Name, 'AlphaStaticInst', CodeBlock(code),
384 opt_args)
385 header_output = BasicDeclare.subst(iop)
386 decoder_output = BasicConstructor.subst(iop)
387 decode_block = OperateNopCheckDecode.subst(iop)
388 exec_output = BasicExecute.subst(iop)
389 }};
390
391
392 ////////////////////////////////////////////////////////////////////
393 //
394 // Integer operate instructions
395 //
396
397 output header {{
398 /**
399 * Base class for integer immediate instructions.
400 */
401 class IntegerImm : public AlphaStaticInst
402 {
403 protected:
404 /// Immediate operand value (unsigned 8-bit int).
405 uint8_t imm;
406
407 /// Constructor
408 IntegerImm(const char *mnem, MachInst _machInst, OpClass __opClass)
409 : AlphaStaticInst(mnem, _machInst, __opClass), imm(INTIMM)
410 {
411 }
412
413 std::string generateDisassembly(Addr pc, const SymbolTable *symtab);
414 };
415 }};
416
417 output decoder {{
418 std::string
419 IntegerImm::generateDisassembly(Addr pc, const SymbolTable *symtab)
420 {
421 std::stringstream ss;
422
423 ccprintf(ss, "%-10s ", mnemonic);
424
425 // just print the first source reg... if there's
426 // a second one, it's a read-modify-write dest (Rc),
427 // e.g. for CMOVxx
428 if (_numSrcRegs > 0) {
429 printReg(ss, _srcRegIdx[0]);
430 ss << ",";
431 }
432
433 ss << (int)imm;
434
435 if (_numDestRegs > 0) {
436 ss << ",";
437 printReg(ss, _destRegIdx[0]);
438 }
439
440 return ss.str();
441 }
442 }};
443
444
445 def template RegOrImmDecode {{
446 {
447 AlphaStaticInst *i =
448 (IMM) ? (AlphaStaticInst *)new %(class_name)sImm(machInst)
449 : (AlphaStaticInst *)new %(class_name)s(machInst);
450 if (RC == 31) {
451 i = makeNop(i);
452 }
453 return i;
454 }
455 }};
456
457 // Primary format for integer operate instructions:
458 // - Generates both reg-reg and reg-imm versions if Rb_or_imm is used.
459 // - Generates NOP if RC == 31.
460 def format IntegerOperate(code, *opt_flags) {{
461 # If the code block contains 'Rb_or_imm', we define two instructions,
462 # one using 'Rb' and one using 'imm', and have the decoder select
463 # the right one.
464 uses_imm = (code.find('Rb_or_imm') != -1)
465 if uses_imm:
466 orig_code = code
467 # base code is reg version:
468 # rewrite by substituting 'Rb' for 'Rb_or_imm'
469 code = re.sub(r'Rb_or_imm', 'Rb', orig_code)
470 # generate immediate version by substituting 'imm'
471 # note that imm takes no extension, so we extend
472 # the regexp to replace any extension as well
473 imm_code = re.sub(r'Rb_or_imm(\.\w+)?', 'imm', orig_code)
474
475 # generate declaration for register version
476 cblk = CodeBlock(code)
477 iop = InstObjParams(name, Name, 'AlphaStaticInst', cblk, opt_flags)
478 header_output = BasicDeclare.subst(iop)
479 decoder_output = BasicConstructor.subst(iop)
480 exec_output = BasicExecute.subst(iop)
481
482 if uses_imm:
483 # append declaration for imm version
484 imm_cblk = CodeBlock(imm_code)
485 imm_iop = InstObjParams(name, Name + 'Imm', 'IntegerImm', imm_cblk,
486 opt_flags)
487 header_output += BasicDeclare.subst(imm_iop)
488 decoder_output += BasicConstructor.subst(imm_iop)
489 exec_output += BasicExecute.subst(imm_iop)
490 # decode checks IMM bit to pick correct version
491 decode_block = RegOrImmDecode.subst(iop)
492 else:
493 # no imm version: just check for nop
494 decode_block = OperateNopCheckDecode.subst(iop)
495 }};
496
497
498 ////////////////////////////////////////////////////////////////////
499 //
500 // Floating-point instructions
501 //
502 // Note that many FP-type instructions which do not support all the
503 // various rounding & trapping modes use the simpler format
504 // BasicOperateWithNopCheck.
505 //
506
507 output exec {{
508 /// Check "FP enabled" machine status bit. Called when executing any FP
509 /// instruction in full-system mode.
510 /// @retval Full-system mode: No_Fault if FP is enabled, Fen_Fault
511 /// if not. Non-full-system mode: always returns No_Fault.
512 #ifdef FULL_SYSTEM
513 inline Fault checkFpEnableFault(%(CPU_exec_context)s *xc)
514 {
515 Fault fault = No_Fault; // dummy... this ipr access should not fault
516 if (!EV5::ICSR_FPE(xc->readIpr(AlphaISA::IPR_ICSR, fault))) {
517 fault = Fen_Fault;
518 }
519 return fault;
520 }
521 #else
522 inline Fault checkFpEnableFault(%(CPU_exec_context)s *xc)
523 {
524 return No_Fault;
525 }
526 #endif
527 }};
528
529 output header {{
530 /**
531 * Base class for general floating-point instructions. Includes
532 * support for various Alpha rounding and trapping modes. Only FP
533 * instructions that require this support are derived from this
534 * class; the rest derive directly from AlphaStaticInst.
535 */
536 class AlphaFP : public AlphaStaticInst
537 {
538 public:
539 /// Alpha FP rounding modes.
540 enum RoundingMode {
541 Chopped = 0, ///< round toward zero
542 Minus_Infinity = 1, ///< round toward minus infinity
543 Normal = 2, ///< round to nearest (default)
544 Dynamic = 3, ///< use FPCR setting (in instruction)
545 Plus_Infinity = 3 ///< round to plus infinity (in FPCR)
546 };
547
548 /// Alpha FP trapping modes.
549 /// For instructions that produce integer results, the
550 /// "Underflow Enable" modes really mean "Overflow Enable", and
551 /// the assembly modifier is V rather than U.
552 enum TrappingMode {
553 /// default: nothing enabled
554 Imprecise = 0, ///< no modifier
555 /// underflow/overflow traps enabled, inexact disabled
556 Underflow_Imprecise = 1, ///< /U or /V
557 Underflow_Precise = 5, ///< /SU or /SV
558 /// underflow/overflow and inexact traps enabled
559 Underflow_Inexact_Precise = 7 ///< /SUI or /SVI
560 };
561
562 protected:
563 #if defined(linux)
564 static const int alphaToC99RoundingMode[];
565 #endif
566
567 /// Map enum RoundingMode values to disassembly suffixes.
568 static const char *roundingModeSuffix[];
569 /// Map enum TrappingMode values to FP disassembly suffixes.
570 static const char *fpTrappingModeSuffix[];
571 /// Map enum TrappingMode values to integer disassembly suffixes.
572 static const char *intTrappingModeSuffix[];
573
574 /// This instruction's rounding mode.
575 RoundingMode roundingMode;
576 /// This instruction's trapping mode.
577 TrappingMode trappingMode;
578
579 /// Constructor
580 AlphaFP(const char *mnem, MachInst _machInst, OpClass __opClass)
581 : AlphaStaticInst(mnem, _machInst, __opClass),
582 roundingMode((enum RoundingMode)FP_ROUNDMODE),
583 trappingMode((enum TrappingMode)FP_TRAPMODE)
584 {
585 if (trappingMode != Imprecise) {
586 warn("precise FP traps unimplemented\n");
587 }
588 }
589
590 #if defined(linux)
591 int getC99RoundingMode(uint64_t fpcr_val);
592 #endif
593
594 // This differs from the AlphaStaticInst version only in
595 // printing suffixes for non-default rounding & trapping modes.
596 std::string generateDisassembly(Addr pc, const SymbolTable *symtab);
597 };
598
599 }};
600
601
602 def template FloatingPointDecode {{
603 {
604 bool fast = (FP_TRAPMODE == AlphaFP::Imprecise
605 && FP_ROUNDMODE == AlphaFP::Normal);
606 AlphaStaticInst *i =
607 fast ? (AlphaStaticInst *)new %(class_name)sFast(machInst) :
608 (AlphaStaticInst *)new %(class_name)sGeneral(machInst);
609
610 if (FC == 31) {
611 i = makeNop(i);
612 }
613
614 return i;
615 }
616 }};
617
618 output decoder {{
619 #if defined(linux)
620 int
621 AlphaFP::getC99RoundingMode(uint64_t fpcr_val)
622 {
623 if (roundingMode == Dynamic) {
624 return alphaToC99RoundingMode[bits(fpcr_val, 59, 58)];
625 }
626 else {
627 return alphaToC99RoundingMode[roundingMode];
628 }
629 }
630 #endif
631
632 std::string
633 AlphaFP::generateDisassembly(Addr pc, const SymbolTable *symtab)
634 {
635 std::string mnem_str(mnemonic);
636
637 #ifndef SS_COMPATIBLE_DISASSEMBLY
638 std::string suffix("");
639 suffix += ((_destRegIdx[0] >= FP_Base_DepTag)
640 ? fpTrappingModeSuffix[trappingMode]
641 : intTrappingModeSuffix[trappingMode]);
642 suffix += roundingModeSuffix[roundingMode];
643
644 if (suffix != "") {
645 mnem_str = csprintf("%s/%s", mnemonic, suffix);
646 }
647 #endif
648
649 std::stringstream ss;
650 ccprintf(ss, "%-10s ", mnem_str.c_str());
651
652 // just print the first two source regs... if there's
653 // a third one, it's a read-modify-write dest (Rc),
654 // e.g. for CMOVxx
655 if (_numSrcRegs > 0) {
656 printReg(ss, _srcRegIdx[0]);
657 }
658 if (_numSrcRegs > 1) {
659 ss << ",";
660 printReg(ss, _srcRegIdx[1]);
661 }
662
663 // just print the first dest... if there's a second one,
664 // it's generally implicit
665 if (_numDestRegs > 0) {
666 if (_numSrcRegs > 0)
667 ss << ",";
668 printReg(ss, _destRegIdx[0]);
669 }
670
671 return ss.str();
672 }
673
674 #if defined(linux)
675 const int AlphaFP::alphaToC99RoundingMode[] = {
676 FE_TOWARDZERO, // Chopped
677 FE_DOWNWARD, // Minus_Infinity
678 FE_TONEAREST, // Normal
679 FE_UPWARD // Dynamic in inst, Plus_Infinity in FPCR
680 };
681 #endif
682
683 const char *AlphaFP::roundingModeSuffix[] = { "c", "m", "", "d" };
684 // mark invalid trapping modes, but don't fail on them, because
685 // you could decode anything on a misspeculated path
686 const char *AlphaFP::fpTrappingModeSuffix[] =
687 { "", "u", "INVTM2", "INVTM3", "INVTM4", "su", "INVTM6", "sui" };
688 const char *AlphaFP::intTrappingModeSuffix[] =
689 { "", "v", "INVTM2", "INVTM3", "INVTM4", "sv", "INVTM6", "svi" };
690 }};
691
692 // General format for floating-point operate instructions:
693 // - Checks trapping and rounding mode flags. Trapping modes
694 // currently unimplemented (will fail).
695 // - Generates NOP if FC == 31.
696 def format FloatingPointOperate(code, *opt_args) {{
697 iop = InstObjParams(name, Name, 'AlphaFP', CodeBlock(code), opt_args)
698 decode_block = FloatingPointDecode.subst(iop)
699
700 fast_iop = InstObjParams(name, Name + 'Fast', 'AlphaFP',
701 CodeBlock(code), opt_args)
702 header_output = BasicDeclare.subst(fast_iop)
703 decoder_output = BasicConstructor.subst(fast_iop)
704 exec_output = BasicExecute.subst(fast_iop)
705
706 gen_code_prefix = r'''
707 #if defined(linux)
708 fesetround(getC99RoundingMode(xc->readFpcr()));
709 #endif
710 '''
711 gen_code_suffix = r'''
712 #if defined(linux)
713 fesetround(FE_TONEAREST);
714 #endif
715 '''
716
717 gen_iop = InstObjParams(name, Name + 'General', 'AlphaFP',
718 CodeBlock(gen_code_prefix + code + gen_code_suffix), opt_args)
719 header_output += BasicDeclare.subst(gen_iop)
720 decoder_output += BasicConstructor.subst(gen_iop)
721 exec_output += BasicExecute.subst(gen_iop)
722 }};
723
724
725 ////////////////////////////////////////////////////////////////////
726 //
727 // Memory-format instructions: LoadAddress, Load, Store
728 //
729
730 output header {{
731 /**
732 * Base class for general Alpha memory-format instructions.
733 */
734 class Memory : public AlphaStaticInst
735 {
736 protected:
737
738 /// Memory request flags. See mem_req_base.hh.
739 unsigned memAccessFlags;
740 /// Pointer to EAComp object.
741 const StaticInstPtr<AlphaISA> eaCompPtr;
742 /// Pointer to MemAcc object.
743 const StaticInstPtr<AlphaISA> memAccPtr;
744
745 /// Constructor
746 Memory(const char *mnem, MachInst _machInst, OpClass __opClass,
747 StaticInstPtr<AlphaISA> _eaCompPtr = nullStaticInstPtr,
748 StaticInstPtr<AlphaISA> _memAccPtr = nullStaticInstPtr)
749 : AlphaStaticInst(mnem, _machInst, __opClass),
750 memAccessFlags(0), eaCompPtr(_eaCompPtr), memAccPtr(_memAccPtr)
751 {
752 }
753
754 std::string generateDisassembly(Addr pc, const SymbolTable *symtab);
755
756 public:
757
758 const StaticInstPtr<AlphaISA> &eaCompInst() const { return eaCompPtr; }
759 const StaticInstPtr<AlphaISA> &memAccInst() const { return memAccPtr; }
760 };
761
762 /**
763 * Base class for memory-format instructions using a 32-bit
764 * displacement (i.e. most of them).
765 */
766 class MemoryDisp32 : public Memory
767 {
768 protected:
769 /// Displacement for EA calculation (signed).
770 int32_t disp;
771
772 /// Constructor.
773 MemoryDisp32(const char *mnem, MachInst _machInst, OpClass __opClass,
774 StaticInstPtr<AlphaISA> _eaCompPtr = nullStaticInstPtr,
775 StaticInstPtr<AlphaISA> _memAccPtr = nullStaticInstPtr)
776 : Memory(mnem, _machInst, __opClass, _eaCompPtr, _memAccPtr),
777 disp(MEMDISP)
778 {
779 }
780 };
781
782
783 /**
784 * Base class for a few miscellaneous memory-format insts
785 * that don't interpret the disp field: wh64, fetch, fetch_m, ecb.
786 * None of these instructions has a destination register either.
787 */
788 class MemoryNoDisp : public Memory
789 {
790 protected:
791 /// Constructor
792 MemoryNoDisp(const char *mnem, MachInst _machInst, OpClass __opClass,
793 StaticInstPtr<AlphaISA> _eaCompPtr,
794 StaticInstPtr<AlphaISA> _memAccPtr)
795 : Memory(mnem, _machInst, __opClass, _eaCompPtr, _memAccPtr)
796 {
797 }
798
799 std::string generateDisassembly(Addr pc, const SymbolTable *symtab);
800 };
801
802
803 /**
804 * Base class for "fake" effective-address computation
805 * instructions returned by eaCompInst().
806 */
807 class EACompBase : public AlphaStaticInst
808 {
809 public:
810 /// Constructor
811 EACompBase(MachInst machInst)
812 : AlphaStaticInst("(eacomp)", machInst, IntAluOp)
813 {
814 }
815
816 %(BasicExecDeclare)s
817 };
818
819 /**
820 * Base class for "fake" memory-access instructions returned by
821 * memAccInst().
822 */
823 class MemAccBase : public AlphaStaticInst
824 {
825 public:
826 /// Constructor
827 MemAccBase(MachInst machInst, OpClass __opClass)
828 : AlphaStaticInst("(memacc)", machInst, __opClass)
829 {
830 }
831
832 %(BasicExecDeclare)s
833 };
834
835 }};
836
837
838 output decoder {{
839 std::string
840 Memory::generateDisassembly(Addr pc, const SymbolTable *symtab)
841 {
842 return csprintf("%-10s %c%d,%d(r%d)", mnemonic,
843 flags[IsFloating] ? 'f' : 'r', RA, MEMDISP, RB);
844 }
845
846 std::string
847 MemoryNoDisp::generateDisassembly(Addr pc, const SymbolTable *symtab)
848 {
849 return csprintf("%-10s (r%d)", mnemonic, RB);
850 }
851 }};
852
853 output exec {{
854 Fault
855 EACompBase::execute(%(CPU_exec_context)s *, Trace::InstRecord *)
856 {
857 panic("attempt to execute eacomp");
858 }
859
860 Fault
861 MemAccBase::execute(%(CPU_exec_context)s *, Trace::InstRecord *)
862 {
863 panic("attempt to execute memacc");
864 }
865 }};
866
867
868 def format LoadAddress(code) {{
869 iop = InstObjParams(name, Name, 'MemoryDisp32', CodeBlock(code))
870 header_output = BasicDeclare.subst(iop)
871 decoder_output = BasicConstructor.subst(iop)
872 decode_block = BasicDecode.subst(iop)
873 exec_output = BasicExecute.subst(iop)
874 }};
875
876
877 def template LoadStoreDeclare {{
878 /**
879 * Static instruction class for "%(mnemonic)s".
880 */
881 class %(class_name)s : public %(base_class)s
882 {
883 protected:
884
885 /**
886 * "Fake" effective address computation class for "%(mnemonic)s".
887 */
888 class EAComp : public EACompBase
889 {
890 public:
891 /// Constructor
892 EAComp(MachInst machInst);
893 };
894
895 /**
896 * "Fake" memory access instruction class for "%(mnemonic)s".
897 */
898 class MemAcc : public MemAccBase
899 {
900 public:
901 /// Constructor
902 MemAcc(MachInst machInst);
903 };
904
905 public:
906
907 /// Constructor.
908 %(class_name)s(MachInst machInst);
909
910 %(BasicExecDeclare)s
911 };
912 }};
913
914 def template LoadStoreConstructor {{
915 inline %(class_name)s::EAComp::EAComp(MachInst machInst)
916 : EACompBase(machInst)
917 {
918 %(ea_constructor)s;
919 }
920
921 inline %(class_name)s::MemAcc::MemAcc(MachInst machInst)
922 : MemAccBase(machInst, %(op_class)s)
923 {
924 %(memacc_constructor)s;
925 }
926
927 inline %(class_name)s::%(class_name)s(MachInst machInst)
928 : %(base_class)s("%(mnemonic)s", machInst, %(op_class)s,
929 new EAComp(machInst), new MemAcc(machInst))
930 {
931 %(constructor)s;
932 }
933 }};
934
935 def template LoadStoreExecute {{
936 Fault %(class_name)s::execute(%(CPU_exec_context)s *xc,
937 Trace::InstRecord *traceData)
938 {
939 Addr EA;
940 Fault fault = No_Fault;
941
942 %(fp_enable_check)s;
943 %(op_decl)s;
944 %(op_nonmem_rd)s;
945 %(ea_code)s;
946
947 if (fault == No_Fault) {
948 %(op_mem_rd)s;
949 %(memacc_code)s;
950 }
951
952 if (fault == No_Fault) {
953 %(op_mem_wb)s;
954 }
955
956 if (fault == No_Fault) {
957 %(postacc_code)s;
958 }
959
960 if (fault == No_Fault) {
961 %(op_nonmem_wb)s;
962 }
963
964 return fault;
965 }
966 }};
967
968
969 def template PrefetchExecute {{
970 Fault %(class_name)s::execute(%(CPU_exec_context)s *xc,
971 Trace::InstRecord *traceData)
972 {
973 Addr EA;
974 Fault fault = No_Fault;
975
976 %(fp_enable_check)s;
977 %(op_decl)s;
978 %(op_nonmem_rd)s;
979 %(ea_code)s;
980
981 if (fault == No_Fault) {
982 xc->prefetch(EA, memAccessFlags);
983 }
984
// NOTE: prefetches never fault architecturally -- any fault raised by
// the EA computation above merely suppresses the prefetch, and the
// instruction itself always completes successfully.
985 return No_Fault;
986 }
987 }};
988
989 // load instructions use Ra as dest, so check for
990 // Ra == 31 to detect nops
991 def template LoadNopCheckDecode {{
992 {
993 AlphaStaticInst *i = new %(class_name)s(machInst);
994 if (RA == 31) {
995 i = makeNop(i);
996 }
997 return i;
998 }
999 }};
1000
1001
1002 // for some load instructions, Ra == 31 indicates a prefetch (not a nop)
1003 def template LoadPrefetchCheckDecode {{
1004 {
1005 if (RA != 31) {
1006 return new %(class_name)s(machInst);
1007 }
1008 else {
1009 return new %(class_name)sPrefetch(machInst);
1010 }
1011 }
1012 }};
1013
1014
1015 let {{
1016 def LoadStoreBase(name, Name, ea_code, memacc_code, postacc_code = '',
1017 base_class = 'MemoryDisp32', flags = [],
1018 decode_template = BasicDecode,
1019 exec_template = LoadStoreExecute):
1020 # Segregate flags into instruction flags (handled by InstObjParams)
1021 # and memory access flags (handled here).
1022
1023 # Would be nice to autogenerate this list, but oh well.
1024 valid_mem_flags = ['LOCKED', 'NO_FAULT', 'EVICT_NEXT', 'PF_EXCLUSIVE']
1025 inst_flags = []
1026 mem_flags = []
1027 for f in flags:
1028 if f in valid_mem_flags:
1029 mem_flags.append(f)
1030 else:
1031 inst_flags.append(f)
1032
1033 ea_cblk = CodeBlock(ea_code)
1034 memacc_cblk = CodeBlock(memacc_code)
1035 postacc_cblk = CodeBlock(postacc_code)
1036
1037 cblk = CodeBlock(ea_code + memacc_code + postacc_code)
1038 iop = InstObjParams(name, Name, base_class, cblk, inst_flags)
1039
1040 iop.ea_constructor = ea_cblk.constructor
1041 iop.ea_code = ea_cblk.code
1042 iop.memacc_constructor = memacc_cblk.constructor
1043 iop.memacc_code = memacc_cblk.code
1044 iop.postacc_code = postacc_cblk.code
1045
1046 mem_flags = string.join(mem_flags, '|')
1047 if mem_flags != '':
1048 iop.constructor += '\n\tmemAccessFlags = ' + mem_flags + ';'
1049
1050 # (header_output, decoder_output, decode_block, exec_output)
1051 return (LoadStoreDeclare.subst(iop), LoadStoreConstructor.subst(iop),
1052 decode_template.subst(iop), exec_template.subst(iop))
1053 }};
1054
1055
1056 def format LoadOrNop(ea_code, memacc_code, *flags) {{
1057 (header_output, decoder_output, decode_block, exec_output) = \
1058 LoadStoreBase(name, Name, ea_code, memacc_code, flags = flags,
1059 decode_template = LoadNopCheckDecode)
1060 }};
1061
1062
1063 // Note that the flags passed in apply only to the prefetch version
1064 def format LoadOrPrefetch(ea_code, memacc_code, *pf_flags) {{
1065 # declare the load instruction object and generate the decode block
1066 (header_output, decoder_output, decode_block, exec_output) = \
1067 LoadStoreBase(name, Name, ea_code, memacc_code,
1068 decode_template = LoadPrefetchCheckDecode)
1069
1070 # Declare the prefetch instruction object.
1071
1072 # convert flags from tuple to list to make them mutable
1073 pf_flags = list(pf_flags) + ['IsMemRef', 'IsLoad', 'IsDataPrefetch', 'MemReadOp', 'NO_FAULT']
1074
1075 (pf_header_output, pf_decoder_output, _, pf_exec_output) = \
1076 LoadStoreBase(name, Name + 'Prefetch', ea_code, '',
1077 flags = pf_flags, exec_template = PrefetchExecute)
1078
1079 header_output += pf_header_output
1080 decoder_output += pf_decoder_output
1081 exec_output += pf_exec_output
1082 }};
1083
1084
1085 def format Store(ea_code, memacc_code, *flags) {{
1086 (header_output, decoder_output, decode_block, exec_output) = \
1087 LoadStoreBase(name, Name, ea_code, memacc_code, flags = flags)
1088 }};
1089
1090
1091 def format StoreCond(ea_code, memacc_code, postacc_code, *flags) {{
1092 (header_output, decoder_output, decode_block, exec_output) = \
1093 LoadStoreBase(name, Name, ea_code, memacc_code, postacc_code,
1094 flags = flags)
1095 }};
1096
1097
1098 // Use 'MemoryNoDisp' as base: for wh64, fetch, ecb
1099 def format MiscPrefetch(ea_code, memacc_code, *flags) {{
1100 (header_output, decoder_output, decode_block, exec_output) = \
1101 LoadStoreBase(name, Name, ea_code, memacc_code, flags = flags,
1102 base_class = 'MemoryNoDisp')
1103 }};
1104
1105
1106 ////////////////////////////////////////////////////////////////////
1107 //
1108 // Control transfer instructions
1109 //
1110
1111 output header {{
1112
1113     /**
1114      * Base class for instructions whose disassembly is not purely a
1115      * function of the machine instruction (i.e., it depends on the
1116      * PC). This class overrides the disassemble() method to check
1117      * the PC and symbol table values before re-using a cached
1118      * disassembly string. This is necessary for branches and jumps,
1119      * where the disassembly string includes the target address (which
1120      * may depend on the PC and/or symbol table).
1121      */
1122     class PCDependentDisassembly : public AlphaStaticInst
1123     {
1124       protected:
1125         /// Cached program counter from last disassembly
1126         Addr cachedPC;
1127         /// Cached symbol table pointer from last disassembly
1128         const SymbolTable *cachedSymtab;
1129
1130         /// Constructor
1131         PCDependentDisassembly(const char *mnem, MachInst _machInst,
1132                                OpClass __opClass)
1133             : AlphaStaticInst(mnem, _machInst, __opClass),
1134               cachedPC(0), cachedSymtab(0)
1135         {
1136         }
1137
             /// Returns the cached disassembly string, regenerating it
             /// whenever pc or symtab differ from the previous call.
1138         const std::string &disassemble(Addr pc, const SymbolTable *symtab);
1139     };
1140
1141     /**
1142      * Base class for branches (PC-relative control transfers),
1143      * conditional or unconditional.
1144      */
1145     class Branch : public PCDependentDisassembly
1146     {
1147       protected:
1148         /// Displacement to target address (signed).
1149         int32_t disp;
1150
1151         /// Constructor. BRDISP is an instruction-count offset; the
1152         /// left shift by 2 converts it to a byte displacement.
1152         Branch(const char *mnem, MachInst _machInst, OpClass __opClass)
1153             : PCDependentDisassembly(mnem, _machInst, __opClass),
1154               disp(BRDISP << 2)
1155         {
1156         }
1157
1158         Addr branchTarget(Addr branchPC) const;
1159
1160         std::string generateDisassembly(Addr pc, const SymbolTable *symtab);
1161     };
1162
1163     /**
1164      * Base class for jumps (register-indirect control transfers). In
1165      * the Alpha ISA, these are always unconditional.
1166      */
1167     class Jump : public PCDependentDisassembly
1168     {
1169       protected:
1170
1171         /// Displacement to target address (signed).
             /// (Kept unshifted — the runtime target comes from Rb, not
             /// from this field; see Jump::branchTarget in the decoder.)
1172         int32_t disp;
1173
1174       public:
1175         /// Constructor
1176         Jump(const char *mnem, MachInst _machInst, OpClass __opClass)
1177             : PCDependentDisassembly(mnem, _machInst, __opClass),
1178               disp(BRDISP)
1179         {
1180         }
1181
1182         Addr branchTarget(ExecContext *xc) const;
1183
1184         std::string generateDisassembly(Addr pc, const SymbolTable *symtab);
1185     };
1186 }};
1187
1188 output decoder {{
1189     Addr
1190     Branch::branchTarget(Addr branchPC) const
1191     {
             // displacement is relative to the incremented PC (PC + 4)
1192         return branchPC + 4 + disp;
1193     }
1194
1195     Addr
1196     Jump::branchTarget(ExecContext *xc) const
1197     {
1198         Addr NPC = xc->readPC() + 4;
1199         uint64_t Rb = xc->readIntReg(_srcRegIdx[0]);
             // Target is Rb with the low two bits cleared; bit 0 of the
             // incremented PC is carried over (presumably the PAL-mode
             // flag — confirm against the Alpha architecture handbook).
1200         return (Rb & ~3) | (NPC & 1);
1201     }
1202
1203     const std::string &
1204     PCDependentDisassembly::disassemble(Addr pc, const SymbolTable *symtab)
1205     {
             // Regenerate only when the PC or symbol table differs from
             // the previous call; otherwise reuse the cached string.
1206         if (!cachedDisassembly ||
1207             pc != cachedPC || symtab != cachedSymtab)
1208         {
1209             if (cachedDisassembly)
1210                 delete cachedDisassembly;
1211
1212             cachedDisassembly =
1213                 new std::string(generateDisassembly(pc, symtab));
1214             cachedPC = pc;
1215             cachedSymtab = symtab;
1216         }
1217
1218         return *cachedDisassembly;
1219     }
1220
1221     std::string
1222     Branch::generateDisassembly(Addr pc, const SymbolTable *symtab)
1223     {
1224         std::stringstream ss;
1225
1226         ccprintf(ss, "%-10s ", mnemonic);
1227
1228         // There's only one register arg (RA), but it could be
1229         // either a source (the condition for conditional
1230         // branches) or a destination (the link reg for
1231         // unconditional branches)
1232         if (_numSrcRegs > 0) {
1233             printReg(ss, _srcRegIdx[0]);
1234             ss << ",";
1235         }
1236         else if (_numDestRegs > 0) {
1237             printReg(ss, _destRegIdx[0]);
1238             ss << ",";
1239         }
1240
1241 #ifdef SS_COMPATIBLE_DISASSEMBLY
1242         if (_numSrcRegs == 0 && _numDestRegs == 0) {
1243             printReg(ss, 31);
1244             ss << ",";
1245         }
1246 #endif
1247
1248         Addr target = pc + 4 + disp;
1249
             // Prefer a symbolic name for the target when available.
1250         std::string str;
1251         if (symtab && symtab->findSymbol(target, str))
1252             ss << str;
1253         else
1254             ccprintf(ss, "0x%x", target);
1255
1256         return ss.str();
1257     }
1258
1259     std::string
1260     Jump::generateDisassembly(Addr pc, const SymbolTable *symtab)
1261     {
1262         std::stringstream ss;
1263
1264         ccprintf(ss, "%-10s ", mnemonic);
1265
1266 #ifdef SS_COMPATIBLE_DISASSEMBLY
1267         if (_numDestRegs == 0) {
1268             printReg(ss, 31);
1269             ss << ",";
1270         }
1271 #endif
1272
1273         if (_numDestRegs > 0) {
1274             printReg(ss, _destRegIdx[0]);
1275             ss << ",";
1276         }
1277
             // register-indirect: print the base register, not a target
1278         ccprintf(ss, "(r%d)", RB);
1279
1280         return ss.str();
1281     }
1282 }};
1283
1284 def template JumpOrBranchDecode {{
         // RA == 31 means the link register is R31 (discarded), so decode
         // to the plain variant; otherwise use the AndLink variant that
         // writes the return address into Ra.
1285     return (RA == 31)
1286         ? (StaticInst<AlphaISA> *)new %(class_name)s(machInst)
1287         : (StaticInst<AlphaISA> *)new %(class_name)sAndLink(machInst);
1288 }};
1289
1290 def format CondBranch(code) {{
         # Wrap 'code' so it must set 'cond'; a taken branch adds the
         # (already byte-scaled) displacement to NPC.
1291     code = 'bool cond;\n' + code + '\nif (cond) NPC = NPC + disp;\n';
1292     iop = InstObjParams(name, Name, 'Branch', CodeBlock(code),
1293                         ('IsDirectControl', 'IsCondControl'))
1294     header_output = BasicDeclare.subst(iop)
1295     decoder_output = BasicConstructor.subst(iop)
1296     decode_block = BasicDecode.subst(iop)
1297     exec_output = BasicExecute.subst(iop)
1298 }};
1299
1300 let {{
1301 def UncondCtrlBase(name, Name, base_class, npc_expr, flags):
     # Build both the plain and the '*AndLink' (link-register-writing)
     # variants of an unconditional control transfer; returns the usual
     # (header, decoder, decode, exec) tuple.
1302     # Declare basic control transfer w/o link (i.e. link reg is R31)
1303     nolink_code = 'NPC = %s;\n' % npc_expr
1304     nolink_iop = InstObjParams(name, Name, base_class,
1305                                CodeBlock(nolink_code), flags)
1306     header_output = BasicDeclare.subst(nolink_iop)
1307     decoder_output = BasicConstructor.subst(nolink_iop)
1308     exec_output = BasicExecute.subst(nolink_iop)
1309
1310     # Generate declaration of '*AndLink' version, append to decls
1311     link_code = 'Ra = NPC & ~3;\n' + nolink_code
1312     link_iop = InstObjParams(name, Name + 'AndLink', base_class,
1313                              CodeBlock(link_code), flags)
1314     header_output += BasicDeclare.subst(link_iop)
1315     decoder_output += BasicConstructor.subst(link_iop)
1316     exec_output += BasicExecute.subst(link_iop)
1317
1318     # need to use nolink_iop for the decode template since it is expecting
1319     # the shorter version of class_name (w/o "AndLink")
1320
1321     return (header_output, decoder_output,
1322             JumpOrBranchDecode.subst(nolink_iop), exec_output)
1323 }};
1324
1325 def format UncondBranch(*flags) {{
         # br/bsr: PC-relative, always taken.
1326     flags += ('IsUncondControl', 'IsDirectControl')
1327     (header_output, decoder_output, decode_block, exec_output) = \
1328         UncondCtrlBase(name, Name, 'Branch', 'NPC + disp', flags)
1329 }};
1330
1331 def format Jump(*flags) {{
         # jmp/jsr/ret/jsr_coroutine: target comes from Rb (low two bits
         # ignored); bit 0 of the incremented PC is preserved.
1332     flags += ('IsUncondControl', 'IsIndirectControl')
1333     (header_output, decoder_output, decode_block, exec_output) = \
1334         UncondCtrlBase(name, Name, 'Jump', '(Rb & ~3) | (NPC & 1)', flags)
1335 }};
1336
1337
1338 ////////////////////////////////////////////////////////////////////
1339 //
1340 // PAL calls
1341 //
1342
1343 output header {{
1344     /**
1345      * Base class for emulated call_pal calls (used only in
1346      * non-full-system mode).
1347      */
1348     class EmulatedCallPal : public AlphaStaticInst
1349     {
1350       protected:
1351
1352         /// Constructor.
1353         EmulatedCallPal(const char *mnem, MachInst _machInst,
1354                         OpClass __opClass)
1355             : AlphaStaticInst(mnem, _machInst, __opClass)
1356         {
1357         }
1358
             /// Renders as "call_pal <mnemonic>" (defined in the decoder
             /// output block).
1359         std::string generateDisassembly(Addr pc, const SymbolTable *symtab);
1360     };
1361 }};
1362
1363 output decoder {{
1364     std::string
1365     EmulatedCallPal::generateDisassembly(Addr pc, const SymbolTable *symtab)
1366     {
1367 #ifdef SS_COMPATIBLE_DISASSEMBLY
             // unpadded mnemonic field for SS-compatible output
1368         return csprintf("%s %s", "call_pal", mnemonic);
1369 #else
1370         return csprintf("%-10s %s", "call_pal", mnemonic);
1371 #endif
1372     }
1373 }};
1374
1375 def format EmulatedCallPal(code, *flags) {{
         # Non-full-system call_pal, emulated directly by 'code'
         # (see the EmulatedCallPal class comment).
1376     iop = InstObjParams(name, Name, 'EmulatedCallPal', CodeBlock(code), flags)
1377     header_output = BasicDeclare.subst(iop)
1378     decoder_output = BasicConstructor.subst(iop)
1379     decode_block = BasicDecode.subst(iop)
1380     exec_output = BasicExecute.subst(iop)
1381 }};
1382
1383 output header {{
1384     /**
1385      * Base class for full-system-mode call_pal instructions.
1386      * Probably could turn this into a leaf class and get rid of the
1387      * parser template.
1388      */
1389     class CallPalBase : public AlphaStaticInst
1390     {
1391       protected:
1392         int palFunc; ///< Function code part of instruction
1393         int palOffset; ///< Target PC, offset from IPR_PAL_BASE
1394         bool palValid; ///< is the function code valid?
1395         bool palPriv; ///< is this call privileged?
1396
1397         /// Constructor. Decodes the function-code bits into the
1398         /// fields above (see the decoder output block).
1398         CallPalBase(const char *mnem, MachInst _machInst,
1399                     OpClass __opClass);
1400
1401         std::string generateDisassembly(Addr pc, const SymbolTable *symtab);
1402     };
1403 }};
1404
1405 output decoder {{
1406     inline
1407     CallPalBase::CallPalBase(const char *mnem, MachInst _machInst,
1408                              OpClass __opClass)
1409         : AlphaStaticInst(mnem, _machInst, __opClass),
1410           palFunc(PALFUNC)
1411     {
1412         // From the 21164 HRM (paraphrased):
1413         // Bit 7 of the function code (mask 0x80) indicates
1414         // whether the call is privileged (bit 7 == 0) or
1415         // unprivileged (bit 7 == 1). The privileged call table
1416         // starts at 0x2000, the unprivileged call table starts at
1417         // 0x3000. Bits 5-0 (mask 0x3f) are used to calculate the
1418         // offset.
1419         const int palPrivMask = 0x80;
1420         const int palOffsetMask = 0x3f;
1421
1422         // Pal call is invalid unless all other bits are 0
1423         palValid = ((machInst & ~(palPrivMask | palOffsetMask)) == 0);
1424         palPriv = ((machInst & palPrivMask) == 0);
1425         int shortPalFunc = (machInst & palOffsetMask);
1426         // Add 1 to base to set pal-mode bit
             // (entry points are spaced 0x40 apart, hence the << 6)
1427         palOffset = (palPriv ? 0x2001 : 0x3001) + (shortPalFunc << 6);
1428     }
1429
1430     std::string
1431     CallPalBase::generateDisassembly(Addr pc, const SymbolTable *symtab)
1432     {
1433         return csprintf("%-10s %#x", "call_pal", palFunc);
1434     }
1435 }};
1436
1437 def format CallPal(code, *flags) {{
         # Full-system call_pal: vectors into PAL code via the offsets
         # computed in the CallPalBase constructor.
1438     iop = InstObjParams(name, Name, 'CallPalBase', CodeBlock(code), flags)
1439     header_output = BasicDeclare.subst(iop)
1440     decoder_output = BasicConstructor.subst(iop)
1441     decode_block = BasicDecode.subst(iop)
1442     exec_output = BasicExecute.subst(iop)
1443 }};
1444
1445 ////////////////////////////////////////////////////////////////////
1446 //
1447 // hw_ld, hw_st
1448 //
1449
1450 output header {{
1451     /**
1452      * Base class for hw_ld and hw_st. Unlike ordinary loads/stores,
1453      * these carry their memory-access flags in the instruction bits
1454      * themselves (decoded in the constructor).
1453      */
1454     class HwLoadStore : public Memory
1455     {
1456       protected:
1457
1458         /// Displacement for EA calculation (signed).
1459         int16_t disp;
1460
1461         /// Constructor
1462         HwLoadStore(const char *mnem, MachInst _machInst, OpClass __opClass,
1463                     StaticInstPtr<AlphaISA> _eaCompPtr,
1464                     StaticInstPtr<AlphaISA> _memAccPtr);
1465
1466         std::string generateDisassembly(Addr pc, const SymbolTable *symtab);
1467     };
1468 }};
1469
1470
1471 output decoder {{
1472     inline
1473     HwLoadStore::HwLoadStore(const char *mnem, MachInst _machInst,
1474                              OpClass __opClass,
1475                              StaticInstPtr<AlphaISA> _eaCompPtr,
1476                              StaticInstPtr<AlphaISA> _memAccPtr)
1477         : Memory(mnem, _machInst, __opClass, _eaCompPtr, _memAccPtr),
1478           disp(HW_LDST_DISP)
1479     {
             // Translate the HW_LDST_* instruction bits into memory
             // request flags.
1480         memAccessFlags = 0;
1481         if (HW_LDST_PHYS) memAccessFlags |= PHYSICAL;
1482         if (HW_LDST_ALT) memAccessFlags |= ALTMODE;
1483         if (HW_LDST_VPTE) memAccessFlags |= VPTE;
1484         if (HW_LDST_LOCK) memAccessFlags |= LOCKED;
1485     }
1486
1487     std::string
1488     HwLoadStore::generateDisassembly(Addr pc, const SymbolTable *symtab)
1489     {
1490 #ifdef SS_COMPATIBLE_DISASSEMBLY
1491         return csprintf("%-10s r%d,%d(r%d)", mnemonic, RA, disp, RB);
1492 #else
1493         // HW_LDST_LOCK and HW_LDST_COND are the same bit.
             // Loads print it as LOCK, stores as COND.
1494         const char *lock_str =
1495             (HW_LDST_LOCK) ? (flags[IsLoad] ? ",LOCK" : ",COND") : "";
1496
1497         return csprintf("%-10s r%d,%d(r%d)%s%s%s%s%s",
1498                         mnemonic, RA, disp, RB,
1499                         HW_LDST_PHYS ? ",PHYS" : "",
1500                         HW_LDST_ALT ? ",ALT" : "",
1501                         HW_LDST_QUAD ? ",QUAD" : "",
1502                         HW_LDST_VPTE ? ",VPTE" : "",
1503                         lock_str);
1504 #endif
1505     }
1506 }};
1507
1508 def format HwLoadStore(ea_code, memacc_code, class_ext, *flags) {{
         # class_ext distinguishes size/variant-specific subclasses of
         # the shared HwLoadStore base.
1509     (header_output, decoder_output, decode_block, exec_output) = \
1510         LoadStoreBase(name, Name + class_ext, ea_code, memacc_code,
1511                       flags = flags, base_class = 'HwLoadStore')
1512 }};
1513
1514
1515 def format HwStoreCond(ea_code, memacc_code, postacc_code, class_ext, *flags) {{
         # Conditional hw_st: like HwLoadStore but with a post-access
         # code block (checks the write result).
1516     (header_output, decoder_output, decode_block, exec_output) = \
1517         LoadStoreBase(name, Name + class_ext, ea_code, memacc_code,
1518                       postacc_code, flags = flags, base_class = 'HwLoadStore')
1519 }};
1520
1521
1522 output header {{
1523     /**
1524      * Base class for hw_mfpr and hw_mtpr (moves between integer
1525      * registers and internal processor registers).
1525      */
1526     class HwMoveIPR : public AlphaStaticInst
1527     {
1528       protected:
1529         /// Index of internal processor register.
1530         int ipr_index;
1531
1532         /// Constructor
1533         HwMoveIPR(const char *mnem, MachInst _machInst, OpClass __opClass)
1534             : AlphaStaticInst(mnem, _machInst, __opClass),
1535               ipr_index(HW_IPR_IDX)
1536         {
1537         }
1538
1539         std::string generateDisassembly(Addr pc, const SymbolTable *symtab);
1540     };
1541 }};
1542
1543 output decoder {{
1544     std::string
1545     HwMoveIPR::generateDisassembly(Addr pc, const SymbolTable *symtab)
1546     {
             // Direction is inferred from the operands: mtpr reads an
             // integer source register (Ra), mfpr has none.
1547         if (_numSrcRegs > 0) {
1548             // must be mtpr
1549             return csprintf("%-10s r%d,IPR(%#x)",
1550                             mnemonic, RA, ipr_index);
1551         }
1552         else {
1553             // must be mfpr
1554             return csprintf("%-10s IPR(%#x),r%d",
1555                             mnemonic, ipr_index, RA);
1556         }
1557     }
1558 }};
1559
1560 def format HwMoveIPR(code) {{
         # hw_mfpr/hw_mtpr: plain templates, no extra flags.
1561     iop = InstObjParams(name, Name, 'HwMoveIPR', CodeBlock(code))
1562     header_output = BasicDeclare.subst(iop)
1563     decoder_output = BasicConstructor.subst(iop)
1564     decode_block = BasicDecode.subst(iop)
1565     exec_output = BasicExecute.subst(iop)
1566 }};
1567
1568
1569 ////////////////////////////////////////////////////////////////////
1570 //
1571 // Unimplemented instructions
1572 //
1573
1574 output header {{
1575     /**
1576      * Static instruction class for unimplemented instructions that
1577      * cause simulator termination. Note that these are recognized
1578      * (legal) instructions that the simulator does not support; the
1579      * 'Unknown' class is used for unrecognized/illegal instructions.
1580      * This is a leaf class.
1581      */
1582     class FailUnimplemented : public AlphaStaticInst
1583     {
1584       public:
1585         /// Constructor
1586         FailUnimplemented(const char *_mnemonic, MachInst _machInst)
1587             : AlphaStaticInst(_mnemonic, _machInst, No_OpClass)
1588         {
1589             // don't call execute() (which panics) if we're on a
1590             // speculative path
1591             flags[IsNonSpeculative] = true;
1592         }
1593
             // execute() declaration, filled in by the parser template.
1594         %(BasicExecDeclare)s
1595
1596         std::string generateDisassembly(Addr pc, const SymbolTable *symtab);
1597     };
1598
1599     /**
1600      * Base class for unimplemented instructions that cause a warning
1601      * to be printed (but do not terminate simulation). This
1602      * implementation is a little screwy in that it will print a
1603      * warning for each instance of a particular unimplemented machine
1604      * instruction, not just for each unimplemented opcode. Should
1605      * probably make the 'warned' flag a static member of the derived
1606      * class.
1607      */
1608     class WarnUnimplemented : public AlphaStaticInst
1609     {
1610       private:
1611         /// Have we warned on this instruction yet?
1612         bool warned;
1613
1614       public:
1615         /// Constructor
1616         WarnUnimplemented(const char *_mnemonic, MachInst _machInst)
1617             : AlphaStaticInst(_mnemonic, _machInst, No_OpClass), warned(false)
1618         {
1619             // don't call execute() (which panics) if we're on a
1620             // speculative path
1621             flags[IsNonSpeculative] = true;
1622         }
1623
             // execute() declaration, filled in by the parser template.
1624         %(BasicExecDeclare)s
1625
1626         std::string generateDisassembly(Addr pc, const SymbolTable *symtab);
1627     };
1628 }};
1629
1630 output decoder {{
1631     std::string
1632     FailUnimplemented::generateDisassembly(Addr pc, const SymbolTable *symtab)
1633     {
1634         return csprintf("%-10s (unimplemented)", mnemonic);
1635     }
1636
1637     std::string
1638     WarnUnimplemented::generateDisassembly(Addr pc, const SymbolTable *symtab)
1639     {
1640 #ifdef SS_COMPATIBLE_DISASSEMBLY
             // SS-compatible output omits the "(unimplemented)" tag
1641         return csprintf("%-10s", mnemonic);
1642 #else
1643         return csprintf("%-10s (unimplemented)", mnemonic);
1644 #endif
1645     }
1646 }};
1647
1648 output exec {{
         // %(CPU_exec_context)s is substituted by the isa parser with the
         // concrete execution-context type for each CPU model.
1649     Fault
1650     FailUnimplemented::execute(%(CPU_exec_context)s *xc,
1651                                Trace::InstRecord *traceData)
1652     {
1653         panic("attempt to execute unimplemented instruction '%s' "
1654               "(inst 0x%08x, opcode 0x%x)", mnemonic, machInst, OPCODE);
             // nominally unreachable — panic() presumably terminates;
             // the return satisfies the compiler
1655         return Unimplemented_Opcode_Fault;
1656     }
1657
1658     Fault
1659     WarnUnimplemented::execute(%(CPU_exec_context)s *xc,
1660                                Trace::InstRecord *traceData)
1661     {
             // warn once per *instance* of this instruction (see class
             // comment); execution itself is a no-op
1662         if (!warned) {
1663             warn("instruction '%s' unimplemented\n", mnemonic);
1664             warned = true;
1665         }
1666
1667         return No_Fault;
1668     }
1669 }};
1670
1671
1672 def format FailUnimpl() {{
         # All opcodes share the one FailUnimplemented class; only a
         # decode case is emitted, carrying the mnemonic.
1673     iop = InstObjParams(name, 'FailUnimplemented')
1674     decode_block = BasicDecodeWithMnemonic.subst(iop)
1675 }};
1676
1677 def format WarnUnimpl() {{
         # All opcodes share the one WarnUnimplemented class; only a
         # decode case is emitted, carrying the mnemonic.
1678     iop = InstObjParams(name, 'WarnUnimplemented')
1679     decode_block = BasicDecodeWithMnemonic.subst(iop)
1680 }};
1681
1682 output header {{
1683     /**
1684      * Static instruction class for unknown (illegal) instructions.
1685      * These cause simulator termination if they are executed in a
1686      * non-speculative mode. This is a leaf class.
1687      */
1688     class Unknown : public AlphaStaticInst
1689     {
1690       public:
1691         /// Constructor
1692         Unknown(MachInst _machInst)
1693             : AlphaStaticInst("unknown", _machInst, No_OpClass)
1694         {
1695             // don't call execute() (which panics) if we're on a
1696             // speculative path
1697             flags[IsNonSpeculative] = true;
1698         }
1699
             // execute() declaration, filled in by the parser template.
1700         %(BasicExecDeclare)s
1701
1702         std::string generateDisassembly(Addr pc, const SymbolTable *symtab);
1703     };
1704 }};
1705
1706 ////////////////////////////////////////////////////////////////////
1707 //
1708 // Unknown instructions
1709 //
1710
1711 output decoder {{
1712     std::string
1713     Unknown::generateDisassembly(Addr pc, const SymbolTable *symtab)
1714     {
             // no mnemonic to show — dump raw instruction word and opcode
1715         return csprintf("%-10s (inst 0x%x, opcode 0x%x)",
1716                         "unknown", machInst, OPCODE);
1717     }
1718 }};
1719
1720 output exec {{
1721     Fault
1722     Unknown::execute(%(CPU_exec_context)s *xc, Trace::InstRecord *traceData)
1723     {
1724         panic("attempt to execute unknown instruction "
1725               "(inst 0x%08x, opcode 0x%x)", machInst, OPCODE);
             // nominally unreachable — panic() presumably terminates;
             // the return satisfies the compiler
1726         return Unimplemented_Opcode_Fault;
1727     }
1728 }};
1729
1730 def format Unknown() {{
         # No per-instruction class: always decode to the Unknown leaf.
1731     decode_block = 'return new Unknown(machInst);\n'
1732 }};
1733
1734 ////////////////////////////////////////////////////////////////////
1735 //
1736 // Utility functions for execute methods
1737 //
1738
1739 output exec {{
1740
     // NOTE: the 'x<hi:lo>' syntax used below is the ISA-description
     // bitfield-extract operator; the isa parser expands it to C++.
1741     /// Return opa + opb, summing carry into third arg.
1742     inline uint64_t
1743     addc(uint64_t opa, uint64_t opb, int &carry)
1744     {
1745         uint64_t res = opa + opb;
             // unsigned overflow iff the sum wrapped below an operand
1746         if (res < opa || res < opb)
1747             ++carry;
1748         return res;
1749     }
1750
1751     /// Multiply two 64-bit values (opa * opb), returning the 128-bit
1752     /// product in res_hi and res_lo.
1753     inline void
1754     mul128(uint64_t opa, uint64_t opb, uint64_t &res_hi, uint64_t &res_lo)
1755     {
1756         // do a 64x64 --> 128 multiply using four 32x32 --> 64 multiplies
1757         uint64_t opa_hi = opa<63:32>;
1758         uint64_t opa_lo = opa<31:0>;
1759         uint64_t opb_hi = opb<63:32>;
1760         uint64_t opb_lo = opb<31:0>;
1761
1762         res_lo = opa_lo * opb_lo;
1763
1764         // The middle partial products logically belong in bit
1765         // positions 95 to 32. Thus the lower 32 bits of each product
1766         // sum into the upper 32 bits of the low result, while the
1767         // upper 32 sum into the low 32 bits of the upper result.
1768         uint64_t partial1 = opa_hi * opb_lo;
1769         uint64_t partial2 = opa_lo * opb_hi;
1770
1771         uint64_t partial1_lo = partial1<31:0> << 32;
1772         uint64_t partial1_hi = partial1<63:32>;
1773         uint64_t partial2_lo = partial2<31:0> << 32;
1774         uint64_t partial2_hi = partial2<63:32>;
1775
1776         // Add partial1_lo and partial2_lo to res_lo, keeping track
1777         // of any carries out
1778         int carry_out = 0;
1779         res_lo = addc(partial1_lo, res_lo, carry_out);
1780         res_lo = addc(partial2_lo, res_lo, carry_out);
1781
1782         // Now calculate the high 64 bits...
1783         res_hi = (opa_hi * opb_hi) + partial1_hi + partial2_hi + carry_out;
1784     }
1785
1786     /// Map 8-bit S-floating exponent to 11-bit T-floating exponent.
1787     /// See Table 2-2 of Alpha AHB.
1788     inline int
1789     map_s(int old_exp)
1790     {
1791         int hibit = old_exp<7:>;
1792         int lobits = old_exp<6:0>;
1793
1794         if (hibit == 1) {
                 // all-ones exponent maps to all-ones (presumably the
                 // Inf/NaN encoding); others get the 0x400 bias
1795             return (lobits == 0x7f) ? 0x7ff : (0x400 | lobits);
1796         }
1797         else {
                 // zero exponent maps to zero; others get the 0x380 bias
1798             return (lobits == 0) ? 0 : (0x380 | lobits);
1799         }
1800     }
1801
1802     /// Convert a 32-bit S-floating value to the equivalent 64-bit
1803     /// representation to be stored in an FP reg.
1804     inline uint64_t
1805     s_to_t(uint32_t s_val)
1806     {
1807         uint64_t tmp = s_val;
1808         return (tmp<31:> << 63 // sign bit
1809                 | (uint64_t)map_s(tmp<30:23>) << 52 // exponent
1810                 | tmp<22:0> << 29); // fraction
1811     }
1812
1813     /// Convert a 64-bit T-floating value to the equivalent 32-bit
1814     /// S-floating representation to be stored in memory.
1815     inline int32_t
1816     t_to_s(uint64_t t_val)
1817     {
1818         return (t_val<63:62> << 30 // sign bit & hi exp bit
1819                 | t_val<58:29>); // rest of exp & fraction
1820     }
1821 }};
1822
1823 ////////////////////////////////////////////////////////////////////
1824 //
1825 // The actual decoder specification
1826 //
1827
1828 decode OPCODE default Unknown::unknown() {
1829
1830 format LoadAddress {
1831 0x08: lda({{ Ra = Rb + disp; }});
1832 0x09: ldah({{ Ra = Rb + (disp << 16); }});
1833 }
1834
1835 format LoadOrNop {
1836 0x0a: ldbu({{ EA = Rb + disp; }}, {{ Ra.uq = Mem.ub; }});
1837 0x0c: ldwu({{ EA = Rb + disp; }}, {{ Ra.uq = Mem.uw; }});
1838 0x0b: ldq_u({{ EA = (Rb + disp) & ~7; }}, {{ Ra = Mem.uq; }});
1839 0x23: ldt({{ EA = Rb + disp; }}, {{ Fa = Mem.df; }});
1840 0x2a: ldl_l({{ EA = Rb + disp; }}, {{ Ra.sl = Mem.sl; }}, LOCKED);
1841 0x2b: ldq_l({{ EA = Rb + disp; }}, {{ Ra.uq = Mem.uq; }}, LOCKED);
1842 0x20: copy_load({{EA = Ra;}},
1843 {{fault = xc->copySrcTranslate(EA);}},
1844 IsMemRef, IsLoad, IsCopy);
1845 }
1846
1847 format LoadOrPrefetch {
1848 0x28: ldl({{ EA = Rb + disp; }}, {{ Ra.sl = Mem.sl; }});
1849 0x29: ldq({{ EA = Rb + disp; }}, {{ Ra.uq = Mem.uq; }}, EVICT_NEXT);
1850 // IsFloating flag on lds gets the prefetch to disassemble
1851         // using f31 instead of r31... functionally it's unnecessary
1852 0x22: lds({{ EA = Rb + disp; }}, {{ Fa.uq = s_to_t(Mem.ul); }},
1853 PF_EXCLUSIVE, IsFloating);
1854 }
1855
1856 format Store {
1857 0x0e: stb({{ EA = Rb + disp; }}, {{ Mem.ub = Ra<7:0>; }});
1858 0x0d: stw({{ EA = Rb + disp; }}, {{ Mem.uw = Ra<15:0>; }});
1859 0x2c: stl({{ EA = Rb + disp; }}, {{ Mem.ul = Ra<31:0>; }});
1860 0x2d: stq({{ EA = Rb + disp; }}, {{ Mem.uq = Ra.uq; }});
1861 0x0f: stq_u({{ EA = (Rb + disp) & ~7; }}, {{ Mem.uq = Ra.uq; }});
1862 0x26: sts({{ EA = Rb + disp; }}, {{ Mem.ul = t_to_s(Fa.uq); }});
1863 0x27: stt({{ EA = Rb + disp; }}, {{ Mem.df = Fa; }});
1864 0x24: copy_store({{EA = Rb;}},
1865 {{fault = xc->copy(EA);}},
1866 IsMemRef, IsStore, IsCopy);
1867 }
1868
1869 format StoreCond {
1870 0x2e: stl_c({{ EA = Rb + disp; }}, {{ Mem.ul = Ra<31:0>; }},
1871 {{
1872 uint64_t tmp = Mem_write_result;
1873 // see stq_c
1874 Ra = (tmp == 0 || tmp == 1) ? tmp : Ra;
1875 }}, LOCKED);
1876 0x2f: stq_c({{ EA = Rb + disp; }}, {{ Mem.uq = Ra; }},
1877 {{
1878 uint64_t tmp = Mem_write_result;
1879 // If the write operation returns 0 or 1, then
1880 // this was a conventional store conditional,
1881 // and the value indicates the success/failure
1882 // of the operation. If another value is
1883 // returned, then this was a Turbolaser
1884 // mailbox access, and we don't update the
1885 // result register at all.
1886 Ra = (tmp == 0 || tmp == 1) ? tmp : Ra;
1887 }}, LOCKED);
1888 }
1889
1890 format IntegerOperate {
1891
1892 0x10: decode INTFUNC { // integer arithmetic operations
1893
1894 0x00: addl({{ Rc.sl = Ra.sl + Rb_or_imm.sl; }});
1895 0x40: addlv({{
1896 uint32_t tmp = Ra.sl + Rb_or_imm.sl;
1897 // signed overflow occurs when operands have same sign
1898 // and sign of result does not match.
1899 if (Ra.sl<31:> == Rb_or_imm.sl<31:> && tmp<31:> != Ra.sl<31:>)
1900 fault = Integer_Overflow_Fault;
1901 Rc.sl = tmp;
1902 }});
1903 0x02: s4addl({{ Rc.sl = (Ra.sl << 2) + Rb_or_imm.sl; }});
1904 0x12: s8addl({{ Rc.sl = (Ra.sl << 3) + Rb_or_imm.sl; }});
1905
1906 0x20: addq({{ Rc = Ra + Rb_or_imm; }});
1907 0x60: addqv({{
1908 uint64_t tmp = Ra + Rb_or_imm;
1909 // signed overflow occurs when operands have same sign
1910 // and sign of result does not match.
1911 if (Ra<63:> == Rb_or_imm<63:> && tmp<63:> != Ra<63:>)
1912 fault = Integer_Overflow_Fault;
1913 Rc = tmp;
1914 }});
1915 0x22: s4addq({{ Rc = (Ra << 2) + Rb_or_imm; }});
1916 0x32: s8addq({{ Rc = (Ra << 3) + Rb_or_imm; }});
1917
1918 0x09: subl({{ Rc.sl = Ra.sl - Rb_or_imm.sl; }});
1919 0x49: sublv({{
1920 uint32_t tmp = Ra.sl - Rb_or_imm.sl;
1921 // signed overflow detection is same as for add,
1922 // except we need to look at the *complemented*
1923 // sign bit of the subtrahend (Rb), i.e., if the initial
1924 // signs are the *same* then no overflow can occur
1925 if (Ra.sl<31:> != Rb_or_imm.sl<31:> && tmp<31:> != Ra.sl<31:>)
1926 fault = Integer_Overflow_Fault;
1927 Rc.sl = tmp;
1928 }});
1929 0x0b: s4subl({{ Rc.sl = (Ra.sl << 2) - Rb_or_imm.sl; }});
1930 0x1b: s8subl({{ Rc.sl = (Ra.sl << 3) - Rb_or_imm.sl; }});
1931
1932 0x29: subq({{ Rc = Ra - Rb_or_imm; }});
1933 0x69: subqv({{
1934 uint64_t tmp = Ra - Rb_or_imm;
1935 // signed overflow detection is same as for add,
1936 // except we need to look at the *complemented*
1937 // sign bit of the subtrahend (Rb), i.e., if the initial
1938 // signs are the *same* then no overflow can occur
1939 if (Ra<63:> != Rb_or_imm<63:> && tmp<63:> != Ra<63:>)
1940 fault = Integer_Overflow_Fault;
1941 Rc = tmp;
1942 }});
1943 0x2b: s4subq({{ Rc = (Ra << 2) - Rb_or_imm; }});
1944 0x3b: s8subq({{ Rc = (Ra << 3) - Rb_or_imm; }});
1945
1946 0x2d: cmpeq({{ Rc = (Ra == Rb_or_imm); }});
1947 0x6d: cmple({{ Rc = (Ra.sq <= Rb_or_imm.sq); }});
1948 0x4d: cmplt({{ Rc = (Ra.sq < Rb_or_imm.sq); }});
1949 0x3d: cmpule({{ Rc = (Ra.uq <= Rb_or_imm.uq); }});
1950 0x1d: cmpult({{ Rc = (Ra.uq < Rb_or_imm.uq); }});
1951
1952 0x0f: cmpbge({{
1953 int hi = 7;
1954 int lo = 0;
1955 uint64_t tmp = 0;
1956 for (int i = 0; i < 8; ++i) {
1957 tmp |= (Ra.uq<hi:lo> >= Rb_or_imm.uq<hi:lo>) << i;
1958 hi += 8;
1959 lo += 8;
1960 }
1961 Rc = tmp;
1962 }});
1963 }
1964
1965 0x11: decode INTFUNC { // integer logical operations
1966
1967 0x00: and({{ Rc = Ra & Rb_or_imm; }});
1968 0x08: bic({{ Rc = Ra & ~Rb_or_imm; }});
1969 0x20: bis({{ Rc = Ra | Rb_or_imm; }});
1970 0x28: ornot({{ Rc = Ra | ~Rb_or_imm; }});
1971 0x40: xor({{ Rc = Ra ^ Rb_or_imm; }});
1972 0x48: eqv({{ Rc = Ra ^ ~Rb_or_imm; }});
1973
1974 // conditional moves
1975 0x14: cmovlbs({{ Rc = ((Ra & 1) == 1) ? Rb_or_imm : Rc; }});
1976 0x16: cmovlbc({{ Rc = ((Ra & 1) == 0) ? Rb_or_imm : Rc; }});
1977 0x24: cmoveq({{ Rc = (Ra == 0) ? Rb_or_imm : Rc; }});
1978 0x26: cmovne({{ Rc = (Ra != 0) ? Rb_or_imm : Rc; }});
1979 0x44: cmovlt({{ Rc = (Ra.sq < 0) ? Rb_or_imm : Rc; }});
1980 0x46: cmovge({{ Rc = (Ra.sq >= 0) ? Rb_or_imm : Rc; }});
1981 0x64: cmovle({{ Rc = (Ra.sq <= 0) ? Rb_or_imm : Rc; }});
1982 0x66: cmovgt({{ Rc = (Ra.sq > 0) ? Rb_or_imm : Rc; }});
1983
1984 // For AMASK, RA must be R31.
1985 0x61: decode RA {
1986 31: amask({{ Rc = Rb_or_imm & ~ULL(0x17); }});
1987 }
1988
1989 // For IMPLVER, RA must be R31 and the B operand
1990 // must be the immediate value 1.
1991 0x6c: decode RA {
1992 31: decode IMM {
1993 1: decode INTIMM {
1994 // return EV5 for FULL_SYSTEM and EV6 otherwise
1995 1: implver({{
1996 #ifdef FULL_SYSTEM
1997 Rc = 1;
1998 #else
1999 Rc = 2;
2000 #endif
2001 }});
2002 }
2003 }
2004 }
2005
2006 #ifdef FULL_SYSTEM
2007 // The mysterious 11.25...
2008 0x25: WarnUnimpl::eleven25();
2009 #endif
2010 }
2011
2012 0x12: decode INTFUNC {
2013 0x39: sll({{ Rc = Ra << Rb_or_imm<5:0>; }});
2014 0x34: srl({{ Rc = Ra.uq >> Rb_or_imm<5:0>; }});
2015 0x3c: sra({{ Rc = Ra.sq >> Rb_or_imm<5:0>; }});
2016
2017 0x02: mskbl({{ Rc = Ra & ~(mask( 8) << (Rb_or_imm<2:0> * 8)); }});
2018 0x12: mskwl({{ Rc = Ra & ~(mask(16) << (Rb_or_imm<2:0> * 8)); }});
2019 0x22: mskll({{ Rc = Ra & ~(mask(32) << (Rb_or_imm<2:0> * 8)); }});
2020 0x32: mskql({{ Rc = Ra & ~(mask(64) << (Rb_or_imm<2:0> * 8)); }});
2021
2022 0x52: mskwh({{
2023 int bv = Rb_or_imm<2:0>;
2024 Rc = bv ? (Ra & ~(mask(16) >> (64 - 8 * bv))) : Ra;
2025 }});
2026 0x62: msklh({{
2027 int bv = Rb_or_imm<2:0>;
2028 Rc = bv ? (Ra & ~(mask(32) >> (64 - 8 * bv))) : Ra;
2029 }});
2030 0x72: mskqh({{
2031 int bv = Rb_or_imm<2:0>;
2032 Rc = bv ? (Ra & ~(mask(64) >> (64 - 8 * bv))) : Ra;
2033 }});
2034
2035 0x06: extbl({{ Rc = (Ra.uq >> (Rb_or_imm<2:0> * 8))< 7:0>; }});
2036 0x16: extwl({{ Rc = (Ra.uq >> (Rb_or_imm<2:0> * 8))<15:0>; }});
2037 0x26: extll({{ Rc = (Ra.uq >> (Rb_or_imm<2:0> * 8))<31:0>; }});
2038 0x36: extql({{ Rc = (Ra.uq >> (Rb_or_imm<2:0> * 8)); }});
2039
2040 0x5a: extwh({{
2041 Rc = (Ra << (64 - (Rb_or_imm<2:0> * 8))<5:0>)<15:0>; }});
2042 0x6a: extlh({{
2043 Rc = (Ra << (64 - (Rb_or_imm<2:0> * 8))<5:0>)<31:0>; }});
2044 0x7a: extqh({{
2045 Rc = (Ra << (64 - (Rb_or_imm<2:0> * 8))<5:0>); }});
2046
2047 0x0b: insbl({{ Rc = Ra< 7:0> << (Rb_or_imm<2:0> * 8); }});
2048 0x1b: inswl({{ Rc = Ra<15:0> << (Rb_or_imm<2:0> * 8); }});
2049 0x2b: insll({{ Rc = Ra<31:0> << (Rb_or_imm<2:0> * 8); }});
2050 0x3b: insql({{ Rc = Ra << (Rb_or_imm<2:0> * 8); }});
2051
2052 0x57: inswh({{
2053 int bv = Rb_or_imm<2:0>;
2054 Rc = bv ? (Ra.uq<15:0> >> (64 - 8 * bv)) : 0;
2055 }});
2056 0x67: inslh({{
2057 int bv = Rb_or_imm<2:0>;
2058 Rc = bv ? (Ra.uq<31:0> >> (64 - 8 * bv)) : 0;
2059 }});
2060 0x77: insqh({{
2061 int bv = Rb_or_imm<2:0>;
2062 Rc = bv ? (Ra.uq >> (64 - 8 * bv)) : 0;
2063 }});
2064
2065 0x30: zap({{
2066 uint64_t zapmask = 0;
2067 for (int i = 0; i < 8; ++i) {
2068 if (Rb_or_imm<i:>)
2069 zapmask |= (mask(8) << (i * 8));
2070 }
2071 Rc = Ra & ~zapmask;
2072 }});
2073 0x31: zapnot({{
2074 uint64_t zapmask = 0;
2075 for (int i = 0; i < 8; ++i) {
2076 if (!Rb_or_imm<i:>)
2077 zapmask |= (mask(8) << (i * 8));
2078 }
2079 Rc = Ra & ~zapmask;
2080 }});
2081 }
2082
2083 0x13: decode INTFUNC { // integer multiplies
2084 0x00: mull({{ Rc.sl = Ra.sl * Rb_or_imm.sl; }}, IntMultOp);
2085 0x20: mulq({{ Rc = Ra * Rb_or_imm; }}, IntMultOp);
2086 0x30: umulh({{
2087 uint64_t hi, lo;
2088 mul128(Ra, Rb_or_imm, hi, lo);
2089 Rc = hi;
2090 }}, IntMultOp);
2091 0x40: mullv({{
2092 // 32-bit multiply with trap on overflow
2093 int64_t Rax = Ra.sl; // sign extended version of Ra.sl
2094 int64_t Rbx = Rb_or_imm.sl;
2095 int64_t tmp = Rax * Rbx;
2096 // To avoid overflow, all the upper 32 bits must match
2097 // the sign bit of the lower 32. We code this as
2098 // checking the upper 33 bits for all 0s or all 1s.
2099 uint64_t sign_bits = tmp<63:31>;
2100 if (sign_bits != 0 && sign_bits != mask(33))
2101 fault = Integer_Overflow_Fault;
2102 Rc.sl = tmp<31:0>;
2103 }}, IntMultOp);
2104 0x60: mulqv({{
2105 // 64-bit multiply with trap on overflow
2106 uint64_t hi, lo;
2107 mul128(Ra, Rb_or_imm, hi, lo);
2108 // all the upper 64 bits must match the sign bit of
2109 // the lower 64
2110 if (!((hi == 0 && lo<63:> == 0) ||
2111 (hi == mask(64) && lo<63:> == 1)))
2112 fault = Integer_Overflow_Fault;
2113 Rc = lo;
2114 }}, IntMultOp);
2115 }
2116
2117 0x1c: decode INTFUNC {
2118 0x00: decode RA { 31: sextb({{ Rc.sb = Rb_or_imm< 7:0>; }}); }
2119 0x01: decode RA { 31: sextw({{ Rc.sw = Rb_or_imm<15:0>; }}); }
2120 0x32: ctlz({{
2121 uint64_t count = 0;
2122 uint64_t temp = Rb;
2123 if (temp<63:32>) temp >>= 32; else count += 32;
2124 if (temp<31:16>) temp >>= 16; else count += 16;
2125 if (temp<15:8>) temp >>= 8; else count += 8;
2126 if (temp<7:4>) temp >>= 4; else count += 4;
2127 if (temp<3:2>) temp >>= 2; else count += 2;
2128 if (temp<1:1>) temp >>= 1; else count += 1;
2129 if ((temp<0:0>) != 0x1) count += 1;
2130 Rc = count;
2131 }}, IntAluOp);
2132
2133 0x33: cttz({{
2134 uint64_t count = 0;
2135 uint64_t temp = Rb;
2136 if (!(temp<31:0>)) { temp >>= 32; count += 32; }
2137 if (!(temp<15:0>)) { temp >>= 16; count += 16; }
2138 if (!(temp<7:0>)) { temp >>= 8; count += 8; }
2139 if (!(temp<3:0>)) { temp >>= 4; count += 4; }
2140 if (!(temp<1:0>)) { temp >>= 2; count += 2; }
2141 if (!(temp<0:0> & ULL(0x1))) count += 1;
2142 Rc = count;
2143 }}, IntAluOp);
2144
        // Remaining CIX (ctpop) and MVI (motion-video / byte-word
        // min/max, pack/unpack, pixel error) instructions are decoded
        // but not implemented; executing one fails (FailUnimpl).
        format FailUnimpl {
            0x30: ctpop();
            0x31: perr();
            0x34: unpkbw();
            0x35: unpkbl();
            0x36: pkwb();
            0x37: pklb();
            0x38: minsb8();
            0x39: minsw4();
            0x3a: minub8();
            0x3b: minuw4();
            0x3c: maxub8();
            0x3d: maxuw4();
            0x3e: maxsb8();
            0x3f: maxsw4();
        }
2161
        format BasicOperateWithNopCheck {
            // FIX-extension FP-to-integer register moves; valid
            // encodings require RB == 31.  ftoit copies the raw 64
            // bits; ftois narrows via t_to_s() before the move.
            0x70: decode RB {
                31: ftoit({{ Rc = Fa.uq; }}, FloatCvtOp);
            }
            0x78: decode RB {
                31: ftois({{ Rc.sl = t_to_s(Fa.uq); }},
                          FloatCvtOp);
            }
        }
2171 }
2172 }
2173
2174 // Conditional branches.
    format CondBranch {
        // Integer conditional branches: the condition tests Ra
        // against zero (signed where .sq is used), or Ra's low bit
        // for blbc/blbs.  Target computation is supplied by the
        // CondBranch format.
        0x39: beq({{ cond = (Ra == 0); }});
        0x3d: bne({{ cond = (Ra != 0); }});
        0x3e: bge({{ cond = (Ra.sq >= 0); }});
        0x3f: bgt({{ cond = (Ra.sq > 0); }});
        0x3b: ble({{ cond = (Ra.sq <= 0); }});
        0x3a: blt({{ cond = (Ra.sq < 0); }});
        0x38: blbc({{ cond = ((Ra & 1) == 0); }});
        0x3c: blbs({{ cond = ((Ra & 1) == 1); }});

        // Floating-point conditional branches test Fa against zero.
        0x31: fbeq({{ cond = (Fa == 0); }});
        0x35: fbne({{ cond = (Fa != 0); }});
        0x36: fbge({{ cond = (Fa >= 0); }});
        0x37: fbgt({{ cond = (Fa > 0); }});
        0x33: fble({{ cond = (Fa <= 0); }});
        0x32: fblt({{ cond = (Fa < 0); }});
    }
2192
2193 // unconditional branches
    format UncondBranch {
        // br and bsr decode identically here; bsr additionally
        // carries the IsCall flag for the front end.
        0x30: br();
        0x34: bsr(IsCall);
    }
2198
2199 // indirect branches
    // Memory-format indirect jumps: disp<15:14> (JMPFUNC) selects
    // the flavor.  The four variants differ only in the instruction
    // flags passed to the shared Jump format (call/return hints).
    0x1a: decode JMPFUNC {
        format Jump {
            0: jmp();
            1: jsr(IsCall);
            2: ret(IsReturn);
            3: jsr_coroutine(IsCall, IsReturn);
        }
    }
2208
2209 // IEEE floating point
    // FIX-extension integer-to-FP moves and square roots.
    0x14: decode FP_SHORTFUNC {
        // Integer to FP register moves must have RB == 31
        0x4: decode RB {
            31: decode FP_FULLFUNC {
                format BasicOperateWithNopCheck {
                    // itofs converts the low 32 bits via s_to_t();
                    // itoft copies the raw 64 bits unchanged.
                    0x004: itofs({{ Fc.uq = s_to_t(Ra.ul); }}, FloatCvtOp);
                    0x024: itoft({{ Fc.uq = Ra.uq; }}, FloatCvtOp);
                    0x014: FailUnimpl::itoff(); // VAX-format conversion
                }
            }
        }

        // Square root instructions must have FA == 31
        0xb: decode FA {
            31: decode FP_TYPEFUNC {
                format FloatingPointOperate {
#ifdef SS_COMPATIBLE_FP
                    // SimpleScalar-compatible build: compute the
                    // single-precision sqrt in double precision.
                    0x0b: sqrts({{
                        if (Fb < 0.0)
                            fault = Arithmetic_Fault;
                        Fc = sqrt(Fb);
                    }}, FloatSqrtOp);
#else
                    0x0b: sqrts({{
                        // Negative operand flags an arithmetic fault;
                        // note the Fc assignment still executes here.
                        if (Fb.sf < 0.0)
                            fault = Arithmetic_Fault;
                        Fc.sf = sqrt(Fb.sf);
                    }}, FloatSqrtOp);
#endif
                    0x2b: sqrtt({{
                        if (Fb < 0.0)
                            fault = Arithmetic_Fault;
                        Fc = sqrt(Fb);
                    }}, FloatSqrtOp);
                }
            }
        }

        // VAX-format sqrtf and sqrtg are not implemented
        0xa: FailUnimpl::sqrtfg();
    }
2251
2252 // IEEE floating point
    // IEEE floating point
    0x16: decode FP_SHORTFUNC_TOP2 {
        // The top two bits of the short function code break this space
        // into four groups: binary ops, compares, reserved, and conversions.
        // See Table 4-12 of AHB.
        // Most of these instructions may have various trapping and
        // rounding mode flags set; these are decoded in the
        // FloatingPointDecode template used by the
        // FloatingPointOperate format.

        // add/sub/mul/div: just decode on the short function code
        // and source type.
        0: decode FP_TYPEFUNC {
            format FloatingPointOperate {
#ifdef SS_COMPATIBLE_FP
                // SimpleScalar-compatible build: single-precision ops
                // are carried out in double precision.
                0x00: adds({{ Fc = Fa + Fb; }});
                0x01: subs({{ Fc = Fa - Fb; }});
                0x02: muls({{ Fc = Fa * Fb; }}, FloatMultOp);
                0x03: divs({{ Fc = Fa / Fb; }}, FloatDivOp);
#else
                0x00: adds({{ Fc.sf = Fa.sf + Fb.sf; }});
                0x01: subs({{ Fc.sf = Fa.sf - Fb.sf; }});
                0x02: muls({{ Fc.sf = Fa.sf * Fb.sf; }}, FloatMultOp);
                0x03: divs({{ Fc.sf = Fa.sf / Fb.sf; }}, FloatDivOp);
#endif

                0x20: addt({{ Fc = Fa + Fb; }});
                0x21: subt({{ Fc = Fa - Fb; }});
                0x22: mult({{ Fc = Fa * Fb; }}, FloatMultOp);
                0x23: divt({{ Fc = Fa / Fb; }}, FloatDivOp);
            }
        }

        // Floating-point compare instructions must have the default
        // rounding mode, and may use the default trapping mode or
        // /SU. Both trapping modes are treated the same by M5; the
        // only difference on the real hardware (as far as I can tell)
        // is that without /SU you'd get an imprecise trap if you
        // tried to compare a NaN with something else (instead of an
        // "unordered" result).
        1: decode FP_FULLFUNC {
            format BasicOperateWithNopCheck {
                // Compares write 2.0 for "true" and 0.0 for "false"
                // into Fc.
                0x0a5, 0x5a5: cmpteq({{ Fc = (Fa == Fb) ? 2.0 : 0.0; }},
                              FloatCmpOp);
                0x0a7, 0x5a7: cmptle({{ Fc = (Fa <= Fb) ? 2.0 : 0.0; }},
                              FloatCmpOp);
                0x0a6, 0x5a6: cmptlt({{ Fc = (Fa < Fb) ? 2.0 : 0.0; }},
                              FloatCmpOp);
                0x0a4, 0x5a4: cmptun({{ // unordered
                    // Unordered iff no ordering relation holds, i.e.
                    // at least one operand is a NaN.
                    Fc = (!(Fa < Fb) && !(Fa == Fb) && !(Fa > Fb)) ? 2.0 : 0.0;
                }}, FloatCmpOp);
            }
        }

        // The FP-to-integer and integer-to-FP conversion insts
        // require that FA be 31.
        3: decode FA {
            31: decode FP_TYPEFUNC {
                format FloatingPointOperate {
                    // rint() rounds under the current FP rounding
                    // mode before the conversion to int64.
                    0x2f: cvttq({{ Fc.sq = (int64_t)rint(Fb); }});

                    // The cvtts opcode is overloaded to be cvtst if the trap
                    // mode is 2 or 6 (which are not valid otherwise)
                    0x2c: decode FP_FULLFUNC {
                        format BasicOperateWithNopCheck {
                            // trap on denorm version "cvtst/s" is
                            // simulated same as cvtst
                            0x2ac, 0x6ac: cvtst({{ Fc = Fb.sf; }});
                        }
                        default: cvtts({{ Fc.sf = Fb; }});
                    }

                    // The trapping mode for integer-to-FP conversions
                    // must be /SUI or nothing; /U and /SU are not
                    // allowed. The full set of rounding modes are
                    // supported though.
                    0x3c: decode FP_TRAPMODE {
                        0,7: cvtqs({{ Fc.sf = Fb.sq; }});
                    }
                    0x3e: decode FP_TRAPMODE {
                        0,7: cvtqt({{ Fc = Fb.sq; }});
                    }
                }
            }
        }
    }
2338
2339 // misc FP operate
    // misc FP operate
    0x17: decode FP_FULLFUNC {
        format BasicOperateWithNopCheck {
            // cvtlq/cvtql convert between the in-register longword
            // layout and a 64-bit integer by bit shuffling (see the
            // Alpha ARM for the register-format layout).
            0x010: cvtlq({{
                Fc.sl = (Fb.uq<63:62> << 30) | Fb.uq<58:29>;
            }});
            0x030: cvtql({{
                Fc.uq = (Fb.uq<31:30> << 62) | (Fb.uq<29:0> << 29);
            }});

            // We treat the precise & imprecise trapping versions of
            // cvtql identically.
            0x130, 0x530: cvtqlv({{
                // To avoid overflow, all the upper 32 bits must match
                // the sign bit of the lower 32. We code this as
                // checking the upper 33 bits for all 0s or all 1s.
                uint64_t sign_bits = Fb.uq<63:31>;
                if (sign_bits != 0 && sign_bits != mask(33))
                    fault = Integer_Overflow_Fault;
                Fc.uq = (Fb.uq<31:30> << 62) | (Fb.uq<29:0> << 29);
            }});

            0x020: cpys({{ // copy sign
                Fc.uq = (Fa.uq<63:> << 63) | Fb.uq<62:0>;
            }});
            0x021: cpysn({{ // copy sign negated
                Fc.uq = (~Fa.uq<63:> << 63) | Fb.uq<62:0>;
            }});
            0x022: cpyse({{ // copy sign and exponent
                Fc.uq = (Fa.uq<63:52> << 52) | Fb.uq<51:0>;
            }});

            // FP conditional moves: Fc keeps its prior value when
            // the condition on Fa is false.
            0x02a: fcmoveq({{ Fc = (Fa == 0) ? Fb : Fc; }});
            0x02b: fcmovne({{ Fc = (Fa != 0) ? Fb : Fc; }});
            0x02c: fcmovlt({{ Fc = (Fa < 0) ? Fb : Fc; }});
            0x02d: fcmovge({{ Fc = (Fa >= 0) ? Fb : Fc; }});
            0x02e: fcmovle({{ Fc = (Fa <= 0) ? Fb : Fc; }});
            0x02f: fcmovgt({{ Fc = (Fa > 0) ? Fb : Fc; }});

            // FPCR moves use the Fa register field for both
            // directions: mt_fpcr reads Fa, mf_fpcr writes Fa.
            0x024: mt_fpcr({{ FPCR = Fa.uq; }});
            0x025: mf_fpcr({{ Fa.uq = FPCR; }});
        }
    }
2382
2383 // miscellaneous mem-format ops
    // miscellaneous mem-format ops, selected by the 16-bit function
    // code that occupies the displacement field
    0x18: decode MEMFUNC {
        // prefetch-style hints: decoded but unimplemented (WarnUnimpl)
        format WarnUnimpl {
            0x8000: fetch();
            0xa000: fetch_m();
            0xe800: ecb();
        }

        format MiscPrefetch {
            // wh64: write hint for a 64-byte block; the EA is aligned
            // down to a 64-byte boundary and faults are suppressed.
            0xf800: wh64({{ EA = Rb & ~ULL(63); }},
                         {{ xc->writeHint(EA, 64, memAccessFlags); }},
                         IsMemRef, IsDataPrefetch, IsStore, MemWriteOp,
                         NO_FAULT);
        }

        format BasicOperate {
            // rpcc: read the cycle counter (the CC IPR in full-system
            // mode, curTick in syscall-emulation mode).
            0xc000: rpcc({{
#ifdef FULL_SYSTEM
                /* Rb is a fake dependency so here is a fun way to get
                 * the parser to understand that.
                 */
                Ra = xc->readIpr(AlphaISA::IPR_CC, fault) + (Rb & 0);

#else
                Ra = curTick;
#endif
            }});

            // All of the barrier instructions below do nothing in
            // their execute() methods (hence the empty code blocks).
            // All of their functionality is hard-coded in the
            // pipeline based on the flags IsSerializing,
            // IsMemBarrier, and IsWriteBarrier. In the current
            // detailed CPU model, the execute() function only gets
            // called at fetch, so there's no way to generate pipeline
            // behavior at any other stage. Once we go to an
            // exec-in-exec CPU model we should be able to get rid of
            // these flags and implement this behavior via the
            // execute() methods.

            // trapb is just a barrier on integer traps, where excb is
            // a barrier on integer and FP traps. "EXCB is thus a
            // superset of TRAPB." (Alpha ARM, Sec 4.11.4) We treat
            // them the same though.
            0x0000: trapb({{ }}, IsSerializing, No_OpClass);
            0x0400: excb({{ }}, IsSerializing, No_OpClass);
            0x4000: mb({{ }}, IsMemBarrier, MemReadOp);
            0x4400: wmb({{ }}, IsWriteBarrier, MemWriteOp);
        }

#ifdef FULL_SYSTEM
        format BasicOperate {
            // rc/rs: read the interrupt flag into Ra, then clear (rc)
            // or set (rs) it.
            0xe000: rc({{
                Ra = xc->readIntrFlag();
                xc->setIntrFlag(0);
            }}, IsNonSpeculative);
            0xf000: rs({{
                Ra = xc->readIntrFlag();
                xc->setIntrFlag(1);
            }}, IsNonSpeculative);
        }
#else
        format FailUnimpl {
            0xe000: rc();
            0xf000: rs();
        }
#endif
    }
2451
#ifdef FULL_SYSTEM
    // Full-system PAL call: validate the function code and privilege
    // level, then vector through the PAL base register.
    0x00: CallPal::call_pal({{
        if (!palValid ||
            (palPriv
             && xc->readIpr(AlphaISA::IPR_ICM, fault) != AlphaISA::mode_kernel)) {
            // invalid pal function code, or attempt to do privileged
            // PAL call in non-kernel mode
            fault = Unimplemented_Opcode_Fault;
        }
        else {
            // check to see if simulator wants to do something special
            // on this PAL call (including maybe suppress it)
            bool dopal = xc->simPalCheck(palFunc);

            if (dopal) {
                // swap in PAL shadow registers, record the return
                // address, and redirect to PAL_BASE + palOffset
                AlphaISA::swap_palshadow(&xc->xcBase()->regs, true);
                xc->setIpr(AlphaISA::IPR_EXC_ADDR, NPC);
                NPC = xc->readIpr(AlphaISA::IPR_PAL_BASE, fault) + palOffset;
            }
        }
    }}, IsNonSpeculative);
#else
    // Syscall-emulation mode: emulate only the PAL calls that
    // user-level code issues.
    0x00: decode PALFUNC {
        format EmulatedCallPal {
            0x00: halt ({{
                SimExit(curTick, "halt instruction encountered");
            }}, IsNonSpeculative);
            0x83: callsys({{
                xc->syscall();
            }}, IsNonSpeculative);
            // Read uniq reg into ABI return value register (r0)
            0x9e: rduniq({{ R0 = Runiq; }}, IsNonSpeculative);
            // Write uniq reg with value from ABI arg register (r16)
            0x9f: wruniq({{ Runiq = R16; }}, IsNonSpeculative);
        }
    }
#endif
2489
#ifdef FULL_SYSTEM
    format HwLoadStore {
        // PAL-mode hardware loads/stores; the effective address is
        // aligned down to the access size (longword or quadword).
        0x1b: decode HW_LDST_QUAD {
            0: hw_ld({{ EA = (Rb + disp) & ~3; }}, {{ Ra = Mem.ul; }}, L);
            1: hw_ld({{ EA = (Rb + disp) & ~7; }}, {{ Ra = Mem.uq; }}, Q);
        }

        0x1f: decode HW_LDST_COND {
            0: decode HW_LDST_QUAD {
                0: hw_st({{ EA = (Rb + disp) & ~3; }},
                         {{ Mem.ul = Ra<31:0>; }}, L);
                1: hw_st({{ EA = (Rb + disp) & ~7; }},
                         {{ Mem.uq = Ra.uq; }}, Q);
            }

            // conditional store variant is not implemented
            1: FailUnimpl::hw_st_cond();
        }
    }

    format BasicOperate {
        // hw_rei: return from PAL mode (delegated to the context)
        0x1e: hw_rei({{ xc->hwrei(); }});

        // M5 special opcodes use the reserved 0x01 opcode space;
        // each delegates to an AlphaPseudo:: handler operating on
        // the execution context.
        0x01: decode M5FUNC {
            0x00: arm({{
                AlphaPseudo::arm(xc->xcBase());
            }}, IsNonSpeculative);
            0x01: quiesce({{
                AlphaPseudo::quiesce(xc->xcBase());
            }}, IsNonSpeculative);
            0x10: ivlb({{
                AlphaPseudo::ivlb(xc->xcBase());
            }}, No_OpClass, IsNonSpeculative);
            0x11: ivle({{
                AlphaPseudo::ivle(xc->xcBase());
            }}, No_OpClass, IsNonSpeculative);
            0x20: m5exit_old({{
                AlphaPseudo::m5exit_old(xc->xcBase());
            }}, No_OpClass, IsNonSpeculative);
            0x21: m5exit({{
                AlphaPseudo::m5exit(xc->xcBase());
            }}, No_OpClass, IsNonSpeculative);
            0x30: initparam({{ Ra = xc->xcBase()->cpu->system->init_param; }});
            0x40: resetstats({{
                AlphaPseudo::resetstats(xc->xcBase());
            }}, IsNonSpeculative);
            0x41: dumpstats({{
                AlphaPseudo::dumpstats(xc->xcBase());
            }}, IsNonSpeculative);
            0x42: dumpresetstats({{
                AlphaPseudo::dumpresetstats(xc->xcBase());
            }}, IsNonSpeculative);
            0x43: m5checkpoint({{
                AlphaPseudo::m5checkpoint(xc->xcBase());
            }}, IsNonSpeculative);
            0x50: m5readfile({{
                AlphaPseudo::readfile(xc->xcBase());
            }}, IsNonSpeculative);
            0x51: m5break({{
                AlphaPseudo::debugbreak(xc->xcBase());
            }}, IsNonSpeculative);
            0x52: m5switchcpu({{
                AlphaPseudo::switchcpu(xc->xcBase());
            }}, IsNonSpeculative);

        }
    }

    format HwMoveIPR {
        // Internal processor register moves; valid only in PAL mode.
        0x19: hw_mfpr({{
            // this instruction is only valid in PAL mode
            if (!xc->inPalMode()) {
                fault = Unimplemented_Opcode_Fault;
            }
            else {
                Ra = xc->readIpr(ipr_index, fault);
            }
        }});
        0x1d: hw_mtpr({{
            // this instruction is only valid in PAL mode
            if (!xc->inPalMode()) {
                fault = Unimplemented_Opcode_Fault;
            }
            else {
                xc->setIpr(ipr_index, Ra);
                // record the written value in the trace, if tracing
                if (traceData) { traceData->setData(Ra); }
            }
        }});
    }
#endif
2580 }