arch-power: Add doubleword multiply-add instructions
[gem5.git] / src / arch / power / isa / decoder.isa
1 // -*- mode:c++ -*-
2
3 // Copyright (c) 2009 The University of Edinburgh
4 // All rights reserved.
5 //
6 // Redistribution and use in source and binary forms, with or without
7 // modification, are permitted provided that the following conditions are
8 // met: redistributions of source code must retain the above copyright
9 // notice, this list of conditions and the following disclaimer;
10 // redistributions in binary form must reproduce the above copyright
11 // notice, this list of conditions and the following disclaimer in the
12 // documentation and/or other materials provided with the distribution;
13 // neither the name of the copyright holders nor the names of its
14 // contributors may be used to endorse or promote products derived from
15 // this software without specific prior written permission.
16 //
17 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28
29 ////////////////////////////////////////////////////////////////////
30 //
31 // The actual Power ISA decoder
32 // ------------------------------
33 //
34 // I've used the Power ISA Book I v2.06 for instruction formats,
35 // opcode numbers, register names, etc.
36 //
37 decode PO default Unknown::unknown() {
38
39 // Unconditionally branch to a PC-relative or absolute address.
// Each entry gives two code blobs: the first is used for the
// PC-relative encoding, the second for the absolute encoding
// (selected by the AA bit — see the BranchOp format definition).
40 format BranchOp {
41 18: b({{ NIA = CIA + disp; }},
42 {{ NIA = disp; }});
43 }
44
45 // Conditionally branch to a PC-relative or absolute address based
46 // on CR and CTR.
47 format BranchDispCondOp {
48 16: bc({{ NIA = CIA + disp; }},
49 {{ NIA = disp; }});
50 }
51
52 19: decode XL_XO {
53
54 // Conditionally branch to an address in a register based on
55 // either CR only or both CR and CTR.
// The "& -4ULL" clears the low two bits so the branch target is
// always word-aligned.
56 format BranchRegCondOp {
57 16: bclr({{ NIA = LR & -4ULL; }}, true, [ IsReturn ]);
58 528: bcctr({{ NIA = CTR & -4ULL; }});
59 560: bctar({{ NIA = TAR & -4ULL; }}, true);
60 }
61
62 // Condition register manipulation instructions.
// Power numbers CR bits 0 (MSB) through 31 (LSB), while bits() and
// insertBits() index from the LSB, hence the "31 - n" conversions
// throughout this block.
63 format CondLogicOp {
64 257: crand({{
65 uint32_t crBa = bits(CR, 31 - ba);
66 uint32_t crBb = bits(CR, 31 - bb);
67 CR = insertBits(CR, 31 - bt, crBa & crBb);
68 }});
69
70 449: cror({{
71 uint32_t crBa = bits(CR, 31 - ba);
72 uint32_t crBb = bits(CR, 31 - bb);
73 CR = insertBits(CR, 31 - bt, crBa | crBb);
74 }});
75
76 255: crnand({{
77 uint32_t crBa = bits(CR, 31 - ba);
78 uint32_t crBb = bits(CR, 31 - bb);
79 CR = insertBits(CR, 31 - bt, !(crBa & crBb));
80 }});
81
82 193: crxor({{
83 uint32_t crBa = bits(CR, 31 - ba);
84 uint32_t crBb = bits(CR, 31 - bb);
85 CR = insertBits(CR, 31 - bt, crBa ^ crBb);
86 }});
87
88 33: crnor({{
89 uint32_t crBa = bits(CR, 31 - ba);
90 uint32_t crBb = bits(CR, 31 - bb);
91 CR = insertBits(CR, 31 - bt, !(crBa | crBb));
92 }});
93
// creqv: single-bit operands, so equality is equivalent to XNOR.
94 289: creqv({{
95 uint32_t crBa = bits(CR, 31 - ba);
96 uint32_t crBb = bits(CR, 31 - bb);
97 CR = insertBits(CR, 31 - bt, crBa == crBb);
98 }});
99
// crandc / crorc: the "c" forms complement the second operand;
// logical (!) rather than bitwise (~) negation is deliberate since
// the operands are single bits.
100 129: crandc({{
101 uint32_t crBa = bits(CR, 31 - ba);
102 uint32_t crBb = bits(CR, 31 - bb);
103 CR = insertBits(CR, 31 - bt, crBa & !crBb);
104 }});
105
106 417: crorc({{
107 uint32_t crBa = bits(CR, 31 - ba);
108 uint32_t crBb = bits(CR, 31 - bb);
109 CR = insertBits(CR, 31 - bt, crBa | !crBb);
110 }});
111 }
112
// Move the 4-bit CR field bfa into CR field bf.
113 format CondMoveOp {
114 0: mcrf({{
115 uint32_t crBfa = bits(CR, 31 - bfa*4, 28 - bfa*4);
116 CR = insertBits(CR, 31 - bf*4, 28 - bf*4, crBfa);
117 }});
118 }
119
// Instruction synchronize: no data side effects, only serialization.
120 format MiscOp {
121 150: isync({{ }}, [ IsSerializeAfter ]);
122 }
123
// addpcis: add a shifted immediate to the next instruction address.
124 default: decode DX_XO {
125 format IntDispArithOp {
126 2: addpcis({{ Rt = NIA + (disp << 16); }});
127 }
128 }
129 }
130
// System call: raises a fault object (presumably serviced by the
// syscall-emulation fault handler — see SESyscallFault).
131 17: IntOp::sc({{ return std::make_shared<SESyscallFault>(); }});
132
// Loads/stores with a displacement. Operand suffixes encode access
// size and signedness: _ub = u8, _uh = u16, _sh = s16, _uw = u32,
// _sw = s32; bare Mem is the full doubleword.
133 format LoadDispOp {
134 34: lbz({{ Rt = Mem_ub; }});
135 40: lhz({{ Rt = Mem_uh; }});
136 42: lha({{ Rt = Mem_sh; }});
137 32: lwz({{ Rt = Mem_uw; }});
138 }
139
// DS-form doubleword loads; the "Shift" formats presumably scale the
// displacement field as required by DS-form encoding.
140 58: decode DS_XO {
141 format LoadDispShiftOp {
142 2: lwa({{ Rt = Mem_sw; }});
143 0: ld({{ Rt = Mem; }});
144 }
145
// "Update" forms also write the effective address back to Ra.
146 format LoadDispShiftUpdateOp {
147 1: ldu({{ Rt = Mem; }});
148 }
149 }
150
// DS-form doubleword stores, plain and with Ra update.
151 62: decode DS_XO {
152 format StoreDispShiftOp {
153 0: std({{ Mem = Rs; }});
154 }
155
156 format StoreDispShiftUpdateOp {
157 1: stdu({{ Mem = Rs; }});
158 }
159 }
160
161 format LoadDispUpdateOp {
162 35: lbzu({{ Rt = Mem_ub; }});
163 41: lhzu({{ Rt = Mem_uh; }});
164 43: lhau({{ Rt = Mem_sh; }});
165 33: lwzu({{ Rt = Mem_uw; }});
166 }
167
168 format StoreDispOp {
169 38: stb({{ Mem_ub = Rs_ub; }});
170 44: sth({{ Mem_uh = Rs_uh; }});
171 36: stw({{ Mem_uw = Rs_uw; }});
172 }
173
174 format StoreDispUpdateOp {
175 39: stbu({{ Mem_ub = Rs_ub; }});
176 45: sthu({{ Mem_uh = Rs_uh; }});
177 37: stwu({{ Mem_uw = Rs_uw; }});
178 }
179
// Add immediate. The second code blob of each entry is the Ra == R0
// case, where the operand is the literal value 0 rather than the
// contents of R0 (hence the "CheckRa" format — confirm against the
// format definition).
// Fixed: the first alternate blob was missing its terminating
// semicolon ("Rt = simm"), inconsistent with every other blob.
180 format IntImmArithCheckRaOp {
181 14: addi({{ Rt = Ra + simm; }},
182 {{ Rt = simm; }});
183 15: addis({{ Rt = Ra + (simm << 16); }},
184 {{ Rt = simm << 16; }});
185 }
186
// Immediate arithmetic with carry/CR side effects.
// NOTE(review): the trailing boolean arguments appear to request CA
// computation and (second flag on addic_) CR0 recording — confirm
// against the IntImmArithOp format definition.
187 format IntImmArithOp {
188 12: addic({{
189 uint64_t src = Ra;
190 Rt = src + simm;
191 }},
192 true);
193
194 13: addic_({{
195 uint64_t src = Ra;
196 Rt = src + simm;
197 }},
198 true, true);
199
// subfic: simm - Ra computed as ~Ra + simm + 1 (two's complement).
200 8: subfic({{
201 uint64_t src = ~Ra;
202 Rt = src + simm + 1;
203 }},
204 true);
205
// mulli: low 64 bits of the signed product Ra * simm.
206 7: mulli({{
207 int64_t res = Ra_sd * simm;
208 Rt = res;
209 }});
210 }
211
212 4: decode VA_XO {
213
214 // Arithmetic instructions that use source registers Ra, Rb and Rc,
215 // with destination register Rt.
// multiplyAdd() yields the 128-bit value (Ra * Rb) + Rc as a
// (low doubleword, high doubleword) pair: maddld keeps the first
// element, maddhd/maddhdu keep the second.
216 format IntArithOp {
// maddhd: high doubleword of the signed 128-bit result.
217 48: maddhd({{
218 int64_t res;
219 std::tie(std::ignore, res) = multiplyAdd(Ra_sd, Rb_sd, Rc_sd);
220 Rt = res;
221 }});
222
// maddhdu: high doubleword of the unsigned 128-bit result.
223 49: maddhdu({{
224 uint64_t res;
225 std::tie(std::ignore, res) = multiplyAdd(Ra, Rb, Rc);
226 Rt = res;
227 }});
228
// maddld: low doubleword (same bits for signed and unsigned).
229 51: maddld({{
230 uint64_t res;
231 std::tie(res, std::ignore) = multiplyAdd(Ra_sd, Rb_sd, Rc_sd);
232 Rt = res;
233 }});
234 }
235 }
236
// Immediate compares: build a 4-bit comparison result (folding in
// XER.so) and insert it into CR field BF. cmpli is unsigned,
// cmpi signed.
237 format IntImmOp {
238 10: cmpli({{
239 Xer xer = XER;
240 uint32_t cr = makeCRField(Ra, (uint32_t)uimm, xer.so);
241 CR = insertCRField(CR, BF, cr);
242 }});
243 11: cmpi({{
244 Xer xer = XER;
245 uint32_t cr = makeCRField(Ra_sw, (int32_t)imm, xer.so);
246 CR = insertCRField(CR, BF, cr);
247 }});
248 }
249
// Immediate logicals; the "*is" forms operate on the immediate
// shifted into the upper halfword.
// NOTE(review): the trailing "true" on andi_/andis_ presumably
// enables CR0 recording — confirm against IntImmLogicOp.
250 format IntImmLogicOp {
251 24: ori({{ Ra = Rs | uimm; }});
252 25: oris({{ Ra = Rs | (uimm << 16); }});
253 26: xori({{ Ra = Rs ^ uimm; }});
254 27: xoris({{ Ra = Rs ^ (uimm << 16); }});
255 28: andi_({{ Ra = Rs & uimm; }},
256 true);
257 29: andis_({{ Ra = Rs & (uimm << 16); }},
258 true);
259 }
260
// Rotate-and-mask; fullMask is supplied by the IntRotateOp format
// (presumably built from the MB/ME instruction fields — confirm).
261 format IntRotateOp {
262 21: rlwinm({{ Ra = rotateValue(Rs, sh) & fullMask; }});
263 23: rlwnm({{ Ra = rotateValue(Rs, Rb) & fullMask; }});
// rlwimi merges rotated bits under the mask with Ra outside it.
264 20: rlwimi({{ Ra = (rotateValue(Rs, sh) & fullMask) |
265 (Ra & ~fullMask); }});
266 }
267
268 // There are a large number of instructions that have the same primary
269 // opcode (PO) of 31. In this case, the instructions are of different
270 // forms. For every form, the XO fields may vary in position and width.
271 // The X, XFL, XFX and XL form instructions use bits 21 - 30 and the
272 // XO form instructions use bits 22 - 30 as extended opcode (XO). To
273 // avoid conflicts, instructions of each form have to be defined under
274 // separate decode blocks. However, only a single decode block can be
275 // associated with a particular PO and it will recognize only one type
276 // of XO field. A solution for associating decode blocks for the other
277 // types of XO fields with the same PO is to have the other blocks as
278 // nested default cases.
279 31: decode X_XO {
280
281 // All loads with an index register. The non-update versions
282 // all use the value 0 if Ra == R0, not the value contained in
283 // R0. Others update Ra with the effective address. In all cases,
284 // Ra and Rb are source registers, Rt is the destination.
// The l*arx load-reserve forms record a reservation (flag, length,
// address) that the matching st*cx. store-conditional checks.
// The *brx forms load byte-reversed data.
285 format LoadIndexOp {
286 87: lbzx({{ Rt = Mem_ub; }});
287 52: lbarx({{ Rt = Mem_ub; Rsv = 1; RsvLen = 1; RsvAddr = EA; }});
288 279: lhzx({{ Rt = Mem_uh; }});
289 343: lhax({{ Rt = Mem_sh; }});
290 116: lharx({{ Rt = Mem_uh; Rsv = 1; RsvLen = 2; RsvAddr = EA; }});
291 790: lhbrx({{ Rt = swap_byte(Mem_uh); }});
292 23: lwzx({{ Rt = Mem_uw; }});
293 341: lwax({{ Rt = Mem_sw; }});
294 20: lwarx({{ Rt = Mem_uw; Rsv = 1; RsvLen = 4; RsvAddr = EA; }});
295 534: lwbrx({{ Rt = swap_byte(Mem_uw); }});
// NOTE(review): ldx uses bare Mem while ldarx spells Mem_ud —
// presumably the same 8-byte access; confirm the default Mem width.
296 21: ldx({{ Rt = Mem; }});
297 84: ldarx({{ Rt = Mem_ud; Rsv = 1; RsvLen = 8; RsvAddr = EA; }});
298 532: ldbrx({{ Rt = swap_byte(Mem); }});
299 535: lfsx({{ Ft_sf = Mem_sf; }});
300 599: lfdx({{ Ft = Mem_df; }});
301 855: lfiwax({{ Ft_uw = Mem; }});
302 }
303
// Indexed loads that also write the effective address back to Ra.
304 format LoadIndexUpdateOp {
305 119: lbzux({{ Rt = Mem_ub; }});
306 311: lhzux({{ Rt = Mem_uh; }});
307 375: lhaux({{ Rt = Mem_sh; }});
308 55: lwzux({{ Rt = Mem_uw; }});
309 373: lwaux({{ Rt = Mem_sw; }});
310 53: ldux({{ Rt = Mem; }});
311 567: lfsux({{ Ft_sf = Mem_sf; }});
312 631: lfdux({{ Ft = Mem_df; }});
313 }
314
// Indexed stores. The store-conditional forms (stbcx., sthcx.,
// stwcx., stdcx.) consider the store successful only when a
// reservation with the matching length and address is held; CR0 is
// set to EQ (0x2) on success, ORed with XER.so, and the reservation
// is always cleared afterwards.
// NOTE(review): the memory write itself is performed unconditionally
// here, even when the reservation check fails — confirm this matches
// the intended memory model.
// Fixed: stdx's code blob was missing its terminating semicolon
// ("Mem = Rs"), inconsistent with every sibling store blob.
315 format StoreIndexOp {
316 215: stbx({{ Mem_ub = Rs_ub; }});
317 694: stbcx({{
318 bool store_performed = false;
319 Mem_ub = Rs_ub;
320 if (Rsv) {
321 if (RsvLen == 1) {
322 if (RsvAddr == EA) {
323 store_performed = true;
324 }
325 }
326 }
327 Xer xer = XER;
328 Cr cr = CR;
329 cr.cr0 = ((store_performed ? 0x2 : 0x0) | xer.so);
330 CR = cr;
331 Rsv = 0;
332 }});
333 407: sthx({{ Mem_uh = Rs_uh; }});
334 726: sthcx({{
335 bool store_performed = false;
336 Mem_uh = Rs_uh;
337 if (Rsv) {
338 if (RsvLen == 2) {
339 if (RsvAddr == EA) {
340 store_performed = true;
341 }
342 }
343 }
344 Xer xer = XER;
345 Cr cr = CR;
346 cr.cr0 = ((store_performed ? 0x2 : 0x0) | xer.so);
347 CR = cr;
348 Rsv = 0;
349 }});
350 918: sthbrx({{ Mem_uh = swap_byte(Rs_uh); }});
351 151: stwx({{ Mem_uw = Rs_uw; }});
352 150: stwcx({{
353 bool store_performed = false;
354 Mem_uw = Rs_uw;
355 if (Rsv) {
356 if (RsvLen == 4) {
357 if (RsvAddr == EA) {
358 store_performed = true;
359 }
360 }
361 }
362 Xer xer = XER;
363 Cr cr = CR;
364 cr.cr0 = ((store_performed ? 0x2 : 0x0) | xer.so);
365 CR = cr;
366 Rsv = 0;
367 }});
368 662: stwbrx({{ Mem_uw = swap_byte(Rs_uw); }});
369 149: stdx({{ Mem = Rs; }});
370 214: stdcx({{
371 bool store_performed = false;
372 Mem = Rs;
373 if (Rsv) {
374 if (RsvLen == 8) {
375 if (RsvAddr == EA) {
376 store_performed = true;
377 }
378 }
379 }
380 Xer xer = XER;
381 Cr cr = CR;
382 cr.cr0 = ((store_performed ? 0x2 : 0x0) | xer.so);
383 CR = cr;
384 Rsv = 0;
385 }});
386 660: stdbrx({{ Mem = swap_byte(Rs); }});
387 }
388
// Indexed stores that also write the effective address back to Ra.
389 format StoreIndexUpdateOp {
390 247: stbux({{ Mem_ub = Rs_ub; }});
391 439: sthux({{ Mem_uh = Rs_uh; }});
392 183: stwux({{ Mem_uw = Rs_uw; }});
393 181: stdux({{ Mem = Rs; }});
394 }
395
// Register compares: cmp is signed, cmpl unsigned. The 4-bit result
// (including XER.so) is inserted into CR field BF.
396 format IntOp {
397 0: cmp({{
398 Xer xer = XER;
399 uint32_t cr = makeCRField(Ra_sw, Rb_sw, xer.so);
400 CR = insertCRField(CR, BF, cr);
401 }});
402
403 32: cmpl({{
404 Xer xer = XER;
405 uint32_t cr = makeCRField(Ra, Rb, xer.so);
406 CR = insertCRField(CR, BF, cr);
407 }});
408 }
409
410 // Integer logic instructions use source registers Rs and Rb,
411 // with destination register Ra.
412 format IntLogicOp {
413 28: and({{ Ra = Rs & Rb; }});
414 316: xor({{ Ra = Rs ^ Rb; }});
415 476: nand({{ Ra = ~(Rs & Rb); }});
416 444: or({{ Ra = Rs | Rb; }});
417 124: nor({{ Ra = ~(Rs | Rb); }});
418 60: andc({{ Ra = Rs & ~Rb; }});
419 954: extsb({{ Ra = sext<8>(Rs); }});
420 284: eqv({{ Ra = ~(Rs ^ Rb); }});
421 412: orc({{ Ra = Rs | ~Rb; }});
422 922: extsh({{ Ra = sext<16>(Rs); }});
// Count leading zeros of the 32-bit value; 32 for a zero input.
423 26: cntlzw({{ Ra = Rs == 0 ? 32 : 31 - findMsbSet(Rs); }});
// cmpb: per-byte compare — each result byte is 0xff where the
// corresponding bytes of Rs and Rb match, 0x00 otherwise.
424 508: cmpb({{
425 uint32_t val = 0;
426 for (int n = 0; n < 32; n += 8) {
427 if(bits(Rs, n+7, n) == bits(Rb, n+7, n)) {
428 val = insertBits(val, n+7, n, 0xff);
429 }
430 }
431 Ra = val;
432 }});
433
// slw/srw: 32-bit shifts; bit 0x20 of the shift amount means a
// shift of 32 or more, which yields zero.
434 24: slw({{
435 if (Rb & 0x20) {
436 Ra = 0;
437 } else {
438 Ra = Rs << (Rb & 0x1f);
439 }
440 }});
441
442 536: srw({{
443 if (Rb & 0x20) {
444 Ra = 0;
445 } else {
446 Ra = Rs >> (Rb & 0x1f);
447 }
448 }});
449
// sraw: 32-bit arithmetic shift right. XER.CA is set exactly when
// the source is negative and one or more 1-bits are shifted out
// (so that using the result for division would be inexact).
450 792: sraw({{
451 bool shiftSetCA = false;
452 int32_t s = Rs;
453 if (Rb == 0) {
454 Ra = Rs;
455 shiftSetCA = true;
456 } else if (Rb & 0x20) {
// Shift amount >= 32: result is all sign bits.
457 if (s < 0) {
458 Ra = (uint32_t)-1;
459 if (s & 0x7fffffff) {
460 shiftSetCA = true;
461 } else {
462 shiftSetCA = false;
463 }
464 } else {
465 Ra = 0;
466 shiftSetCA = false;
467 }
468 } else {
469 Ra = s >> (Rb & 0x1f);
// CA if any 1-bits were shifted out of a negative value.
470 if (s < 0 && (s << (32 - (Rb & 0x1f))) != 0) {
471 shiftSetCA = true;
472 } else {
473 shiftSetCA = false;
474 }
475 }
476 Xer xer1 = XER;
477 if (shiftSetCA) {
478 xer1.ca = 1;
479 } else {
480 xer1.ca = 0;
481 }
482 XER = xer1;
483 }});
484 }
485
486 // Integer logic instructions with a shift value.
// srawi: 32-bit arithmetic shift right by immediate. XER.CA is set
// only when the source is negative and 1-bits are shifted out.
487 format IntShiftOp {
488 824: srawi({{
489 bool shiftSetCA = false;
490 if (sh == 0) {
491 Ra = Rs;
492 shiftSetCA = false;
493 } else {
494 int32_t s = Rs;
495 Ra = s >> sh;
// Bits shifted out of a negative value => carry.
496 if (s < 0 && (s << (32 - sh)) != 0) {
497 shiftSetCA = true;
498 } else {
499 shiftSetCA = false;
500 }
501 }
502 Xer xer1 = XER;
503 if (shiftSetCA) {
504 xer1.ca = 1;
505 } else {
506 xer1.ca = 0;
507 }
508 XER = xer1;
509 }});
510 }
511
// Floating-point indexed stores.
512 format StoreIndexOp {
513 663: stfsx({{ Mem_sf = Fs_sf; }});
514 727: stfdx({{ Mem_df = Fs; }});
// stfiwx: store the low word of the FP register as an integer.
515 983: stfiwx({{ Mem = Fs_uw; }});
516 }
517
518 format StoreIndexUpdateOp {
519 695: stfsux({{ Mem_sf = Fs_sf; }});
520 759: stfdux({{ Mem_df = Fs; }});
521 }
522
523 // Data cache hints (no-ops here) and memory barriers.
524 format MiscOp {
525 278: dcbt({{ }});
526 246: dcbtst({{ }});
527 598: sync({{ }}, [ IsReadBarrier, IsWriteBarrier ]);
528 854: eieio({{ }}, [ IsReadBarrier, IsWriteBarrier ]);
529 }
530
531 // These instructions are of XO form with bit 21 as the OE bit.
532 default: decode XO_XO {
533
534 // These instructions can all be reduced to the form
535 // Rt = src1 + src2 [+ CA], therefore we just give src1 and src2
536 // (and, if necessary, CA) definitions and let the python script
537 // deal with setting things up correctly. We also give flags to
538 // say which control registers to set.
// Subtraction is expressed in two's-complement form: ~Ra + Rb + 1.
// The *me forms add -1, the *ze forms add only the carry.
539 format IntSumOp {
540 266: add({{ Ra }}, {{ Rb }});
541 40: subf({{ ~Ra }}, {{ Rb }}, {{ 1 }});
542 10: addc({{ Ra }}, {{ Rb }},
543 computeCA = true);
544 8: subfc({{ ~Ra }}, {{ Rb }}, {{ 1 }},
545 true);
546 104: neg({{ ~Ra }}, {{ 1 }});
547 138: adde({{ Ra }}, {{ Rb }}, {{ xer.ca }},
548 true);
549 234: addme({{ Ra }}, {{ -1ULL }}, {{ xer.ca }},
550 true);
551 136: subfe({{ ~Ra }}, {{ Rb }}, {{ xer.ca }},
552 true);
553 232: subfme({{ ~Ra }}, {{ -1ULL }}, {{ xer.ca }},
554 true);
555 202: addze({{ Ra }}, {{ xer.ca }},
556 computeCA = true);
557 200: subfze({{ ~Ra }}, {{ xer.ca }},
558 computeCA = true);
559 }
560
561 // Arithmetic instructions all use source registers Ra and Rb,
562 // with destination register Rt.
// multiply() returns the full product as a (low, high) pair; the
// mulh* forms keep the high half, mulld the low half. Entries with a
// trailing "true" may set XER.OV via setOV.
// Fixed: mulld previously initialized res with "src1 * src2" and
// then immediately overwrote it via std::tie — a dead store.
563 format IntArithCheckRcOp {
// mulhw/mulhwu: high 32 bits of the 64-bit (un)signed product.
564 75: mulhw({{
565 uint64_t res = (int64_t)Ra_sw * Rb_sw;
566 res = res >> 32;
567 Rt = res;
568 }});
569
570 11: mulhwu({{
571 uint64_t res = (uint64_t)Ra_uw * Rb_uw;
572 res = res >> 32;
573 Rt = res;
574 }});
575
// mullw: low word of the signed product; OV if it does not fit
// in 32 bits.
576 235: mullw({{
577 int64_t res = (int64_t)Ra_sw * Rb_sw;
578 if (res != (int32_t)res) {
579 setOV = true;
580 }
581 Rt = res;
582 }},
583 true);
584
585 73: mulhd({{
586 int64_t res;
587 std::tie(std::ignore, res) = multiply(Ra_sd, Rb_sd);
588 Rt = res;
589 }});
590
591 9: mulhdu({{
592 uint64_t res;
593 std::tie(std::ignore, res) = multiply(Ra, Rb);
594 Rt = res;
595 }});
596
// mulld: low doubleword of the signed product; OV detected by
// checking that the division undoes the multiplication.
597 233: mulld({{
598 int64_t src1 = Ra_sd;
599 int64_t src2 = Rb_sd;
600 uint64_t res;
601 std::tie(res, std::ignore) = multiply(src1, src2);
602 if (src1 != 0 && (int64_t)res / src1 != src2) {
603 setOV = true;
604 }
605 Rt = res;
606 }},
607 true);
608
// divw: signed divide; OV on divide-by-zero and on the single
// overflowing case INT32_MIN / -1.
609 491: divw({{
610 int32_t src1 = Ra_sw;
611 int32_t src2 = Rb_sw;
612 if ((src1 != INT32_MIN || src2 != -1) && src2 != 0) {
613 Rt = (uint32_t)(src1 / src2);
614 } else {
615 Rt = 0;
616 setOV = true;
617 }
618 }},
619 true);
620
// divwu: unsigned divide; OV only on divide-by-zero.
621 459: divwu({{
622 uint32_t src1 = Ra_uw;
623 uint32_t src2 = Rb_uw;
624 if (src2 != 0) {
625 Rt = src1 / src2;
626 } else {
627 Rt = 0;
628 setOV = true;
629 }
630 }},
631 true);
632 }
633
// SPR and CR move instructions. The SPR numbers below are the raw
// 10-bit field values as they appear in the instruction (split-field
// encoding): 0x20 = XER, 0x100 = LR, 0x120 = CTR, 0x1f9 = TAR.
634 default: decode XFX_XO {
635 format IntOp {
636 339: decode SPR {
637 0x20: mfxer({{ Rt = XER; }});
638 0x100: mflr({{ Rt = LR; }});
639 0x120: mfctr({{ Rt = CTR; }});
640 0x1f9: mftar({{ Rt = TAR; }});
641 }
642
643 467: decode SPR {
644 0x20: mtxer({{ XER = Rs; }});
645 0x100: mtlr({{ LR = Rs; }});
646 0x120: mtctr({{ CTR = Rs; }});
647 0x1f9: mttar({{ TAR = Rs; }});
648 }
649
// mtcrf: copy only the CR fields selected by the FXM byte mask.
650 144: mtcrf({{
651 uint32_t mask = 0;
652 for (int i = 0; i < 8; ++i) {
653 if (((FXM >> i) & 0x1) == 0x1) {
654 mask |= 0xf << (4 * i);
655 }
656 }
657 CR = (Rs & mask) | (CR & ~mask);
658 }});
659
660 19: mfcr({{ Rt = CR; }});
661
// mcrxr: move XER bits 31:28 into CR field BF and clear them.
662 512: mcrxr({{
663 CR = insertCRField(CR, BF, XER<31:28>);
664 XER = XER<27:0>;
665 }});
666 }
667 }
668 }
669 }
670
// Floating-point loads/stores with a displacement (_sf = single,
// _df = double); "u" forms also write the EA back to Ra.
671 format LoadDispOp {
672 48: lfs({{ Ft_sf = Mem_sf; }});
673 50: lfd({{ Ft = Mem_df; }});
674 }
675
676 format LoadDispUpdateOp {
677 49: lfsu({{ Ft_sf = Mem_sf; }});
678 51: lfdu({{ Ft = Mem_df; }});
679 }
680
681 format StoreDispOp {
682 52: stfs({{ Mem_sf = Fs_sf; }});
683 54: stfd({{ Mem_df = Fs; }});
684 }
685
686 format StoreDispUpdateOp {
687 53: stfsu({{ Mem_sf = Fs_sf; }});
688 55: stfdu({{ Mem_df = Fs; }});
689 }
690
// Single-precision arithmetic (PO 59); note fmuls/fmadds use Fc as
// the multiplicand per the A-form encoding.
691 format FloatArithOp {
692 59: decode A_XO {
693 21: fadds({{ Ft = Fa + Fb; }});
694 20: fsubs({{ Ft = Fa - Fb; }});
695 25: fmuls({{ Ft = Fa * Fc; }});
696 18: fdivs({{ Ft = Fa / Fb; }});
697 29: fmadds({{ Ft = (Fa * Fc) + Fb; }});
698 28: fmsubs({{ Ft = (Fa * Fc) - Fb; }});
699 31: fnmadds({{ Ft = -((Fa * Fc) + Fb); }});
700 30: fnmsubs({{ Ft = -((Fa * Fc) - Fb); }});
701 }
702 }
703
// Double-precision arithmetic and FP status/control (PO 63).
704 63: decode A_XO {
705 format FloatArithOp {
706 21: fadd({{ Ft = Fa + Fb; }});
707 20: fsub({{ Ft = Fa - Fb; }});
708 25: fmul({{ Ft = Fa * Fc; }});
709 18: fdiv({{ Ft = Fa / Fb; }});
710 29: fmadd({{ Ft = (Fa * Fc) + Fb; }});
711 28: fmsub({{ Ft = (Fa * Fc) - Fb; }});
712 31: fnmadd({{ Ft = -((Fa * Fc) + Fb); }});
713 30: fnmsub({{ Ft = -((Fa * Fc) - Fb); }});
714 }
715
716 default: decode X_XO {
// Sign-bit manipulation done on the raw 64-bit image (bit 63 is
// the IEEE sign bit): fabs clears it, fnabs sets it, fcpsgn
// copies it from Fa.
717 format FloatRCCheckOp {
718 72: fmr({{ Ft = Fb; }});
719 264: fabs({{
720 Ft_ud = Fb_ud;
721 Ft_ud = insertBits(Ft_ud, 63, 0); }});
722 136: fnabs({{
723 Ft_ud = Fb_ud;
724 Ft_ud = insertBits(Ft_ud, 63, 1); }});
725 40: fneg({{ Ft = -Fb; }});
726 8: fcpsgn({{
727 Ft_ud = Fb_ud;
728 Ft_ud = insertBits(Ft_ud, 63, Fa_ud<63:63>);
729 }});
730 }
731
// frsp rounds to single precision; fctiwz converts to a 32-bit
// integer rounding toward zero (trunc).
732 format FloatConvertOp {
733 12: frsp({{ Ft_sf = Fb; }});
734 15: fctiwz({{ Ft_sw = (int32_t)trunc(Fb); }});
735 }
736
// fcmpu: unordered compare — the 4-bit result goes into both
// FPSCR.FPCC and CR field BF.
737 format FloatOp {
738 0: fcmpu({{
739 uint32_t c = makeCRField(Fa, Fb);
740 Fpscr fpscr = FPSCR;
741 fpscr.fprf.fpcc = c;
742 FPSCR = fpscr;
743 CR = insertCRField(CR, BF, c);
744 }});
745 }
746
// FPSCR access. W_FIELD selects which word of the FPSCR image a
// field index refers to; BT is a bit index numbered from the MSB
// (hence 31 - BT).
747 format FloatRCCheckOp {
748 583: mffs({{ Ft_ud = FPSCR; }});
749 134: mtfsfi({{
750 FPSCR = insertCRField(FPSCR, BF + (8 * (1 - W_FIELD)),
751 U_FIELD);
752 }});
753 70: mtfsb0({{ FPSCR = insertBits(FPSCR, 31 - BT, 0); }});
754 38: mtfsb1({{ FPSCR = insertBits(FPSCR, 31 - BT, 1); }});
755
// mtfsf: L_FIELD == 1 replaces the whole FPSCR; otherwise only
// the 4-bit fields selected by the FLM mask are copied from Fb.
756 default: decode XFL_XO {
757 711: mtfsf({{
758 if (L_FIELD == 1) { FPSCR = Fb_ud; }
759 else {
760 for (int i = 0; i < 8; ++i) {
761 if (bits(FLM, i) == 1) {
762 int k = 4 * (i + (8 * (1 - W_FIELD)));
763 FPSCR = insertBits(FPSCR, k + 3, k,
764 bits(Fb_ud, k + 3, k));
765 }
766 }
767 }
768 }});
769 }
770 }
771 }
772 }
773 }