}
format LoadOrNop {
- 0x0a: ldbu({{ Ra.uq = Mem.ub; }});
- 0x0c: ldwu({{ Ra.uq = Mem.uw; }});
- 0x0b: ldq_u({{ Ra = Mem.uq; }}, ea_code = {{ EA = (Rb + disp) & ~7; }});
- 0x23: ldt({{ Fa = Mem.df; }});
- 0x2a: ldl_l({{ Ra.sl = Mem.sl; }}, mem_flags = LLSC);
- 0x2b: ldq_l({{ Ra.uq = Mem.uq; }}, mem_flags = LLSC);
+ 0x0a: ldbu({{ Ra_uq = Mem_ub; }});
+ 0x0c: ldwu({{ Ra_uq = Mem_uw; }});
+ 0x0b: ldq_u({{ Ra = Mem_uq; }}, ea_code = {{ EA = (Rb + disp) & ~7; }});
+ 0x23: ldt({{ Fa = Mem_df; }});
+ 0x2a: ldl_l({{ Ra_sl = Mem_sl; }}, mem_flags = LLSC);
+ 0x2b: ldq_l({{ Ra_uq = Mem_uq; }}, mem_flags = LLSC);
}
format LoadOrPrefetch {
- 0x28: ldl({{ Ra.sl = Mem.sl; }});
- 0x29: ldq({{ Ra.uq = Mem.uq; }}, pf_flags = EVICT_NEXT);
+ 0x28: ldl({{ Ra_sl = Mem_sl; }});
+ 0x29: ldq({{ Ra_uq = Mem_uq; }}, pf_flags = EVICT_NEXT);
// IsFloating flag on lds gets the prefetch to disassemble
// using f31 instead of r31... functionally it's unnecessary
- 0x22: lds({{ Fa.uq = s_to_t(Mem.ul); }},
+ 0x22: lds({{ Fa_uq = s_to_t(Mem_ul); }},
pf_flags = PF_EXCLUSIVE, inst_flags = IsFloating);
}
format Store {
- 0x0e: stb({{ Mem.ub = Ra<7:0>; }});
- 0x0d: stw({{ Mem.uw = Ra<15:0>; }});
- 0x2c: stl({{ Mem.ul = Ra<31:0>; }});
- 0x2d: stq({{ Mem.uq = Ra.uq; }});
- 0x0f: stq_u({{ Mem.uq = Ra.uq; }}, {{ EA = (Rb + disp) & ~7; }});
- 0x26: sts({{ Mem.ul = t_to_s(Fa.uq); }});
- 0x27: stt({{ Mem.df = Fa; }});
+ 0x0e: stb({{ Mem_ub = Ra<7:0>; }});
+ 0x0d: stw({{ Mem_uw = Ra<15:0>; }});
+ 0x2c: stl({{ Mem_ul = Ra<31:0>; }});
+ 0x2d: stq({{ Mem_uq = Ra_uq; }});
+ 0x0f: stq_u({{ Mem_uq = Ra_uq; }}, {{ EA = (Rb + disp) & ~7; }});
+ 0x26: sts({{ Mem_ul = t_to_s(Fa_uq); }});
+ 0x27: stt({{ Mem_df = Fa; }});
}
format StoreCond {
- 0x2e: stl_c({{ Mem.ul = Ra<31:0>; }},
+ 0x2e: stl_c({{ Mem_ul = Ra<31:0>; }},
{{
uint64_t tmp = write_result;
// see stq_c
if (tmp == 1) {
xc->setStCondFailures(0);
}
}}, mem_flags = LLSC, inst_flags = IsStoreConditional);
- 0x2f: stq_c({{ Mem.uq = Ra; }},
+ 0x2f: stq_c({{ Mem_uq = Ra; }},
{{
uint64_t tmp = write_result;
// If the write operation returns 0 or 1, then this was a
// conventional store conditional, and the value indicates the
// success or failure of the operation.
0x10: decode INTFUNC { // integer arithmetic operations
- 0x00: addl({{ Rc.sl = Ra.sl + Rb_or_imm.sl; }});
+ 0x00: addl({{ Rc_sl = Ra_sl + Rb_or_imm_sl; }});
0x40: addlv({{
- int32_t tmp = Ra.sl + Rb_or_imm.sl;
+ int32_t tmp = Ra_sl + Rb_or_imm_sl;
// signed overflow occurs when operands have same sign
// and sign of result does not match.
- if (Ra.sl<31:> == Rb_or_imm.sl<31:> && tmp<31:> != Ra.sl<31:>)
+ if (Ra_sl<31:> == Rb_or_imm_sl<31:> && tmp<31:> != Ra_sl<31:>)
fault = new IntegerOverflowFault;
- Rc.sl = tmp;
+ Rc_sl = tmp;
}});
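// Example: 0x7fffffff + 1 gives 0x80000000; both operands have
// sign bit 0 but the result's sign bit is 1, so the test above
// raises IntegerOverflowFault.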
- 0x02: s4addl({{ Rc.sl = (Ra.sl << 2) + Rb_or_imm.sl; }});
- 0x12: s8addl({{ Rc.sl = (Ra.sl << 3) + Rb_or_imm.sl; }});
+ 0x02: s4addl({{ Rc_sl = (Ra_sl << 2) + Rb_or_imm_sl; }});
+ 0x12: s8addl({{ Rc_sl = (Ra_sl << 3) + Rb_or_imm_sl; }});
0x20: addq({{ Rc = Ra + Rb_or_imm; }});
0x60: addqv({{
0x22: s4addq({{ Rc = (Ra << 2) + Rb_or_imm; }});
0x32: s8addq({{ Rc = (Ra << 3) + Rb_or_imm; }});
- 0x09: subl({{ Rc.sl = Ra.sl - Rb_or_imm.sl; }});
+ 0x09: subl({{ Rc_sl = Ra_sl - Rb_or_imm_sl; }});
0x49: sublv({{
- int32_t tmp = Ra.sl - Rb_or_imm.sl;
+ int32_t tmp = Ra_sl - Rb_or_imm_sl;
// signed overflow detection is same as for add,
// except we need to look at the *complemented*
// sign bit of the subtrahend (Rb), i.e., if the initial
// signs are the *same* then no overflow can occur
- if (Ra.sl<31:> != Rb_or_imm.sl<31:> && tmp<31:> != Ra.sl<31:>)
+ if (Ra_sl<31:> != Rb_or_imm_sl<31:> && tmp<31:> != Ra_sl<31:>)
fault = new IntegerOverflowFault;
- Rc.sl = tmp;
+ Rc_sl = tmp;
}});
- 0x0b: s4subl({{ Rc.sl = (Ra.sl << 2) - Rb_or_imm.sl; }});
- 0x1b: s8subl({{ Rc.sl = (Ra.sl << 3) - Rb_or_imm.sl; }});
+ 0x0b: s4subl({{ Rc_sl = (Ra_sl << 2) - Rb_or_imm_sl; }});
+ 0x1b: s8subl({{ Rc_sl = (Ra_sl << 3) - Rb_or_imm_sl; }});
0x29: subq({{ Rc = Ra - Rb_or_imm; }});
0x69: subqv({{
0x3b: s8subq({{ Rc = (Ra << 3) - Rb_or_imm; }});
0x2d: cmpeq({{ Rc = (Ra == Rb_or_imm); }});
- 0x6d: cmple({{ Rc = (Ra.sq <= Rb_or_imm.sq); }});
- 0x4d: cmplt({{ Rc = (Ra.sq < Rb_or_imm.sq); }});
- 0x3d: cmpule({{ Rc = (Ra.uq <= Rb_or_imm.uq); }});
- 0x1d: cmpult({{ Rc = (Ra.uq < Rb_or_imm.uq); }});
+ 0x6d: cmple({{ Rc = (Ra_sq <= Rb_or_imm_sq); }});
+ 0x4d: cmplt({{ Rc = (Ra_sq < Rb_or_imm_sq); }});
+ 0x3d: cmpule({{ Rc = (Ra_uq <= Rb_or_imm_uq); }});
+ 0x1d: cmpult({{ Rc = (Ra_uq < Rb_or_imm_uq); }});
0x0f: cmpbge({{
int hi = 7;
int lo = 0;
uint64_t tmp = 0;
for (int i = 0; i < 8; ++i) {
- tmp |= (Ra.uq<hi:lo> >= Rb_or_imm.uq<hi:lo>) << i;
+ tmp |= (Ra_uq<hi:lo> >= Rb_or_imm_uq<hi:lo>) << i;
hi += 8;
lo += 8;
}
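// Example: when Ra == Rb_or_imm every byte lane compares >=, so
// tmp ends up 0xff -- one result bit per byte lane.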
0x16: cmovlbc({{ Rc = ((Ra & 1) == 0) ? Rb_or_imm : Rc; }});
0x24: cmoveq({{ Rc = (Ra == 0) ? Rb_or_imm : Rc; }});
0x26: cmovne({{ Rc = (Ra != 0) ? Rb_or_imm : Rc; }});
- 0x44: cmovlt({{ Rc = (Ra.sq < 0) ? Rb_or_imm : Rc; }});
- 0x46: cmovge({{ Rc = (Ra.sq >= 0) ? Rb_or_imm : Rc; }});
- 0x64: cmovle({{ Rc = (Ra.sq <= 0) ? Rb_or_imm : Rc; }});
- 0x66: cmovgt({{ Rc = (Ra.sq > 0) ? Rb_or_imm : Rc; }});
+ 0x44: cmovlt({{ Rc = (Ra_sq < 0) ? Rb_or_imm : Rc; }});
+ 0x46: cmovge({{ Rc = (Ra_sq >= 0) ? Rb_or_imm : Rc; }});
+ 0x64: cmovle({{ Rc = (Ra_sq <= 0) ? Rb_or_imm : Rc; }});
+ 0x66: cmovgt({{ Rc = (Ra_sq > 0) ? Rb_or_imm : Rc; }});
// For AMASK, RA must be R31.
0x61: decode RA {
0x12: decode INTFUNC {
0x39: sll({{ Rc = Ra << Rb_or_imm<5:0>; }});
- 0x34: srl({{ Rc = Ra.uq >> Rb_or_imm<5:0>; }});
- 0x3c: sra({{ Rc = Ra.sq >> Rb_or_imm<5:0>; }});
+ 0x34: srl({{ Rc = Ra_uq >> Rb_or_imm<5:0>; }});
+ 0x3c: sra({{ Rc = Ra_sq >> Rb_or_imm<5:0>; }});
0x02: mskbl({{ Rc = Ra & ~(mask( 8) << (Rb_or_imm<2:0> * 8)); }});
0x12: mskwl({{ Rc = Ra & ~(mask(16) << (Rb_or_imm<2:0> * 8)); }});
Rc = bv ? (Ra & ~(mask(64) >> (64 - 8 * bv))) : Ra;
}});
- 0x06: extbl({{ Rc = (Ra.uq >> (Rb_or_imm<2:0> * 8))< 7:0>; }});
- 0x16: extwl({{ Rc = (Ra.uq >> (Rb_or_imm<2:0> * 8))<15:0>; }});
- 0x26: extll({{ Rc = (Ra.uq >> (Rb_or_imm<2:0> * 8))<31:0>; }});
- 0x36: extql({{ Rc = (Ra.uq >> (Rb_or_imm<2:0> * 8)); }});
+ 0x06: extbl({{ Rc = (Ra_uq >> (Rb_or_imm<2:0> * 8))< 7:0>; }});
+ 0x16: extwl({{ Rc = (Ra_uq >> (Rb_or_imm<2:0> * 8))<15:0>; }});
+ 0x26: extll({{ Rc = (Ra_uq >> (Rb_or_imm<2:0> * 8))<31:0>; }});
+ 0x36: extql({{ Rc = (Ra_uq >> (Rb_or_imm<2:0> * 8)); }});
0x5a: extwh({{
Rc = (Ra << (64 - (Rb_or_imm<2:0> * 8))<5:0>)<15:0>; }});
0x57: inswh({{
int bv = Rb_or_imm<2:0>;
- Rc = bv ? (Ra.uq<15:0> >> (64 - 8 * bv)) : 0;
+ Rc = bv ? (Ra_uq<15:0> >> (64 - 8 * bv)) : 0;
}});
0x67: inslh({{
int bv = Rb_or_imm<2:0>;
- Rc = bv ? (Ra.uq<31:0> >> (64 - 8 * bv)) : 0;
+ Rc = bv ? (Ra_uq<31:0> >> (64 - 8 * bv)) : 0;
}});
0x77: insqh({{
int bv = Rb_or_imm<2:0>;
- Rc = bv ? (Ra.uq >> (64 - 8 * bv)) : 0;
+ Rc = bv ? (Ra_uq >> (64 - 8 * bv)) : 0;
}});
0x30: zap({{
}
0x13: decode INTFUNC { // integer multiplies
- 0x00: mull({{ Rc.sl = Ra.sl * Rb_or_imm.sl; }}, IntMultOp);
+ 0x00: mull({{ Rc_sl = Ra_sl * Rb_or_imm_sl; }}, IntMultOp);
0x20: mulq({{ Rc = Ra * Rb_or_imm; }}, IntMultOp);
0x30: umulh({{
uint64_t hi, lo;
}}, IntMultOp);
0x40: mullv({{
// 32-bit multiply with trap on overflow
- int64_t Rax = Ra.sl; // sign extended version of Ra.sl
- int64_t Rbx = Rb_or_imm.sl;
+ int64_t Rax = Ra_sl; // sign extended version of Ra_sl
+ int64_t Rbx = Rb_or_imm_sl;
int64_t tmp = Rax * Rbx;
// To avoid overflow, all the upper 32 bits must match
// the sign bit of the lower 32. We code this as
uint64_t sign_bits = tmp<63:31>;
if (sign_bits != 0 && sign_bits != mask(33))
fault = new IntegerOverflowFault;
- Rc.sl = tmp<31:0>;
+ Rc_sl = tmp<31:0>;
}}, IntMultOp);
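// Example: tmp<63:31> is 0 for 0x000000007fffffff (positive, fits)
// and mask(33) for 0xffffffff80000000 (negative, fits); any other
// bit pattern means the product does not fit in 32 bits.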
0x60: mulqv({{
// 64-bit multiply with trap on overflow
}
0x1c: decode INTFUNC {
- 0x00: decode RA { 31: sextb({{ Rc.sb = Rb_or_imm< 7:0>; }}); }
- 0x01: decode RA { 31: sextw({{ Rc.sw = Rb_or_imm<15:0>; }}); }
+ 0x00: decode RA { 31: sextb({{ Rc_sb = Rb_or_imm< 7:0>; }}); }
+ 0x01: decode RA { 31: sextw({{ Rc_sw = Rb_or_imm<15:0>; }}); }
0x30: ctpop({{
uint64_t count = 0;
int hi = 7;
int lo = 0;
for (int i = 0; i < 8; ++i) {
- uint8_t ra_ub = Ra.uq<hi:lo>;
- uint8_t rb_ub = Rb.uq<hi:lo>;
+ uint8_t ra_ub = Ra_uq<hi:lo>;
+ uint8_t rb_ub = Rb_uq<hi:lo>;
temp += (ra_ub >= rb_ub) ?
(ra_ub - rb_ub) : (rb_ub - ra_ub);
hi += 8;
0x34: unpkbw({{
- Rc = (Rb.uq<7:0>
- | (Rb.uq<15:8> << 16)
- | (Rb.uq<23:16> << 32)
- | (Rb.uq<31:24> << 48));
+ Rc = (Rb_uq<7:0>
+ | (Rb_uq<15:8> << 16)
+ | (Rb_uq<23:16> << 32)
+ | (Rb_uq<31:24> << 48));
}}, IntAluOp);
0x35: unpkbl({{
- Rc = (Rb.uq<7:0> | (Rb.uq<15:8> << 32));
+ Rc = (Rb_uq<7:0> | (Rb_uq<15:8> << 32));
}}, IntAluOp);
0x36: pkwb({{
- Rc = (Rb.uq<7:0>
- | (Rb.uq<23:16> << 8)
- | (Rb.uq<39:32> << 16)
- | (Rb.uq<55:48> << 24));
+ Rc = (Rb_uq<7:0>
+ | (Rb_uq<23:16> << 8)
+ | (Rb_uq<39:32> << 16)
+ | (Rb_uq<55:48> << 24));
}}, IntAluOp);
0x37: pklb({{
- Rc = (Rb.uq<7:0> | (Rb.uq<39:32> << 8));
+ Rc = (Rb_uq<7:0> | (Rb_uq<39:32> << 8));
}}, IntAluOp);
0x38: minsb8({{
int hi = 63;
int lo = 56;
for (int i = 7; i >= 0; --i) {
- int8_t ra_sb = Ra.uq<hi:lo>;
- int8_t rb_sb = Rb.uq<hi:lo>;
+ int8_t ra_sb = Ra_uq<hi:lo>;
+ int8_t rb_sb = Rb_uq<hi:lo>;
temp = ((temp << 8)
- | ((ra_sb < rb_sb) ? Ra.uq<hi:lo>
- : Rb.uq<hi:lo>));
+ | ((ra_sb < rb_sb) ? Ra_uq<hi:lo>
+ : Rb_uq<hi:lo>));
hi -= 8;
lo -= 8;
}
int hi = 63;
int lo = 48;
for (int i = 3; i >= 0; --i) {
- int16_t ra_sw = Ra.uq<hi:lo>;
- int16_t rb_sw = Rb.uq<hi:lo>;
+ int16_t ra_sw = Ra_uq<hi:lo>;
+ int16_t rb_sw = Rb_uq<hi:lo>;
temp = ((temp << 16)
- | ((ra_sw < rb_sw) ? Ra.uq<hi:lo>
- : Rb.uq<hi:lo>));
+ | ((ra_sw < rb_sw) ? Ra_uq<hi:lo>
+ : Rb_uq<hi:lo>));
hi -= 16;
lo -= 16;
}
int hi = 63;
int lo = 56;
for (int i = 7; i >= 0; --i) {
- uint8_t ra_ub = Ra.uq<hi:lo>;
- uint8_t rb_ub = Rb.uq<hi:lo>;
+ uint8_t ra_ub = Ra_uq<hi:lo>;
+ uint8_t rb_ub = Rb_uq<hi:lo>;
temp = ((temp << 8)
- | ((ra_ub < rb_ub) ? Ra.uq<hi:lo>
- : Rb.uq<hi:lo>));
+ | ((ra_ub < rb_ub) ? Ra_uq<hi:lo>
+ : Rb_uq<hi:lo>));
hi -= 8;
lo -= 8;
}
int hi = 63;
int lo = 48;
for (int i = 3; i >= 0; --i) {
- uint16_t ra_sw = Ra.uq<hi:lo>;
- uint16_t rb_sw = Rb.uq<hi:lo>;
+ uint16_t ra_sw = Ra_uq<hi:lo>;
+ uint16_t rb_sw = Rb_uq<hi:lo>;
temp = ((temp << 16)
- | ((ra_sw < rb_sw) ? Ra.uq<hi:lo>
- : Rb.uq<hi:lo>));
+ | ((ra_sw < rb_sw) ? Ra_uq<hi:lo>
+ : Rb_uq<hi:lo>));
hi -= 16;
lo -= 16;
}
int hi = 63;
int lo = 56;
for (int i = 7; i >= 0; --i) {
- uint8_t ra_ub = Ra.uq<hi:lo>;
- uint8_t rb_ub = Rb.uq<hi:lo>;
+ uint8_t ra_ub = Ra_uq<hi:lo>;
+ uint8_t rb_ub = Rb_uq<hi:lo>;
temp = ((temp << 8)
- | ((ra_ub > rb_ub) ? Ra.uq<hi:lo>
- : Rb.uq<hi:lo>));
+ | ((ra_ub > rb_ub) ? Ra_uq<hi:lo>
+ : Rb_uq<hi:lo>));
hi -= 8;
lo -= 8;
}
int hi = 63;
int lo = 48;
for (int i = 3; i >= 0; --i) {
- uint16_t ra_uw = Ra.uq<hi:lo>;
- uint16_t rb_uw = Rb.uq<hi:lo>;
+ uint16_t ra_uw = Ra_uq<hi:lo>;
+ uint16_t rb_uw = Rb_uq<hi:lo>;
temp = ((temp << 16)
- | ((ra_uw > rb_uw) ? Ra.uq<hi:lo>
- : Rb.uq<hi:lo>));
+ | ((ra_uw > rb_uw) ? Ra_uq<hi:lo>
+ : Rb_uq<hi:lo>));
hi -= 16;
lo -= 16;
}
int hi = 63;
int lo = 56;
for (int i = 7; i >= 0; --i) {
- int8_t ra_sb = Ra.uq<hi:lo>;
- int8_t rb_sb = Rb.uq<hi:lo>;
+ int8_t ra_sb = Ra_uq<hi:lo>;
+ int8_t rb_sb = Rb_uq<hi:lo>;
temp = ((temp << 8)
- | ((ra_sb > rb_sb) ? Ra.uq<hi:lo>
- : Rb.uq<hi:lo>));
+ | ((ra_sb > rb_sb) ? Ra_uq<hi:lo>
+ : Rb_uq<hi:lo>));
hi -= 8;
lo -= 8;
}
int hi = 63;
int lo = 48;
for (int i = 3; i >= 0; --i) {
- int16_t ra_sw = Ra.uq<hi:lo>;
- int16_t rb_sw = Rb.uq<hi:lo>;
+ int16_t ra_sw = Ra_uq<hi:lo>;
+ int16_t rb_sw = Rb_uq<hi:lo>;
temp = ((temp << 16)
- | ((ra_sw > rb_sw) ? Ra.uq<hi:lo>
- : Rb.uq<hi:lo>));
+ | ((ra_sw > rb_sw) ? Ra_uq<hi:lo>
+ : Rb_uq<hi:lo>));
hi -= 16;
lo -= 16;
}
format BasicOperateWithNopCheck {
0x70: decode RB {
- 31: ftoit({{ Rc = Fa.uq; }}, FloatCvtOp);
+ 31: ftoit({{ Rc = Fa_uq; }}, FloatCvtOp);
}
0x78: decode RB {
- 31: ftois({{ Rc.sl = t_to_s(Fa.uq); }},
+ 31: ftois({{ Rc_sl = t_to_s(Fa_uq); }},
FloatCvtOp);
}
}
format CondBranch {
0x39: beq({{ cond = (Ra == 0); }});
0x3d: bne({{ cond = (Ra != 0); }});
- 0x3e: bge({{ cond = (Ra.sq >= 0); }});
- 0x3f: bgt({{ cond = (Ra.sq > 0); }});
- 0x3b: ble({{ cond = (Ra.sq <= 0); }});
- 0x3a: blt({{ cond = (Ra.sq < 0); }});
+ 0x3e: bge({{ cond = (Ra_sq >= 0); }});
+ 0x3f: bgt({{ cond = (Ra_sq > 0); }});
+ 0x3b: ble({{ cond = (Ra_sq <= 0); }});
+ 0x3a: blt({{ cond = (Ra_sq < 0); }});
0x38: blbc({{ cond = ((Ra & 1) == 0); }});
0x3c: blbs({{ cond = ((Ra & 1) == 1); }});
0x4: decode RB {
31: decode FP_FULLFUNC {
format BasicOperateWithNopCheck {
- 0x004: itofs({{ Fc.uq = s_to_t(Ra.ul); }}, FloatCvtOp);
- 0x024: itoft({{ Fc.uq = Ra.uq; }}, FloatCvtOp);
+ 0x004: itofs({{ Fc_uq = s_to_t(Ra_ul); }}, FloatCvtOp);
+ 0x024: itoft({{ Fc_uq = Ra_uq; }}, FloatCvtOp);
0x014: FailUnimpl::itoff(); // VAX-format conversion
}
}
}}, FloatSqrtOp);
#else
0x0b: sqrts({{
- if (Fb.sf < 0.0)
+ if (Fb_sf < 0.0)
fault = new ArithmeticFault;
- Fc.sf = sqrt(Fb.sf);
+ Fc_sf = sqrt(Fb_sf);
}}, FloatSqrtOp);
#endif
0x2b: sqrtt({{
0x02: muls({{ Fc = Fa * Fb; }}, FloatMultOp);
0x03: divs({{ Fc = Fa / Fb; }}, FloatDivOp);
#else
- 0x00: adds({{ Fc.sf = Fa.sf + Fb.sf; }});
- 0x01: subs({{ Fc.sf = Fa.sf - Fb.sf; }});
- 0x02: muls({{ Fc.sf = Fa.sf * Fb.sf; }}, FloatMultOp);
- 0x03: divs({{ Fc.sf = Fa.sf / Fb.sf; }}, FloatDivOp);
+ 0x00: adds({{ Fc_sf = Fa_sf + Fb_sf; }});
+ 0x01: subs({{ Fc_sf = Fa_sf - Fb_sf; }});
+ 0x02: muls({{ Fc_sf = Fa_sf * Fb_sf; }}, FloatMultOp);
+ 0x03: divs({{ Fc_sf = Fa_sf / Fb_sf; }}, FloatDivOp);
#endif
0x20: addt({{ Fc = Fa + Fb; }});
0x2f: decode FP_ROUNDMODE {
format FPFixedRounding {
// "chopped" i.e. round toward zero
- 0: cvttq({{ Fc.sq = (int64_t)trunc(Fb); }},
+ 0: cvttq({{ Fc_sq = (int64_t)trunc(Fb); }},
Chopped);
// round to minus infinity
- 1: cvttq({{ Fc.sq = (int64_t)floor(Fb); }},
+ 1: cvttq({{ Fc_sq = (int64_t)floor(Fb); }},
MinusInfinity);
}
- default: cvttq({{ Fc.sq = (int64_t)nearbyint(Fb); }});
+ default: cvttq({{ Fc_sq = (int64_t)nearbyint(Fb); }});
}
// The cvtts opcode is overloaded to be cvtst if the trap
format BasicOperateWithNopCheck {
// trap on denorm version "cvtst/s" is
// simulated same as cvtst
- 0x2ac, 0x6ac: cvtst({{ Fc = Fb.sf; }});
+ 0x2ac, 0x6ac: cvtst({{ Fc = Fb_sf; }});
}
- default: cvtts({{ Fc.sf = Fb; }});
+ default: cvtts({{ Fc_sf = Fb; }});
}
// The trapping mode for integer-to-FP conversions
// allowed. The full set of rounding modes are
// supported though.
0x3c: decode FP_TRAPMODE {
- 0,7: cvtqs({{ Fc.sf = Fb.sq; }});
+ 0,7: cvtqs({{ Fc_sf = Fb_sq; }});
}
0x3e: decode FP_TRAPMODE {
- 0,7: cvtqt({{ Fc = Fb.sq; }});
+ 0,7: cvtqt({{ Fc = Fb_sq; }});
}
}
}
0x17: decode FP_FULLFUNC {
format BasicOperateWithNopCheck {
0x010: cvtlq({{
- Fc.sl = (Fb.uq<63:62> << 30) | Fb.uq<58:29>;
+ Fc_sl = (Fb_uq<63:62> << 30) | Fb_uq<58:29>;
}});
0x030: cvtql({{
- Fc.uq = (Fb.uq<31:30> << 62) | (Fb.uq<29:0> << 29);
+ Fc_uq = (Fb_uq<31:30> << 62) | (Fb_uq<29:0> << 29);
}});
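// cvtlq and cvtql above are exact inverses: a longword held in an
// FP register keeps its bits<31:30> at <63:62> and bits<29:0> at
// <58:29> (the Alpha in-register format for a memory longword).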
// We treat the precise & imprecise trapping versions of
// To avoid overflow, all the upper 32 bits must match
// the sign bit of the lower 32. We code this as
// checking the upper 33 bits for all 0s or all 1s.
- uint64_t sign_bits = Fb.uq<63:31>;
+ uint64_t sign_bits = Fb_uq<63:31>;
if (sign_bits != 0 && sign_bits != mask(33))
fault = new IntegerOverflowFault;
- Fc.uq = (Fb.uq<31:30> << 62) | (Fb.uq<29:0> << 29);
+ Fc_uq = (Fb_uq<31:30> << 62) | (Fb_uq<29:0> << 29);
}});
0x020: cpys({{ // copy sign
- Fc.uq = (Fa.uq<63:> << 63) | Fb.uq<62:0>;
+ Fc_uq = (Fa_uq<63:> << 63) | Fb_uq<62:0>;
}});
0x021: cpysn({{ // copy sign negated
- Fc.uq = (~Fa.uq<63:> << 63) | Fb.uq<62:0>;
+ Fc_uq = (~Fa_uq<63:> << 63) | Fb_uq<62:0>;
}});
0x022: cpyse({{ // copy sign and exponent
- Fc.uq = (Fa.uq<63:52> << 52) | Fb.uq<51:0>;
+ Fc_uq = (Fa_uq<63:52> << 52) | Fb_uq<51:0>;
}});
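// In the IEEE T-float layout, bit <63> is the sign and <62:52> the
// exponent, so cpyse's <63:52> field is exactly sign plus exponent
// from Fa, with the fraction <51:0> supplied by Fb.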
0x02a: fcmoveq({{ Fc = (Fa == 0) ? Fb : Fc; }});
0x02e: fcmovle({{ Fc = (Fa <= 0) ? Fb : Fc; }});
0x02f: fcmovgt({{ Fc = (Fa > 0) ? Fb : Fc; }});
- 0x024: mt_fpcr({{ FPCR = Fa.uq; }}, IsIprAccess);
- 0x025: mf_fpcr({{ Fa.uq = FPCR; }}, IsIprAccess);
+ 0x024: mt_fpcr({{ FPCR = Fa_uq; }}, IsIprAccess);
+ 0x025: mf_fpcr({{ Fa_uq = FPCR; }}, IsIprAccess);
}
}
0: OpcdecFault::hw_st_quad();
1: decode HW_LDST_QUAD {
format HwLoad {
- 0: hw_ld({{ EA = (Rb + disp) & ~3; }}, {{ Ra = Mem.ul; }},
+ 0: hw_ld({{ EA = (Rb + disp) & ~3; }}, {{ Ra = Mem_ul; }},
L, IsSerializing, IsSerializeBefore);
- 1: hw_ld({{ EA = (Rb + disp) & ~7; }}, {{ Ra = Mem.uq; }},
+ 1: hw_ld({{ EA = (Rb + disp) & ~7; }}, {{ Ra = Mem_uq; }},
Q, IsSerializing, IsSerializeBefore);
}
}
1: decode HW_LDST_COND {
0: decode HW_LDST_QUAD {
0: hw_st({{ EA = (Rb + disp) & ~3; }},
- {{ Mem.ul = Ra<31:0>; }}, L, IsSerializing, IsSerializeBefore);
+ {{ Mem_ul = Ra<31:0>; }}, L, IsSerializing, IsSerializeBefore);
1: hw_st({{ EA = (Rb + disp) & ~7; }},
- {{ Mem.uq = Ra.uq; }}, Q, IsSerializing, IsSerializeBefore);
+ {{ Mem_uq = Ra_uq; }}, Q, IsSerializing, IsSerializeBefore);
}
1: FailUnimpl::hw_st_cond();
# generate immediate version by substituting 'imm'
# note that imm takes no extension, so we extend
# the regexp to replace any extension as well
- imm_code = re.sub(r'Rb_or_imm(\.\w+)?', 'imm', orig_code)
+ imm_code = re.sub(r'Rb_or_imm(_\w+)?', 'imm', orig_code)
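# A minimal sketch of the substitution on a hypothetical input: any
# operand suffix on Rb_or_imm is folded into the bare 'imm' literal,
# e.g.
#   re.sub(r'Rb_or_imm(_\w+)?', 'imm', 'Rc_sl = Ra_sl + Rb_or_imm_sl;')
# yields 'Rc_sl = Ra_sl + imm;'.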
# generate declaration for register version
iop = InstObjParams(name, Name, 'AlphaStaticInst', code, opt_flags)
def operands {{
# Int regs default to unsigned, but code should not count on this.
# For clarity, descriptions that depend on unsigned behavior should
- # explicitly specify '.uq'.
+ # explicitly specify '_uq'.
'Ra': ('IntReg', 'uq', 'PALMODE ? reg_redir[RA] : RA',
'IsInteger', 1),
'Rb': ('IntReg', 'uq', 'PALMODE ? reg_redir[RB] : RB',
ArmISA::TLB::MustBeOne;
EA = Op1 + Op2 * 2
'''
- accCode = 'NPC = PC + 2 * (Mem.uh);\n'
+ accCode = 'NPC = PC + 2 * (Mem_uh);\n'
mnem = "tbh"
else:
eaCode = '''
ArmISA::TLB::MustBeOne;
EA = Op1 + Op2
'''
- accCode = 'NPC = PC + 2 * (Mem.ub)'
+ accCode = 'NPC = PC + 2 * (Mem_ub)'
mnem = "tbb"
iop = InstObjParams(mnem, mnem.capitalize(), "BranchRegReg",
{'ea_code': eaCode,
buildRegDataInst("qadd", '''
int32_t midRes;
- resTemp = saturateOp<32>(midRes, Op1.sw, Op2.sw);
+ resTemp = saturateOp<32>(midRes, Op1_sw, Op2_sw);
Dest = midRes;
''', flagType="saturate", buildNonCc=False)
buildRegDataInst("qadd16", '''
for (unsigned i = 0; i < 2; i++) {
int high = (i + 1) * 16 - 1;
int low = i * 16;
- int64_t arg1 = sext<16>(bits(Op1.sw, high, low));
- int64_t arg2 = sext<16>(bits(Op2.sw, high, low));
+ int64_t arg1 = sext<16>(bits(Op1_sw, high, low));
+ int64_t arg2 = sext<16>(bits(Op2_sw, high, low));
saturateOp<16>(midRes, arg1, arg2);
replaceBits(resTemp, high, low, midRes);
}
for (unsigned i = 0; i < 4; i++) {
int high = (i + 1) * 8 - 1;
int low = i * 8;
- int64_t arg1 = sext<8>(bits(Op1.sw, high, low));
- int64_t arg2 = sext<8>(bits(Op2.sw, high, low));
+ int64_t arg1 = sext<8>(bits(Op1_sw, high, low));
+ int64_t arg2 = sext<8>(bits(Op2_sw, high, low));
saturateOp<8>(midRes, arg1, arg2);
replaceBits(resTemp, high, low, midRes);
}
''', flagType="none", buildCc=False)
buildRegDataInst("qdadd", '''
int32_t midRes;
- resTemp = saturateOp<32>(midRes, Op2.sw, Op2.sw) |
- saturateOp<32>(midRes, Op1.sw, midRes);
+ resTemp = saturateOp<32>(midRes, Op2_sw, Op2_sw) |
+ saturateOp<32>(midRes, Op1_sw, midRes);
Dest = midRes;
''', flagType="saturate", buildNonCc=False)
buildRegDataInst("qsub", '''
int32_t midRes;
- resTemp = saturateOp<32>(midRes, Op1.sw, Op2.sw, true);
+ resTemp = saturateOp<32>(midRes, Op1_sw, Op2_sw, true);
Dest = midRes;
''', flagType="saturate")
buildRegDataInst("qsub16", '''
for (unsigned i = 0; i < 2; i++) {
int high = (i + 1) * 16 - 1;
int low = i * 16;
- int64_t arg1 = sext<16>(bits(Op1.sw, high, low));
- int64_t arg2 = sext<16>(bits(Op2.sw, high, low));
+ int64_t arg1 = sext<16>(bits(Op1_sw, high, low));
+ int64_t arg2 = sext<16>(bits(Op2_sw, high, low));
saturateOp<16>(midRes, arg1, arg2, true);
replaceBits(resTemp, high, low, midRes);
}
for (unsigned i = 0; i < 4; i++) {
int high = (i + 1) * 8 - 1;
int low = i * 8;
- int64_t arg1 = sext<8>(bits(Op1.sw, high, low));
- int64_t arg2 = sext<8>(bits(Op2.sw, high, low));
+ int64_t arg1 = sext<8>(bits(Op1_sw, high, low));
+ int64_t arg2 = sext<8>(bits(Op2_sw, high, low));
saturateOp<8>(midRes, arg1, arg2, true);
replaceBits(resTemp, high, low, midRes);
}
''', flagType="none", buildCc=False)
buildRegDataInst("qdsub", '''
int32_t midRes;
- resTemp = saturateOp<32>(midRes, Op2.sw, Op2.sw) |
- saturateOp<32>(midRes, Op1.sw, midRes, true);
+ resTemp = saturateOp<32>(midRes, Op2_sw, Op2_sw) |
+ saturateOp<32>(midRes, Op1_sw, midRes, true);
Dest = midRes;
''', flagType="saturate", buildNonCc=False)
buildRegDataInst("qasx", '''
int32_t midRes;
- int64_t arg1Low = sext<16>(bits(Op1.sw, 15, 0));
- int64_t arg1High = sext<16>(bits(Op1.sw, 31, 16));
- int64_t arg2Low = sext<16>(bits(Op2.sw, 15, 0));
- int64_t arg2High = sext<16>(bits(Op2.sw, 31, 16));
+ int64_t arg1Low = sext<16>(bits(Op1_sw, 15, 0));
+ int64_t arg1High = sext<16>(bits(Op1_sw, 31, 16));
+ int64_t arg2Low = sext<16>(bits(Op2_sw, 15, 0));
+ int64_t arg2High = sext<16>(bits(Op2_sw, 31, 16));
saturateOp<16>(midRes, arg1Low, arg2High, true);
replaceBits(resTemp, 15, 0, midRes);
saturateOp<16>(midRes, arg1High, arg2Low);
''', flagType="none", buildCc=False)
buildRegDataInst("qsax", '''
int32_t midRes;
- int64_t arg1Low = sext<16>(bits(Op1.sw, 15, 0));
- int64_t arg1High = sext<16>(bits(Op1.sw, 31, 16));
- int64_t arg2Low = sext<16>(bits(Op2.sw, 15, 0));
- int64_t arg2High = sext<16>(bits(Op2.sw, 31, 16));
+ int64_t arg1Low = sext<16>(bits(Op1_sw, 15, 0));
+ int64_t arg1High = sext<16>(bits(Op1_sw, 31, 16));
+ int64_t arg2Low = sext<16>(bits(Op2_sw, 15, 0));
+ int64_t arg2High = sext<16>(bits(Op2_sw, 31, 16));
saturateOp<16>(midRes, arg1Low, arg2High);
replaceBits(resTemp, 15, 0, midRes);
saturateOp<16>(midRes, arg1High, arg2Low, true);
for (unsigned i = 0; i < 4; i++) {
int high = (i + 1) * 8 - 1;
int low = i * 8;
- int32_t midRes = sext<8>(bits(Op1.sw, high, low)) +
- sext<8>(bits(Op2.sw, high, low));
+ int32_t midRes = sext<8>(bits(Op1_sw, high, low)) +
+ sext<8>(bits(Op2_sw, high, low));
replaceBits(resTemp, high, low, midRes);
if (midRes >= 0) {
geBits = geBits | (1 << i);
for (unsigned i = 0; i < 2; i++) {
int high = (i + 1) * 16 - 1;
int low = i * 16;
- int32_t midRes = sext<16>(bits(Op1.sw, high, low)) +
- sext<16>(bits(Op2.sw, high, low));
+ int32_t midRes = sext<16>(bits(Op1_sw, high, low)) +
+ sext<16>(bits(Op2_sw, high, low));
replaceBits(resTemp, high, low, midRes);
if (midRes >= 0) {
geBits = geBits | (0x3 << (i * 2));
for (unsigned i = 0; i < 4; i++) {
int high = (i + 1) * 8 - 1;
int low = i * 8;
- int32_t midRes = sext<8>(bits(Op1.sw, high, low)) -
- sext<8>(bits(Op2.sw, high, low));
+ int32_t midRes = sext<8>(bits(Op1_sw, high, low)) -
+ sext<8>(bits(Op2_sw, high, low));
replaceBits(resTemp, high, low, midRes);
if (midRes >= 0) {
geBits = geBits | (1 << i);
for (unsigned i = 0; i < 2; i++) {
int high = (i + 1) * 16 - 1;
int low = i * 16;
- int32_t midRes = sext<16>(bits(Op1.sw, high, low)) -
- sext<16>(bits(Op2.sw, high, low));
+ int32_t midRes = sext<16>(bits(Op1_sw, high, low)) -
+ sext<16>(bits(Op2_sw, high, low));
replaceBits(resTemp, high, low, midRes);
if (midRes >= 0) {
geBits = geBits | (0x3 << (i * 2));
buildRegDataInst("sasx", '''
int32_t midRes, geBits = 0;
resTemp = 0;
- int64_t arg1Low = sext<16>(bits(Op1.sw, 15, 0));
- int64_t arg1High = sext<16>(bits(Op1.sw, 31, 16));
- int64_t arg2Low = sext<16>(bits(Op2.sw, 15, 0));
- int64_t arg2High = sext<16>(bits(Op2.sw, 31, 16));
+ int64_t arg1Low = sext<16>(bits(Op1_sw, 15, 0));
+ int64_t arg1High = sext<16>(bits(Op1_sw, 31, 16));
+ int64_t arg2Low = sext<16>(bits(Op2_sw, 15, 0));
+ int64_t arg2High = sext<16>(bits(Op2_sw, 31, 16));
midRes = arg1Low - arg2High;
if (midRes >= 0) {
geBits = geBits | 0x3;
buildRegDataInst("ssax", '''
int32_t midRes, geBits = 0;
resTemp = 0;
- int64_t arg1Low = sext<16>(bits(Op1.sw, 15, 0));
- int64_t arg1High = sext<16>(bits(Op1.sw, 31, 16));
- int64_t arg2Low = sext<16>(bits(Op2.sw, 15, 0));
- int64_t arg2High = sext<16>(bits(Op2.sw, 31, 16));
+ int64_t arg1Low = sext<16>(bits(Op1_sw, 15, 0));
+ int64_t arg1High = sext<16>(bits(Op1_sw, 31, 16));
+ int64_t arg2Low = sext<16>(bits(Op2_sw, 15, 0));
+ int64_t arg2High = sext<16>(bits(Op2_sw, 31, 16));
midRes = arg1Low + arg2High;
if (midRes >= 0) {
geBits = geBits | 0x3;
int high = (i + 1) * 8 - 1;
int low = i * 8;
int32_t midRes =
- (uint64_t)(sext<8>(bits(Op1.sw, high, low)) +
- sext<8>(bits(Op2.sw, high, low))) >> 1;
+ (uint64_t)(sext<8>(bits(Op1_sw, high, low)) +
+ sext<8>(bits(Op2_sw, high, low))) >> 1;
replaceBits(resTemp, high, low, midRes);
}
Dest = resTemp;
int high = (i + 1) * 16 - 1;
int low = i * 16;
int32_t midRes =
- (uint64_t)(sext<16>(bits(Op1.sw, high, low)) +
- sext<16>(bits(Op2.sw, high, low))) >> 1;
+ (uint64_t)(sext<16>(bits(Op1_sw, high, low)) +
+ sext<16>(bits(Op2_sw, high, low))) >> 1;
replaceBits(resTemp, high, low, midRes);
}
Dest = resTemp;
int high = (i + 1) * 8 - 1;
int low = i * 8;
int32_t midRes =
- (uint64_t)(sext<8>(bits(Op1.sw, high, low)) -
- sext<8>(bits(Op2.sw, high, low))) >> 1;
+ (uint64_t)(sext<8>(bits(Op1_sw, high, low)) -
+ sext<8>(bits(Op2_sw, high, low))) >> 1;
replaceBits(resTemp, high, low, midRes);
}
Dest = resTemp;
int high = (i + 1) * 16 - 1;
int low = i * 16;
int32_t midRes =
- (uint64_t)(sext<16>(bits(Op1.sw, high, low)) -
- sext<16>(bits(Op2.sw, high, low))) >> 1;
+ (uint64_t)(sext<16>(bits(Op1_sw, high, low)) -
+ sext<16>(bits(Op2_sw, high, low))) >> 1;
replaceBits(resTemp, high, low, midRes);
}
Dest = resTemp;
buildRegDataInst("shasx", '''
int32_t midRes;
resTemp = 0;
- int64_t arg1Low = sext<16>(bits(Op1.sw, 15, 0));
- int64_t arg1High = sext<16>(bits(Op1.sw, 31, 16));
- int64_t arg2Low = sext<16>(bits(Op2.sw, 15, 0));
- int64_t arg2High = sext<16>(bits(Op2.sw, 31, 16));
+ int64_t arg1Low = sext<16>(bits(Op1_sw, 15, 0));
+ int64_t arg1High = sext<16>(bits(Op1_sw, 31, 16));
+ int64_t arg2Low = sext<16>(bits(Op2_sw, 15, 0));
+ int64_t arg2High = sext<16>(bits(Op2_sw, 31, 16));
midRes = (uint64_t)(arg1Low - arg2High) >> 1;
replaceBits(resTemp, 15, 0, midRes);
midRes = (arg1High + arg2Low) >> 1;
buildRegDataInst("shsax", '''
int32_t midRes;
resTemp = 0;
- int64_t arg1Low = sext<16>(bits(Op1.sw, 15, 0));
- int64_t arg1High = sext<16>(bits(Op1.sw, 31, 16));
- int64_t arg2Low = sext<16>(bits(Op2.sw, 15, 0));
- int64_t arg2High = sext<16>(bits(Op2.sw, 31, 16));
+ int64_t arg1Low = sext<16>(bits(Op1_sw, 15, 0));
+ int64_t arg1High = sext<16>(bits(Op1_sw, 31, 16));
+ int64_t arg2Low = sext<16>(bits(Op2_sw, 15, 0));
+ int64_t arg2High = sext<16>(bits(Op2_sw, 31, 16));
midRes = (uint64_t)(arg1Low + arg2High) >> 1;
replaceBits(resTemp, 15, 0, midRes);
midRes = (uint64_t)(arg1High - arg2Low) >> 1;
''', flagType="none", buildCc=False)
buildRegDataInst("uqasx", '''
uint32_t midRes;
- uint64_t arg1Low = bits(Op1.sw, 15, 0);
- uint64_t arg1High = bits(Op1.sw, 31, 16);
- uint64_t arg2Low = bits(Op2.sw, 15, 0);
- uint64_t arg2High = bits(Op2.sw, 31, 16);
+ uint64_t arg1Low = bits(Op1_sw, 15, 0);
+ uint64_t arg1High = bits(Op1_sw, 31, 16);
+ uint64_t arg2Low = bits(Op2_sw, 15, 0);
+ uint64_t arg2High = bits(Op2_sw, 31, 16);
uSaturateOp<16>(midRes, arg1Low, arg2High, true);
replaceBits(resTemp, 15, 0, midRes);
uSaturateOp<16>(midRes, arg1High, arg2Low);
''', flagType="none", buildCc=False)
buildRegDataInst("uqsax", '''
uint32_t midRes;
- uint64_t arg1Low = bits(Op1.sw, 15, 0);
- uint64_t arg1High = bits(Op1.sw, 31, 16);
- uint64_t arg2Low = bits(Op2.sw, 15, 0);
- uint64_t arg2High = bits(Op2.sw, 31, 16);
+ uint64_t arg1Low = bits(Op1_sw, 15, 0);
+ uint64_t arg1High = bits(Op1_sw, 31, 16);
+ uint64_t arg2Low = bits(Op2_sw, 15, 0);
+ uint64_t arg2High = bits(Op2_sw, 31, 16);
uSaturateOp<16>(midRes, arg1Low, arg2High);
replaceBits(resTemp, 15, 0, midRes);
uSaturateOp<16>(midRes, arg1High, arg2Low, true);
buildRegDataInst("uasx", '''
int32_t midRes, geBits = 0;
resTemp = 0;
- int64_t arg1Low = bits(Op1.sw, 15, 0);
- int64_t arg1High = bits(Op1.sw, 31, 16);
- int64_t arg2Low = bits(Op2.sw, 15, 0);
- int64_t arg2High = bits(Op2.sw, 31, 16);
+ int64_t arg1Low = bits(Op1_sw, 15, 0);
+ int64_t arg1High = bits(Op1_sw, 31, 16);
+ int64_t arg2Low = bits(Op2_sw, 15, 0);
+ int64_t arg2High = bits(Op2_sw, 31, 16);
midRes = arg1Low - arg2High;
if (midRes >= 0) {
geBits = geBits | 0x3;
buildRegDataInst("usax", '''
int32_t midRes, geBits = 0;
resTemp = 0;
- int64_t arg1Low = bits(Op1.sw, 15, 0);
- int64_t arg1High = bits(Op1.sw, 31, 16);
- int64_t arg2Low = bits(Op2.sw, 15, 0);
- int64_t arg2High = bits(Op2.sw, 31, 16);
+ int64_t arg1Low = bits(Op1_sw, 15, 0);
+ int64_t arg1High = bits(Op1_sw, 31, 16);
+ int64_t arg2Low = bits(Op2_sw, 15, 0);
+ int64_t arg2High = bits(Op2_sw, 31, 16);
midRes = arg1Low + arg2High;
if (midRes >= 0x10000) {
geBits = geBits | 0x3;
buildRegDataInst("uhasx", '''
int32_t midRes;
resTemp = 0;
- int64_t arg1Low = bits(Op1.sw, 15, 0);
- int64_t arg1High = bits(Op1.sw, 31, 16);
- int64_t arg2Low = bits(Op2.sw, 15, 0);
- int64_t arg2High = bits(Op2.sw, 31, 16);
+ int64_t arg1Low = bits(Op1_sw, 15, 0);
+ int64_t arg1High = bits(Op1_sw, 31, 16);
+ int64_t arg2Low = bits(Op2_sw, 15, 0);
+ int64_t arg2High = bits(Op2_sw, 31, 16);
midRes = (arg1Low - arg2High) >> 1;
replaceBits(resTemp, 15, 0, midRes);
midRes = (arg1High + arg2Low) >> 1;
buildRegDataInst("uhsax", '''
int32_t midRes;
resTemp = 0;
- int64_t arg1Low = bits(Op1.sw, 15, 0);
- int64_t arg1High = bits(Op1.sw, 31, 16);
- int64_t arg2Low = bits(Op2.sw, 15, 0);
- int64_t arg2High = bits(Op2.sw, 31, 16);
+ int64_t arg1Low = bits(Op1_sw, 15, 0);
+ int64_t arg1High = bits(Op1_sw, 31, 16);
+ int64_t arg2Low = bits(Op2_sw, 15, 0);
+ int64_t arg2High = bits(Op2_sw, 31, 16);
midRes = (arg1Low + arg2High) >> 1;
replaceBits(resTemp, 15, 0, midRes);
midRes = (arg1High - arg2Low) >> 1;
let {{
sdivCode = '''
- if (Op2.sw == 0) {
+ if (Op2_sw == 0) {
if (((SCTLR)Sctlr).dz) {
#if FULL_SYSTEM
return new UndefinedInstruction;
#else
return new UndefinedInstruction(false, mnemonic);
#endif
}
- Dest.sw = 0;
- } else if (Op1.sw == INT_MIN && Op2.sw == -1) {
- Dest.sw = INT_MIN;
+ Dest_sw = 0;
+ } else if (Op1_sw == INT_MIN && Op2_sw == -1) {
+ Dest_sw = INT_MIN;
} else {
- Dest.sw = Op1.sw / Op2.sw;
+ Dest_sw = Op1_sw / Op2_sw;
}
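// INT_MIN / -1 is the one quotient that overflows signed 32-bit
// division (the true result, 2^31, is unrepresentable), so the code
// above pins it to INT_MIN instead of trapping.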
'''
sdivIop = InstObjParams("sdiv", "Sdiv", "RegRegRegOp",
exec_output = PredOpExecute.subst(sdivIop)
udivCode = '''
- if (Op2.uw == 0) {
+ if (Op2_uw == 0) {
if (((SCTLR)Sctlr).dz) {
#if FULL_SYSTEM
return new UndefinedInstruction;
#else
return new UndefinedInstruction(false, mnemonic);
#endif
}
- Dest.uw = 0;
+ Dest_uw = 0;
} else {
- Dest.uw = Op1.uw / Op2.uw;
+ Dest_uw = Op1_uw / Op2_uw;
}
'''
udivIop = InstObjParams("udiv", "Udiv", "RegRegRegOp",
exec_output += PredOpExecute.subst(vmrsApsrFpscrIop);
vmovImmSCode = vfpEnabledCheckCode + '''
- FpDest.uw = bits(imm, 31, 0);
+ FpDest_uw = bits(imm, 31, 0);
'''
vmovImmSIop = InstObjParams("vmov", "VmovImmS", "FpRegImmOp",
{ "code": vmovImmSCode,
exec_output += PredOpExecute.subst(vmovImmSIop);
vmovImmDCode = vfpEnabledCheckCode + '''
- FpDestP0.uw = bits(imm, 31, 0);
- FpDestP1.uw = bits(imm, 63, 32);
+ FpDestP0_uw = bits(imm, 31, 0);
+ FpDestP1_uw = bits(imm, 63, 32);
'''
vmovImmDIop = InstObjParams("vmov", "VmovImmD", "FpRegImmOp",
{ "code": vmovImmDCode,
exec_output += PredOpExecute.subst(vmovImmDIop);
vmovImmQCode = vfpEnabledCheckCode + '''
- FpDestP0.uw = bits(imm, 31, 0);
- FpDestP1.uw = bits(imm, 63, 32);
- FpDestP2.uw = bits(imm, 31, 0);
- FpDestP3.uw = bits(imm, 63, 32);
+ FpDestP0_uw = bits(imm, 31, 0);
+ FpDestP1_uw = bits(imm, 63, 32);
+ FpDestP2_uw = bits(imm, 31, 0);
+ FpDestP3_uw = bits(imm, 63, 32);
'''
vmovImmQIop = InstObjParams("vmov", "VmovImmQ", "FpRegImmOp",
{ "code": vmovImmQCode,
exec_output += PredOpExecute.subst(vmovImmQIop);
vmovRegSCode = vfpEnabledCheckCode + '''
- FpDest.uw = FpOp1.uw;
+ FpDest_uw = FpOp1_uw;
'''
vmovRegSIop = InstObjParams("vmov", "VmovRegS", "FpRegRegOp",
{ "code": vmovRegSCode,
exec_output += PredOpExecute.subst(vmovRegSIop);
vmovRegDCode = vfpEnabledCheckCode + '''
- FpDestP0.uw = FpOp1P0.uw;
- FpDestP1.uw = FpOp1P1.uw;
+ FpDestP0_uw = FpOp1P0_uw;
+ FpDestP1_uw = FpOp1P1_uw;
'''
vmovRegDIop = InstObjParams("vmov", "VmovRegD", "FpRegRegOp",
{ "code": vmovRegDCode,
exec_output += PredOpExecute.subst(vmovRegDIop);
vmovRegQCode = vfpEnabledCheckCode + '''
- FpDestP0.uw = FpOp1P0.uw;
- FpDestP1.uw = FpOp1P1.uw;
- FpDestP2.uw = FpOp1P2.uw;
- FpDestP3.uw = FpOp1P3.uw;
+ FpDestP0_uw = FpOp1P0_uw;
+ FpDestP1_uw = FpOp1P1_uw;
+ FpDestP2_uw = FpOp1P2_uw;
+ FpDestP3_uw = FpOp1P3_uw;
'''
vmovRegQIop = InstObjParams("vmov", "VmovRegQ", "FpRegRegOp",
{ "code": vmovRegQCode,
exec_output += PredOpExecute.subst(vmovRegQIop);
vmovCoreRegBCode = vfpEnabledCheckCode + '''
- FpDest.uw = insertBits(FpDest.uw, imm * 8 + 7, imm * 8, Op1.ub);
+ FpDest_uw = insertBits(FpDest_uw, imm * 8 + 7, imm * 8, Op1_ub);
'''
vmovCoreRegBIop = InstObjParams("vmov", "VmovCoreRegB", "FpRegRegImmOp",
{ "code": vmovCoreRegBCode,
exec_output += PredOpExecute.subst(vmovCoreRegBIop);
vmovCoreRegHCode = vfpEnabledCheckCode + '''
- FpDest.uw = insertBits(FpDest.uw, imm * 16 + 15, imm * 16, Op1.uh);
+ FpDest_uw = insertBits(FpDest_uw, imm * 16 + 15, imm * 16, Op1_uh);
'''
vmovCoreRegHIop = InstObjParams("vmov", "VmovCoreRegH", "FpRegRegImmOp",
{ "code": vmovCoreRegHCode,
exec_output += PredOpExecute.subst(vmovCoreRegHIop);
vmovCoreRegWCode = vfpEnabledCheckCode + '''
- FpDest.uw = Op1.uw;
+ FpDest_uw = Op1_uw;
'''
vmovCoreRegWIop = InstObjParams("vmov", "VmovCoreRegW", "FpRegRegOp",
{ "code": vmovCoreRegWCode,
vmovRegCoreUBCode = vfpEnabledCheckCode + '''
assert(imm < 4);
- Dest = bits(FpOp1.uw, imm * 8 + 7, imm * 8);
+ Dest = bits(FpOp1_uw, imm * 8 + 7, imm * 8);
'''
vmovRegCoreUBIop = InstObjParams("vmov", "VmovRegCoreUB", "FpRegRegImmOp",
{ "code": vmovRegCoreUBCode,
vmovRegCoreUHCode = vfpEnabledCheckCode + '''
assert(imm < 2);
- Dest = bits(FpOp1.uw, imm * 16 + 15, imm * 16);
+ Dest = bits(FpOp1_uw, imm * 16 + 15, imm * 16);
'''
vmovRegCoreUHIop = InstObjParams("vmov", "VmovRegCoreUH", "FpRegRegImmOp",
{ "code": vmovRegCoreUHCode,
vmovRegCoreSBCode = vfpEnabledCheckCode + '''
assert(imm < 4);
- Dest = sext<8>(bits(FpOp1.uw, imm * 8 + 7, imm * 8));
+ Dest = sext<8>(bits(FpOp1_uw, imm * 8 + 7, imm * 8));
'''
vmovRegCoreSBIop = InstObjParams("vmov", "VmovRegCoreSB", "FpRegRegImmOp",
{ "code": vmovRegCoreSBCode,
vmovRegCoreSHCode = vfpEnabledCheckCode + '''
assert(imm < 2);
- Dest = sext<16>(bits(FpOp1.uw, imm * 16 + 15, imm * 16));
+ Dest = sext<16>(bits(FpOp1_uw, imm * 16 + 15, imm * 16));
'''
vmovRegCoreSHIop = InstObjParams("vmov", "VmovRegCoreSH", "FpRegRegImmOp",
{ "code": vmovRegCoreSHCode,
exec_output += PredOpExecute.subst(vmovRegCoreSHIop);
vmovRegCoreWCode = vfpEnabledCheckCode + '''
- Dest = FpOp1.uw;
+ Dest = FpOp1_uw;
'''
vmovRegCoreWIop = InstObjParams("vmov", "VmovRegCoreW", "FpRegRegOp",
{ "code": vmovRegCoreWCode,
exec_output += PredOpExecute.subst(vmovRegCoreWIop);
vmov2Reg2CoreCode = vfpEnabledCheckCode + '''
- FpDestP0.uw = Op1.uw;
- FpDestP1.uw = Op2.uw;
+ FpDestP0_uw = Op1_uw;
+ FpDestP1_uw = Op2_uw;
'''
vmov2Reg2CoreIop = InstObjParams("vmov", "Vmov2Reg2Core", "FpRegRegRegOp",
{ "code": vmov2Reg2CoreCode,
exec_output += PredOpExecute.subst(vmov2Reg2CoreIop);
vmov2Core2RegCode = vfpEnabledCheckCode + '''
- Dest.uw = FpOp2P0.uw;
- Op1.uw = FpOp2P1.uw;
+ Dest_uw = FpOp2P0_uw;
+ Op1_uw = FpOp2P1_uw;
'''
vmov2Core2RegIop = InstObjParams("vmov", "Vmov2Core2Reg", "FpRegRegRegOp",
{ "code": vmov2Core2RegCode,
doubleCode = vfpEnabledCheckCode + '''
FPSCR fpscr = (FPSCR) FpscrExc;
double dest = %(op)s;
- FpDestP0.uw = dblLow(dest);
- FpDestP1.uw = dblHi(dest);
+ FpDestP0_uw = dblLow(dest);
+ FpDestP1_uw = dblHi(dest);
FpscrExc = fpscr;
'''
doubleBinOp = '''
- binaryOp(fpscr, dbl(FpOp1P0.uw, FpOp1P1.uw),
- dbl(FpOp2P0.uw, FpOp2P1.uw),
+ binaryOp(fpscr, dbl(FpOp1P0_uw, FpOp1P1_uw),
+ dbl(FpOp2P0_uw, FpOp2P1_uw),
%(func)s, fpscr.fz, fpscr.dn, fpscr.rMode);
'''
doubleUnaryOp = '''
- unaryOp(fpscr, dbl(FpOp1P0.uw, FpOp1P1.uw), %(func)s,
+ unaryOp(fpscr, dbl(FpOp1P0_uw, FpOp1P1_uw), %(func)s,
fpscr.fz, fpscr.rMode)
'''
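# A minimal sketch (an assumption, not the actual gem5 helpers) of
# what dbl()/dblLow()/dblHi() in the blobs above compute: a VFP
# double travels as two 32-bit words, so they just split or
# reassemble the raw IEEE-754 bit pattern, e.g. in Python:
#   import struct
#   def dbl(lo, hi):  # two 32-bit words -> double
#       return struct.unpack('<d', struct.pack('<II', lo, hi))[0]
#   def dblLow(d):    # double -> low word
#       return struct.unpack('<II', struct.pack('<d', d))[0]
#   def dblHi(d):     # double -> high word
#       return struct.unpack('<II', struct.pack('<d', d))[1]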
exec_output += PredOpExecute.subst(iop)
buildSimpleUnaryFpOp("vneg", "Vneg", "FpRegRegOp", "SimdFloatMiscOp",
- "-FpOp1", "-dbl(FpOp1P0.uw, FpOp1P1.uw)")
+ "-FpOp1", "-dbl(FpOp1P0_uw, FpOp1P1_uw)")
buildSimpleUnaryFpOp("vabs", "Vabs", "FpRegRegOp", "SimdFloatMiscOp",
- "fabsf(FpOp1)", "fabs(dbl(FpOp1P0.uw, FpOp1P1.uw))")
+ "fabsf(FpOp1)", "fabs(dbl(FpOp1P0_uw, FpOp1P1_uw))")
}};
let {{
vmlaDCode = vfpEnabledCheckCode + '''
FPSCR fpscr = (FPSCR) FpscrExc;
- double mid = binaryOp(fpscr, dbl(FpOp1P0.uw, FpOp1P1.uw),
- dbl(FpOp2P0.uw, FpOp2P1.uw),
+ double mid = binaryOp(fpscr, dbl(FpOp1P0_uw, FpOp1P1_uw),
+ dbl(FpOp2P0_uw, FpOp2P1_uw),
fpMulD, fpscr.fz, fpscr.dn, fpscr.rMode);
- double dest = binaryOp(fpscr, dbl(FpDestP0.uw, FpDestP1.uw),
+ double dest = binaryOp(fpscr, dbl(FpDestP0_uw, FpDestP1_uw),
mid, fpAddD, fpscr.fz,
fpscr.dn, fpscr.rMode);
- FpDestP0.uw = dblLow(dest);
- FpDestP1.uw = dblHi(dest);
+ FpDestP0_uw = dblLow(dest);
+ FpDestP1_uw = dblHi(dest);
FpscrExc = fpscr;
'''
vmlaDIop = InstObjParams("vmlad", "VmlaD", "FpRegRegRegOp",
vmlsDCode = vfpEnabledCheckCode + '''
FPSCR fpscr = (FPSCR) FpscrExc;
- double mid = binaryOp(fpscr, dbl(FpOp1P0.uw, FpOp1P1.uw),
- dbl(FpOp2P0.uw, FpOp2P1.uw),
+ double mid = binaryOp(fpscr, dbl(FpOp1P0_uw, FpOp1P1_uw),
+ dbl(FpOp2P0_uw, FpOp2P1_uw),
fpMulD, fpscr.fz, fpscr.dn, fpscr.rMode);
- double dest = binaryOp(fpscr, dbl(FpDestP0.uw, FpDestP1.uw),
+ double dest = binaryOp(fpscr, dbl(FpDestP0_uw, FpDestP1_uw),
-mid, fpAddD, fpscr.fz,
fpscr.dn, fpscr.rMode);
- FpDestP0.uw = dblLow(dest);
- FpDestP1.uw = dblHi(dest);
+ FpDestP0_uw = dblLow(dest);
+ FpDestP1_uw = dblHi(dest);
FpscrExc = fpscr;
'''
vmlsDIop = InstObjParams("vmlsd", "VmlsD", "FpRegRegRegOp",
vnmlaDCode = vfpEnabledCheckCode + '''
FPSCR fpscr = (FPSCR) FpscrExc;
- double mid = binaryOp(fpscr, dbl(FpOp1P0.uw, FpOp1P1.uw),
- dbl(FpOp2P0.uw, FpOp2P1.uw),
+ double mid = binaryOp(fpscr, dbl(FpOp1P0_uw, FpOp1P1_uw),
+ dbl(FpOp2P0_uw, FpOp2P1_uw),
fpMulD, fpscr.fz, fpscr.dn, fpscr.rMode);
- double dest = binaryOp(fpscr, -dbl(FpDestP0.uw, FpDestP1.uw),
+ double dest = binaryOp(fpscr, -dbl(FpDestP0_uw, FpDestP1_uw),
-mid, fpAddD, fpscr.fz,
fpscr.dn, fpscr.rMode);
- FpDestP0.uw = dblLow(dest);
- FpDestP1.uw = dblHi(dest);
+ FpDestP0_uw = dblLow(dest);
+ FpDestP1_uw = dblHi(dest);
FpscrExc = fpscr;
'''
vnmlaDIop = InstObjParams("vnmlad", "VnmlaD", "FpRegRegRegOp",
vnmlsDCode = vfpEnabledCheckCode + '''
FPSCR fpscr = (FPSCR) FpscrExc;
- double mid = binaryOp(fpscr, dbl(FpOp1P0.uw, FpOp1P1.uw),
- dbl(FpOp2P0.uw, FpOp2P1.uw),
+ double mid = binaryOp(fpscr, dbl(FpOp1P0_uw, FpOp1P1_uw),
+ dbl(FpOp2P0_uw, FpOp2P1_uw),
fpMulD, fpscr.fz, fpscr.dn, fpscr.rMode);
- double dest = binaryOp(fpscr, -dbl(FpDestP0.uw, FpDestP1.uw),
+ double dest = binaryOp(fpscr, -dbl(FpDestP0_uw, FpDestP1_uw),
mid, fpAddD, fpscr.fz,
fpscr.dn, fpscr.rMode);
- FpDestP0.uw = dblLow(dest);
- FpDestP1.uw = dblHi(dest);
+ FpDestP0_uw = dblLow(dest);
+ FpDestP1_uw = dblHi(dest);
FpscrExc = fpscr;
'''
vnmlsDIop = InstObjParams("vnmlsd", "VnmlsD", "FpRegRegRegOp",
vnmulDCode = vfpEnabledCheckCode + '''
FPSCR fpscr = (FPSCR) FpscrExc;
- double dest = -binaryOp(fpscr, dbl(FpOp1P0.uw, FpOp1P1.uw),
- dbl(FpOp2P0.uw, FpOp2P1.uw),
+ double dest = -binaryOp(fpscr, dbl(FpOp1P0_uw, FpOp1P1_uw),
+ dbl(FpOp2P0_uw, FpOp2P1_uw),
fpMulD, fpscr.fz, fpscr.dn,
fpscr.rMode);
- FpDestP0.uw = dblLow(dest);
- FpDestP1.uw = dblHi(dest);
+ FpDestP0_uw = dblLow(dest);
+ FpDestP1_uw = dblHi(dest);
FpscrExc = fpscr;
'''
vnmulDIop = InstObjParams("vnmuld", "VnmulD", "FpRegRegRegOp",
vcvtUIntFpSCode = vfpEnabledCheckCode + '''
FPSCR fpscr = (FPSCR) FpscrExc;
VfpSavedState state = prepFpState(fpscr.rMode);
- __asm__ __volatile__("" : "=m" (FpOp1.uw) : "m" (FpOp1.uw));
- FpDest = FpOp1.uw;
+ __asm__ __volatile__("" : "=m" (FpOp1_uw) : "m" (FpOp1_uw));
+ FpDest = FpOp1_uw;
__asm__ __volatile__("" :: "m" (FpDest));
finishVfp(fpscr, state, fpscr.fz);
FpscrExc = fpscr;
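// The empty __asm__ __volatile__ statements with "m" constraints are
// compiler barriers: they keep the host compiler from hoisting the
// conversion out of the prepFpState()/finishVfp() window, so it runs
// under the guest rounding mode.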
vcvtUIntFpDCode = vfpEnabledCheckCode + '''
FPSCR fpscr = (FPSCR) FpscrExc;
VfpSavedState state = prepFpState(fpscr.rMode);
- __asm__ __volatile__("" : "=m" (FpOp1P0.uw) : "m" (FpOp1P0.uw));
- double cDest = (uint64_t)FpOp1P0.uw;
+ __asm__ __volatile__("" : "=m" (FpOp1P0_uw) : "m" (FpOp1P0_uw));
+ double cDest = (uint64_t)FpOp1P0_uw;
__asm__ __volatile__("" :: "m" (cDest));
finishVfp(fpscr, state, fpscr.fz);
- FpDestP0.uw = dblLow(cDest);
- FpDestP1.uw = dblHi(cDest);
+ FpDestP0_uw = dblLow(cDest);
+ FpDestP1_uw = dblHi(cDest);
FpscrExc = fpscr;
'''
vcvtUIntFpDIop = InstObjParams("vcvt", "VcvtUIntFpD", "FpRegRegOp",
vcvtSIntFpSCode = vfpEnabledCheckCode + '''
FPSCR fpscr = (FPSCR) FpscrExc;
VfpSavedState state = prepFpState(fpscr.rMode);
- __asm__ __volatile__("" : "=m" (FpOp1.sw) : "m" (FpOp1.sw));
- FpDest = FpOp1.sw;
+ __asm__ __volatile__("" : "=m" (FpOp1_sw) : "m" (FpOp1_sw));
+ FpDest = FpOp1_sw;
__asm__ __volatile__("" :: "m" (FpDest));
finishVfp(fpscr, state, fpscr.fz);
FpscrExc = fpscr;
vcvtSIntFpDCode = vfpEnabledCheckCode + '''
FPSCR fpscr = (FPSCR) FpscrExc;
VfpSavedState state = prepFpState(fpscr.rMode);
- __asm__ __volatile__("" : "=m" (FpOp1P0.sw) : "m" (FpOp1P0.sw));
- double cDest = FpOp1P0.sw;
+ __asm__ __volatile__("" : "=m" (FpOp1P0_sw) : "m" (FpOp1P0_sw));
+ double cDest = FpOp1P0_sw;
__asm__ __volatile__("" :: "m" (cDest));
finishVfp(fpscr, state, fpscr.fz);
- FpDestP0.uw = dblLow(cDest);
- FpDestP1.uw = dblHi(cDest);
+ FpDestP0_uw = dblLow(cDest);
+ FpDestP1_uw = dblHi(cDest);
FpscrExc = fpscr;
'''
vcvtSIntFpDIop = InstObjParams("vcvt", "VcvtSIntFpD", "FpRegRegOp",
VfpSavedState state = prepFpState(fpscr.rMode);
vfpFlushToZero(fpscr, FpOp1);
__asm__ __volatile__("" : "=m" (FpOp1) : "m" (FpOp1));
- FpDest.uw = vfpFpSToFixed(FpOp1, false, false, 0, false);
- __asm__ __volatile__("" :: "m" (FpDest.uw));
+ FpDest_uw = vfpFpSToFixed(FpOp1, false, false, 0, false);
+ __asm__ __volatile__("" :: "m" (FpDest_uw));
finishVfp(fpscr, state, fpscr.fz);
FpscrExc = fpscr;
'''
vcvtFpUIntDRCode = vfpEnabledCheckCode + '''
FPSCR fpscr = (FPSCR) FpscrExc;
- double cOp1 = dbl(FpOp1P0.uw, FpOp1P1.uw);
+ double cOp1 = dbl(FpOp1P0_uw, FpOp1P1_uw);
vfpFlushToZero(fpscr, cOp1);
VfpSavedState state = prepFpState(fpscr.rMode);
__asm__ __volatile__("" : "=m" (cOp1) : "m" (cOp1));
uint64_t result = vfpFpDToFixed(cOp1, false, false, 0, false);
__asm__ __volatile__("" :: "m" (result));
finishVfp(fpscr, state, fpscr.fz);
- FpDestP0.uw = result;
+ FpDestP0_uw = result;
FpscrExc = fpscr;
'''
vcvtFpUIntDRIop = InstObjParams("vcvtr", "VcvtFpUIntDR", "FpRegRegOp",
VfpSavedState state = prepFpState(fpscr.rMode);
vfpFlushToZero(fpscr, FpOp1);
__asm__ __volatile__("" : "=m" (FpOp1) : "m" (FpOp1));
- FpDest.sw = vfpFpSToFixed(FpOp1, true, false, 0, false);
- __asm__ __volatile__("" :: "m" (FpDest.sw));
+ FpDest_sw = vfpFpSToFixed(FpOp1, true, false, 0, false);
+ __asm__ __volatile__("" :: "m" (FpDest_sw));
finishVfp(fpscr, state, fpscr.fz);
FpscrExc = fpscr;
'''
vcvtFpSIntDRCode = vfpEnabledCheckCode + '''
FPSCR fpscr = (FPSCR) FpscrExc;
- double cOp1 = dbl(FpOp1P0.uw, FpOp1P1.uw);
+ double cOp1 = dbl(FpOp1P0_uw, FpOp1P1_uw);
vfpFlushToZero(fpscr, cOp1);
VfpSavedState state = prepFpState(fpscr.rMode);
__asm__ __volatile__("" : "=m" (cOp1) : "m" (cOp1));
int64_t result = vfpFpDToFixed(cOp1, true, false, 0, false);
__asm__ __volatile__("" :: "m" (result));
finishVfp(fpscr, state, fpscr.fz);
- FpDestP0.uw = result;
+ FpDestP0_uw = result;
FpscrExc = fpscr;
'''
vcvtFpSIntDRIop = InstObjParams("vcvtr", "VcvtFpSIntDR", "FpRegRegOp",
VfpSavedState state = prepFpState(fpscr.rMode);
fesetround(FeRoundZero);
__asm__ __volatile__("" : "=m" (FpOp1) : "m" (FpOp1));
- FpDest.uw = vfpFpSToFixed(FpOp1, false, false, 0);
- __asm__ __volatile__("" :: "m" (FpDest.uw));
+ FpDest_uw = vfpFpSToFixed(FpOp1, false, false, 0);
+ __asm__ __volatile__("" :: "m" (FpDest_uw));
finishVfp(fpscr, state, fpscr.fz);
FpscrExc = fpscr;
'''
vcvtFpUIntDCode = vfpEnabledCheckCode + '''
FPSCR fpscr = (FPSCR) FpscrExc;
- double cOp1 = dbl(FpOp1P0.uw, FpOp1P1.uw);
+ double cOp1 = dbl(FpOp1P0_uw, FpOp1P1_uw);
vfpFlushToZero(fpscr, cOp1);
VfpSavedState state = prepFpState(fpscr.rMode);
fesetround(FeRoundZero);
uint64_t result = vfpFpDToFixed(cOp1, false, false, 0);
__asm__ __volatile__("" :: "m" (result));
finishVfp(fpscr, state, fpscr.fz);
- FpDestP0.uw = result;
+ FpDestP0_uw = result;
FpscrExc = fpscr;
'''
vcvtFpUIntDIop = InstObjParams("vcvt", "VcvtFpUIntD", "FpRegRegOp",
VfpSavedState state = prepFpState(fpscr.rMode);
fesetround(FeRoundZero);
__asm__ __volatile__("" : "=m" (FpOp1) : "m" (FpOp1));
- FpDest.sw = vfpFpSToFixed(FpOp1, true, false, 0);
- __asm__ __volatile__("" :: "m" (FpDest.sw));
+ FpDest_sw = vfpFpSToFixed(FpOp1, true, false, 0);
+ __asm__ __volatile__("" :: "m" (FpDest_sw));
finishVfp(fpscr, state, fpscr.fz);
FpscrExc = fpscr;
'''
vcvtFpSIntDCode = vfpEnabledCheckCode + '''
FPSCR fpscr = (FPSCR) FpscrExc;
- double cOp1 = dbl(FpOp1P0.uw, FpOp1P1.uw);
+ double cOp1 = dbl(FpOp1P0_uw, FpOp1P1_uw);
vfpFlushToZero(fpscr, cOp1);
VfpSavedState state = prepFpState(fpscr.rMode);
fesetround(FeRoundZero);
int64_t result = vfpFpDToFixed(cOp1, true, false, 0);
__asm__ __volatile__("" :: "m" (result));
finishVfp(fpscr, state, fpscr.fz);
- FpDestP0.uw = result;
+ FpDestP0_uw = result;
FpscrExc = fpscr;
'''
vcvtFpSIntDIop = InstObjParams("vcvt", "VcvtFpSIntD", "FpRegRegOp",
double cDest = fixFpSFpDDest(FpscrExc, FpOp1);
__asm__ __volatile__("" :: "m" (cDest));
finishVfp(fpscr, state, fpscr.fz);
- FpDestP0.uw = dblLow(cDest);
- FpDestP1.uw = dblHi(cDest);
+ FpDestP0_uw = dblLow(cDest);
+ FpDestP1_uw = dblHi(cDest);
FpscrExc = fpscr;
'''
vcvtFpSFpDIop = InstObjParams("vcvt", "VcvtFpSFpD", "FpRegRegOp",
vcvtFpDFpSCode = vfpEnabledCheckCode + '''
FPSCR fpscr = (FPSCR) FpscrExc;
- double cOp1 = dbl(FpOp1P0.uw, FpOp1P1.uw);
+ double cOp1 = dbl(FpOp1P0_uw, FpOp1P1_uw);
vfpFlushToZero(fpscr, cOp1);
VfpSavedState state = prepFpState(fpscr.rMode);
__asm__ __volatile__("" : "=m" (cOp1) : "m" (cOp1));
FPSCR fpscr = (FPSCR) FpscrExc;
vfpFlushToZero(fpscr, FpOp1);
VfpSavedState state = prepFpState(fpscr.rMode);
- __asm__ __volatile__("" : "=m" (FpOp1), "=m" (FpDest.uw)
- : "m" (FpOp1), "m" (FpDest.uw));
- FpDest.uw = insertBits(FpDest.uw, 31, 16,
+ __asm__ __volatile__("" : "=m" (FpOp1), "=m" (FpDest_uw)
+ : "m" (FpOp1), "m" (FpDest_uw));
+ FpDest_uw = insertBits(FpDest_uw, 31, 16,
vcvtFpSFpH(fpscr, fpscr.fz, fpscr.dn,
fpscr.rMode, fpscr.ahp, FpOp1));
- __asm__ __volatile__("" :: "m" (FpDest.uw));
+ __asm__ __volatile__("" :: "m" (FpDest_uw));
finishVfp(fpscr, state, fpscr.fz);
FpscrExc = fpscr;
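// This variant packs the converted half-float into the top half
// <31:16> of the destination word; the block below writes the
// bottom half <15:0> instead.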
'''
FPSCR fpscr = (FPSCR) FpscrExc;
vfpFlushToZero(fpscr, FpOp1);
VfpSavedState state = prepFpState(fpscr.rMode);
- __asm__ __volatile__("" : "=m" (FpOp1), "=m" (FpDest.uw)
- : "m" (FpOp1), "m" (FpDest.uw));
- FpDest.uw = insertBits(FpDest.uw, 15, 0,
+ __asm__ __volatile__("" : "=m" (FpOp1), "=m" (FpDest_uw)
+ : "m" (FpOp1), "m" (FpDest_uw));
+ FpDest_uw = insertBits(FpDest_uw, 15, 0,
vcvtFpSFpH(fpscr, fpscr.fz, fpscr.dn,
fpscr.rMode, fpscr.ahp, FpOp1));
- __asm__ __volatile__("" :: "m" (FpDest.uw));
+ __asm__ __volatile__("" :: "m" (FpDest_uw));
finishVfp(fpscr, state, fpscr.fz);
FpscrExc = fpscr;
'''
exec_output += PredOpExecute.subst(vcmpSIop);
vcmpDCode = vfpEnabledCheckCode + '''
- double cOp1 = dbl(FpOp1P0.uw, FpOp1P1.uw);
- double cDest = dbl(FpDestP0.uw, FpDestP1.uw);
+ double cOp1 = dbl(FpOp1P0_uw, FpOp1P1_uw);
+ double cDest = dbl(FpDestP0_uw, FpDestP1_uw);
FPSCR fpscr = (FPSCR) FpscrExc;
vfpFlushToZero(fpscr, cDest, cOp1);
if (cDest == cOp1) {
vcmpZeroDCode = vfpEnabledCheckCode + '''
// This only handles imm == 0 for now.
assert(imm == 0);
- double cDest = dbl(FpDestP0.uw, FpDestP1.uw);
+ double cDest = dbl(FpDestP0_uw, FpDestP1_uw);
FPSCR fpscr = (FPSCR) FpscrExc;
vfpFlushToZero(fpscr, cDest);
if (cDest == imm) {
exec_output += PredOpExecute.subst(vcmpeSIop);
vcmpeDCode = vfpEnabledCheckCode + '''
- double cOp1 = dbl(FpOp1P0.uw, FpOp1P1.uw);
- double cDest = dbl(FpDestP0.uw, FpDestP1.uw);
+ double cOp1 = dbl(FpOp1P0_uw, FpOp1P1_uw);
+ double cDest = dbl(FpDestP0_uw, FpDestP1_uw);
FPSCR fpscr = (FPSCR) FpscrExc;
vfpFlushToZero(fpscr, cDest, cOp1);
if (cDest == cOp1) {
exec_output += PredOpExecute.subst(vcmpeZeroSIop);
vcmpeZeroDCode = vfpEnabledCheckCode + '''
- double cDest = dbl(FpDestP0.uw, FpDestP1.uw);
+ double cDest = dbl(FpDestP0_uw, FpDestP1_uw);
FPSCR fpscr = (FPSCR) FpscrExc;
vfpFlushToZero(fpscr, cDest);
if (cDest == imm) {
vfpFlushToZero(fpscr, FpOp1);
VfpSavedState state = prepFpState(fpscr.rMode);
__asm__ __volatile__("" : "=m" (FpOp1) : "m" (FpOp1));
- FpDest.sw = vfpFpSToFixed(FpOp1, true, false, imm);
- __asm__ __volatile__("" :: "m" (FpDest.sw));
+ FpDest_sw = vfpFpSToFixed(FpOp1, true, false, imm);
+ __asm__ __volatile__("" :: "m" (FpDest_sw));
finishVfp(fpscr, state, fpscr.fz);
FpscrExc = fpscr;
'''
vcvtFpSFixedDCode = vfpEnabledCheckCode + '''
FPSCR fpscr = (FPSCR) FpscrExc;
- double cOp1 = dbl(FpOp1P0.uw, FpOp1P1.uw);
+ double cOp1 = dbl(FpOp1P0_uw, FpOp1P1_uw);
vfpFlushToZero(fpscr, cOp1);
VfpSavedState state = prepFpState(fpscr.rMode);
__asm__ __volatile__("" : "=m" (cOp1) : "m" (cOp1));
uint64_t mid = vfpFpDToFixed(cOp1, true, false, imm);
__asm__ __volatile__("" :: "m" (mid));
finishVfp(fpscr, state, fpscr.fz);
- FpDestP0.uw = mid;
- FpDestP1.uw = mid >> 32;
+ FpDestP0_uw = mid;
+ FpDestP1_uw = mid >> 32;
FpscrExc = fpscr;
'''
vcvtFpSFixedDIop = InstObjParams("vcvt", "VcvtFpSFixedD", "FpRegRegImmOp",
vfpFlushToZero(fpscr, FpOp1);
VfpSavedState state = prepFpState(fpscr.rMode);
__asm__ __volatile__("" : "=m" (FpOp1) : "m" (FpOp1));
- FpDest.uw = vfpFpSToFixed(FpOp1, false, false, imm);
- __asm__ __volatile__("" :: "m" (FpDest.uw));
+ FpDest_uw = vfpFpSToFixed(FpOp1, false, false, imm);
+ __asm__ __volatile__("" :: "m" (FpDest_uw));
finishVfp(fpscr, state, fpscr.fz);
FpscrExc = fpscr;
'''
vcvtFpUFixedDCode = vfpEnabledCheckCode + '''
FPSCR fpscr = (FPSCR) FpscrExc;
- double cOp1 = dbl(FpOp1P0.uw, FpOp1P1.uw);
+ double cOp1 = dbl(FpOp1P0_uw, FpOp1P1_uw);
vfpFlushToZero(fpscr, cOp1);
VfpSavedState state = prepFpState(fpscr.rMode);
__asm__ __volatile__("" : "=m" (cOp1) : "m" (cOp1));
uint64_t mid = vfpFpDToFixed(cOp1, false, false, imm);
__asm__ __volatile__("" :: "m" (mid));
finishVfp(fpscr, state, fpscr.fz);
- FpDestP0.uw = mid;
- FpDestP1.uw = mid >> 32;
+ FpDestP0_uw = mid;
+ FpDestP1_uw = mid >> 32;
FpscrExc = fpscr;
'''
vcvtFpUFixedDIop = InstObjParams("vcvt", "VcvtFpUFixedD", "FpRegRegImmOp",
vcvtSFixedFpSCode = vfpEnabledCheckCode + '''
FPSCR fpscr = (FPSCR) FpscrExc;
VfpSavedState state = prepFpState(fpscr.rMode);
- __asm__ __volatile__("" : "=m" (FpOp1.sw) : "m" (FpOp1.sw));
- FpDest = vfpSFixedToFpS(fpscr.fz, fpscr.dn, FpOp1.sw, false, imm);
+ __asm__ __volatile__("" : "=m" (FpOp1_sw) : "m" (FpOp1_sw));
+ FpDest = vfpSFixedToFpS(fpscr.fz, fpscr.dn, FpOp1_sw, false, imm);
__asm__ __volatile__("" :: "m" (FpDest));
finishVfp(fpscr, state, fpscr.fz);
FpscrExc = fpscr;
vcvtSFixedFpDCode = vfpEnabledCheckCode + '''
FPSCR fpscr = (FPSCR) FpscrExc;
- uint64_t mid = ((uint64_t)FpOp1P0.uw | ((uint64_t)FpOp1P1.uw << 32));
+ uint64_t mid = ((uint64_t)FpOp1P0_uw | ((uint64_t)FpOp1P1_uw << 32));
VfpSavedState state = prepFpState(fpscr.rMode);
__asm__ __volatile__("" : "=m" (mid) : "m" (mid));
double cDest = vfpSFixedToFpD(fpscr.fz, fpscr.dn, mid, false, imm);
__asm__ __volatile__("" :: "m" (cDest));
finishVfp(fpscr, state, fpscr.fz);
- FpDestP0.uw = dblLow(cDest);
- FpDestP1.uw = dblHi(cDest);
+ FpDestP0_uw = dblLow(cDest);
+ FpDestP1_uw = dblHi(cDest);
FpscrExc = fpscr;
'''
vcvtSFixedFpDIop = InstObjParams("vcvt", "VcvtSFixedFpD", "FpRegRegImmOp",
vcvtUFixedFpSCode = vfpEnabledCheckCode + '''
FPSCR fpscr = (FPSCR) FpscrExc;
VfpSavedState state = prepFpState(fpscr.rMode);
- __asm__ __volatile__("" : "=m" (FpOp1.uw) : "m" (FpOp1.uw));
- FpDest = vfpUFixedToFpS(fpscr.fz, fpscr.dn, FpOp1.uw, false, imm);
+ __asm__ __volatile__("" : "=m" (FpOp1_uw) : "m" (FpOp1_uw));
+ FpDest = vfpUFixedToFpS(fpscr.fz, fpscr.dn, FpOp1_uw, false, imm);
__asm__ __volatile__("" :: "m" (FpDest));
finishVfp(fpscr, state, fpscr.fz);
FpscrExc = fpscr;
vcvtUFixedFpDCode = vfpEnabledCheckCode + '''
FPSCR fpscr = (FPSCR) FpscrExc;
- uint64_t mid = ((uint64_t)FpOp1P0.uw | ((uint64_t)FpOp1P1.uw << 32));
+ uint64_t mid = ((uint64_t)FpOp1P0_uw | ((uint64_t)FpOp1P1_uw << 32));
VfpSavedState state = prepFpState(fpscr.rMode);
__asm__ __volatile__("" : "=m" (mid) : "m" (mid));
double cDest = vfpUFixedToFpD(fpscr.fz, fpscr.dn, mid, false, imm);
__asm__ __volatile__("" :: "m" (cDest));
finishVfp(fpscr, state, fpscr.fz);
- FpDestP0.uw = dblLow(cDest);
- FpDestP1.uw = dblHi(cDest);
+ FpDestP0_uw = dblLow(cDest);
+ FpDestP1_uw = dblHi(cDest);
FpscrExc = fpscr;
'''
vcvtUFixedFpDIop = InstObjParams("vcvt", "VcvtUFixedFpD", "FpRegRegImmOp",
vfpFlushToZero(fpscr, FpOp1);
VfpSavedState state = prepFpState(fpscr.rMode);
__asm__ __volatile__("" : "=m" (FpOp1) : "m" (FpOp1));
- FpDest.sh = vfpFpSToFixed(FpOp1, true, true, imm);
- __asm__ __volatile__("" :: "m" (FpDest.sh));
+ FpDest_sh = vfpFpSToFixed(FpOp1, true, true, imm);
+ __asm__ __volatile__("" :: "m" (FpDest_sh));
finishVfp(fpscr, state, fpscr.fz);
FpscrExc = fpscr;
'''
vcvtFpSHFixedDCode = vfpEnabledCheckCode + '''
FPSCR fpscr = (FPSCR) FpscrExc;
- double cOp1 = dbl(FpOp1P0.uw, FpOp1P1.uw);
+ double cOp1 = dbl(FpOp1P0_uw, FpOp1P1_uw);
vfpFlushToZero(fpscr, cOp1);
VfpSavedState state = prepFpState(fpscr.rMode);
__asm__ __volatile__("" : "=m" (cOp1) : "m" (cOp1));
uint64_t result = vfpFpDToFixed(cOp1, true, true, imm);
__asm__ __volatile__("" :: "m" (result));
finishVfp(fpscr, state, fpscr.fz);
- FpDestP0.uw = result;
- FpDestP1.uw = result >> 32;
+ FpDestP0_uw = result;
+ FpDestP1_uw = result >> 32;
FpscrExc = fpscr;
'''
vcvtFpSHFixedDIop = InstObjParams("vcvt", "VcvtFpSHFixedD",
vfpFlushToZero(fpscr, FpOp1);
VfpSavedState state = prepFpState(fpscr.rMode);
__asm__ __volatile__("" : "=m" (FpOp1) : "m" (FpOp1));
- FpDest.uh = vfpFpSToFixed(FpOp1, false, true, imm);
- __asm__ __volatile__("" :: "m" (FpDest.uh));
+ FpDest_uh = vfpFpSToFixed(FpOp1, false, true, imm);
+ __asm__ __volatile__("" :: "m" (FpDest_uh));
finishVfp(fpscr, state, fpscr.fz);
FpscrExc = fpscr;
'''
vcvtFpUHFixedDCode = vfpEnabledCheckCode + '''
FPSCR fpscr = (FPSCR) FpscrExc;
- double cOp1 = dbl(FpOp1P0.uw, FpOp1P1.uw);
+ double cOp1 = dbl(FpOp1P0_uw, FpOp1P1_uw);
vfpFlushToZero(fpscr, cOp1);
VfpSavedState state = prepFpState(fpscr.rMode);
__asm__ __volatile__("" : "=m" (cOp1) : "m" (cOp1));
uint64_t mid = vfpFpDToFixed(cOp1, false, true, imm);
__asm__ __volatile__("" :: "m" (mid));
finishVfp(fpscr, state, fpscr.fz);
- FpDestP0.uw = mid;
- FpDestP1.uw = mid >> 32;
+ FpDestP0_uw = mid;
+ FpDestP1_uw = mid >> 32;
FpscrExc = fpscr;
'''
vcvtFpUHFixedDIop = InstObjParams("vcvt", "VcvtFpUHFixedD",
vcvtSHFixedFpSCode = vfpEnabledCheckCode + '''
FPSCR fpscr = (FPSCR) FpscrExc;
VfpSavedState state = prepFpState(fpscr.rMode);
- __asm__ __volatile__("" : "=m" (FpOp1.sh) : "m" (FpOp1.sh));
- FpDest = vfpSFixedToFpS(fpscr.fz, fpscr.dn, FpOp1.sh, true, imm);
+ __asm__ __volatile__("" : "=m" (FpOp1_sh) : "m" (FpOp1_sh));
+ FpDest = vfpSFixedToFpS(fpscr.fz, fpscr.dn, FpOp1_sh, true, imm);
__asm__ __volatile__("" :: "m" (FpDest));
finishVfp(fpscr, state, fpscr.fz);
FpscrExc = fpscr;
vcvtSHFixedFpDCode = vfpEnabledCheckCode + '''
FPSCR fpscr = (FPSCR) FpscrExc;
- uint64_t mid = ((uint64_t)FpOp1P0.uw | ((uint64_t)FpOp1P1.uw << 32));
+ uint64_t mid = ((uint64_t)FpOp1P0_uw | ((uint64_t)FpOp1P1_uw << 32));
VfpSavedState state = prepFpState(fpscr.rMode);
__asm__ __volatile__("" : "=m" (mid) : "m" (mid));
double cDest = vfpSFixedToFpD(fpscr.fz, fpscr.dn, mid, true, imm);
__asm__ __volatile__("" :: "m" (cDest));
finishVfp(fpscr, state, fpscr.fz);
- FpDestP0.uw = dblLow(cDest);
- FpDestP1.uw = dblHi(cDest);
+ FpDestP0_uw = dblLow(cDest);
+ FpDestP1_uw = dblHi(cDest);
FpscrExc = fpscr;
'''
vcvtSHFixedFpDIop = InstObjParams("vcvt", "VcvtSHFixedFpD",
vcvtUHFixedFpSCode = vfpEnabledCheckCode + '''
FPSCR fpscr = (FPSCR) FpscrExc;
VfpSavedState state = prepFpState(fpscr.rMode);
- __asm__ __volatile__("" : "=m" (FpOp1.uh) : "m" (FpOp1.uh));
- FpDest = vfpUFixedToFpS(fpscr.fz, fpscr.dn, FpOp1.uh, true, imm);
+ __asm__ __volatile__("" : "=m" (FpOp1_uh) : "m" (FpOp1_uh));
+ FpDest = vfpUFixedToFpS(fpscr.fz, fpscr.dn, FpOp1_uh, true, imm);
__asm__ __volatile__("" :: "m" (FpDest));
finishVfp(fpscr, state, fpscr.fz);
FpscrExc = fpscr;
vcvtUHFixedFpDCode = vfpEnabledCheckCode + '''
FPSCR fpscr = (FPSCR) FpscrExc;
- uint64_t mid = ((uint64_t)FpOp1P0.uw | ((uint64_t)FpOp1P1.uw << 32));
+ uint64_t mid = ((uint64_t)FpOp1P0_uw | ((uint64_t)FpOp1P1_uw << 32));
VfpSavedState state = prepFpState(fpscr.rMode);
__asm__ __volatile__("" : "=m" (mid) : "m" (mid));
double cDest = vfpUFixedToFpD(fpscr.fz, fpscr.dn, mid, true, imm);
__asm__ __volatile__("" :: "m" (cDest));
finishVfp(fpscr, state, fpscr.fz);
- FpDestP0.uw = dblLow(cDest);
- FpDestP1.uw = dblHi(cDest);
+ FpDestP0_uw = dblLow(cDest);
+ FpDestP1_uw = dblHi(cDest);
FpscrExc = fpscr;
'''
vcvtUHFixedFpDIop = InstObjParams("vcvt", "VcvtUHFixedFpD",
cpsr.v = CondCodesV;
cpsr.ge = CondCodesGE;
URc = cpsr;
- URa = cSwap<uint32_t>(Mem.ud, cpsr.e);
- URb = cSwap<uint32_t>(Mem.ud >> 32, cpsr.e);
+ URa = cSwap<uint32_t>(Mem_ud, cpsr.e);
+ URb = cSwap<uint32_t>(Mem_ud >> 32, cpsr.e);
'''
self.codeBlobs["memacc_code"] = accCode
if self.flavor == "dprefetch" or self.flavor == "iprefetch":
accCode = 'uint64_t temp = Mem%s; temp = temp;'
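            # The self-assignment only silences unused-variable warnings;
            # the prefetch flavors want just the side effect of the access.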
elif self.flavor == "fp":
- accCode = "FpDest.uw = cSwap(Mem%s, ((CPSR)Cpsr).e);\n"
+ accCode = "FpDest_uw = cSwap(Mem%s, ((CPSR)Cpsr).e);\n"
else:
accCode = "IWDest = cSwap(Mem%s, ((CPSR)Cpsr).e);"
accCode = accCode % buildMemSuffix(self.sign, self.size)
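        # e.g. a signed halfword load ends up as
        # "IWDest = cSwap(Mem_sh, ((CPSR)Cpsr).e);" once buildMemSuffix()
        # supplies the '_sh' suffix.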
if self.flavor != "fp":
accCode = '''
CPSR cpsr = Cpsr;
- Dest = cSwap<uint32_t>(Mem.ud, cpsr.e);
- Dest2 = cSwap<uint32_t>(Mem.ud >> 32, cpsr.e);
+ Dest = cSwap<uint32_t>(Mem_ud, cpsr.e);
+ Dest2 = cSwap<uint32_t>(Mem_ud >> 32, cpsr.e);
'''
else:
accCode = '''
- uint64_t swappedMem = cSwap(Mem.ud, ((CPSR)Cpsr).e);
- FpDest.uw = (uint32_t)swappedMem;
- FpDest2.uw = (uint32_t)(swappedMem >> 32);
+ uint64_t swappedMem = cSwap(Mem_ud, ((CPSR)Cpsr).e);
+ FpDest_uw = (uint32_t)swappedMem;
+ FpDest2_uw = (uint32_t)(swappedMem >> 32);
'''
self.codeBlobs["memacc_code"] = accCode
//
let {{
- microLdrUopCode = "IWRa = cSwap(Mem.uw, ((CPSR)Cpsr).e);"
+ microLdrUopCode = "IWRa = cSwap(Mem_uw, ((CPSR)Cpsr).e);"
microLdrUopIop = InstObjParams('ldr_uop', 'MicroLdrUop',
'MicroMemOp',
{'memacc_code': microLdrUopCode,
'predicate_test': predicateTest},
['IsMicroop'])
- microLdrFpUopCode = "Fa.uw = cSwap(Mem.uw, ((CPSR)Cpsr).e);"
+ microLdrFpUopCode = "Fa_uw = cSwap(Mem_uw, ((CPSR)Cpsr).e);"
microLdrFpUopIop = InstObjParams('ldrfp_uop', 'MicroLdrFpUop',
'MicroMemOp',
{'memacc_code': microLdrFpUopCode,
'predicate_test': predicateTest},
['IsMicroop'])
- microLdrDBFpUopCode = "Fa.uw = cSwap(Mem.uw, ((CPSR)Cpsr).e);"
+ microLdrDBFpUopCode = "Fa_uw = cSwap(Mem_uw, ((CPSR)Cpsr).e);"
microLdrDBFpUopIop = InstObjParams('ldrfp_uop', 'MicroLdrDBFpUop',
'MicroMemOp',
{'memacc_code': microLdrFpUopCode,
'predicate_test': predicateTest},
['IsMicroop'])
- microLdrDTFpUopCode = "Fa.uw = cSwap(Mem.uw, ((CPSR)Cpsr).e);"
+ microLdrDTFpUopCode = "Fa_uw = cSwap(Mem_uw, ((CPSR)Cpsr).e);"
microLdrDTFpUopIop = InstObjParams('ldrfp_uop', 'MicroLdrDTFpUop',
'MicroMemOp',
{'memacc_code': microLdrFpUopCode,
microLdrRetUopIop = InstObjParams('ldr_ret_uop', 'MicroLdrRetUop',
'MicroMemOp',
{'memacc_code':
- microRetUopCode % 'Mem.uw',
+ microRetUopCode % 'Mem_uw',
'ea_code':
'EA = URb + (up ? imm : -imm);',
'predicate_test': condPredicateTest},
['IsMicroop','IsNonSpeculative',
'IsSerializeAfter'])
- microStrUopCode = "Mem = cSwap(URa.uw, ((CPSR)Cpsr).e);"
+ microStrUopCode = "Mem = cSwap(URa_uw, ((CPSR)Cpsr).e);"
microStrUopIop = InstObjParams('str_uop', 'MicroStrUop',
'MicroMemOp',
{'memacc_code': microStrUopCode,
'predicate_test': predicateTest},
['IsMicroop'])
- microStrFpUopCode = "Mem = cSwap(Fa.uw, ((CPSR)Cpsr).e);"
+ microStrFpUopCode = "Mem = cSwap(Fa_uw, ((CPSR)Cpsr).e);"
microStrFpUopIop = InstObjParams('strfp_uop', 'MicroStrFpUop',
'MicroMemOp',
{'memacc_code': microStrFpUopCode,
'predicate_test': predicateTest},
['IsMicroop'])
- microStrDBFpUopCode = "Mem = cSwap(Fa.uw, ((CPSR)Cpsr).e);"
+ microStrDBFpUopCode = "Mem = cSwap(Fa_uw, ((CPSR)Cpsr).e);"
microStrDBFpUopIop = InstObjParams('strfp_uop', 'MicroStrDBFpUop',
'MicroMemOp',
{'memacc_code': microStrFpUopCode,
'predicate_test': predicateTest},
['IsMicroop'])
- microStrDTFpUopCode = "Mem = cSwap(Fa.uw, ((CPSR)Cpsr).e);"
+ microStrDTFpUopCode = "Mem = cSwap(Fa_uw, ((CPSR)Cpsr).e);"
microStrDTFpUopIop = InstObjParams('strfp_uop', 'MicroStrDTFpUop',
'MicroMemOp',
{'memacc_code': microStrFpUopCode,
if reg == regs - 1:
mask = ' & mask(%d)' % (32 - 8 * (regs * 4 - size))
regSetCode += '''
- FpDestP%(reg)d.uw = gtoh(memUnion.floatRegBits[%(reg)d])%(mask)s;
+ FpDestP%(reg)d_uw = gtoh(memUnion.floatRegBits[%(reg)d])%(mask)s;
''' % { "reg" : reg, "mask" : mask }
# Pull everything in from registers
regGetCode = ''
for reg in range(regs):
regGetCode += '''
- memUnion.floatRegBits[%(reg)d] = htog(FpDestP%(reg)d.uw);
+ memUnion.floatRegBits[%(reg)d] = htog(FpDestP%(reg)d_uw);
''' % { "reg" : reg }
loadMemAccCode = convCode + regSetCode
unloadConv = ''
for dReg in range(dRegs):
loadConv += '''
- conv1.cRegs[%(sReg0)d] = htog(FpOp1P%(sReg0)d.uw);
- conv1.cRegs[%(sReg1)d] = htog(FpOp1P%(sReg1)d.uw);
+ conv1.cRegs[%(sReg0)d] = htog(FpOp1P%(sReg0)d_uw);
+ conv1.cRegs[%(sReg1)d] = htog(FpOp1P%(sReg1)d_uw);
''' % { "sReg0" : (dReg * 2), "sReg1" : (dReg * 2 + 1) }
unloadConv += '''
- FpDestS%(dReg)dP0.uw = gtoh(conv2.cRegs[2 * %(dReg)d + 0]);
- FpDestS%(dReg)dP1.uw = gtoh(conv2.cRegs[2 * %(dReg)d + 1]);
+ FpDestS%(dReg)dP0_uw = gtoh(conv2.cRegs[2 * %(dReg)d + 0]);
+ FpDestS%(dReg)dP1_uw = gtoh(conv2.cRegs[2 * %(dReg)d + 1]);
''' % { "dReg" : dReg }
microDeintNeonCode = '''
const unsigned dRegs = %(dRegs)d;
unloadConv = ''
for dReg in range(dRegs):
loadConv += '''
- conv1.cRegs[2 * %(dReg)d + 0] = htog(FpOp1S%(dReg)dP0.uw);
- conv1.cRegs[2 * %(dReg)d + 1] = htog(FpOp1S%(dReg)dP1.uw);
+ conv1.cRegs[2 * %(dReg)d + 0] = htog(FpOp1S%(dReg)dP0_uw);
+ conv1.cRegs[2 * %(dReg)d + 1] = htog(FpOp1S%(dReg)dP1_uw);
''' % { "dReg" : dReg }
unloadConv += '''
- FpDestP%(sReg0)d.uw = gtoh(conv2.cRegs[%(sReg0)d]);
- FpDestP%(sReg1)d.uw = gtoh(conv2.cRegs[%(sReg1)d]);
+ FpDestP%(sReg0)d_uw = gtoh(conv2.cRegs[%(sReg0)d]);
+ FpDestP%(sReg1)d_uw = gtoh(conv2.cRegs[%(sReg1)d]);
''' % { "sReg0" : (dReg * 2), "sReg1" : (dReg * 2 + 1) }
microInterNeonCode = '''
const unsigned dRegs = %(dRegs)d;
baseLoadRegs = ''
for reg in range(sRegs):
baseLoadRegs += '''
- sourceRegs.fRegs[%(reg0)d] = htog(FpOp1P%(reg0)d.uw);
- sourceRegs.fRegs[%(reg1)d] = htog(FpOp1P%(reg1)d.uw);
+ sourceRegs.fRegs[%(reg0)d] = htog(FpOp1P%(reg0)d_uw);
+ sourceRegs.fRegs[%(reg1)d] = htog(FpOp1P%(reg1)d_uw);
''' % { "reg0" : (2 * reg + 0),
"reg1" : (2 * reg + 1) }
for dRegs in range(sRegs, 5):
loadRegs = baseLoadRegs
for reg in range(dRegs):
loadRegs += '''
- destRegs[%(reg)d].fRegs[0] = htog(FpDestS%(reg)dP0.uw);
- destRegs[%(reg)d].fRegs[1] = htog(FpDestS%(reg)dP1.uw);
+ destRegs[%(reg)d].fRegs[0] = htog(FpDestS%(reg)dP0_uw);
+ destRegs[%(reg)d].fRegs[1] = htog(FpDestS%(reg)dP1_uw);
''' % { "reg" : reg }
unloadRegs += '''
- FpDestS%(reg)dP0.uw = gtoh(destRegs[%(reg)d].fRegs[0]);
- FpDestS%(reg)dP1.uw = gtoh(destRegs[%(reg)d].fRegs[1]);
+ FpDestS%(reg)dP0_uw = gtoh(destRegs[%(reg)d].fRegs[0]);
+ FpDestS%(reg)dP1_uw = gtoh(destRegs[%(reg)d].fRegs[1]);
''' % { "reg" : reg }
microUnpackNeonCode = '''
const unsigned perDReg = (2 * sizeof(FloatRegBits)) /
loadRegs = ''
for reg in range(sRegs):
loadRegs += '''
- sourceRegs.fRegs[%(reg0)d] = htog(FpOp1P%(reg0)d.uw);
- sourceRegs.fRegs[%(reg1)d] = htog(FpOp1P%(reg1)d.uw);
+ sourceRegs.fRegs[%(reg0)d] = htog(FpOp1P%(reg0)d_uw);
+ sourceRegs.fRegs[%(reg1)d] = htog(FpOp1P%(reg1)d_uw);
''' % { "reg0" : (2 * reg + 0),
"reg1" : (2 * reg + 1) }
for dRegs in range(sRegs, 5):
unloadRegs = ''
for reg in range(dRegs):
unloadRegs += '''
- FpDestS%(reg)dP0.uw = gtoh(destRegs[%(reg)d].fRegs[0]);
- FpDestS%(reg)dP1.uw = gtoh(destRegs[%(reg)d].fRegs[1]);
+ FpDestS%(reg)dP0_uw = gtoh(destRegs[%(reg)d].fRegs[0]);
+ FpDestS%(reg)dP1_uw = gtoh(destRegs[%(reg)d].fRegs[1]);
''' % { "reg" : reg }
microUnpackAllNeonCode = '''
const unsigned perDReg = (2 * sizeof(FloatRegBits)) /
unloadRegs = ''
for reg in range(dRegs):
unloadRegs += '''
- FpDestP%(reg0)d.uw = gtoh(destRegs.fRegs[%(reg0)d]);
- FpDestP%(reg1)d.uw = gtoh(destRegs.fRegs[%(reg1)d]);
+ FpDestP%(reg0)d_uw = gtoh(destRegs.fRegs[%(reg0)d]);
+ FpDestP%(reg1)d_uw = gtoh(destRegs.fRegs[%(reg1)d]);
''' % { "reg0" : (2 * reg + 0),
"reg1" : (2 * reg + 1) }
for sRegs in range(dRegs, 5):
loadRegs = ''
for reg in range(sRegs):
loadRegs += '''
- sourceRegs[%(reg)d].fRegs[0] = htog(FpOp1S%(reg)dP0.uw);
- sourceRegs[%(reg)d].fRegs[1] = htog(FpOp1S%(reg)dP1.uw);
+ sourceRegs[%(reg)d].fRegs[0] = htog(FpOp1S%(reg)dP0_uw);
+ sourceRegs[%(reg)d].fRegs[1] = htog(FpOp1S%(reg)dP1_uw);
''' % { "reg" : reg }
microPackNeonCode = '''
const unsigned perDReg = (2 * sizeof(FloatRegBits)) /
memSuffix = ''
elif size == 2:
if sign:
- memSuffix = '.sh'
+ memSuffix = '_sh'
else:
- memSuffix = '.uh'
+ memSuffix = '_uh'
elif size == 1:
if sign:
- memSuffix = '.sb'
+ memSuffix = '_sb'
else:
- memSuffix = '.ub'
+ memSuffix = '_ub'
else:
raise Exception, "Unrecognized size for access %d" % size
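    # Usage sketch of the branches above: buildMemSuffix(True, 2) returns
    # '_sh' and buildMemSuffix(False, 1) returns '_ub', which then get
    # spliced into 'Mem%s' in the generated access code.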
sxtbIop = InstObjParams("sxtb", "Sxtb", "RegImmRegOp",
{ "code":
- "Dest = sext<8>((uint8_t)(Op1.ud >> imm));",
+ "Dest = sext<8>((uint8_t)(Op1_ud >> imm));",
"predicate_test": predicateTest }, [])
header_output += RegImmRegOpDeclare.subst(sxtbIop)
decoder_output += RegImmRegOpConstructor.subst(sxtbIop)
sxtabIop = InstObjParams("sxtab", "Sxtab", "RegRegRegImmOp",
{ "code":
'''
- Dest = sext<8>((uint8_t)(Op2.ud >> imm)) +
+ Dest = sext<8>((uint8_t)(Op2_ud >> imm)) +
Op1;
''',
"predicate_test": predicateTest }, [])
exec_output += PredOpExecute.subst(sxtahIop)
uxtbIop = InstObjParams("uxtb", "Uxtb", "RegImmRegOp",
- { "code": "Dest = (uint8_t)(Op1.ud >> imm);",
+ { "code": "Dest = (uint8_t)(Op1_ud >> imm);",
"predicate_test": predicateTest }, [])
header_output += RegImmRegOpDeclare.subst(uxtbIop)
decoder_output += RegImmRegOpConstructor.subst(uxtbIop)
uxtabIop = InstObjParams("uxtab", "Uxtab", "RegRegRegImmOp",
{ "code":
- "Dest = (uint8_t)(Op2.ud >> imm) + Op1;",
+ "Dest = (uint8_t)(Op2_ud >> imm) + Op1;",
"predicate_test": predicateTest }, [])
header_output += RegRegRegImmOpDeclare.subst(uxtabIop)
decoder_output += RegRegRegImmOpConstructor.subst(uxtabIop)
buildMult3Inst ("mul", "Reg0 = resTemp = Reg1 * Reg2;")
buildMult4InstCc ("smlabb", '''Reg0 = resTemp =
sext<16>(bits(Reg1, 15, 0)) *
- sext<16>(bits(Reg2.sw, 15, 0)) +
- Reg3.sw;
+ sext<16>(bits(Reg2_sw, 15, 0)) +
+ Reg3_sw;
resTemp = bits(resTemp, 32) !=
bits(resTemp, 31);
''', "overflow")
buildMult4InstCc ("smlabt", '''Reg0 = resTemp =
sext<16>(bits(Reg1, 15, 0)) *
- sext<16>(bits(Reg2.sw, 31, 16)) +
- Reg3.sw;
+ sext<16>(bits(Reg2_sw, 31, 16)) +
+ Reg3_sw;
resTemp = bits(resTemp, 32) !=
bits(resTemp, 31);
''', "overflow")
buildMult4InstCc ("smlatb", '''Reg0 = resTemp =
sext<16>(bits(Reg1, 31, 16)) *
- sext<16>(bits(Reg2.sw, 15, 0)) +
- Reg3.sw;
+ sext<16>(bits(Reg2_sw, 15, 0)) +
+ Reg3_sw;
resTemp = bits(resTemp, 32) !=
bits(resTemp, 31);
''', "overflow")
buildMult4InstCc ("smlatt", '''Reg0 = resTemp =
sext<16>(bits(Reg1, 31, 16)) *
- sext<16>(bits(Reg2.sw, 31, 16)) +
- Reg3.sw;
+ sext<16>(bits(Reg2_sw, 31, 16)) +
+ Reg3_sw;
resTemp = bits(resTemp, 32) !=
bits(resTemp, 31);
''', "overflow")
sext<16>(bits(Reg2, 31, 16)) +
sext<16>(bits(Reg1, 15, 0)) *
sext<16>(bits(Reg2, 15, 0)) +
- Reg3.sw;
+ Reg3_sw;
resTemp = bits(resTemp, 32) !=
bits(resTemp, 31);
''', "overflow")
sext<16>(bits(Reg2, 15, 0)) +
sext<16>(bits(Reg1, 15, 0)) *
sext<16>(bits(Reg2, 31, 16)) +
- Reg3.sw;
+ Reg3_sw;
resTemp = bits(resTemp, 32) !=
bits(resTemp, 31);
''', "overflow")
buildMult4Inst ("smlal", '''resTemp = sext<32>(Reg2) * sext<32>(Reg3) +
- (int64_t)((Reg1.ud << 32) | Reg0.ud);
- Reg0.ud = (uint32_t)resTemp;
- Reg1.ud = (uint32_t)(resTemp >> 32);
+ (int64_t)((Reg1_ud << 32) | Reg0_ud);
+ Reg0_ud = (uint32_t)resTemp;
+ Reg1_ud = (uint32_t)(resTemp >> 32);
''', "llbit")
buildMult4InstUnCc("smlalbb", '''resTemp = sext<16>(bits(Reg2, 15, 0)) *
sext<16>(bits(Reg3, 15, 0)) +
- (int64_t)((Reg1.ud << 32) |
- Reg0.ud);
- Reg0.ud = (uint32_t)resTemp;
- Reg1.ud = (uint32_t)(resTemp >> 32);
+ (int64_t)((Reg1_ud << 32) |
+ Reg0_ud);
+ Reg0_ud = (uint32_t)resTemp;
+ Reg1_ud = (uint32_t)(resTemp >> 32);
''')
buildMult4InstUnCc("smlalbt", '''resTemp = sext<16>(bits(Reg2, 15, 0)) *
sext<16>(bits(Reg3, 31, 16)) +
- (int64_t)((Reg1.ud << 32) |
- Reg0.ud);
- Reg0.ud = (uint32_t)resTemp;
- Reg1.ud = (uint32_t)(resTemp >> 32);
+ (int64_t)((Reg1_ud << 32) |
+ Reg0_ud);
+ Reg0_ud = (uint32_t)resTemp;
+ Reg1_ud = (uint32_t)(resTemp >> 32);
''')
buildMult4InstUnCc("smlaltb", '''resTemp = sext<16>(bits(Reg2, 31, 16)) *
sext<16>(bits(Reg3, 15, 0)) +
- (int64_t)((Reg1.ud << 32) |
- Reg0.ud);
- Reg0.ud = (uint32_t)resTemp;
- Reg1.ud = (uint32_t)(resTemp >> 32);
+ (int64_t)((Reg1_ud << 32) |
+ Reg0_ud);
+ Reg0_ud = (uint32_t)resTemp;
+ Reg1_ud = (uint32_t)(resTemp >> 32);
''')
buildMult4InstUnCc("smlaltt", '''resTemp = sext<16>(bits(Reg2, 31, 16)) *
sext<16>(bits(Reg3, 31, 16)) +
- (int64_t)((Reg1.ud << 32) |
- Reg0.ud);
- Reg0.ud = (uint32_t)resTemp;
- Reg1.ud = (uint32_t)(resTemp >> 32);
+ (int64_t)((Reg1_ud << 32) |
+ Reg0_ud);
+ Reg0_ud = (uint32_t)resTemp;
+ Reg1_ud = (uint32_t)(resTemp >> 32);
''')
buildMult4InstUnCc("smlald", '''resTemp =
sext<16>(bits(Reg2, 31, 16)) *
sext<16>(bits(Reg3, 31, 16)) +
sext<16>(bits(Reg2, 15, 0)) *
sext<16>(bits(Reg3, 15, 0)) +
- (int64_t)((Reg1.ud << 32) |
- Reg0.ud);
- Reg0.ud = (uint32_t)resTemp;
- Reg1.ud = (uint32_t)(resTemp >> 32);
+ (int64_t)((Reg1_ud << 32) |
+ Reg0_ud);
+ Reg0_ud = (uint32_t)resTemp;
+ Reg1_ud = (uint32_t)(resTemp >> 32);
''')
buildMult4InstUnCc("smlaldx", '''resTemp =
sext<16>(bits(Reg2, 31, 16)) *
sext<16>(bits(Reg3, 15, 0)) +
sext<16>(bits(Reg2, 15, 0)) *
sext<16>(bits(Reg3, 31, 16)) +
- (int64_t)((Reg1.ud << 32) |
- Reg0.ud);
- Reg0.ud = (uint32_t)resTemp;
- Reg1.ud = (uint32_t)(resTemp >> 32);
+ (int64_t)((Reg1_ud << 32) |
+ Reg0_ud);
+ Reg0_ud = (uint32_t)resTemp;
+ Reg1_ud = (uint32_t)(resTemp >> 32);
''')
buildMult4InstCc ("smlawb", '''Reg0 = resTemp =
- (Reg1.sw *
+ (Reg1_sw *
sext<16>(bits(Reg2, 15, 0)) +
- ((int64_t)Reg3.sw << 16)) >> 16;
+ ((int64_t)Reg3_sw << 16)) >> 16;
resTemp = bits(resTemp, 32) !=
bits(resTemp, 31);
''', "overflow")
buildMult4InstCc ("smlawt", '''Reg0 = resTemp =
- (Reg1.sw *
+ (Reg1_sw *
sext<16>(bits(Reg2, 31, 16)) +
- ((int64_t)Reg3.sw << 16)) >> 16;
+ ((int64_t)Reg3_sw << 16)) >> 16;
resTemp = bits(resTemp, 32) !=
bits(resTemp, 31);
''', "overflow")
sext<16>(bits(Reg2, 15, 0)) -
sext<16>(bits(Reg1, 31, 16)) *
sext<16>(bits(Reg2, 31, 16)) +
- Reg3.sw;
+ Reg3_sw;
resTemp = bits(resTemp, 32) !=
bits(resTemp, 31);
''', "overflow")
sext<16>(bits(Reg2, 31, 16)) -
sext<16>(bits(Reg1, 31, 16)) *
sext<16>(bits(Reg2, 15, 0)) +
- Reg3.sw;
+ Reg3_sw;
resTemp = bits(resTemp, 32) !=
bits(resTemp, 31);
''', "overflow")
sext<16>(bits(Reg3, 15, 0)) -
sext<16>(bits(Reg2, 31, 16)) *
sext<16>(bits(Reg3, 31, 16)) +
- (int64_t)((Reg1.ud << 32) |
- Reg0.ud);
- Reg0.ud = (uint32_t)resTemp;
- Reg1.ud = (uint32_t)(resTemp >> 32);
+ (int64_t)((Reg1_ud << 32) |
+ Reg0_ud);
+ Reg0_ud = (uint32_t)resTemp;
+ Reg1_ud = (uint32_t)(resTemp >> 32);
''')
buildMult4InstUnCc("smlsldx", '''resTemp =
sext<16>(bits(Reg2, 15, 0)) *
sext<16>(bits(Reg3, 31, 16)) -
sext<16>(bits(Reg2, 31, 16)) *
sext<16>(bits(Reg3, 15, 0)) +
- (int64_t)((Reg1.ud << 32) |
- Reg0.ud);
- Reg0.ud = (uint32_t)resTemp;
- Reg1.ud = (uint32_t)(resTemp >> 32);
+ (int64_t)((Reg1_ud << 32) |
+ Reg0_ud);
+ Reg0_ud = (uint32_t)resTemp;
+ Reg1_ud = (uint32_t)(resTemp >> 32);
''')
buildMult4InstUnCc("smmla", '''Reg0 = resTemp =
- ((int64_t)(Reg3.ud << 32) +
- (int64_t)Reg1.sw *
- (int64_t)Reg2.sw) >> 32;
+ ((int64_t)(Reg3_ud << 32) +
+ (int64_t)Reg1_sw *
+ (int64_t)Reg2_sw) >> 32;
''')
buildMult4InstUnCc("smmlar", '''Reg0 = resTemp =
- ((int64_t)(Reg3.ud << 32) +
- (int64_t)Reg1.sw *
- (int64_t)Reg2.sw +
+ ((int64_t)(Reg3_ud << 32) +
+ (int64_t)Reg1_sw *
+ (int64_t)Reg2_sw +
ULL(0x80000000)) >> 32;
''')
buildMult4InstUnCc("smmls", '''Reg0 = resTemp =
- ((int64_t)(Reg3.ud << 32) -
- (int64_t)Reg1.sw *
- (int64_t)Reg2.sw) >> 32;
+ ((int64_t)(Reg3_ud << 32) -
+ (int64_t)Reg1_sw *
+ (int64_t)Reg2_sw) >> 32;
''')
buildMult4InstUnCc("smmlsr", '''Reg0 = resTemp =
- ((int64_t)(Reg3.ud << 32) -
- (int64_t)Reg1.sw *
- (int64_t)Reg2.sw +
+ ((int64_t)(Reg3_ud << 32) -
+ (int64_t)Reg1_sw *
+ (int64_t)Reg2_sw +
ULL(0x80000000)) >> 32;
''')
buildMult3InstUnCc("smmul", '''Reg0 = resTemp =
- ((int64_t)Reg1.sw *
- (int64_t)Reg2.sw) >> 32;
+ ((int64_t)Reg1_sw *
+ (int64_t)Reg2_sw) >> 32;
''')
buildMult3InstUnCc("smmulr", '''Reg0 = resTemp =
- ((int64_t)Reg1.sw *
- (int64_t)Reg2.sw +
+ ((int64_t)Reg1_sw *
+ (int64_t)Reg2_sw +
ULL(0x80000000)) >> 32;
''')
buildMult3InstCc ("smuad", '''Reg0 = resTemp =
sext<16>(bits(Reg1, 31, 16)) *
sext<16>(bits(Reg2, 31, 16));
''')
- buildMult4Inst ("smull", '''resTemp = (int64_t)Reg2.sw *
- (int64_t)Reg3.sw;
+ buildMult4Inst ("smull", '''resTemp = (int64_t)Reg2_sw *
+ (int64_t)Reg3_sw;
Reg1 = (int32_t)(resTemp >> 32);
Reg0 = (int32_t)resTemp;
''', "llbit")
buildMult3InstUnCc("smulwb", '''Reg0 = resTemp =
- (Reg1.sw *
+ (Reg1_sw *
sext<16>(bits(Reg2, 15, 0))) >> 16;
''')
buildMult3InstUnCc("smulwt", '''Reg0 = resTemp =
- (Reg1.sw *
+ (Reg1_sw *
sext<16>(bits(Reg2, 31, 16))) >> 16;
''')
buildMult3InstUnCc("smusd", '''Reg0 = resTemp =
sext<16>(bits(Reg1, 31, 16)) *
sext<16>(bits(Reg2, 15, 0));
''')
- buildMult4InstUnCc("umaal", '''resTemp = Reg2.ud * Reg3.ud +
- Reg0.ud + Reg1.ud;
- Reg1.ud = (uint32_t)(resTemp >> 32);
- Reg0.ud = (uint32_t)resTemp;
+ buildMult4InstUnCc("umaal", '''resTemp = Reg2_ud * Reg3_ud +
+ Reg0_ud + Reg1_ud;
+ Reg1_ud = (uint32_t)(resTemp >> 32);
+ Reg0_ud = (uint32_t)resTemp;
''')
- buildMult4Inst ("umlal", '''resTemp = Reg2.ud * Reg3.ud + Reg0.ud +
- (Reg1.ud << 32);
- Reg1.ud = (uint32_t)(resTemp >> 32);
- Reg0.ud = (uint32_t)resTemp;
+ buildMult4Inst ("umlal", '''resTemp = Reg2_ud * Reg3_ud + Reg0_ud +
+ (Reg1_ud << 32);
+ Reg1_ud = (uint32_t)(resTemp >> 32);
+ Reg0_ud = (uint32_t)resTemp;
''', "llbit")
- buildMult4Inst ("umull", '''resTemp = Reg2.ud * Reg3.ud;
+ buildMult4Inst ("umull", '''resTemp = Reg2_ud * Reg3_ud;
Reg1 = (uint32_t)(resTemp >> 32);
Reg0 = (uint32_t)resTemp;
''', "llbit")
'''
for reg in range(rCount):
eWalkCode += '''
- srcReg1.regs[%(reg)d] = htog(FpOp1P%(reg)d.uw);
- srcReg2.regs[%(reg)d] = htog(FpOp2P%(reg)d.uw);
+ srcReg1.regs[%(reg)d] = htog(FpOp1P%(reg)d_uw);
+ srcReg2.regs[%(reg)d] = htog(FpOp2P%(reg)d_uw);
''' % { "reg" : reg }
if readDest:
eWalkCode += '''
- destReg.regs[%(reg)d] = htog(FpDestP%(reg)d.uw);
+ destReg.regs[%(reg)d] = htog(FpDestP%(reg)d_uw);
''' % { "reg" : reg }
readDestCode = ''
if readDest:
''' % { "op" : op, "readDest" : readDestCode }
for reg in range(rCount):
eWalkCode += '''
- FpDestP%(reg)d.uw = gtoh(destReg.regs[%(reg)d]);
+ FpDestP%(reg)d_uw = gtoh(destReg.regs[%(reg)d]);
''' % { "reg" : reg }
iop = InstObjParams(name, Name,
"RegRegRegOp",
for reg in range(rCount):
if toInt:
eWalkCode += '''
- FpDestP%(reg)d.uw = destRegs.regs[%(reg)d];
+ FpDestP%(reg)d_uw = destRegs.regs[%(reg)d];
''' % { "reg" : reg }
else:
eWalkCode += '''
''' % (src1Prefix, src2Prefix, destPrefix)
for reg in range(src1Cnt):
eWalkCode += '''
- srcReg1.regs[%(reg)d] = htog(FpOp1P%(reg)d.uw);
+ srcReg1.regs[%(reg)d] = htog(FpOp1P%(reg)d_uw);
''' % { "reg" : reg }
for reg in range(src2Cnt):
eWalkCode += '''
- srcReg2.regs[%(reg)d] = htog(FpOp2P%(reg)d.uw);
+ srcReg2.regs[%(reg)d] = htog(FpOp2P%(reg)d_uw);
''' % { "reg" : reg }
if readDest:
for reg in range(destCnt):
eWalkCode += '''
- destReg.regs[%(reg)d] = htog(FpDestP%(reg)d.uw);
+ destReg.regs[%(reg)d] = htog(FpDestP%(reg)d_uw);
''' % { "reg" : reg }
readDestCode = ''
if readDest:
"destPrefix" : destPrefix }
for reg in range(destCnt):
eWalkCode += '''
- FpDestP%(reg)d.uw = gtoh(destReg.regs[%(reg)d]);
+ FpDestP%(reg)d_uw = gtoh(destReg.regs[%(reg)d]);
''' % { "reg" : reg }
iop = InstObjParams(name, Name,
"RegRegRegOp",
'''
for reg in range(rCount):
eWalkCode += '''
- srcReg1.regs[%(reg)d] = htog(FpOp1P%(reg)d.uw);
- srcReg2.regs[%(reg)d] = htog(FpOp2P%(reg)d.uw);
+ srcReg1.regs[%(reg)d] = htog(FpOp1P%(reg)d_uw);
+ srcReg2.regs[%(reg)d] = htog(FpOp2P%(reg)d_uw);
''' % { "reg" : reg }
if readDest:
eWalkCode += '''
- destReg.regs[%(reg)d] = htog(FpDestP%(reg)d.uw);
+ destReg.regs[%(reg)d] = htog(FpDestP%(reg)d_uw);
''' % { "reg" : reg }
readDestCode = ''
if readDest:
''' % { "op" : op, "readDest" : readDestCode }
for reg in range(rCount):
eWalkCode += '''
- FpDestP%(reg)d.uw = gtoh(destReg.regs[%(reg)d]);
+ FpDestP%(reg)d_uw = gtoh(destReg.regs[%(reg)d]);
''' % { "reg" : reg }
iop = InstObjParams(name, Name,
"RegRegRegImmOp",
'''
for reg in range(rCount):
eWalkCode += '''
- srcReg1.regs[%(reg)d] = htog(FpOp1P%(reg)d.uw);
- srcReg2.regs[%(reg)d] = htog(FpOp2P%(reg)d.uw);;
+ srcReg1.regs[%(reg)d] = htog(FpOp1P%(reg)d_uw);
+                        srcReg2.regs[%(reg)d] = htog(FpOp2P%(reg)d_uw);
''' % { "reg" : reg }
if readDest:
for reg in range(2 * rCount):
eWalkCode += '''
- destReg.regs[%(reg)d] = htog(FpDestP%(reg)d.uw);
+ destReg.regs[%(reg)d] = htog(FpDestP%(reg)d_uw);
''' % { "reg" : reg }
readDestCode = ''
if readDest:
''' % { "op" : op, "readDest" : readDestCode }
for reg in range(2 * rCount):
eWalkCode += '''
- FpDestP%(reg)d.uw = gtoh(destReg.regs[%(reg)d]);
+ FpDestP%(reg)d_uw = gtoh(destReg.regs[%(reg)d]);
''' % { "reg" : reg }
iop = InstObjParams(name, Name,
"RegRegRegImmOp",
'''
for reg in range(rCount):
eWalkCode += '''
- srcRegs1.regs[%(reg)d] = htog(FpOp1P%(reg)d.uw);
+ srcRegs1.regs[%(reg)d] = htog(FpOp1P%(reg)d_uw);
''' % { "reg" : reg }
if readDest:
eWalkCode += '''
- destRegs.regs[%(reg)d] = htog(FpDestP%(reg)d.uw);
+ destRegs.regs[%(reg)d] = htog(FpDestP%(reg)d_uw);
''' % { "reg" : reg }
readDestCode = ''
if readDest:
"writeDest" : writeDestCode }
for reg in range(rCount):
eWalkCode += '''
- FpDestP%(reg)d.uw = gtoh(destRegs.regs[%(reg)d]);
+ FpDestP%(reg)d_uw = gtoh(destRegs.regs[%(reg)d]);
''' % { "reg" : reg }
iop = InstObjParams(name, Name,
"RegRegImmOp",
'''
for reg in range(4):
eWalkCode += '''
- srcReg1.regs[%(reg)d] = htog(FpOp1P%(reg)d.uw);
+ srcReg1.regs[%(reg)d] = htog(FpOp1P%(reg)d_uw);
''' % { "reg" : reg }
if readDest:
for reg in range(2):
eWalkCode += '''
- destReg.regs[%(reg)d] = htog(FpDestP%(reg)d.uw);
+ destReg.regs[%(reg)d] = htog(FpDestP%(reg)d_uw);
''' % { "reg" : reg }
readDestCode = ''
if readDest:
''' % { "op" : op, "readDest" : readDestCode }
for reg in range(2):
eWalkCode += '''
- FpDestP%(reg)d.uw = gtoh(destReg.regs[%(reg)d]);
+ FpDestP%(reg)d_uw = gtoh(destReg.regs[%(reg)d]);
''' % { "reg" : reg }
iop = InstObjParams(name, Name,
"RegRegImmOp",
'''
for reg in range(2):
eWalkCode += '''
- srcReg1.regs[%(reg)d] = htog(FpOp1P%(reg)d.uw);
+ srcReg1.regs[%(reg)d] = htog(FpOp1P%(reg)d_uw);
''' % { "reg" : reg }
if readDest:
for reg in range(4):
eWalkCode += '''
- destReg.regs[%(reg)d] = htog(FpDestP%(reg)d.uw);
+ destReg.regs[%(reg)d] = htog(FpDestP%(reg)d_uw);
''' % { "reg" : reg }
readDestCode = ''
if readDest:
''' % { "op" : op, "readDest" : readDestCode }
for reg in range(4):
eWalkCode += '''
- FpDestP%(reg)d.uw = gtoh(destReg.regs[%(reg)d]);
+ FpDestP%(reg)d_uw = gtoh(destReg.regs[%(reg)d]);
''' % { "reg" : reg }
iop = InstObjParams(name, Name,
"RegRegImmOp",
'''
for reg in range(rCount):
eWalkCode += '''
- srcReg1.regs[%(reg)d] = htog(FpOp1P%(reg)d.uw);
+ srcReg1.regs[%(reg)d] = htog(FpOp1P%(reg)d_uw);
''' % { "reg" : reg }
if readDest:
eWalkCode += '''
- destReg.regs[%(reg)d] = htog(FpDestP%(reg)d.uw);
+ destReg.regs[%(reg)d] = htog(FpDestP%(reg)d_uw);
''' % { "reg" : reg }
readDestCode = ''
if readDest:
''' % { "op" : op, "readDest" : readDestCode }
for reg in range(rCount):
eWalkCode += '''
- FpDestP%(reg)d.uw = gtoh(destReg.regs[%(reg)d]);
+ FpDestP%(reg)d_uw = gtoh(destReg.regs[%(reg)d]);
''' % { "reg" : reg }
iop = InstObjParams(name, Name,
"RegRegOp",
'''
for reg in range(rCount):
eWalkCode += '''
- srcReg1.regs[%(reg)d] = htog(FpOp1P%(reg)d.uw);
+ srcReg1.regs[%(reg)d] = htog(FpOp1P%(reg)d_uw);
''' % { "reg" : reg }
if readDest:
eWalkCode += '''
- destReg.regs[%(reg)d] = htog(FpDestP%(reg)d.uw);
+ destReg.regs[%(reg)d] = htog(FpDestP%(reg)d_uw);
''' % { "reg" : reg }
readDestCode = ''
if readDest:
''' % { "op" : op, "readDest" : readDestCode }
for reg in range(rCount):
eWalkCode += '''
- FpDestP%(reg)d.uw = gtoh(destReg.regs[%(reg)d]);
+ FpDestP%(reg)d_uw = gtoh(destReg.regs[%(reg)d]);
''' % { "reg" : reg }
iop = InstObjParams(name, Name,
"RegRegImmOp",
'''
for reg in range(rCount):
eWalkCode += '''
- srcReg1.regs[%(reg)d] = htog(FpOp1P%(reg)d.uw);
- destReg.regs[%(reg)d] = htog(FpDestP%(reg)d.uw);
+ srcReg1.regs[%(reg)d] = htog(FpOp1P%(reg)d_uw);
+ destReg.regs[%(reg)d] = htog(FpDestP%(reg)d_uw);
''' % { "reg" : reg }
if readDest:
eWalkCode += '''
eWalkCode += op
for reg in range(rCount):
eWalkCode += '''
- FpDestP%(reg)d.uw = gtoh(destReg.regs[%(reg)d]);
- FpOp1P%(reg)d.uw = gtoh(srcReg1.regs[%(reg)d]);
+ FpDestP%(reg)d_uw = gtoh(destReg.regs[%(reg)d]);
+ FpOp1P%(reg)d_uw = gtoh(srcReg1.regs[%(reg)d]);
''' % { "reg" : reg }
iop = InstObjParams(name, Name,
"RegRegOp",
for reg in range(rCount):
if toInt:
eWalkCode += '''
- FpDestP%(reg)d.uw = destRegs.regs[%(reg)d];
+ FpDestP%(reg)d_uw = destRegs.regs[%(reg)d];
''' % { "reg" : reg }
else:
eWalkCode += '''
'''
for reg in range(rCount):
eWalkCode += '''
- srcRegs.regs[%(reg)d] = htog(FpOp1P%(reg)d.uw);
+ srcRegs.regs[%(reg)d] = htog(FpOp1P%(reg)d_uw);
''' % { "reg" : reg }
if readDest:
eWalkCode += '''
- destReg.regs[%(reg)d] = htog(FpDestP%(reg)d.uw);
+ destReg.regs[%(reg)d] = htog(FpDestP%(reg)d_uw);
''' % { "reg" : reg }
readDestCode = ''
if readDest:
''' % { "op" : op, "readDest" : readDestCode }
for reg in range(rCount):
eWalkCode += '''
- FpDestP%(reg)d.uw = gtoh(destReg.regs[%(reg)d]);
+ FpDestP%(reg)d_uw = gtoh(destReg.regs[%(reg)d]);
''' % { "reg" : reg }
iop = InstObjParams(name, Name,
"RegRegOp",
'''
for reg in range(4):
eWalkCode += '''
- srcReg1.regs[%(reg)d] = htog(FpOp1P%(reg)d.uw);
+ srcReg1.regs[%(reg)d] = htog(FpOp1P%(reg)d_uw);
''' % { "reg" : reg }
if readDest:
for reg in range(2):
eWalkCode += '''
- destReg.regs[%(reg)d] = htog(FpDestP%(reg)d.uw);
+ destReg.regs[%(reg)d] = htog(FpDestP%(reg)d_uw);
''' % { "reg" : reg }
readDestCode = ''
if readDest:
''' % { "op" : op, "readDest" : readDestCode }
for reg in range(2):
eWalkCode += '''
- FpDestP%(reg)d.uw = gtoh(destReg.regs[%(reg)d]);
+ FpDestP%(reg)d_uw = gtoh(destReg.regs[%(reg)d]);
''' % { "reg" : reg }
iop = InstObjParams(name, Name,
"RegRegOp",
if readDest:
for reg in range(rCount):
eWalkCode += '''
- destReg.regs[%(reg)d] = htog(FpDestP%(reg)d.uw);
+ destReg.regs[%(reg)d] = htog(FpDestP%(reg)d_uw);
''' % { "reg" : reg }
readDestCode = ''
if readDest:
''' % { "op" : op, "readDest" : readDestCode }
for reg in range(rCount):
eWalkCode += '''
- FpDestP%(reg)d.uw = gtoh(destReg.regs[%(reg)d]);
+ FpDestP%(reg)d_uw = gtoh(destReg.regs[%(reg)d]);
''' % { "reg" : reg }
iop = InstObjParams(name, Name,
"RegImmOp",
'''
for reg in range(2):
eWalkCode += '''
- srcReg1.regs[%(reg)d] = htog(FpOp1P%(reg)d.uw);
+ srcReg1.regs[%(reg)d] = htog(FpOp1P%(reg)d_uw);
''' % { "reg" : reg }
if readDest:
for reg in range(4):
eWalkCode += '''
- destReg.regs[%(reg)d] = htog(FpDestP%(reg)d.uw);
+ destReg.regs[%(reg)d] = htog(FpDestP%(reg)d_uw);
''' % { "reg" : reg }
readDestCode = ''
if readDest:
''' % { "op" : op, "readDest" : readDestCode }
for reg in range(4):
eWalkCode += '''
- FpDestP%(reg)d.uw = gtoh(destReg.regs[%(reg)d]);
+ FpDestP%(reg)d_uw = gtoh(destReg.regs[%(reg)d]);
''' % { "reg" : reg }
iop = InstObjParams(name, Name,
"RegRegOp",
'''
for reg in range(rCount):
eWalkCode += '''
- FpDestP%(reg)d.uw = gtoh(destReg.regs[%(reg)d]);
+ FpDestP%(reg)d_uw = gtoh(destReg.regs[%(reg)d]);
''' % { "reg" : reg }
iop = InstObjParams(name, Name,
"RegRegOp",
'''
for reg in range(rCount):
eWalkCode += simdEnabledCheckCode + '''
- srcReg1.regs[%(reg)d] = htog(FpOp1P%(reg)d.uw);
- srcReg2.regs[%(reg)d] = htog(FpOp2P%(reg)d.uw);
+ srcReg1.regs[%(reg)d] = htog(FpOp1P%(reg)d_uw);
+ srcReg2.regs[%(reg)d] = htog(FpOp2P%(reg)d_uw);
''' % { "reg" : reg }
eWalkCode += op
for reg in range(rCount):
eWalkCode += '''
- FpDestP%(reg)d.uw = gtoh(destReg.regs[%(reg)d]);
+ FpDestP%(reg)d_uw = gtoh(destReg.regs[%(reg)d]);
''' % { "reg" : reg }
iop = InstObjParams(name, Name,
"RegRegRegImmOp",
const unsigned length = %(length)d;
const bool isVtbl = %(isVtbl)s;
- srcReg2.regs[0] = htog(FpOp2P0.uw);
- srcReg2.regs[1] = htog(FpOp2P1.uw);
+ srcReg2.regs[0] = htog(FpOp2P0_uw);
+ srcReg2.regs[1] = htog(FpOp2P1_uw);
- destReg.regs[0] = htog(FpDestP0.uw);
- destReg.regs[1] = htog(FpDestP1.uw);
+ destReg.regs[0] = htog(FpDestP0_uw);
+ destReg.regs[1] = htog(FpDestP1_uw);
''' % { "length" : length, "isVtbl" : isVtbl }
for reg in range(8):
if reg < length * 2:
- code += 'table.regs[%(reg)d] = htog(FpOp1P%(reg)d.uw);\n' % \
+ code += 'table.regs[%(reg)d] = htog(FpOp1P%(reg)d_uw);\n' % \
{ "reg" : reg }
else:
code += 'table.regs[%(reg)d] = 0;\n' % { "reg" : reg }
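        # e.g. with length == 2 the first four 32-bit table registers come
        # from FpOp1 and the remaining four are zeroed.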
}
}
- FpDestP0.uw = gtoh(destReg.regs[0]);
- FpDestP1.uw = gtoh(destReg.regs[1]);
+ FpDestP0_uw = gtoh(destReg.regs[0]);
+ FpDestP1_uw = gtoh(destReg.regs[1]);
'''
iop = InstObjParams(name, Name,
"RegRegRegOp",
wbDiff = 8
accCode = '''
CPSR cpsr = Cpsr;
- Mem.ud = (uint64_t)cSwap(LR.uw, cpsr.e) |
- ((uint64_t)cSwap(Spsr.uw, cpsr.e) << 32);
+ Mem_ud = (uint64_t)cSwap(LR_uw, cpsr.e) |
+ ((uint64_t)cSwap(Spsr_uw, cpsr.e) << 32);
'''
global header_output, decoder_output, exec_output
# Code that actually handles the access
if self.flavor == "fp":
- accCode = 'Mem%(suffix)s = cSwap(FpDest.uw, ((CPSR)Cpsr).e);'
+ accCode = 'Mem%(suffix)s = cSwap(FpDest_uw, ((CPSR)Cpsr).e);'
else:
accCode = \
'Mem%(suffix)s = cSwap(Dest%(suffix)s, ((CPSR)Cpsr).e);'
# Code that actually handles the access
if self.flavor == "fp":
accCode = '''
- uint64_t swappedMem = (uint64_t)FpDest.uw |
- ((uint64_t)FpDest2.uw << 32);
- Mem.ud = cSwap(swappedMem, ((CPSR)Cpsr).e);
+ uint64_t swappedMem = (uint64_t)FpDest_uw |
+ ((uint64_t)FpDest2_uw << 32);
+ Mem_ud = cSwap(swappedMem, ((CPSR)Cpsr).e);
'''
else:
accCode = '''
CPSR cpsr = Cpsr;
- Mem.ud = (uint64_t)cSwap(Dest.uw, cpsr.e) |
- ((uint64_t)cSwap(Dest2.uw, cpsr.e) << 32);
+ Mem_ud = (uint64_t)cSwap(Dest_uw, cpsr.e) |
+ ((uint64_t)cSwap(Dest2_uw, cpsr.e) << 32);
'''
self.codeBlobs["memacc_code"] = accCode
'''
SwapInst('swp', 'Swp', 'EA = Base;',
- swpPreAccCode + 'Mem = cSwap(Op1.uw, ((CPSR)Cpsr).e);',
+ swpPreAccCode + 'Mem = cSwap(Op1_uw, ((CPSR)Cpsr).e);',
'Dest = cSwap((uint32_t)memData, ((CPSR)Cpsr).e);',
['Request::MEM_SWAP',
'ArmISA::TLB::AlignWord',
['IsStoreConditional']).emit()
SwapInst('swpb', 'Swpb', 'EA = Base;',
- swpPreAccCode + 'Mem.ub = cSwap(Op1.ub, ((CPSR)Cpsr).e);',
- 'Dest.ub = cSwap((uint8_t)memData, ((CPSR)Cpsr).e);',
+ swpPreAccCode + 'Mem_ub = cSwap(Op1_ub, ((CPSR)Cpsr).e);',
+ 'Dest_ub = cSwap((uint8_t)memData, ((CPSR)Cpsr).e);',
['Request::MEM_SWAP',
'ArmISA::TLB::AlignByte',
'ArmISA::TLB::MustBeOne'],
# Define operand variables.
operands = user_dict.keys()
+ extensions = self.operandTypeMap.keys()
- operandsREString = (r'''
- (?<![\w\.]) # neg. lookbehind assertion: prevent partial matches
- ((%s)(?:\.(\w+))?) # match: operand with optional '.' then suffix
- (?![\w\.]) # neg. lookahead assertion: prevent partial matches
- '''
- % string.join(operands, '|'))
+ operandsREString = r'''
+ (?<!\w) # neg. lookbehind assertion: prevent partial matches
+ ((%s)(?:_(%s))?) # match: operand with optional '_' then suffix
+ (?!\w) # neg. lookahead assertion: prevent partial matches
+ ''' % (string.join(operands, '|'), string.join(extensions, '|'))
self.operandsRE = re.compile(operandsREString, re.MULTILINE|re.VERBOSE)
# Same as operandsREString, but extension is mandatory, and only two
# groups are returned (base and ext, not full name as above).
    # Used for substituting '_' for '.' to make C++ identifiers.
- operandsWithExtREString = (r'(?<![\w\.])(%s)\.(\w+)(?![\w\.])'
- % string.join(operands, '|'))
+ operandsWithExtREString = r'(?<!\w)(%s)_(%s)(?!\w)' \
+ % (string.join(operands, '|'), string.join(extensions, '|'))
self.operandsWithExtRE = \
re.compile(operandsWithExtREString, re.MULTILINE)
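# Illustration with hypothetical operand/extension tables (the names below
# are assumptions for the example, not the parser's real tables): with
# operands = ['Ra', 'Mem'] and extensions = ['uq', 'ub'], the new regex
# behaves like:
#
#     regex = re.compile(r'(?<!\w)((Ra|Mem)(?:_(uq|ub))?)(?!\w)',
#                        re.MULTILINE | re.VERBOSE)
#     regex.findall('Ra_uq = Mem_ub;')
#     # -> [('Ra_uq', 'Ra', 'uq'), ('Mem_ub', 'Mem', 'ub')]
#
# Suffixes now attach with '_', and a token with an unknown suffix such as
# 'Ra_zz' no longer matches at all, since '_' is a word character for the
# lookaround assertions.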
0x1: ssnop({{;}});
0x3: ehb({{;}});
}
- default: sll({{ Rd = Rt.uw << SA; }});
+ default: sll({{ Rd = Rt_uw << SA; }});
}
}
0x2: decode RS_SRL {
0x0:decode SRL {
- 0: srl({{ Rd = Rt.uw >> SA; }});
+ 0: srl({{ Rd = Rt_uw >> SA; }});
//Hardcoded assuming 32-bit ISA,
//probably need parameter here
1: rotr({{
- Rd = (Rt.uw << (32 - SA)) | (Rt.uw >> SA);
+ Rd = (Rt_uw << (32 - SA)) | (Rt_uw >> SA);
}});
}
}
}});
}
- 0x4: sllv({{ Rd = Rt.uw << Rs<4:0>; }});
+ 0x4: sllv({{ Rd = Rt_uw << Rs<4:0>; }});
0x6: decode SRLV {
- 0: srlv({{ Rd = Rt.uw >> Rs<4:0>; }});
+ 0: srlv({{ Rd = Rt_uw >> Rs<4:0>; }});
//Hardcoded assuming 32-bit ISA,
//probably need parameter here
1: rotrv({{
- Rd = (Rt.uw << (32 - Rs<4:0>)) |
- (Rt.uw >> Rs<4:0>);
+ Rd = (Rt_uw << (32 - Rs<4:0>)) |
+ (Rt_uw >> Rs<4:0>);
}});
}
0x3: decode FUNCTION_LO {
format HiLoRdSelValOp {
- 0x0: mult({{ val = Rs.sd * Rt.sd; }}, IntMultOp);
- 0x1: multu({{ val = Rs.ud * Rt.ud; }}, IntMultOp);
+ 0x0: mult({{ val = Rs_sd * Rt_sd; }}, IntMultOp);
+ 0x1: multu({{ val = Rs_ud * Rt_ud; }}, IntMultOp);
}
format HiLoOp {
0x2: div({{
- if (Rt.sd != 0) {
- HI0 = Rs.sd % Rt.sd;
- LO0 = Rs.sd / Rt.sd;
+ if (Rt_sd != 0) {
+ HI0 = Rs_sd % Rt_sd;
+ LO0 = Rs_sd / Rt_sd;
}
}}, IntDivOp);
0x3: divu({{
- if (Rt.ud != 0) {
- HI0 = Rs.ud % Rt.ud;
- LO0 = Rs.ud / Rt.ud;
+ if (Rt_ud != 0) {
+ HI0 = Rs_ud % Rt_ud;
+ LO0 = Rs_ud / Rt_ud;
}
}}, IntDivOp);
}
fault = new IntegerOverflowFault();
}
}});
- 0x1: addu({{ Rd.sw = Rs.sw + Rt.sw;}});
+ 0x1: addu({{ Rd_sw = Rs_sw + Rt_sw;}});
0x2: sub({{
IntReg result;
Rd = result = Rs - Rt;
fault = new IntegerOverflowFault();
}
}});
- 0x3: subu({{ Rd.sw = Rs.sw - Rt.sw; }});
+ 0x3: subu({{ Rd_sw = Rs_sw - Rt_sw; }});
0x4: and({{ Rd = Rs & Rt; }});
0x5: or({{ Rd = Rs | Rt; }});
0x6: xor({{ Rd = Rs ^ Rt; }});
0x5: decode HINT {
0x0: decode FUNCTION_LO {
format IntOp{
- 0x2: slt({{ Rd.sw = (Rs.sw < Rt.sw) ? 1 : 0 }});
- 0x3: sltu({{ Rd.uw = (Rs.uw < Rt.uw) ? 1 : 0 }});
+ 0x2: slt({{ Rd_sw = (Rs_sw < Rt_sw) ? 1 : 0 }});
+ 0x3: sltu({{ Rd_uw = (Rs_uw < Rt_uw) ? 1 : 0 }});
}
}
}
0x6: decode FUNCTION_LO {
format Trap {
- 0x0: tge({{ cond = (Rs.sw >= Rt.sw); }});
- 0x1: tgeu({{ cond = (Rs.uw >= Rt.uw); }});
- 0x2: tlt({{ cond = (Rs.sw < Rt.sw); }});
- 0x3: tltu({{ cond = (Rs.uw < Rt.uw); }});
- 0x4: teq({{ cond = (Rs.sw == Rt.sw); }});
- 0x6: tne({{ cond = (Rs.sw != Rt.sw); }});
+ 0x0: tge({{ cond = (Rs_sw >= Rt_sw); }});
+ 0x1: tgeu({{ cond = (Rs_uw >= Rt_uw); }});
+ 0x2: tlt({{ cond = (Rs_sw < Rt_sw); }});
+ 0x3: tltu({{ cond = (Rs_uw < Rt_uw); }});
+ 0x4: teq({{ cond = (Rs_sw == Rt_sw); }});
+ 0x6: tne({{ cond = (Rs_sw != Rt_sw); }});
}
}
}
0x1: decode REGIMM_HI {
0x0: decode REGIMM_LO {
format Branch {
- 0x0: bltz({{ cond = (Rs.sw < 0); }});
- 0x1: bgez({{ cond = (Rs.sw >= 0); }});
- 0x2: bltzl({{ cond = (Rs.sw < 0); }}, Likely);
- 0x3: bgezl({{ cond = (Rs.sw >= 0); }}, Likely);
+ 0x0: bltz({{ cond = (Rs_sw < 0); }});
+ 0x1: bgez({{ cond = (Rs_sw >= 0); }});
+ 0x2: bltzl({{ cond = (Rs_sw < 0); }}, Likely);
+ 0x3: bgezl({{ cond = (Rs_sw >= 0); }}, Likely);
}
}
0x1: decode REGIMM_LO {
format TrapImm {
- 0x0: tgei( {{ cond = (Rs.sw >= (int16_t)INTIMM); }});
+ 0x0: tgei( {{ cond = (Rs_sw >= (int16_t)INTIMM); }});
0x1: tgeiu({{
- cond = (Rs.uw >= (uint32_t)(int32_t)(int16_t)INTIMM);
+ cond = (Rs_uw >= (uint32_t)(int32_t)(int16_t)INTIMM);
}});
- 0x2: tlti( {{ cond = (Rs.sw < (int16_t)INTIMM); }});
+ 0x2: tlti( {{ cond = (Rs_sw < (int16_t)INTIMM); }});
0x3: tltiu({{
- cond = (Rs.uw < (uint32_t)(int32_t)(int16_t)INTIMM);
+ cond = (Rs_uw < (uint32_t)(int32_t)(int16_t)INTIMM);
}});
- 0x4: teqi( {{ cond = (Rs.sw == (int16_t)INTIMM); }});
- 0x6: tnei( {{ cond = (Rs.sw != (int16_t)INTIMM); }});
+ 0x4: teqi( {{ cond = (Rs_sw == (int16_t)INTIMM); }});
+ 0x6: tnei( {{ cond = (Rs_sw != (int16_t)INTIMM); }});
}
}
0x2: decode REGIMM_LO {
format Branch {
- 0x0: bltzal({{ cond = (Rs.sw < 0); }}, Link);
+ 0x0: bltzal({{ cond = (Rs_sw < 0); }}, Link);
0x1: decode RS {
0x0: bal ({{ cond = 1; }}, IsCall, Link);
- default: bgezal({{ cond = (Rs.sw >= 0); }}, Link);
+ default: bgezal({{ cond = (Rs_sw >= 0); }}, Link);
}
- 0x2: bltzall({{ cond = (Rs.sw < 0); }}, Link, Likely);
- 0x3: bgezall({{ cond = (Rs.sw >= 0); }}, Link, Likely);
+ 0x2: bltzall({{ cond = (Rs_sw < 0); }}, Link, Likely);
+ 0x3: bgezall({{ cond = (Rs_sw >= 0); }}, Link, Likely);
}
}
format Branch {
0x4: decode RS_RT {
0x0: b({{ cond = 1; }});
- default: beq({{ cond = (Rs.sw == Rt.sw); }});
+ default: beq({{ cond = (Rs_sw == Rt_sw); }});
}
- 0x5: bne({{ cond = (Rs.sw != Rt.sw); }});
- 0x6: blez({{ cond = (Rs.sw <= 0); }});
- 0x7: bgtz({{ cond = (Rs.sw > 0); }});
+ 0x5: bne({{ cond = (Rs_sw != Rt_sw); }});
+ 0x6: blez({{ cond = (Rs_sw <= 0); }});
+ 0x7: bgtz({{ cond = (Rs_sw > 0); }});
}
}
fault = new IntegerOverflowFault();
}
}});
- 0x1: addiu({{ Rt.sw = Rs.sw + imm; }});
- 0x2: slti({{ Rt.sw = (Rs.sw < imm) ? 1 : 0 }});
- 0x3: sltiu({{ Rt.uw = (Rs.uw < (uint32_t)sextImm) ? 1 : 0;}});
- 0x4: andi({{ Rt.sw = Rs.sw & zextImm; }});
- 0x5: ori({{ Rt.sw = Rs.sw | zextImm; }});
- 0x6: xori({{ Rt.sw = Rs.sw ^ zextImm; }});
+ 0x1: addiu({{ Rt_sw = Rs_sw + imm; }});
+ 0x2: slti({{ Rt_sw = (Rs_sw < imm) ? 1 : 0 }});
+ 0x3: sltiu({{ Rt_uw = (Rs_uw < (uint32_t)sextImm) ? 1 : 0;}});
+ 0x4: andi({{ Rt_sw = Rs_sw & zextImm; }});
+ 0x5: ori({{ Rt_sw = Rs_sw | zextImm; }});
+ 0x6: xori({{ Rt_sw = Rs_sw ^ zextImm; }});
0x7: decode RS {
0x0: lui({{ Rt = imm << 16; }});
uint32_t data;
switch (RD) {
case 25:
- data = (Rt.uw<7:1> << 25) | // move 31-25
+ data = (Rt_uw<7:1> << 25) | // move 31-25
(FCSR & 0x01000000) | // bit 24
(FCSR & 0x004FFFFF); // bit 22-0
break;
case 26:
data = (FCSR & 0xFFFC0000) | // move 31-18
- Rt.uw<17:12> << 12 | // bit 17-12
+ Rt_uw<17:12> << 12 | // bit 17-12
(FCSR & 0x00000F80) << 7 | // bit 11-7
- Rt.uw<6:2> << 2 | // bit 6-2
+ Rt_uw<6:2> << 2 | // bit 6-2
(FCSR & 0x00000002); // bit 1...0
break;
case 28:
data = (FCSR & 0xFE000000) | // move 31-25
- Rt.uw<2:2> << 24 | // bit 24
+ Rt_uw<2:2> << 24 | // bit 24
(FCSR & 0x00FFF000) << 23 | // bit 23-12
- Rt.uw<11:7> << 7 | // bit 24
+                           Rt_uw<11:7> << 7 |          // bit 11-7
(FCSR & 0x000007E) |
- Rt.uw<1:0>; // bit 22-0
+                           Rt_uw<1:0>;                 // bit 1-0
break;
case 31:
- data = Rt.uw;
+ data = Rt_uw;
break;
default:
panic("FP Control Value (%d) "
0x0: decode RS_HI {
0x0: decode RS_LO {
format CP1Control {
- 0x0: mfc1 ({{ Rt.uw = Fs.uw; }});
+ 0x0: mfc1 ({{ Rt_uw = Fs_uw; }});
0x2: cfc1({{
switch (FS) {
}
}});
- 0x3: mfhc1({{ Rt.uw = Fs.ud<63:32>; }});
+ 0x3: mfhc1({{ Rt_uw = Fs_ud<63:32>; }});
- 0x4: mtc1({{ Fs.uw = Rt.uw; }});
+ 0x4: mtc1({{ Fs_uw = Rt_uw; }});
0x6: ctc1({{
switch (FS) {
case 25:
- FCSR = (Rt.uw<7:1> << 25) | // move 31-25
+ FCSR = (Rt_uw<7:1> << 25) | // move 31-25
(FCSR & 0x01000000) | // bit 24
(FCSR & 0x004FFFFF); // bit 22-0
break;
case 26:
FCSR = (FCSR & 0xFFFC0000) | // move 31-18
- Rt.uw<17:12> << 12 | // bit 17-12
+ Rt_uw<17:12> << 12 | // bit 17-12
(FCSR & 0x00000F80) << 7 | // bit 11-7
- Rt.uw<6:2> << 2 | // bit 6-2
+ Rt_uw<6:2> << 2 | // bit 6-2
(FCSR & 0x00000002); // bit 1-0
break;
case 28:
FCSR = (FCSR & 0xFE000000) | // move 31-25
- Rt.uw<2:2> << 24 | // bit 24
+ Rt_uw<2:2> << 24 | // bit 24
(FCSR & 0x00FFF000) << 23 | // bit 23-12
- Rt.uw<11:7> << 7 | // bit 24
+                                Rt_uw<11:7> << 7 |     // bit 11-7
(FCSR & 0x000007E) |
- Rt.uw<1:0>; // bit 22-0
+                                Rt_uw<1:0>;            // bit 1-0
break;
case 31:
- FCSR = Rt.uw;
+ FCSR = Rt_uw;
break;
default:
}});
0x7: mthc1({{
- uint64_t fs_hi = Rt.uw;
- uint64_t fs_lo = Fs.ud & 0x0FFFFFFFF;
- Fs.ud = (fs_hi << 32) | fs_lo;
+ uint64_t fs_hi = Rt_uw;
+ uint64_t fs_lo = Fs_ud & 0x0FFFFFFFF;
+ Fs_ud = (fs_hi << 32) | fs_lo;
}});
}
0x0: decode FUNCTION_HI {
0x0: decode FUNCTION_LO {
format FloatOp {
- 0x0: add_s({{ Fd.sf = Fs.sf + Ft.sf; }});
- 0x1: sub_s({{ Fd.sf = Fs.sf - Ft.sf; }});
- 0x2: mul_s({{ Fd.sf = Fs.sf * Ft.sf; }});
- 0x3: div_s({{ Fd.sf = Fs.sf / Ft.sf; }});
- 0x4: sqrt_s({{ Fd.sf = sqrt(Fs.sf); }});
- 0x5: abs_s({{ Fd.sf = fabs(Fs.sf); }});
- 0x7: neg_s({{ Fd.sf = -Fs.sf; }});
+ 0x0: add_s({{ Fd_sf = Fs_sf + Ft_sf; }});
+ 0x1: sub_s({{ Fd_sf = Fs_sf - Ft_sf; }});
+ 0x2: mul_s({{ Fd_sf = Fs_sf * Ft_sf; }});
+ 0x3: div_s({{ Fd_sf = Fs_sf / Ft_sf; }});
+ 0x4: sqrt_s({{ Fd_sf = sqrt(Fs_sf); }});
+ 0x5: abs_s({{ Fd_sf = fabs(Fs_sf); }});
+ 0x7: neg_s({{ Fd_sf = -Fs_sf; }});
}
- 0x6: BasicOp::mov_s({{ Fd.sf = Fs.sf; }});
+ 0x6: BasicOp::mov_s({{ Fd_sf = Fs_sf; }});
}
0x1: decode FUNCTION_LO {
format FloatConvertOp {
- 0x0: round_l_s({{ val = Fs.sf; }},
+ 0x0: round_l_s({{ val = Fs_sf; }},
ToLong, Round);
- 0x1: trunc_l_s({{ val = Fs.sf; }},
+ 0x1: trunc_l_s({{ val = Fs_sf; }},
ToLong, Trunc);
- 0x2: ceil_l_s({{ val = Fs.sf;}},
+ 0x2: ceil_l_s({{ val = Fs_sf;}},
ToLong, Ceil);
- 0x3: floor_l_s({{ val = Fs.sf; }},
+ 0x3: floor_l_s({{ val = Fs_sf; }},
ToLong, Floor);
- 0x4: round_w_s({{ val = Fs.sf; }},
+ 0x4: round_w_s({{ val = Fs_sf; }},
ToWord, Round);
- 0x5: trunc_w_s({{ val = Fs.sf; }},
+ 0x5: trunc_w_s({{ val = Fs_sf; }},
ToWord, Trunc);
- 0x6: ceil_w_s({{ val = Fs.sf; }},
+ 0x6: ceil_w_s({{ val = Fs_sf; }},
ToWord, Ceil);
- 0x7: floor_w_s({{ val = Fs.sf; }},
+ 0x7: floor_w_s({{ val = Fs_sf; }},
ToWord, Floor);
}
}
0x4: decode FUNCTION_LO {
format FloatConvertOp {
- 0x1: cvt_d_s({{ val = Fs.sf; }}, ToDouble);
- 0x4: cvt_w_s({{ val = Fs.sf; }}, ToWord);
- 0x5: cvt_l_s({{ val = Fs.sf; }}, ToLong);
+ 0x1: cvt_d_s({{ val = Fs_sf; }}, ToDouble);
+ 0x4: cvt_w_s({{ val = Fs_sf; }}, ToWord);
+ 0x5: cvt_l_s({{ val = Fs_sf; }}, ToLong);
}
0x6: FloatOp::cvt_ps_s({{
- Fd.ud = (uint64_t) Fs.uw << 32 |
- (uint64_t) Ft.uw;
+ Fd_ud = (uint64_t) Fs_uw << 32 |
+ (uint64_t) Ft_uw;
}});
format CP1Unimpl {
default: unknown();
SinglePrecision, UnorderedFalse);
0x1: c_un_s({{ cond = 0; }},
SinglePrecision, UnorderedTrue);
- 0x2: c_eq_s({{ cond = (Fs.sf == Ft.sf); }},
+ 0x2: c_eq_s({{ cond = (Fs_sf == Ft_sf); }},
UnorderedFalse);
- 0x3: c_ueq_s({{ cond = (Fs.sf == Ft.sf); }},
+ 0x3: c_ueq_s({{ cond = (Fs_sf == Ft_sf); }},
UnorderedTrue);
- 0x4: c_olt_s({{ cond = (Fs.sf < Ft.sf); }},
+ 0x4: c_olt_s({{ cond = (Fs_sf < Ft_sf); }},
UnorderedFalse);
- 0x5: c_ult_s({{ cond = (Fs.sf < Ft.sf); }},
+ 0x5: c_ult_s({{ cond = (Fs_sf < Ft_sf); }},
UnorderedTrue);
- 0x6: c_ole_s({{ cond = (Fs.sf <= Ft.sf); }},
+ 0x6: c_ole_s({{ cond = (Fs_sf <= Ft_sf); }},
UnorderedFalse);
- 0x7: c_ule_s({{ cond = (Fs.sf <= Ft.sf); }},
+ 0x7: c_ule_s({{ cond = (Fs_sf <= Ft_sf); }},
UnorderedTrue);
}
}
UnorderedFalse, QnanException);
0x1: c_ngle_s({{ cond = 0; }}, SinglePrecision,
UnorderedTrue, QnanException);
- 0x2: c_seq_s({{ cond = (Fs.sf == Ft.sf); }},
+ 0x2: c_seq_s({{ cond = (Fs_sf == Ft_sf); }},
UnorderedFalse, QnanException);
- 0x3: c_ngl_s({{ cond = (Fs.sf == Ft.sf); }},
+ 0x3: c_ngl_s({{ cond = (Fs_sf == Ft_sf); }},
UnorderedTrue, QnanException);
- 0x4: c_lt_s({{ cond = (Fs.sf < Ft.sf); }},
+ 0x4: c_lt_s({{ cond = (Fs_sf < Ft_sf); }},
UnorderedFalse, QnanException);
- 0x5: c_nge_s({{ cond = (Fs.sf < Ft.sf); }},
+ 0x5: c_nge_s({{ cond = (Fs_sf < Ft_sf); }},
UnorderedTrue, QnanException);
- 0x6: c_le_s({{ cond = (Fs.sf <= Ft.sf); }},
+ 0x6: c_le_s({{ cond = (Fs_sf <= Ft_sf); }},
UnorderedFalse, QnanException);
- 0x7: c_ngt_s({{ cond = (Fs.sf <= Ft.sf); }},
+ 0x7: c_ngt_s({{ cond = (Fs_sf <= Ft_sf); }},
UnorderedTrue, QnanException);
}
}
0x1: decode FUNCTION_HI {
0x0: decode FUNCTION_LO {
format FloatOp {
- 0x0: add_d({{ Fd.df = Fs.df + Ft.df; }});
- 0x1: sub_d({{ Fd.df = Fs.df - Ft.df; }});
- 0x2: mul_d({{ Fd.df = Fs.df * Ft.df; }});
- 0x3: div_d({{ Fd.df = Fs.df / Ft.df; }});
- 0x4: sqrt_d({{ Fd.df = sqrt(Fs.df); }});
- 0x5: abs_d({{ Fd.df = fabs(Fs.df); }});
- 0x7: neg_d({{ Fd.df = -1 * Fs.df; }});
+ 0x0: add_d({{ Fd_df = Fs_df + Ft_df; }});
+ 0x1: sub_d({{ Fd_df = Fs_df - Ft_df; }});
+ 0x2: mul_d({{ Fd_df = Fs_df * Ft_df; }});
+ 0x3: div_d({{ Fd_df = Fs_df / Ft_df; }});
+ 0x4: sqrt_d({{ Fd_df = sqrt(Fs_df); }});
+ 0x5: abs_d({{ Fd_df = fabs(Fs_df); }});
+ 0x7: neg_d({{ Fd_df = -1 * Fs_df; }});
}
- 0x6: BasicOp::mov_d({{ Fd.df = Fs.df; }});
+ 0x6: BasicOp::mov_d({{ Fd_df = Fs_df; }});
}
0x1: decode FUNCTION_LO {
format FloatConvertOp {
- 0x0: round_l_d({{ val = Fs.df; }},
+ 0x0: round_l_d({{ val = Fs_df; }},
ToLong, Round);
- 0x1: trunc_l_d({{ val = Fs.df; }},
+ 0x1: trunc_l_d({{ val = Fs_df; }},
ToLong, Trunc);
- 0x2: ceil_l_d({{ val = Fs.df; }},
+ 0x2: ceil_l_d({{ val = Fs_df; }},
ToLong, Ceil);
- 0x3: floor_l_d({{ val = Fs.df; }},
+ 0x3: floor_l_d({{ val = Fs_df; }},
ToLong, Floor);
- 0x4: round_w_d({{ val = Fs.df; }},
+ 0x4: round_w_d({{ val = Fs_df; }},
ToWord, Round);
- 0x5: trunc_w_d({{ val = Fs.df; }},
+ 0x5: trunc_w_d({{ val = Fs_df; }},
ToWord, Trunc);
- 0x6: ceil_w_d({{ val = Fs.df; }},
+ 0x6: ceil_w_d({{ val = Fs_df; }},
ToWord, Ceil);
- 0x7: floor_w_d({{ val = Fs.df; }},
+ 0x7: floor_w_d({{ val = Fs_df; }},
ToWord, Floor);
}
}
0x1: decode MOVCF {
format BasicOp {
0x0: movf_d({{
- Fd.df = (getCondCode(FCSR,CC) == 0) ?
- Fs.df : Fd.df;
+ Fd_df = (getCondCode(FCSR,CC) == 0) ?
+ Fs_df : Fd_df;
}});
0x1: movt_d({{
- Fd.df = (getCondCode(FCSR,CC) == 1) ?
- Fs.df : Fd.df;
+ Fd_df = (getCondCode(FCSR,CC) == 1) ?
+ Fs_df : Fd_df;
}});
}
}
format BasicOp {
0x2: movz_d({{
- Fd.df = (Rt == 0) ? Fs.df : Fd.df;
+ Fd_df = (Rt == 0) ? Fs_df : Fd_df;
}});
0x3: movn_d({{
- Fd.df = (Rt != 0) ? Fs.df : Fd.df;
+ Fd_df = (Rt != 0) ? Fs_df : Fd_df;
}});
}
format FloatOp {
- 0x5: recip_d({{ Fd.df = 1 / Fs.df; }});
- 0x6: rsqrt_d({{ Fd.df = 1 / sqrt(Fs.df); }});
+ 0x5: recip_d({{ Fd_df = 1 / Fs_df; }});
+ 0x6: rsqrt_d({{ Fd_df = 1 / sqrt(Fs_df); }});
}
format CP1Unimpl {
default: unknown();
}
0x4: decode FUNCTION_LO {
format FloatConvertOp {
- 0x0: cvt_s_d({{ val = Fs.df; }}, ToSingle);
- 0x4: cvt_w_d({{ val = Fs.df; }}, ToWord);
- 0x5: cvt_l_d({{ val = Fs.df; }}, ToLong);
+ 0x0: cvt_s_d({{ val = Fs_df; }}, ToSingle);
+ 0x4: cvt_w_d({{ val = Fs_df; }}, ToWord);
+ 0x5: cvt_l_d({{ val = Fs_df; }}, ToLong);
}
default: CP1Unimpl::unknown();
}
DoublePrecision, UnorderedFalse);
0x1: c_un_d({{ cond = 0; }},
DoublePrecision, UnorderedTrue);
- 0x2: c_eq_d({{ cond = (Fs.df == Ft.df); }},
+ 0x2: c_eq_d({{ cond = (Fs_df == Ft_df); }},
UnorderedFalse);
- 0x3: c_ueq_d({{ cond = (Fs.df == Ft.df); }},
+ 0x3: c_ueq_d({{ cond = (Fs_df == Ft_df); }},
UnorderedTrue);
- 0x4: c_olt_d({{ cond = (Fs.df < Ft.df); }},
+ 0x4: c_olt_d({{ cond = (Fs_df < Ft_df); }},
UnorderedFalse);
- 0x5: c_ult_d({{ cond = (Fs.df < Ft.df); }},
+ 0x5: c_ult_d({{ cond = (Fs_df < Ft_df); }},
UnorderedTrue);
- 0x6: c_ole_d({{ cond = (Fs.df <= Ft.df); }},
+ 0x6: c_ole_d({{ cond = (Fs_df <= Ft_df); }},
UnorderedFalse);
- 0x7: c_ule_d({{ cond = (Fs.df <= Ft.df); }},
+ 0x7: c_ule_d({{ cond = (Fs_df <= Ft_df); }},
UnorderedTrue);
}
}
UnorderedFalse, QnanException);
0x1: c_ngle_d({{ cond = 0; }}, DoublePrecision,
UnorderedTrue, QnanException);
- 0x2: c_seq_d({{ cond = (Fs.df == Ft.df); }},
+ 0x2: c_seq_d({{ cond = (Fs_df == Ft_df); }},
UnorderedFalse, QnanException);
- 0x3: c_ngl_d({{ cond = (Fs.df == Ft.df); }},
+ 0x3: c_ngl_d({{ cond = (Fs_df == Ft_df); }},
UnorderedTrue, QnanException);
- 0x4: c_lt_d({{ cond = (Fs.df < Ft.df); }},
+ 0x4: c_lt_d({{ cond = (Fs_df < Ft_df); }},
UnorderedFalse, QnanException);
- 0x5: c_nge_d({{ cond = (Fs.df < Ft.df); }},
+ 0x5: c_nge_d({{ cond = (Fs_df < Ft_df); }},
UnorderedTrue, QnanException);
- 0x6: c_le_d({{ cond = (Fs.df <= Ft.df); }},
+ 0x6: c_le_d({{ cond = (Fs_df <= Ft_df); }},
UnorderedFalse, QnanException);
- 0x7: c_ngt_d({{ cond = (Fs.df <= Ft.df); }},
+ 0x7: c_ngt_d({{ cond = (Fs_df <= Ft_df); }},
UnorderedTrue, QnanException);
}
}
//Field When rs=W
0x4: decode FUNCTION {
format FloatConvertOp {
- 0x20: cvt_s_w({{ val = Fs.uw; }}, ToSingle);
- 0x21: cvt_d_w({{ val = Fs.uw; }}, ToDouble);
+ 0x20: cvt_s_w({{ val = Fs_uw; }}, ToSingle);
+ 0x21: cvt_d_w({{ val = Fs_uw; }}, ToDouble);
0x26: CP1Unimpl::cvt_ps_w();
}
default: CP1Unimpl::unknown();
//floating point operations are enabled."
0x5: decode FUNCTION_HI {
format FloatConvertOp {
- 0x20: cvt_s_l({{ val = Fs.ud; }}, ToSingle);
- 0x21: cvt_d_l({{ val = Fs.ud; }}, ToDouble);
+ 0x20: cvt_s_l({{ val = Fs_ud; }}, ToSingle);
+ 0x21: cvt_d_l({{ val = Fs_ud; }}, ToDouble);
0x26: CP1Unimpl::cvt_ps_l();
}
default: CP1Unimpl::unknown();
0x0: decode FUNCTION_LO {
format Float64Op {
0x0: add_ps({{
- Fd1.sf = Fs1.sf + Ft2.sf;
- Fd2.sf = Fs2.sf + Ft2.sf;
+                        Fd1_sf = Fs1_sf + Ft1_sf;
+ Fd2_sf = Fs2_sf + Ft2_sf;
}});
0x1: sub_ps({{
- Fd1.sf = Fs1.sf - Ft2.sf;
- Fd2.sf = Fs2.sf - Ft2.sf;
+                        Fd1_sf = Fs1_sf - Ft1_sf;
+ Fd2_sf = Fs2_sf - Ft2_sf;
}});
0x2: mul_ps({{
- Fd1.sf = Fs1.sf * Ft2.sf;
- Fd2.sf = Fs2.sf * Ft2.sf;
+                        Fd1_sf = Fs1_sf * Ft1_sf;
+ Fd2_sf = Fs2_sf * Ft2_sf;
}});
0x5: abs_ps({{
- Fd1.sf = fabs(Fs1.sf);
- Fd2.sf = fabs(Fs2.sf);
+ Fd1_sf = fabs(Fs1_sf);
+ Fd2_sf = fabs(Fs2_sf);
}});
0x6: mov_ps({{
- Fd1.sf = Fs1.sf;
- Fd2.sf = Fs2.sf;
+ Fd1_sf = Fs1_sf;
+ Fd2_sf = Fs2_sf;
}});
0x7: neg_ps({{
- Fd1.sf = -(Fs1.sf);
- Fd2.sf = -(Fs2.sf);
+ Fd1_sf = -(Fs1_sf);
+ Fd2_sf = -(Fs2_sf);
}});
default: CP1Unimpl::unknown();
}
}
0x3: CP1Unimpl::unknown();
0x4: decode FUNCTION_LO {
- 0x0: FloatOp::cvt_s_pu({{ Fd.sf = Fs2.sf; }});
+ 0x0: FloatOp::cvt_s_pu({{ Fd_sf = Fs2_sf; }});
default: CP1Unimpl::unknown();
}
0x5: decode FUNCTION_LO {
- 0x0: FloatOp::cvt_s_pl({{ Fd.sf = Fs1.sf; }});
+ 0x0: FloatOp::cvt_s_pl({{ Fd_sf = Fs1_sf; }});
format Float64Op {
0x4: pll({{
- Fd.ud = (uint64_t)Fs1.uw << 32 | Ft1.uw;
+ Fd_ud = (uint64_t)Fs1_uw << 32 | Ft1_uw;
}});
0x5: plu({{
- Fd.ud = (uint64_t)Fs1.uw << 32 | Ft2.uw;
+ Fd_ud = (uint64_t)Fs1_uw << 32 | Ft2_uw;
}});
0x6: pul({{
- Fd.ud = (uint64_t)Fs2.uw << 32 | Ft1.uw;
+ Fd_ud = (uint64_t)Fs2_uw << 32 | Ft1_uw;
}});
0x7: puu({{
- Fd.ud = (uint64_t)Fs2.uw << 32 | Ft2.uw;
+ Fd_ud = (uint64_t)Fs2_uw << 32 | Ft2_uw;
}});
}
default: CP1Unimpl::unknown();
UnorderedFalse);
0x1: c_un_ps({{ cond1 = 0; }}, {{ cond2 = 0; }},
UnorderedTrue);
- 0x2: c_eq_ps({{ cond1 = (Fs1.sf == Ft1.sf); }},
- {{ cond2 = (Fs2.sf == Ft2.sf); }},
+ 0x2: c_eq_ps({{ cond1 = (Fs1_sf == Ft1_sf); }},
+ {{ cond2 = (Fs2_sf == Ft2_sf); }},
UnorderedFalse);
- 0x3: c_ueq_ps({{ cond1 = (Fs1.sf == Ft1.sf); }},
- {{ cond2 = (Fs2.sf == Ft2.sf); }},
+ 0x3: c_ueq_ps({{ cond1 = (Fs1_sf == Ft1_sf); }},
+ {{ cond2 = (Fs2_sf == Ft2_sf); }},
UnorderedTrue);
- 0x4: c_olt_ps({{ cond1 = (Fs1.sf < Ft1.sf); }},
- {{ cond2 = (Fs2.sf < Ft2.sf); }},
+ 0x4: c_olt_ps({{ cond1 = (Fs1_sf < Ft1_sf); }},
+ {{ cond2 = (Fs2_sf < Ft2_sf); }},
UnorderedFalse);
- 0x5: c_ult_ps({{ cond1 = (Fs.sf < Ft.sf); }},
- {{ cond2 = (Fs2.sf < Ft2.sf); }},
+                0x5: c_ult_ps({{ cond1 = (Fs1_sf < Ft1_sf); }},
+ {{ cond2 = (Fs2_sf < Ft2_sf); }},
UnorderedTrue);
- 0x6: c_ole_ps({{ cond1 = (Fs.sf <= Ft.sf); }},
- {{ cond2 = (Fs2.sf <= Ft2.sf); }},
+                0x6: c_ole_ps({{ cond1 = (Fs1_sf <= Ft1_sf); }},
+ {{ cond2 = (Fs2_sf <= Ft2_sf); }},
UnorderedFalse);
- 0x7: c_ule_ps({{ cond1 = (Fs1.sf <= Ft1.sf); }},
- {{ cond2 = (Fs2.sf <= Ft2.sf); }},
+ 0x7: c_ule_ps({{ cond1 = (Fs1_sf <= Ft1_sf); }},
+ {{ cond2 = (Fs2_sf <= Ft2_sf); }},
UnorderedTrue);
}
}
0x1: c_ngle_ps({{ cond1 = 0; }},
{{ cond2 = 0; }},
UnorderedTrue, QnanException);
- 0x2: c_seq_ps({{ cond1 = (Fs1.sf == Ft1.sf); }},
- {{ cond2 = (Fs2.sf == Ft2.sf); }},
+ 0x2: c_seq_ps({{ cond1 = (Fs1_sf == Ft1_sf); }},
+ {{ cond2 = (Fs2_sf == Ft2_sf); }},
UnorderedFalse, QnanException);
- 0x3: c_ngl_ps({{ cond1 = (Fs1.sf == Ft1.sf); }},
- {{ cond2 = (Fs2.sf == Ft2.sf); }},
+ 0x3: c_ngl_ps({{ cond1 = (Fs1_sf == Ft1_sf); }},
+ {{ cond2 = (Fs2_sf == Ft2_sf); }},
UnorderedTrue, QnanException);
- 0x4: c_lt_ps({{ cond1 = (Fs1.sf < Ft1.sf); }},
- {{ cond2 = (Fs2.sf < Ft2.sf); }},
+ 0x4: c_lt_ps({{ cond1 = (Fs1_sf < Ft1_sf); }},
+ {{ cond2 = (Fs2_sf < Ft2_sf); }},
UnorderedFalse, QnanException);
- 0x5: c_nge_ps({{ cond1 = (Fs1.sf < Ft1.sf); }},
- {{ cond2 = (Fs2.sf < Ft2.sf); }},
+ 0x5: c_nge_ps({{ cond1 = (Fs1_sf < Ft1_sf); }},
+ {{ cond2 = (Fs2_sf < Ft2_sf); }},
UnorderedTrue, QnanException);
- 0x6: c_le_ps({{ cond1 = (Fs1.sf <= Ft1.sf); }},
- {{ cond2 = (Fs2.sf <= Ft2.sf); }},
+ 0x6: c_le_ps({{ cond1 = (Fs1_sf <= Ft1_sf); }},
+ {{ cond2 = (Fs2_sf <= Ft2_sf); }},
UnorderedFalse, QnanException);
- 0x7: c_ngt_ps({{ cond1 = (Fs1.sf <= Ft1.sf); }},
- {{ cond2 = (Fs2.sf <= Ft2.sf); }},
+ 0x7: c_ngt_ps({{ cond1 = (Fs1_sf <= Ft1_sf); }},
+ {{ cond2 = (Fs2_sf <= Ft2_sf); }},
UnorderedTrue, QnanException);
}
}
0x3: decode FUNCTION_HI {
0x0: decode FUNCTION_LO {
format LoadIndexedMemory {
- 0x0: lwxc1({{ Fd.uw = Mem.uw; }});
- 0x1: ldxc1({{ Fd.ud = Mem.ud; }});
- 0x5: luxc1({{ Fd.ud = Mem.ud; }},
+ 0x0: lwxc1({{ Fd_uw = Mem_uw; }});
+ 0x1: ldxc1({{ Fd_ud = Mem_ud; }});
+ 0x5: luxc1({{ Fd_ud = Mem_ud; }},
{{ EA = (Rs + Rt) & ~7; }});
}
}
0x1: decode FUNCTION_LO {
format StoreIndexedMemory {
- 0x0: swxc1({{ Mem.uw = Fs.uw; }});
- 0x1: sdxc1({{ Mem.ud = Fs.ud; }});
- 0x5: suxc1({{ Mem.ud = Fs.ud; }},
+ 0x0: swxc1({{ Mem_uw = Fs_uw; }});
+ 0x1: sdxc1({{ Mem_ud = Fs_ud; }});
+ 0x5: suxc1({{ Mem_ud = Fs_ud; }},
{{ EA = (Rs + Rt) & ~7; }});
}
0x7: Prefetch::prefx({{ EA = Rs + Rt; }});
0x3: decode FUNCTION_LO {
0x6: Float64Op::alnv_ps({{
if (Rs<2:0> == 0) {
- Fd.ud = Fs.ud;
+ Fd_ud = Fs_ud;
} else if (Rs<2:0> == 4) {
if (GuestByteOrder == BigEndianByteOrder)
- Fd.ud = Fs.ud<31:0> << 32 | Ft.ud<63:32>;
+ Fd_ud = Fs_ud<31:0> << 32 | Ft_ud<63:32>;
else
- Fd.ud = Ft.ud<31:0> << 32 | Fs.ud<63:32>;
+ Fd_ud = Ft_ud<31:0> << 32 | Fs_ud<63:32>;
} else {
- Fd.ud = Fd.ud;
+ Fd_ud = Fd_ud;
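                            // Any other alignment in Rs<2:0> is
                            // UNPREDICTABLE; keep the old destination.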
}
}});
}
format FloatAccOp {
0x4: decode FUNCTION_LO {
- 0x0: madd_s({{ Fd.sf = (Fs.sf * Ft.sf) + Fr.sf; }});
- 0x1: madd_d({{ Fd.df = (Fs.df * Ft.df) + Fr.df; }});
+ 0x0: madd_s({{ Fd_sf = (Fs_sf * Ft_sf) + Fr_sf; }});
+ 0x1: madd_d({{ Fd_df = (Fs_df * Ft_df) + Fr_df; }});
0x6: madd_ps({{
- Fd1.sf = (Fs1.df * Ft1.df) + Fr1.df;
- Fd2.sf = (Fs2.df * Ft2.df) + Fr2.df;
+                        Fd1_sf = (Fs1_sf * Ft1_sf) + Fr1_sf;
+                        Fd2_sf = (Fs2_sf * Ft2_sf) + Fr2_sf;
}});
}
0x5: decode FUNCTION_LO {
- 0x0: msub_s({{ Fd.sf = (Fs.sf * Ft.sf) - Fr.sf; }});
- 0x1: msub_d({{ Fd.df = (Fs.df * Ft.df) - Fr.df; }});
+ 0x0: msub_s({{ Fd_sf = (Fs_sf * Ft_sf) - Fr_sf; }});
+ 0x1: msub_d({{ Fd_df = (Fs_df * Ft_df) - Fr_df; }});
0x6: msub_ps({{
- Fd1.sf = (Fs1.df * Ft1.df) - Fr1.df;
- Fd2.sf = (Fs2.df * Ft2.df) - Fr2.df;
+                        Fd1_sf = (Fs1_sf * Ft1_sf) - Fr1_sf;
+                        Fd2_sf = (Fs2_sf * Ft2_sf) - Fr2_sf;
}});
}
0x6: decode FUNCTION_LO {
- 0x0: nmadd_s({{ Fd.sf = (-1 * Fs.sf * Ft.sf) - Fr.sf; }});
- 0x1: nmadd_d({{ Fd.df = (-1 * Fs.df * Ft.df) - Fr.df; }});
+ 0x0: nmadd_s({{ Fd_sf = (-1 * Fs_sf * Ft_sf) - Fr_sf; }});
+ 0x1: nmadd_d({{ Fd_df = (-1 * Fs_df * Ft_df) - Fr_df; }});
0x6: nmadd_ps({{
- Fd1.sf = -((Fs1.df * Ft1.df) + Fr1.df);
- Fd2.sf = -((Fs2.df * Ft2.df) + Fr2.df);
+                        Fd1_sf = -((Fs1_sf * Ft1_sf) + Fr1_sf);
+                        Fd2_sf = -((Fs2_sf * Ft2_sf) + Fr2_sf);
}});
}
0x7: decode FUNCTION_LO {
- 0x0: nmsub_s({{ Fd.sf = (-1 * Fs.sf * Ft.sf) + Fr.sf; }});
- 0x1: nmsub_d({{ Fd.df = (-1 * Fs.df * Ft.df) + Fr.df; }});
+ 0x0: nmsub_s({{ Fd_sf = (-1 * Fs_sf * Ft_sf) + Fr_sf; }});
+ 0x1: nmsub_d({{ Fd_df = (-1 * Fs_df * Ft_df) + Fr_df; }});
0x6: nmsub_ps({{
- Fd1.sf = -((Fs1.df * Ft1.df) - Fr1.df);
- Fd2.sf = -((Fs2.df * Ft2.df) - Fr2.df);
+                        Fd1_sf = -((Fs1_sf * Ft1_sf) - Fr1_sf);
+                        Fd2_sf = -((Fs2_sf * Ft2_sf) - Fr2_sf);
}});
}
}
}
format Branch {
- 0x4: beql({{ cond = (Rs.sw == Rt.sw); }}, Likely);
- 0x5: bnel({{ cond = (Rs.sw != Rt.sw); }}, Likely);
- 0x6: blezl({{ cond = (Rs.sw <= 0); }}, Likely);
- 0x7: bgtzl({{ cond = (Rs.sw > 0); }}, Likely);
+ 0x4: beql({{ cond = (Rs_sw == Rt_sw); }}, Likely);
+ 0x5: bnel({{ cond = (Rs_sw != Rt_sw); }}, Likely);
+ 0x6: blezl({{ cond = (Rs_sw <= 0); }}, Likely);
+ 0x7: bgtzl({{ cond = (Rs_sw > 0); }}, Likely);
}
}
0x4: decode FUNCTION_HI {
0x0: decode FUNCTION_LO {
0x2: IntOp::mul({{
- int64_t temp1 = Rs.sd * Rt.sd;
- Rd.sw = temp1<31:0>;
+ int64_t temp1 = Rs_sd * Rt_sd;
+ Rd_sw = temp1<31:0>;
}}, IntMultOp);
format HiLoRdSelValOp {
0x0: madd({{
val = ((int64_t)HI_RD_SEL << 32 | LO_RD_SEL) +
- (Rs.sd * Rt.sd);
+ (Rs_sd * Rt_sd);
}}, IntMultOp);
0x1: maddu({{
val = ((uint64_t)HI_RD_SEL << 32 | LO_RD_SEL) +
- (Rs.ud * Rt.ud);
+ (Rs_ud * Rt_ud);
}}, IntMultOp);
0x4: msub({{
val = ((int64_t)HI_RD_SEL << 32 | LO_RD_SEL) -
- (Rs.sd * Rt.sd);
+ (Rs_sd * Rt_sd);
}}, IntMultOp);
0x5: msubu({{
val = ((uint64_t)HI_RD_SEL << 32 | LO_RD_SEL) -
- (Rs.ud * Rt.ud);
+ (Rs_ud * Rt_ud);
}}, IntMultOp);
}
}
break;
}
}
- Rd.uw = cnt;
+ Rd_uw = cnt;
}});
0x1: clo({{
int cnt = 32;
break;
}
}
- Rd.uw = cnt;
+ Rd_uw = cnt;
}});
}
}
0x7: decode FUNCTION_HI {
0x0: decode FUNCTION_LO {
format BasicOp {
- 0x0: ext({{ Rt.uw = bits(Rs.uw, MSB+LSB, LSB); }});
+ 0x0: ext({{ Rt_uw = bits(Rs_uw, MSB+LSB, LSB); }});
0x4: ins({{
- Rt.uw = bits(Rt.uw, 31, MSB+1) << (MSB+1) |
- bits(Rs.uw, MSB-LSB, 0) << LSB |
- bits(Rt.uw, LSB-1, 0);
+ Rt_uw = bits(Rt_uw, 31, MSB+1) << (MSB+1) |
+ bits(Rs_uw, MSB-LSB, 0) << LSB |
+ bits(Rt_uw, LSB-1, 0);
}});
}
}
forkThread(xc->tcBase(), fault, RD, Rs, Rt);
}}, UserMode);
0x1: yield({{
- Rd.sw = yieldThread(xc->tcBase(), fault, Rs.sw,
+ Rd_sw = yieldThread(xc->tcBase(), fault, Rs_sw,
YQMask);
}}, UserMode);
}
0x2: decode OP_HI {
0x0: decode OP_LO {
format LoadIndexedMemory {
- 0x0: lwx({{ Rd.sw = Mem.sw; }});
- 0x4: lhx({{ Rd.sw = Mem.sh; }});
- 0x6: lbux({{ Rd.uw = Mem.ub; }});
+ 0x0: lwx({{ Rd_sw = Mem_sw; }});
+ 0x4: lhx({{ Rd_sw = Mem_sh; }});
+ 0x6: lbux({{ Rd_uw = Mem_ub; }});
}
}
}
0x4: DspIntOp::insv({{
int pos = dspctl<5:0>;
int size = dspctl<12:7> - 1;
- Rt.uw = insertBits(Rt.uw, pos+size,
- pos, Rs.uw<size:0>);
+ Rt_uw = insertBits(Rt_uw, pos+size,
+ pos, Rs_uw<size:0>);
}});
}
0x0: decode OP_LO {
format DspIntOp {
0x0: addu_qb({{
- Rd.uw = dspAdd(Rs.uw, Rt.uw, SIMD_FMT_QB,
+ Rd_uw = dspAdd(Rs_uw, Rt_uw, SIMD_FMT_QB,
NOSATURATE, UNSIGNED, &dspctl);
}});
0x1: subu_qb({{
- Rd.uw = dspSub(Rs.uw, Rt.uw, SIMD_FMT_QB,
+ Rd_uw = dspSub(Rs_uw, Rt_uw, SIMD_FMT_QB,
NOSATURATE, UNSIGNED, &dspctl);
}});
0x4: addu_s_qb({{
- Rd.uw = dspAdd(Rs.uw, Rt.uw, SIMD_FMT_QB,
+ Rd_uw = dspAdd(Rs_uw, Rt_uw, SIMD_FMT_QB,
SATURATE, UNSIGNED, &dspctl);
}});
0x5: subu_s_qb({{
- Rd.uw = dspSub(Rs.uw, Rt.uw, SIMD_FMT_QB,
+ Rd_uw = dspSub(Rs_uw, Rt_uw, SIMD_FMT_QB,
SATURATE, UNSIGNED, &dspctl);
}});
0x6: muleu_s_ph_qbl({{
- Rd.uw = dspMuleu(Rs.uw, Rt.uw,
+ Rd_uw = dspMuleu(Rs_uw, Rt_uw,
MODE_L, &dspctl);
}}, IntMultOp);
0x7: muleu_s_ph_qbr({{
- Rd.uw = dspMuleu(Rs.uw, Rt.uw,
+ Rd_uw = dspMuleu(Rs_uw, Rt_uw,
MODE_R, &dspctl);
}}, IntMultOp);
}
0x1: decode OP_LO {
format DspIntOp {
0x0: addu_ph({{
- Rd.uw = dspAdd(Rs.uw, Rt.uw, SIMD_FMT_PH,
+ Rd_uw = dspAdd(Rs_uw, Rt_uw, SIMD_FMT_PH,
NOSATURATE, UNSIGNED, &dspctl);
}});
0x1: subu_ph({{
- Rd.uw = dspSub(Rs.uw, Rt.uw, SIMD_FMT_PH,
+ Rd_uw = dspSub(Rs_uw, Rt_uw, SIMD_FMT_PH,
NOSATURATE, UNSIGNED, &dspctl);
}});
0x2: addq_ph({{
- Rd.uw = dspAdd(Rs.uw, Rt.uw, SIMD_FMT_PH,
+ Rd_uw = dspAdd(Rs_uw, Rt_uw, SIMD_FMT_PH,
NOSATURATE, SIGNED, &dspctl);
}});
0x3: subq_ph({{
- Rd.uw = dspSub(Rs.uw, Rt.uw, SIMD_FMT_PH,
+ Rd_uw = dspSub(Rs_uw, Rt_uw, SIMD_FMT_PH,
NOSATURATE, SIGNED, &dspctl);
}});
0x4: addu_s_ph({{
- Rd.uw = dspAdd(Rs.uw, Rt.uw, SIMD_FMT_PH,
+ Rd_uw = dspAdd(Rs_uw, Rt_uw, SIMD_FMT_PH,
SATURATE, UNSIGNED, &dspctl);
}});
0x5: subu_s_ph({{
- Rd.uw = dspSub(Rs.uw, Rt.uw, SIMD_FMT_PH,
+ Rd_uw = dspSub(Rs_uw, Rt_uw, SIMD_FMT_PH,
SATURATE, UNSIGNED, &dspctl);
}});
0x6: addq_s_ph({{
- Rd.uw = dspAdd(Rs.uw, Rt.uw, SIMD_FMT_PH,
+ Rd_uw = dspAdd(Rs_uw, Rt_uw, SIMD_FMT_PH,
SATURATE, SIGNED, &dspctl);
}});
0x7: subq_s_ph({{
- Rd.uw = dspSub(Rs.uw, Rt.uw, SIMD_FMT_PH,
+ Rd_uw = dspSub(Rs_uw, Rt_uw, SIMD_FMT_PH,
SATURATE, SIGNED, &dspctl);
}});
}
format DspIntOp {
0x0: addsc({{
int64_t dresult;
- dresult = Rs.ud + Rt.ud;
- Rd.sw = dresult<31:0>;
+ dresult = Rs_ud + Rt_ud;
+ Rd_sw = dresult<31:0>;
dspctl = insertBits(dspctl, 13, 13,
dresult<32:32>);
}});
0x1: addwc({{
int64_t dresult;
- dresult = Rs.sd + Rt.sd + dspctl<13:13>;
- Rd.sw = dresult<31:0>;
+ dresult = Rs_sd + Rt_sd + dspctl<13:13>;
+ Rd_sw = dresult<31:0>;
if (dresult<32:32> != dresult<31:31>)
dspctl = insertBits(dspctl, 20, 20, 1);
}});
0x2: modsub({{
- Rd.sw = (Rs.sw == 0) ? Rt.sw<23:8> :
- Rs.sw - Rt.sw<7:0>;
+ Rd_sw = (Rs_sw == 0) ? Rt_sw<23:8> :
+ Rs_sw - Rt_sw<7:0>;
}});
0x4: raddu_w_qb({{
- Rd.uw = Rs.uw<31:24> + Rs.uw<23:16> +
- Rs.uw<15:8> + Rs.uw<7:0>;
+ Rd_uw = Rs_uw<31:24> + Rs_uw<23:16> +
+ Rs_uw<15:8> + Rs_uw<7:0>;
}});
0x6: addq_s_w({{
- Rd.sw = dspAdd(Rs.sw, Rt.sw, SIMD_FMT_W,
+ Rd_sw = dspAdd(Rs_sw, Rt_sw, SIMD_FMT_W,
SATURATE, SIGNED, &dspctl);
}});
0x7: subq_s_w({{
- Rd.sw = dspSub(Rs.sw, Rt.sw, SIMD_FMT_W,
+ Rd_sw = dspSub(Rs_sw, Rt_sw, SIMD_FMT_W,
SATURATE, SIGNED, &dspctl);
}});
}
0x3: decode OP_LO {
format DspIntOp {
0x4: muleq_s_w_phl({{
- Rd.sw = dspMuleq(Rs.sw, Rt.sw,
+ Rd_sw = dspMuleq(Rs_sw, Rt_sw,
MODE_L, &dspctl);
}}, IntMultOp);
0x5: muleq_s_w_phr({{
- Rd.sw = dspMuleq(Rs.sw, Rt.sw,
+ Rd_sw = dspMuleq(Rs_sw, Rt_sw,
MODE_R, &dspctl);
}}, IntMultOp);
0x6: mulq_s_ph({{
- Rd.sw = dspMulq(Rs.sw, Rt.sw, SIMD_FMT_PH,
+ Rd_sw = dspMulq(Rs_sw, Rt_sw, SIMD_FMT_PH,
SATURATE, NOROUND, &dspctl);
}}, IntMultOp);
0x7: mulq_rs_ph({{
- Rd.sw = dspMulq(Rs.sw, Rt.sw, SIMD_FMT_PH,
+ Rd_sw = dspMulq(Rs_sw, Rt_sw, SIMD_FMT_PH,
SATURATE, ROUND, &dspctl);
}}, IntMultOp);
}
0x0: decode OP_LO {
format DspIntOp {
0x0: cmpu_eq_qb({{
- dspCmp(Rs.uw, Rt.uw, SIMD_FMT_QB,
+ dspCmp(Rs_uw, Rt_uw, SIMD_FMT_QB,
UNSIGNED, CMP_EQ, &dspctl);
}});
0x1: cmpu_lt_qb({{
- dspCmp(Rs.uw, Rt.uw, SIMD_FMT_QB,
+ dspCmp(Rs_uw, Rt_uw, SIMD_FMT_QB,
UNSIGNED, CMP_LT, &dspctl);
}});
0x2: cmpu_le_qb({{
- dspCmp(Rs.uw, Rt.uw, SIMD_FMT_QB,
+ dspCmp(Rs_uw, Rt_uw, SIMD_FMT_QB,
UNSIGNED, CMP_LE, &dspctl);
}});
0x3: pick_qb({{
- Rd.uw = dspPick(Rs.uw, Rt.uw,
+ Rd_uw = dspPick(Rs_uw, Rt_uw,
SIMD_FMT_QB, &dspctl);
}});
0x4: cmpgu_eq_qb({{
- Rd.uw = dspCmpg(Rs.uw, Rt.uw, SIMD_FMT_QB,
+ Rd_uw = dspCmpg(Rs_uw, Rt_uw, SIMD_FMT_QB,
UNSIGNED, CMP_EQ );
}});
0x5: cmpgu_lt_qb({{
- Rd.uw = dspCmpg(Rs.uw, Rt.uw, SIMD_FMT_QB,
+ Rd_uw = dspCmpg(Rs_uw, Rt_uw, SIMD_FMT_QB,
UNSIGNED, CMP_LT);
}});
0x6: cmpgu_le_qb({{
- Rd.uw = dspCmpg(Rs.uw, Rt.uw, SIMD_FMT_QB,
+ Rd_uw = dspCmpg(Rs_uw, Rt_uw, SIMD_FMT_QB,
UNSIGNED, CMP_LE);
}});
}
0x1: decode OP_LO {
format DspIntOp {
0x0: cmp_eq_ph({{
- dspCmp(Rs.uw, Rt.uw, SIMD_FMT_PH,
+ dspCmp(Rs_uw, Rt_uw, SIMD_FMT_PH,
SIGNED, CMP_EQ, &dspctl);
}});
0x1: cmp_lt_ph({{
- dspCmp(Rs.uw, Rt.uw, SIMD_FMT_PH,
+ dspCmp(Rs_uw, Rt_uw, SIMD_FMT_PH,
SIGNED, CMP_LT, &dspctl);
}});
0x2: cmp_le_ph({{
- dspCmp(Rs.uw, Rt.uw, SIMD_FMT_PH,
+ dspCmp(Rs_uw, Rt_uw, SIMD_FMT_PH,
SIGNED, CMP_LE, &dspctl);
}});
0x3: pick_ph({{
- Rd.uw = dspPick(Rs.uw, Rt.uw,
+ Rd_uw = dspPick(Rs_uw, Rt_uw,
SIMD_FMT_PH, &dspctl);
}});
0x4: precrq_qb_ph({{
- Rd.uw = Rs.uw<31:24> << 24 |
- Rs.uw<15:8> << 16 |
- Rt.uw<31:24> << 8 |
- Rt.uw<15:8>;
+ Rd_uw = Rs_uw<31:24> << 24 |
+ Rs_uw<15:8> << 16 |
+ Rt_uw<31:24> << 8 |
+ Rt_uw<15:8>;
}});
0x5: precr_qb_ph({{
- Rd.uw = Rs.uw<23:16> << 24 |
- Rs.uw<7:0> << 16 |
- Rt.uw<23:16> << 8 |
- Rt.uw<7:0>;
+ Rd_uw = Rs_uw<23:16> << 24 |
+ Rs_uw<7:0> << 16 |
+ Rt_uw<23:16> << 8 |
+ Rt_uw<7:0>;
}});
0x6: packrl_ph({{
- Rd.uw = dspPack(Rs.uw, Rt.uw, SIMD_FMT_PH);
+ Rd_uw = dspPack(Rs_uw, Rt_uw, SIMD_FMT_PH);
}});
0x7: precrqu_s_qb_ph({{
- Rd.uw = dspPrecrqu(Rs.uw, Rt.uw, &dspctl);
+ Rd_uw = dspPrecrqu(Rs_uw, Rt_uw, &dspctl);
}});
}
}
0x2: decode OP_LO {
format DspIntOp {
0x4: precrq_ph_w({{
- Rd.uw = Rs.uw<31:16> << 16 | Rt.uw<31:16>;
+ Rd_uw = Rs_uw<31:16> << 16 | Rt_uw<31:16>;
}});
0x5: precrq_rs_ph_w({{
- Rd.uw = dspPrecrq(Rs.uw, Rt.uw,
+ Rd_uw = dspPrecrq(Rs_uw, Rt_uw,
SIMD_FMT_W, &dspctl);
}});
}
0x3: decode OP_LO {
format DspIntOp {
0x0: cmpgdu_eq_qb({{
- Rd.uw = dspCmpgd(Rs.uw, Rt.uw, SIMD_FMT_QB,
+ Rd_uw = dspCmpgd(Rs_uw, Rt_uw, SIMD_FMT_QB,
UNSIGNED, CMP_EQ, &dspctl);
}});
0x1: cmpgdu_lt_qb({{
- Rd.uw = dspCmpgd(Rs.uw, Rt.uw, SIMD_FMT_QB,
+ Rd_uw = dspCmpgd(Rs_uw, Rt_uw, SIMD_FMT_QB,
UNSIGNED, CMP_LT, &dspctl);
}});
0x2: cmpgdu_le_qb({{
- Rd.uw = dspCmpgd(Rs.uw, Rt.uw, SIMD_FMT_QB,
+ Rd_uw = dspCmpgd(Rs_uw, Rt_uw, SIMD_FMT_QB,
UNSIGNED, CMP_LE, &dspctl);
}});
0x6: precr_sra_ph_w({{
- Rt.uw = dspPrecrSra(Rt.uw, Rs.uw, RD,
+ Rt_uw = dspPrecrSra(Rt_uw, Rs_uw, RD,
SIMD_FMT_W, NOROUND);
}});
0x7: precr_sra_r_ph_w({{
- Rt.uw = dspPrecrSra(Rt.uw, Rs.uw, RD,
+ Rt_uw = dspPrecrSra(Rt_uw, Rs_uw, RD,
SIMD_FMT_W, ROUND);
}});
}
0x0: decode OP_LO {
format DspIntOp {
0x1: absq_s_qb({{
- Rd.sw = dspAbs(Rt.sw, SIMD_FMT_QB, &dspctl);
+ Rd_sw = dspAbs(Rt_sw, SIMD_FMT_QB, &dspctl);
}});
0x2: repl_qb({{
- Rd.uw = RS_RT<7:0> << 24 |
+ Rd_uw = RS_RT<7:0> << 24 |
RS_RT<7:0> << 16 |
RS_RT<7:0> << 8 |
RS_RT<7:0>;
}});
0x3: replv_qb({{
- Rd.sw = Rt.uw<7:0> << 24 |
- Rt.uw<7:0> << 16 |
- Rt.uw<7:0> << 8 |
- Rt.uw<7:0>;
+ Rd_sw = Rt_uw<7:0> << 24 |
+ Rt_uw<7:0> << 16 |
+ Rt_uw<7:0> << 8 |
+ Rt_uw<7:0>;
}});
0x4: precequ_ph_qbl({{
- Rd.uw = dspPrece(Rt.uw, SIMD_FMT_QB, UNSIGNED,
+ Rd_uw = dspPrece(Rt_uw, SIMD_FMT_QB, UNSIGNED,
SIMD_FMT_PH, SIGNED, MODE_L);
}});
0x5: precequ_ph_qbr({{
- Rd.uw = dspPrece(Rt.uw, SIMD_FMT_QB, UNSIGNED,
+ Rd_uw = dspPrece(Rt_uw, SIMD_FMT_QB, UNSIGNED,
SIMD_FMT_PH, SIGNED, MODE_R);
}});
0x6: precequ_ph_qbla({{
- Rd.uw = dspPrece(Rt.uw, SIMD_FMT_QB, UNSIGNED,
+ Rd_uw = dspPrece(Rt_uw, SIMD_FMT_QB, UNSIGNED,
SIMD_FMT_PH, SIGNED, MODE_LA);
}});
0x7: precequ_ph_qbra({{
- Rd.uw = dspPrece(Rt.uw, SIMD_FMT_QB, UNSIGNED,
+ Rd_uw = dspPrece(Rt_uw, SIMD_FMT_QB, UNSIGNED,
SIMD_FMT_PH, SIGNED, MODE_RA);
}});
}
0x1: decode OP_LO {
format DspIntOp {
0x1: absq_s_ph({{
- Rd.sw = dspAbs(Rt.sw, SIMD_FMT_PH, &dspctl);
+ Rd_sw = dspAbs(Rt_sw, SIMD_FMT_PH, &dspctl);
}});
0x2: repl_ph({{
- Rd.uw = (sext<10>(RS_RT))<15:0> << 16 |
+ Rd_uw = (sext<10>(RS_RT))<15:0> << 16 |
(sext<10>(RS_RT))<15:0>;
}});
0x3: replv_ph({{
- Rd.uw = Rt.uw<15:0> << 16 |
- Rt.uw<15:0>;
+ Rd_uw = Rt_uw<15:0> << 16 |
+ Rt_uw<15:0>;
}});
0x4: preceq_w_phl({{
- Rd.uw = dspPrece(Rt.uw, SIMD_FMT_PH, SIGNED,
+ Rd_uw = dspPrece(Rt_uw, SIMD_FMT_PH, SIGNED,
SIMD_FMT_W, SIGNED, MODE_L);
}});
0x5: preceq_w_phr({{
- Rd.uw = dspPrece(Rt.uw, SIMD_FMT_PH, SIGNED,
+ Rd_uw = dspPrece(Rt_uw, SIMD_FMT_PH, SIGNED,
SIMD_FMT_W, SIGNED, MODE_R);
}});
}
0x2: decode OP_LO {
format DspIntOp {
0x1: absq_s_w({{
- Rd.sw = dspAbs(Rt.sw, SIMD_FMT_W, &dspctl);
+ Rd_sw = dspAbs(Rt_sw, SIMD_FMT_W, &dspctl);
}});
}
}
0x3: decode OP_LO {
0x3: IntOp::bitrev({{
- Rd.uw = bitrev( Rt.uw<15:0> );
+ Rd_uw = bitrev( Rt_uw<15:0> );
}});
format DspIntOp {
0x4: preceu_ph_qbl({{
- Rd.uw = dspPrece(Rt.uw, SIMD_FMT_QB,
+ Rd_uw = dspPrece(Rt_uw, SIMD_FMT_QB,
UNSIGNED, SIMD_FMT_PH,
UNSIGNED, MODE_L);
}});
0x5: preceu_ph_qbr({{
- Rd.uw = dspPrece(Rt.uw, SIMD_FMT_QB,
+ Rd_uw = dspPrece(Rt_uw, SIMD_FMT_QB,
UNSIGNED, SIMD_FMT_PH,
UNSIGNED, MODE_R );
}});
0x6: preceu_ph_qbla({{
- Rd.uw = dspPrece(Rt.uw, SIMD_FMT_QB,
+ Rd_uw = dspPrece(Rt_uw, SIMD_FMT_QB,
UNSIGNED, SIMD_FMT_PH,
UNSIGNED, MODE_LA );
}});
0x7: preceu_ph_qbra({{
- Rd.uw = dspPrece(Rt.uw, SIMD_FMT_QB,
+ Rd_uw = dspPrece(Rt_uw, SIMD_FMT_QB,
UNSIGNED, SIMD_FMT_PH,
UNSIGNED, MODE_RA);
}});
0x0: decode OP_LO {
format DspIntOp {
0x0: shll_qb({{
- Rd.sw = dspShll(Rt.sw, RS, SIMD_FMT_QB,
+ Rd_sw = dspShll(Rt_sw, RS, SIMD_FMT_QB,
NOSATURATE, UNSIGNED, &dspctl);
}});
0x1: shrl_qb({{
- Rd.sw = dspShrl(Rt.sw, RS, SIMD_FMT_QB,
+ Rd_sw = dspShrl(Rt_sw, RS, SIMD_FMT_QB,
UNSIGNED);
}});
0x2: shllv_qb({{
- Rd.sw = dspShll(Rt.sw, Rs.sw, SIMD_FMT_QB,
+ Rd_sw = dspShll(Rt_sw, Rs_sw, SIMD_FMT_QB,
NOSATURATE, UNSIGNED, &dspctl);
}});
0x3: shrlv_qb({{
- Rd.sw = dspShrl(Rt.sw, Rs.sw, SIMD_FMT_QB,
+ Rd_sw = dspShrl(Rt_sw, Rs_sw, SIMD_FMT_QB,
UNSIGNED);
}});
0x4: shra_qb({{
- Rd.sw = dspShra(Rt.sw, RS, SIMD_FMT_QB,
+ Rd_sw = dspShra(Rt_sw, RS, SIMD_FMT_QB,
NOROUND, SIGNED, &dspctl);
}});
0x5: shra_r_qb({{
- Rd.sw = dspShra(Rt.sw, RS, SIMD_FMT_QB,
+ Rd_sw = dspShra(Rt_sw, RS, SIMD_FMT_QB,
ROUND, SIGNED, &dspctl);
}});
0x6: shrav_qb({{
- Rd.sw = dspShra(Rt.sw, Rs.sw, SIMD_FMT_QB,
+ Rd_sw = dspShra(Rt_sw, Rs_sw, SIMD_FMT_QB,
NOROUND, SIGNED, &dspctl);
}});
0x7: shrav_r_qb({{
- Rd.sw = dspShra(Rt.sw, Rs.sw, SIMD_FMT_QB,
+ Rd_sw = dspShra(Rt_sw, Rs_sw, SIMD_FMT_QB,
ROUND, SIGNED, &dspctl);
}});
}
0x1: decode OP_LO {
format DspIntOp {
0x0: shll_ph({{
- Rd.uw = dspShll(Rt.uw, RS, SIMD_FMT_PH,
+ Rd_uw = dspShll(Rt_uw, RS, SIMD_FMT_PH,
NOSATURATE, SIGNED, &dspctl);
}});
0x1: shra_ph({{
- Rd.sw = dspShra(Rt.sw, RS, SIMD_FMT_PH,
+ Rd_sw = dspShra(Rt_sw, RS, SIMD_FMT_PH,
NOROUND, SIGNED, &dspctl);
}});
0x2: shllv_ph({{
- Rd.sw = dspShll(Rt.sw, Rs.sw, SIMD_FMT_PH,
+ Rd_sw = dspShll(Rt_sw, Rs_sw, SIMD_FMT_PH,
NOSATURATE, SIGNED, &dspctl);
}});
0x3: shrav_ph({{
- Rd.sw = dspShra(Rt.sw, Rs.sw, SIMD_FMT_PH,
+ Rd_sw = dspShra(Rt_sw, Rs_sw, SIMD_FMT_PH,
NOROUND, SIGNED, &dspctl);
}});
0x4: shll_s_ph({{
- Rd.sw = dspShll(Rt.sw, RS, SIMD_FMT_PH,
+ Rd_sw = dspShll(Rt_sw, RS, SIMD_FMT_PH,
SATURATE, SIGNED, &dspctl);
}});
0x5: shra_r_ph({{
- Rd.sw = dspShra(Rt.sw, RS, SIMD_FMT_PH,
+ Rd_sw = dspShra(Rt_sw, RS, SIMD_FMT_PH,
ROUND, SIGNED, &dspctl);
}});
0x6: shllv_s_ph({{
- Rd.sw = dspShll(Rt.sw, Rs.sw, SIMD_FMT_PH,
+ Rd_sw = dspShll(Rt_sw, Rs_sw, SIMD_FMT_PH,
SATURATE, SIGNED, &dspctl);
}});
0x7: shrav_r_ph({{
- Rd.sw = dspShra(Rt.sw, Rs.sw, SIMD_FMT_PH,
+ Rd_sw = dspShra(Rt_sw, Rs_sw, SIMD_FMT_PH,
ROUND, SIGNED, &dspctl);
}});
}
0x2: decode OP_LO {
format DspIntOp {
0x4: shll_s_w({{
- Rd.sw = dspShll(Rt.sw, RS, SIMD_FMT_W,
+ Rd_sw = dspShll(Rt_sw, RS, SIMD_FMT_W,
SATURATE, SIGNED, &dspctl);
}});
0x5: shra_r_w({{
- Rd.sw = dspShra(Rt.sw, RS, SIMD_FMT_W,
+ Rd_sw = dspShra(Rt_sw, RS, SIMD_FMT_W,
ROUND, SIGNED, &dspctl);
}});
0x6: shllv_s_w({{
- Rd.sw = dspShll(Rt.sw, Rs.sw, SIMD_FMT_W,
+ Rd_sw = dspShll(Rt_sw, Rs_sw, SIMD_FMT_W,
SATURATE, SIGNED, &dspctl);
}});
0x7: shrav_r_w({{
- Rd.sw = dspShra(Rt.sw, Rs.sw, SIMD_FMT_W,
+ Rd_sw = dspShra(Rt_sw, Rs_sw, SIMD_FMT_W,
ROUND, SIGNED, &dspctl);
}});
}
0x3: decode OP_LO {
format DspIntOp {
0x1: shrl_ph({{
- Rd.sw = dspShrl(Rt.sw, RS, SIMD_FMT_PH,
+ Rd_sw = dspShrl(Rt_sw, RS, SIMD_FMT_PH,
UNSIGNED);
}});
0x3: shrlv_ph({{
- Rd.sw = dspShrl(Rt.sw, Rs.sw, SIMD_FMT_PH,
+ Rd_sw = dspShrl(Rt_sw, Rs_sw, SIMD_FMT_PH,
UNSIGNED);
}});
}
0x0: decode OP_LO {
format DspIntOp {
0x0: adduh_qb({{
- Rd.uw = dspAddh(Rs.sw, Rt.sw, SIMD_FMT_QB,
+ Rd_uw = dspAddh(Rs_sw, Rt_sw, SIMD_FMT_QB,
NOROUND, UNSIGNED);
}});
0x1: subuh_qb({{
- Rd.uw = dspSubh(Rs.sw, Rt.sw, SIMD_FMT_QB,
+ Rd_uw = dspSubh(Rs_sw, Rt_sw, SIMD_FMT_QB,
NOROUND, UNSIGNED);
}});
0x2: adduh_r_qb({{
- Rd.uw = dspAddh(Rs.sw, Rt.sw, SIMD_FMT_QB,
+ Rd_uw = dspAddh(Rs_sw, Rt_sw, SIMD_FMT_QB,
ROUND, UNSIGNED);
}});
0x3: subuh_r_qb({{
- Rd.uw = dspSubh(Rs.sw, Rt.sw, SIMD_FMT_QB,
+ Rd_uw = dspSubh(Rs_sw, Rt_sw, SIMD_FMT_QB,
ROUND, UNSIGNED);
}});
}
0x1: decode OP_LO {
format DspIntOp {
0x0: addqh_ph({{
- Rd.uw = dspAddh(Rs.sw, Rt.sw, SIMD_FMT_PH,
+ Rd_uw = dspAddh(Rs_sw, Rt_sw, SIMD_FMT_PH,
NOROUND, SIGNED);
}});
0x1: subqh_ph({{
- Rd.uw = dspSubh(Rs.sw, Rt.sw, SIMD_FMT_PH,
+ Rd_uw = dspSubh(Rs_sw, Rt_sw, SIMD_FMT_PH,
NOROUND, SIGNED);
}});
0x2: addqh_r_ph({{
- Rd.uw = dspAddh(Rs.sw, Rt.sw, SIMD_FMT_PH,
+ Rd_uw = dspAddh(Rs_sw, Rt_sw, SIMD_FMT_PH,
ROUND, SIGNED);
}});
0x3: subqh_r_ph({{
- Rd.uw = dspSubh(Rs.sw, Rt.sw, SIMD_FMT_PH,
+ Rd_uw = dspSubh(Rs_sw, Rt_sw, SIMD_FMT_PH,
ROUND, SIGNED);
}});
0x4: mul_ph({{
- Rd.sw = dspMul(Rs.sw, Rt.sw, SIMD_FMT_PH,
+ Rd_sw = dspMul(Rs_sw, Rt_sw, SIMD_FMT_PH,
NOSATURATE, &dspctl);
}}, IntMultOp);
0x6: mul_s_ph({{
- Rd.sw = dspMul(Rs.sw, Rt.sw, SIMD_FMT_PH,
+ Rd_sw = dspMul(Rs_sw, Rt_sw, SIMD_FMT_PH,
SATURATE, &dspctl);
}}, IntMultOp);
}
0x2: decode OP_LO {
format DspIntOp {
0x0: addqh_w({{
- Rd.uw = dspAddh(Rs.sw, Rt.sw, SIMD_FMT_W,
+ Rd_uw = dspAddh(Rs_sw, Rt_sw, SIMD_FMT_W,
NOROUND, SIGNED);
}});
0x1: subqh_w({{
- Rd.uw = dspSubh(Rs.sw, Rt.sw, SIMD_FMT_W,
+ Rd_uw = dspSubh(Rs_sw, Rt_sw, SIMD_FMT_W,
NOROUND, SIGNED);
}});
0x2: addqh_r_w({{
- Rd.uw = dspAddh(Rs.sw, Rt.sw, SIMD_FMT_W,
+ Rd_uw = dspAddh(Rs_sw, Rt_sw, SIMD_FMT_W,
ROUND, SIGNED);
}});
0x3: subqh_r_w({{
- Rd.uw = dspSubh(Rs.sw, Rt.sw, SIMD_FMT_W,
+ Rd_uw = dspSubh(Rs_sw, Rt_sw, SIMD_FMT_W,
ROUND, SIGNED);
}});
0x6: mulq_s_w({{
- Rd.sw = dspMulq(Rs.sw, Rt.sw, SIMD_FMT_W,
+ Rd_sw = dspMulq(Rs_sw, Rt_sw, SIMD_FMT_W,
SATURATE, NOROUND, &dspctl);
}}, IntMultOp);
0x7: mulq_rs_w({{
- Rd.sw = dspMulq(Rs.sw, Rt.sw, SIMD_FMT_W,
+ Rd_sw = dspMulq(Rs_sw, Rt_sw, SIMD_FMT_W,
SATURATE, ROUND, &dspctl);
}}, IntMultOp);
}
0x4: decode SA {
format BasicOp {
0x02: wsbh({{
- Rd.uw = Rt.uw<23:16> << 24 |
- Rt.uw<31:24> << 16 |
- Rt.uw<7:0> << 8 |
- Rt.uw<15:8>;
+ Rd_uw = Rt_uw<23:16> << 24 |
+ Rt_uw<31:24> << 16 |
+ Rt_uw<7:0> << 8 |
+ Rt_uw<15:8>;
}});
- 0x10: seb({{ Rd.sw = Rt.sb; }});
- 0x18: seh({{ Rd.sw = Rt.sh; }});
+ 0x10: seb({{ Rd_sw = Rt_sb; }});
+ 0x18: seh({{ Rd_sw = Rt_sh; }});
}
}
0x0: decode OP_LO {
format DspHiLoOp {
0x0: dpa_w_ph({{
- dspac = dspDpa(dspac, Rs.sw, Rt.sw, ACDST,
+ dspac = dspDpa(dspac, Rs_sw, Rt_sw, ACDST,
SIMD_FMT_PH, SIGNED, MODE_L);
}}, IntMultOp);
0x1: dps_w_ph({{
- dspac = dspDps(dspac, Rs.sw, Rt.sw, ACDST,
+ dspac = dspDps(dspac, Rs_sw, Rt_sw, ACDST,
SIMD_FMT_PH, SIGNED, MODE_L);
}}, IntMultOp);
0x2: mulsa_w_ph({{
- dspac = dspMulsa(dspac, Rs.sw, Rt.sw,
+ dspac = dspMulsa(dspac, Rs_sw, Rt_sw,
ACDST, SIMD_FMT_PH );
}}, IntMultOp);
0x3: dpau_h_qbl({{
- dspac = dspDpa(dspac, Rs.sw, Rt.sw, ACDST,
+ dspac = dspDpa(dspac, Rs_sw, Rt_sw, ACDST,
SIMD_FMT_QB, UNSIGNED, MODE_L);
}}, IntMultOp);
0x4: dpaq_s_w_ph({{
- dspac = dspDpaq(dspac, Rs.sw, Rt.sw,
+ dspac = dspDpaq(dspac, Rs_sw, Rt_sw,
ACDST, SIMD_FMT_PH,
SIMD_FMT_W, NOSATURATE,
MODE_L, &dspctl);
}}, IntMultOp);
0x5: dpsq_s_w_ph({{
- dspac = dspDpsq(dspac, Rs.sw, Rt.sw,
+ dspac = dspDpsq(dspac, Rs_sw, Rt_sw,
ACDST, SIMD_FMT_PH,
SIMD_FMT_W, NOSATURATE,
MODE_L, &dspctl);
}}, IntMultOp);
0x6: mulsaq_s_w_ph({{
- dspac = dspMulsaq(dspac, Rs.sw, Rt.sw,
+ dspac = dspMulsaq(dspac, Rs_sw, Rt_sw,
ACDST, SIMD_FMT_PH,
&dspctl);
}}, IntMultOp);
0x7: dpau_h_qbr({{
- dspac = dspDpa(dspac, Rs.sw, Rt.sw, ACDST,
+ dspac = dspDpa(dspac, Rs_sw, Rt_sw, ACDST,
SIMD_FMT_QB, UNSIGNED, MODE_R);
}}, IntMultOp);
}
0x1: decode OP_LO {
format DspHiLoOp {
0x0: dpax_w_ph({{
- dspac = dspDpa(dspac, Rs.sw, Rt.sw, ACDST,
+ dspac = dspDpa(dspac, Rs_sw, Rt_sw, ACDST,
SIMD_FMT_PH, SIGNED, MODE_X);
}}, IntMultOp);
0x1: dpsx_w_ph({{
- dspac = dspDps(dspac, Rs.sw, Rt.sw, ACDST,
+ dspac = dspDps(dspac, Rs_sw, Rt_sw, ACDST,
SIMD_FMT_PH, SIGNED, MODE_X);
}}, IntMultOp);
0x3: dpsu_h_qbl({{
- dspac = dspDps(dspac, Rs.sw, Rt.sw, ACDST,
+ dspac = dspDps(dspac, Rs_sw, Rt_sw, ACDST,
SIMD_FMT_QB, UNSIGNED, MODE_L);
}}, IntMultOp);
0x4: dpaq_sa_l_w({{
- dspac = dspDpaq(dspac, Rs.sw, Rt.sw,
+ dspac = dspDpaq(dspac, Rs_sw, Rt_sw,
ACDST, SIMD_FMT_W,
SIMD_FMT_L, SATURATE,
MODE_L, &dspctl);
}}, IntMultOp);
0x5: dpsq_sa_l_w({{
- dspac = dspDpsq(dspac, Rs.sw, Rt.sw,
+ dspac = dspDpsq(dspac, Rs_sw, Rt_sw,
ACDST, SIMD_FMT_W,
SIMD_FMT_L, SATURATE,
MODE_L, &dspctl);
}}, IntMultOp);
0x7: dpsu_h_qbr({{
- dspac = dspDps(dspac, Rs.sw, Rt.sw, ACDST,
+ dspac = dspDps(dspac, Rs_sw, Rt_sw, ACDST,
SIMD_FMT_QB, UNSIGNED, MODE_R);
}}, IntMultOp);
}
0x2: decode OP_LO {
format DspHiLoOp {
0x0: maq_sa_w_phl({{
- dspac = dspMaq(dspac, Rs.uw, Rt.uw,
+ dspac = dspMaq(dspac, Rs_uw, Rt_uw,
ACDST, SIMD_FMT_PH,
MODE_L, SATURATE, &dspctl);
}}, IntMultOp);
0x2: maq_sa_w_phr({{
- dspac = dspMaq(dspac, Rs.uw, Rt.uw,
+ dspac = dspMaq(dspac, Rs_uw, Rt_uw,
ACDST, SIMD_FMT_PH,
MODE_R, SATURATE, &dspctl);
}}, IntMultOp);
0x4: maq_s_w_phl({{
- dspac = dspMaq(dspac, Rs.uw, Rt.uw,
+ dspac = dspMaq(dspac, Rs_uw, Rt_uw,
ACDST, SIMD_FMT_PH,
MODE_L, NOSATURATE, &dspctl);
}}, IntMultOp);
0x6: maq_s_w_phr({{
- dspac = dspMaq(dspac, Rs.uw, Rt.uw,
+ dspac = dspMaq(dspac, Rs_uw, Rt_uw,
ACDST, SIMD_FMT_PH,
MODE_R, NOSATURATE, &dspctl);
}}, IntMultOp);
0x3: decode OP_LO {
format DspHiLoOp {
0x0: dpaqx_s_w_ph({{
- dspac = dspDpaq(dspac, Rs.sw, Rt.sw,
+ dspac = dspDpaq(dspac, Rs_sw, Rt_sw,
ACDST, SIMD_FMT_PH,
SIMD_FMT_W, NOSATURATE,
MODE_X, &dspctl);
}}, IntMultOp);
0x1: dpsqx_s_w_ph({{
- dspac = dspDpsq(dspac, Rs.sw, Rt.sw,
+ dspac = dspDpsq(dspac, Rs_sw, Rt_sw,
ACDST, SIMD_FMT_PH,
SIMD_FMT_W, NOSATURATE,
MODE_X, &dspctl);
}}, IntMultOp);
0x2: dpaqx_sa_w_ph({{
- dspac = dspDpaq(dspac, Rs.sw, Rt.sw,
+ dspac = dspDpaq(dspac, Rs_sw, Rt_sw,
ACDST, SIMD_FMT_PH,
SIMD_FMT_W, SATURATE,
MODE_X, &dspctl);
}}, IntMultOp);
0x3: dpsqx_sa_w_ph({{
- dspac = dspDpsq(dspac, Rs.sw, Rt.sw,
+ dspac = dspDpsq(dspac, Rs_sw, Rt_sw,
ACDST, SIMD_FMT_PH,
SIMD_FMT_W, SATURATE,
MODE_X, &dspctl);
0x0: decode OP_LO {
format IntOp {
0x0: append({{
- Rt.uw = (Rt.uw << RD) | bits(Rs.uw, RD - 1, 0);
+ Rt_uw = (Rt_uw << RD) | bits(Rs_uw, RD - 1, 0);
}});
0x1: prepend({{
- Rt.uw = (Rt.uw >> RD) |
- (bits(Rs.uw, RD - 1, 0) << (32 - RD));
+ Rt_uw = (Rt_uw >> RD) |
+ (bits(Rs_uw, RD - 1, 0) << (32 - RD));
}});
}
}
0x2: decode OP_LO {
format IntOp {
0x0: balign({{
- Rt.uw = (Rt.uw << (8 * BP)) |
- (Rs.uw >> (8 * (4 - BP)));
+ Rt_uw = (Rt_uw << (8 * BP)) |
+ (Rs_uw >> (8 * (4 - BP)));
}});
}
}
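// Illustrative sketch, not part of the original source: with BP == 1,
// balign computes Rt_uw = (Rt_uw << 8) | (Rs_uw >> 24), i.e. the two
// registers concatenated at a one-byte boundary.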
0x0: decode OP_LO {
format DspHiLoOp {
0x0: extr_w({{
- Rt.uw = dspExtr(dspac, SIMD_FMT_W, RS,
+ Rt_uw = dspExtr(dspac, SIMD_FMT_W, RS,
NOROUND, NOSATURATE, &dspctl);
}});
0x1: extrv_w({{
- Rt.uw = dspExtr(dspac, SIMD_FMT_W, Rs.uw,
+ Rt_uw = dspExtr(dspac, SIMD_FMT_W, Rs_uw,
NOROUND, NOSATURATE, &dspctl);
}});
0x2: extp({{
- Rt.uw = dspExtp(dspac, RS, &dspctl);
+ Rt_uw = dspExtp(dspac, RS, &dspctl);
}});
0x3: extpv({{
- Rt.uw = dspExtp(dspac, Rs.uw, &dspctl);
+ Rt_uw = dspExtp(dspac, Rs_uw, &dspctl);
}});
0x4: extr_r_w({{
- Rt.uw = dspExtr(dspac, SIMD_FMT_W, RS,
+ Rt_uw = dspExtr(dspac, SIMD_FMT_W, RS,
ROUND, NOSATURATE, &dspctl);
}});
0x5: extrv_r_w({{
- Rt.uw = dspExtr(dspac, SIMD_FMT_W, Rs.uw,
+ Rt_uw = dspExtr(dspac, SIMD_FMT_W, Rs_uw,
ROUND, NOSATURATE, &dspctl);
}});
0x6: extr_rs_w({{
- Rt.uw = dspExtr(dspac, SIMD_FMT_W, RS,
+ Rt_uw = dspExtr(dspac, SIMD_FMT_W, RS,
ROUND, SATURATE, &dspctl);
}});
0x7: extrv_rs_w({{
- Rt.uw = dspExtr(dspac, SIMD_FMT_W, Rs.uw,
+ Rt_uw = dspExtr(dspac, SIMD_FMT_W, Rs_uw,
ROUND, SATURATE, &dspctl);
}});
}
0x1: decode OP_LO {
format DspHiLoOp {
0x2: extpdp({{
- Rt.uw = dspExtpd(dspac, RS, &dspctl);
+ Rt_uw = dspExtpd(dspac, RS, &dspctl);
}});
0x3: extpdpv({{
- Rt.uw = dspExtpd(dspac, Rs.uw, &dspctl);
+ Rt_uw = dspExtpd(dspac, Rs_uw, &dspctl);
}});
0x6: extr_s_h({{
- Rt.uw = dspExtr(dspac, SIMD_FMT_PH, RS,
+ Rt_uw = dspExtr(dspac, SIMD_FMT_PH, RS,
NOROUND, SATURATE, &dspctl);
}});
0x7: extrv_s_h({{
- Rt.uw = dspExtr(dspac, SIMD_FMT_PH, Rs.uw,
+ Rt_uw = dspExtr(dspac, SIMD_FMT_PH, Rs_uw,
NOROUND, SATURATE, &dspctl);
}});
}
0x2: decode OP_LO {
format DspIntOp {
0x2: rddsp({{
- Rd.uw = readDSPControl(&dspctl, RDDSPMASK);
+ Rd_uw = readDSPControl(&dspctl, RDDSPMASK);
}});
0x3: wrdsp({{
- writeDSPControl(&dspctl, Rs.uw, WRDSPMASK);
+ writeDSPControl(&dspctl, Rs_uw, WRDSPMASK);
}});
}
}
}
}});
0x3: shilov({{
- if (sext<6>(Rs.sw<5:0>) < 0) {
+ if (sext<6>(Rs_sw<5:0>) < 0) {
dspac = (uint64_t)dspac <<
- -sext<6>(Rs.sw<5:0>);
+ -sext<6>(Rs_sw<5:0>);
} else {
dspac = (uint64_t)dspac >>
- sext<6>(Rs.sw<5:0>);
+ sext<6>(Rs_sw<5:0>);
}
}});
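// Illustrative note, not part of the original source: the 6-bit shift
// amount in Rs is signed; e.g. Rs<5:0> == 0x3C sign-extends to -4 and
// shifts the accumulator left by 4, while positive values shift it right.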
0x7: mthlip({{
dspac = dspac << 32;
- dspac |= Rs.uw;
+ dspac |= Rs_uw;
dspctl = insertBits(dspctl, 5, 0,
dspctl<5:0> + 32);
}});
0x4: decode OPCODE_LO {
format LoadMemory {
- 0x0: lb({{ Rt.sw = Mem.sb; }});
- 0x1: lh({{ Rt.sw = Mem.sh; }});
- 0x3: lw({{ Rt.sw = Mem.sw; }});
- 0x4: lbu({{ Rt.uw = Mem.ub;}});
- 0x5: lhu({{ Rt.uw = Mem.uh; }});
+ 0x0: lb({{ Rt_sw = Mem_sb; }});
+ 0x1: lh({{ Rt_sw = Mem_sh; }});
+ 0x3: lw({{ Rt_sw = Mem_sw; }});
+ 0x4: lbu({{ Rt_uw = Mem_ub;}});
+ 0x5: lhu({{ Rt_uw = Mem_uh; }});
}
format LoadUnalignedMemory {
0x2: lwl({{
uint32_t mem_shift = 24 - (8 * byte_offset);
- Rt.uw = mem_word << mem_shift | (Rt.uw & mask(mem_shift));
+ Rt_uw = mem_word << mem_shift | (Rt_uw & mask(mem_shift));
}});
0x6: lwr({{
uint32_t mem_shift = 8 * byte_offset;
- Rt.uw = (Rt.uw & (mask(mem_shift) << (32 - mem_shift))) |
+ Rt_uw = (Rt_uw & (mask(mem_shift) << (32 - mem_shift))) |
(mem_word >> mem_shift);
}});
}
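// Illustrative sketch, not part of the original source: for
// byte_offset == 1, lwl computes mem_shift == 16, so Rt_uw keeps its low
// 16 bits and takes the fetched bytes above them; lwr mirrors the merge
// from the low end.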
0x5: decode OPCODE_LO {
format StoreMemory {
- 0x0: sb({{ Mem.ub = Rt<7:0>; }});
- 0x1: sh({{ Mem.uh = Rt<15:0>; }});
- 0x3: sw({{ Mem.uw = Rt<31:0>; }});
+ 0x0: sb({{ Mem_ub = Rt<7:0>; }});
+ 0x1: sh({{ Mem_uh = Rt<15:0>; }});
+ 0x3: sw({{ Mem_uw = Rt<31:0>; }});
}
format StoreUnalignedMemory {
uint32_t reg_shift = 24 - (8 * byte_offset);
uint32_t mem_shift = 32 - reg_shift;
mem_word = (mem_word & (mask(reg_shift) << mem_shift)) |
- (Rt.uw >> reg_shift);
+ (Rt_uw >> reg_shift);
}});
0x6: swr({{
uint32_t reg_shift = 8 * byte_offset;
- mem_word = Rt.uw << reg_shift |
+ mem_word = Rt_uw << reg_shift |
(mem_word & (mask(reg_shift)));
}});
}
format CP0Control {
0x7: cache({{
- //Addr CacheEA = Rs.uw + OFFSET;
+ //Addr CacheEA = Rs_uw + OFFSET;
//fault = xc->CacheOp((uint8_t)CACHE_OP,(Addr) CacheEA);
}});
}
0x6: decode OPCODE_LO {
format LoadMemory {
- 0x0: ll({{ Rt.uw = Mem.uw; }}, mem_flags=LLSC);
- 0x1: lwc1({{ Ft.uw = Mem.uw; }});
- 0x5: ldc1({{ Ft.ud = Mem.ud; }});
+ 0x0: ll({{ Rt_uw = Mem_uw; }}, mem_flags=LLSC);
+ 0x1: lwc1({{ Ft_uw = Mem_uw; }});
+ 0x5: ldc1({{ Ft_ud = Mem_ud; }});
}
0x2: CP2Unimpl::lwc2();
0x6: CP2Unimpl::ldc2();
0x7: decode OPCODE_LO {
- 0x0: StoreCond::sc({{ Mem.uw = Rt.uw; }},
+ 0x0: StoreCond::sc({{ Mem_uw = Rt_uw; }},
{{ uint64_t tmp = write_result;
- Rt.uw = (tmp == 0 || tmp == 1) ? tmp : Rt.uw;
+ Rt_uw = (tmp == 0 || tmp == 1) ? tmp : Rt_uw;
}}, mem_flags=LLSC,
inst_flags = IsStoreConditional);
format StoreMemory {
- 0x1: swc1({{ Mem.uw = Ft.uw; }});
- 0x5: sdc1({{ Mem.ud = Ft.ud; }});
+ 0x1: swc1({{ Mem_uw = Ft_uw; }});
+ 0x5: sdc1({{ Mem_ud = Ft_ud; }});
}
0x2: CP2Unimpl::swc2();
0x6: CP2Unimpl::sdc2();
import sys
code = 'bool cond;\n'
- if '.sf' in cond_code or 'SinglePrecision' in flags:
+ if '_sf' in cond_code or 'SinglePrecision' in flags:
if 'QnanException' in flags:
- code += 'if (isQnan(&Fs.sf, 32) || isQnan(&Ft.sf, 32)) {\n'
+ code += 'if (isQnan(&Fs_sf, 32) || isQnan(&Ft_sf, 32)) {\n'
code += '\tFCSR = genInvalidVector(FCSR);\n'
code += '\treturn NoFault;'
code += '}\n else '
- code += 'if (isNan(&Fs.sf, 32) || isNan(&Ft.sf, 32)) {\n'
- elif '.df' in cond_code or 'DoublePrecision' in flags:
+ code += 'if (isNan(&Fs_sf, 32) || isNan(&Ft_sf, 32)) {\n'
+ elif '_df' in cond_code or 'DoublePrecision' in flags:
if 'QnanException' in flags:
- code += 'if (isQnan(&Fs.df, 64) || isQnan(&Ft.df, 64)) {\n'
+ code += 'if (isQnan(&Fs_df, 64) || isQnan(&Ft_df, 64)) {\n'
code += '\tFCSR = genInvalidVector(FCSR);\n'
code += '\treturn NoFault;'
code += '}\n else '
- code += 'if (isNan(&Fs.df, 64) || isNan(&Ft.df, 64)) {\n'
+ code += 'if (isNan(&Fs_df, 64) || isNan(&Ft_df, 64)) {\n'
else:
sys.exit('Decoder Failed: Can\'t Determine Operand Type\n')
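# Illustrative sketch, not part of the original source (hypothetical
# cond_code): a comparison such as "cond = (Fs_sf < Ft_sf);" now trips the
# '_sf' test and receives the 32-bit isNan()/isQnan() prologue; a '_df'
# comparison gets the 64-bit variant instead.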
#Determine Source Type
convert = 'fpConvert('
- if '.sf' in code:
+ if '_sf' in code:
code = 'float ' + code + '\n'
convert += 'SINGLE_TO_'
- elif '.df' in code:
+ elif '_df' in code:
code = 'double ' + code + '\n'
convert += 'DOUBLE_TO_'
- elif '.uw' in code:
+ elif '_uw' in code:
code = 'uint32_t ' + code + '\n'
convert += 'WORD_TO_'
- elif '.ud' in code:
+ elif '_ud' in code:
code = 'uint64_t ' + code + '\n'
convert += 'LONG_TO_'
else:
#Determine Destination Type
if 'ToSingle' in flags:
- code += 'Fd.uw = ' + convert + 'SINGLE, '
+ code += 'Fd_uw = ' + convert + 'SINGLE, '
elif 'ToDouble' in flags:
- code += 'Fd.ud = ' + convert + 'DOUBLE, '
+ code += 'Fd_ud = ' + convert + 'DOUBLE, '
elif 'ToWord' in flags:
- code += 'Fd.uw = ' + convert + 'WORD, '
+ code += 'Fd_uw = ' + convert + 'WORD, '
elif 'ToLong' in flags:
- code += 'Fd.ud = ' + convert + 'LONG, '
+ code += 'Fd_ud = ' + convert + 'LONG, '
else:
sys.exit("Error Determining Destination Type for Conversion")
code += 'code_block1 = code_block2 = true;\n'
if 'QnanException' in flags:
- code += 'if (isQnan(&Fs1.sf, 32) || isQnan(&Ft1.sf, 32)) {\n'
+ code += 'if (isQnan(&Fs1_sf, 32) || isQnan(&Ft1_sf, 32)) {\n'
code += '\tFCSR = genInvalidVector(FCSR);\n'
code += 'code_block1 = false;'
code += '}\n'
- code += 'if (isQnan(&Fs2.sf, 32) || isQnan(&Ft2.sf, 32)) {\n'
+ code += 'if (isQnan(&Fs2_sf, 32) || isQnan(&Ft2_sf, 32)) {\n'
code += '\tFCSR = genInvalidVector(FCSR);\n'
code += 'code_block2 = false;'
code += '}\n'
code += 'if (code_block1) {'
- code += '\tif (isNan(&Fs1.sf, 32) || isNan(&Ft1.sf, 32)) {\n'
+ code += '\tif (isNan(&Fs1_sf, 32) || isNan(&Ft1_sf, 32)) {\n'
if 'UnorderedTrue' in flags:
code += 'cond1 = 1;\n'
elif 'UnorderedFalse' in flags:
code += 'FCSR = genCCVector(FCSR, CC, cond1);}\n}\n'
code += 'if (code_block2) {'
- code += '\tif (isNan(&Fs2.sf, 32) || isNan(&Ft2.sf, 32)) {\n'
+ code += '\tif (isNan(&Fs2_sf, 32) || isNan(&Ft2_sf, 32)) {\n'
if 'UnorderedTrue' in flags:
code += 'cond2 = 1;\n'
elif 'UnorderedFalse' in flags:
def format HiLoRdSelValOp(code, *opt_flags) {{
- if '.sd' in code:
+ if '_sd' in code:
code = 'int64_t ' + code
- elif '.ud' in code:
+ elif '_ud' in code:
code = 'uint64_t ' + code
code += 'HI_RD_SEL = val<63:32>;\n'
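# Illustrative sketch, not part of the original source: for the MIPS madd
# code earlier in this patch ("val = ... + (Rs_sd * Rt_sd);"), the '_sd'
# test prepends "int64_t ", declaring val signed before HI/LO are split
# back out of it.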
def format LoadUnalignedMemory(memacc_code, ea_code = {{ EA = (Rs + disp) & ~3; }},
mem_flags = [], inst_flags = []) {{
decl_code = '''
- uint32_t mem_word = Mem.uw;
+ uint32_t mem_word = Mem_uw;
uint32_t unalign_addr = Rs + disp;
uint32_t byte_offset = unalign_addr & 3;
if (GuestByteOrder == BigEndianByteOrder)
}});
11: cmpi({{
Xer xer = XER;
- uint32_t cr = makeCRField(Ra.sw, (int32_t)imm, xer.so);
+ uint32_t cr = makeCRField(Ra_sw, (int32_t)imm, xer.so);
CR = insertCRField(CR, BF, cr);
}});
}
// Arithmetic instructions all use source registers Ra and Rb,
// with destination register Rt.
format IntArithOp {
- 75: mulhw({{ int64_t prod = Ra.sq * Rb.sq; Rt = prod >> 32; }});
- 11: mulhwu({{ uint64_t prod = Ra.uq * Rb.uq; Rt = prod >> 32; }});
- 235: mullw({{ int64_t prod = Ra.sq * Rb.sq; Rt = prod; }});
- 747: mullwo({{ int64_t src1 = Ra.sq; int64_t src2 = Rb; int64_t prod = src1 * src2; Rt = prod; }},
+ 75: mulhw({{ int64_t prod = Ra_sq * Rb_sq; Rt = prod >> 32; }});
+ 11: mulhwu({{ uint64_t prod = Ra_uq * Rb_uq; Rt = prod >> 32; }});
+ 235: mullw({{ int64_t prod = Ra_sq * Rb_sq; Rt = prod; }});
+ 747: mullwo({{ int64_t src1 = Ra_sq; int64_t src2 = Rb; int64_t prod = src1 * src2; Rt = prod; }},
true);
491: divw({{
- int32_t src1 = Ra.sw;
- int32_t src2 = Rb.sw;
+ int32_t src1 = Ra_sw;
+ int32_t src2 = Rb_sw;
if ((src1 != 0x80000000 || src2 != 0xffffffff)
&& src2 != 0) {
Rt = src1 / src2;
}});
1003: divwo({{
- int32_t src1 = Ra.sw;
- int32_t src2 = Rb.sw;
+ int32_t src1 = Ra_sw;
+ int32_t src2 = Rb_sw;
if ((src1 != 0x80000000 || src2 != 0xffffffff)
&& src2 != 0) {
Rt = src1 / src2;
true);
459: divwu({{
- uint32_t src1 = Ra.sw;
- uint32_t src2 = Rb.sw;
+ uint32_t src1 = Ra_sw;
+ uint32_t src2 = Rb_sw;
if (src2 != 0) {
Rt = src1 / src2;
} else {
}});
971: divwuo({{
- uint32_t src1 = Ra.sw;
- uint32_t src2 = Rb.sw;
+ uint32_t src1 = Ra_sw;
+ uint32_t src2 = Rb_sw;
if (src2 != 0) {
Rt = src1 / src2;
} else {
format IntOp {
0: cmp({{
Xer xer = XER;
- uint32_t cr = makeCRField(Ra.sw, Rb.sw, xer.so);
+ uint32_t cr = makeCRField(Ra_sw, Rb_sw, xer.so);
CR = insertCRField(CR, BF, cr);
}});
32: cmpl({{
// R0. Others update Ra with the effective address. In all cases,
// Ra and Rb are source registers, Rt is the destination.
format LoadIndexOp {
- 87: lbzx({{ Rt = Mem.ub; }});
- 279: lhzx({{ Rt = Mem.uh; }});
- 343: lhax({{ Rt = Mem.sh; }});
+ 87: lbzx({{ Rt = Mem_ub; }});
+ 279: lhzx({{ Rt = Mem_uh; }});
+ 343: lhax({{ Rt = Mem_sh; }});
23: lwzx({{ Rt = Mem; }});
- 341: lwax({{ Rt = Mem.sw; }});
- 20: lwarx({{ Rt = Mem.sw; Rsv = 1; RsvLen = 4; RsvAddr = EA; }});
- 535: lfsx({{ Ft.sf = Mem.sf; }});
- 599: lfdx({{ Ft = Mem.df; }});
- 855: lfiwax({{ Ft.uw = Mem; }});
+ 341: lwax({{ Rt = Mem_sw; }});
+ 20: lwarx({{ Rt = Mem_sw; Rsv = 1; RsvLen = 4; RsvAddr = EA; }});
+ 535: lfsx({{ Ft_sf = Mem_sf; }});
+ 599: lfdx({{ Ft = Mem_df; }});
+ 855: lfiwax({{ Ft_uw = Mem; }});
}
format LoadIndexUpdateOp {
- 119: lbzux({{ Rt = Mem.ub; }});
- 311: lhzux({{ Rt = Mem.uh; }});
- 375: lhaux({{ Rt = Mem.sh; }});
+ 119: lbzux({{ Rt = Mem_ub; }});
+ 311: lhzux({{ Rt = Mem_uh; }});
+ 375: lhaux({{ Rt = Mem_sh; }});
55: lwzux({{ Rt = Mem; }});
- 373: lwaux({{ Rt = Mem.sw; }});
- 567: lfsux({{ Ft.sf = Mem.sf; }});
- 631: lfdux({{ Ft = Mem.df; }});
+ 373: lwaux({{ Rt = Mem_sw; }});
+ 567: lfsux({{ Ft_sf = Mem_sf; }});
+ 631: lfdux({{ Ft = Mem_df; }});
}
format StoreIndexOp {
- 215: stbx({{ Mem.ub = Rs.ub; }});
- 407: sthx({{ Mem.uh = Rs.uh; }});
+ 215: stbx({{ Mem_ub = Rs_ub; }});
+ 407: sthx({{ Mem_uh = Rs_uh; }});
151: stwx({{ Mem = Rs; }});
150: stwcx({{
bool store_performed = false;
CR = cr;
Rsv = 0;
}});
- 663: stfsx({{ Mem.sf = Fs.sf; }});
- 727: stfdx({{ Mem.df = Fs; }});
- 983: stfiwx({{ Mem = Fs.uw; }});
+ 663: stfsx({{ Mem_sf = Fs_sf; }});
+ 727: stfdx({{ Mem_df = Fs; }});
+ 983: stfiwx({{ Mem = Fs_uw; }});
}
format StoreIndexUpdateOp {
- 247: stbux({{ Mem.ub = Rs.ub; }});
- 439: sthux({{ Mem.uh = Rs.uh; }});
+ 247: stbux({{ Mem_ub = Rs_ub; }});
+ 439: sthux({{ Mem_uh = Rs_uh; }});
183: stwux({{ Mem = Rs; }});
- 695: stfsux({{ Mem.sf = Fs.sf; }});
- 759: stfdux({{ Mem.df = Fs; }});
+ 695: stfsux({{ Mem_sf = Fs_sf; }});
+ 759: stfdux({{ Mem_df = Fs; }});
}
// These instructions all provide data cache hints
8: subfic({{ int32_t src = ~Ra; Rt = src + imm + 1; }},
[computeCA]);
7: mulli({{
- int32_t src = Ra.sw;
+ int32_t src = Ra_sw;
int64_t prod = src * imm;
Rt = (uint32_t)prod;
}});
}
format LoadDispOp {
- 34: lbz({{ Rt = Mem.ub; }});
- 40: lhz({{ Rt = Mem.uh; }});
- 42: lha({{ Rt = Mem.sh; }});
+ 34: lbz({{ Rt = Mem_ub; }});
+ 40: lhz({{ Rt = Mem_uh; }});
+ 42: lha({{ Rt = Mem_sh; }});
32: lwz({{ Rt = Mem; }});
- 58: lwa({{ Rt = Mem.sw; }},
+ 58: lwa({{ Rt = Mem_sw; }},
{{ EA = Ra + (disp & 0xfffffffc); }},
{{ EA = disp & 0xfffffffc; }});
- 48: lfs({{ Ft.sf = Mem.sf; }});
- 50: lfd({{ Ft = Mem.df; }});
+ 48: lfs({{ Ft_sf = Mem_sf; }});
+ 50: lfd({{ Ft = Mem_df; }});
}
format LoadDispUpdateOp {
- 35: lbzu({{ Rt = Mem.ub; }});
- 41: lhzu({{ Rt = Mem.uh; }});
- 43: lhau({{ Rt = Mem.sh; }});
+ 35: lbzu({{ Rt = Mem_ub; }});
+ 41: lhzu({{ Rt = Mem_uh; }});
+ 43: lhau({{ Rt = Mem_sh; }});
33: lwzu({{ Rt = Mem; }});
- 49: lfsu({{ Ft.sf = Mem.sf; }});
- 51: lfdu({{ Ft = Mem.df; }});
+ 49: lfsu({{ Ft_sf = Mem_sf; }});
+ 51: lfdu({{ Ft = Mem_df; }});
}
format StoreDispOp {
- 38: stb({{ Mem.ub = Rs.ub; }});
- 44: sth({{ Mem.uh = Rs.uh; }});
+ 38: stb({{ Mem_ub = Rs_ub; }});
+ 44: sth({{ Mem_uh = Rs_uh; }});
36: stw({{ Mem = Rs; }});
- 52: stfs({{ Mem.sf = Fs.sf; }});
- 54: stfd({{ Mem.df = Fs; }});
+ 52: stfs({{ Mem_sf = Fs_sf; }});
+ 54: stfd({{ Mem_df = Fs; }});
}
format StoreDispUpdateOp {
- 39: stbu({{ Mem.ub = Rs.ub; }});
- 45: sthu({{ Mem.uh = Rs.uh; }});
+ 39: stbu({{ Mem_ub = Rs_ub; }});
+ 45: sthu({{ Mem_uh = Rs_uh; }});
37: stwu({{ Mem = Rs; }});
- 53: stfsu({{ Mem.sf = Fs.sf; }});
- 55: stfdu({{ Mem.df = Fs; }});
+ 53: stfsu({{ Mem_sf = Fs_sf; }});
+ 55: stfdu({{ Mem_df = Fs; }});
}
17: IntOp::sc({{ xc->syscall(R0); }},
default: decode XO_XO {
format FloatConvertOp {
- 12: frsp({{ Ft.sf = Fb; }});
- 15: fctiwz({{ Ft.sw = (int32_t)trunc(Fb); }});
+ 12: frsp({{ Ft_sf = Fb; }});
+ 15: fctiwz({{ Ft_sw = (int32_t)trunc(Fb); }});
}
format FloatOp {
format FloatRCCheckOp {
72: fmr({{ Ft = Fb; }});
264: fabs({{
- Ft.uq = Fb.uq;
- Ft.uq = insertBits(Ft.uq, 63, 0); }});
+ Ft_uq = Fb_uq;
+ Ft_uq = insertBits(Ft_uq, 63, 0); }});
136: fnabs({{
- Ft.uq = Fb.uq;
- Ft.uq = insertBits(Ft.uq, 63, 1); }});
+ Ft_uq = Fb_uq;
+ Ft_uq = insertBits(Ft_uq, 63, 1); }});
40: fneg({{ Ft = -Fb; }});
8: fcpsgn({{
- Ft.uq = Fb.uq;
- Ft.uq = insertBits(Ft.uq, 63, Fa.uq<63:63>);
+ Ft_uq = Fb_uq;
+ Ft_uq = insertBits(Ft_uq, 63, Fa_uq<63:63>);
}});
- 583: mffs({{ Ft.uq = FPSCR; }});
+ 583: mffs({{ Ft_uq = FPSCR; }});
134: mtfsfi({{
FPSCR = insertCRField(FPSCR, BF + (8 * (1 - W)), U_FIELD);
}});
711: mtfsf({{
- if (L == 1) { FPSCR = Fb.uq; }
+ if (L == 1) { FPSCR = Fb_uq; }
else {
for (int i = 0; i < 8; ++i) {
if (bits(FLM, i) == 1) {
int k = 4 * (i + (8 * (1 - W)));
FPSCR = insertBits(FPSCR, k, k + 3,
- bits(Fb.uq, k, k + 3));
+ bits(Fb_uq, k, k + 3));
}
}
}
// Floating point elementary arithmetic operations. Besides having two
// versions of each instruction for when Rc is set or not, we also have
// to alter lots of special registers depending on the result of the
-// operation. The result is always in Ft.sf.
+// operation. The result is always in Ft_sf.
def format FloatArithOp(code, inst_flags = []) {{
# Code when Rc is set
// Floating point rounding and conversion operations. Besides having two
// versions of each instruction for when Rc is set or not, we also have
// to alter lots of special registers depending on the result of the
-// operation. The result is always in Ft.sf.
+// operation. The result is always in Ft_sf.
def format FloatConvertOp(code, inst_flags = []) {{
# Code when Rc is set
for opName in ("Frd", "Frs1", "Frs2", "Frd_N"):
next_pos = 0
operandsREString = (r'''
- (?<![\w\.]) # neg. lookbehind assertion: prevent partial matches
- ((%s)(?:\.(\w+))?) # match: operand with optional '.' then suffix
- (?![\w\.]) # neg. lookahead assertion: prevent partial matches
+ (?<!\w) # neg. lookbehind assertion: prevent partial matches
+ ((%s)(?:_([^\W_]+))?) # match: operand with optional '_' then suffix
+ (?!\w) # neg. lookahead assertion: prevent partial matches
''' % opName)
operandsRE = re.compile(operandsREString, re.MULTILINE|re.VERBOSE)
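# Illustrative sketch, not part of the original source (hypothetical test
# strings): for opName "Frd" the new '_'-separator pattern behaves as
#   pat = re.compile(r'(?<!\w)((Frd)(?:_([^\W_]+))?)(?!\w)')
#   pat.search('Frd_sf').group(3)   # -> 'sf'
#   pat.search('Frd_N_udw')         # -> None: [^\W_]+ excludes '_', so the
#                                   #    separate "Frd_N" opName claims it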
is_src = False
let {{
def splitOutImm(code):
- matcher = re.compile(r'Rs(?P<rNum>\d)_or_imm(?P<iNum>\d+)(?P<typeQual>\.\w+)?')
+ matcher = re.compile(r'Rs(?P<rNum>\d)_or_imm(?P<iNum>\d+)(?P<typeQual>_[^\W_]+)?')
rOrImmMatch = matcher.search(code)
if (rOrImmMatch == None):
return (False, code, '', '', '')
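# Illustrative sketch, not part of the original source: on operand text
# such as "Rs2_or_imm13_sdw" (see the SPARC sdivx code below), the new
# matcher yields rNum == '2', iNum == '13', typeQual == '_sdw'; the old
# pattern captured '.sdw' instead.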
{
format BranchSplit
{
- 0x1: bpreq(test={{Rs1.sdw == 0}});
- 0x2: bprle(test={{Rs1.sdw <= 0}});
- 0x3: bprl(test={{Rs1.sdw < 0}});
- 0x5: bprne(test={{Rs1.sdw != 0}});
- 0x6: bprg(test={{Rs1.sdw > 0}});
- 0x7: bprge(test={{Rs1.sdw >= 0}});
+ 0x1: bpreq(test={{Rs1_sdw == 0}});
+ 0x2: bprle(test={{Rs1_sdw <= 0}});
+ 0x3: bprl(test={{Rs1_sdw < 0}});
+ 0x5: bprne(test={{Rs1_sdw != 0}});
+ 0x6: bprg(test={{Rs1_sdw > 0}});
+ 0x7: bprge(test={{Rs1_sdw >= 0}});
}
}
// SETHI (or NOP if rd == 0 and imm == 0)
- 0x4: SetHi::sethi({{Rd.udw = imm;}});
+ 0x4: SetHi::sethi({{Rd_udw = imm;}});
// fbpfcc
0x5: decode COND2 {
format BranchN {
}},None, None, IsIndirectControl, IsCall);
0x2: decode OP3 {
format IntOp {
- 0x00: add({{Rd = Rs1.sdw + Rs2_or_imm13;}});
- 0x01: and({{Rd = Rs1.sdw & Rs2_or_imm13;}});
- 0x02: or({{Rd = Rs1.sdw | Rs2_or_imm13;}});
- 0x03: xor({{Rd = Rs1.sdw ^ Rs2_or_imm13;}});
- 0x04: sub({{Rd = Rs1.sdw - Rs2_or_imm13;}});
- 0x05: andn({{Rd = Rs1.sdw & ~Rs2_or_imm13;}});
- 0x06: orn({{Rd = Rs1.sdw | ~Rs2_or_imm13;}});
- 0x07: xnor({{Rd = ~(Rs1.sdw ^ Rs2_or_imm13);}});
- 0x08: addc({{Rd = Rs1.sdw + Rs2_or_imm13 + Ccr<0:0>;}});
- 0x09: mulx({{Rd = Rs1.sdw * Rs2_or_imm13;}});
+ 0x00: add({{Rd = Rs1_sdw + Rs2_or_imm13;}});
+ 0x01: and({{Rd = Rs1_sdw & Rs2_or_imm13;}});
+ 0x02: or({{Rd = Rs1_sdw | Rs2_or_imm13;}});
+ 0x03: xor({{Rd = Rs1_sdw ^ Rs2_or_imm13;}});
+ 0x04: sub({{Rd = Rs1_sdw - Rs2_or_imm13;}});
+ 0x05: andn({{Rd = Rs1_sdw & ~Rs2_or_imm13;}});
+ 0x06: orn({{Rd = Rs1_sdw | ~Rs2_or_imm13;}});
+ 0x07: xnor({{Rd = ~(Rs1_sdw ^ Rs2_or_imm13);}});
+ 0x08: addc({{Rd = Rs1_sdw + Rs2_or_imm13 + Ccr<0:0>;}});
+ 0x09: mulx({{Rd = Rs1_sdw * Rs2_or_imm13;}});
0x0A: umul({{
- Rd = Rs1.udw<31:0> * Rs2_or_imm13<31:0>;
+ Rd = Rs1_udw<31:0> * Rs2_or_imm13<31:0>;
Y = Rd<63:32>;
}});
0x0B: smul({{
- Rd.sdw = sext<32>(Rs1.sdw<31:0>) * sext<32>(Rs2_or_imm13<31:0>);
- Y = Rd.sdw<63:32>;
+ Rd_sdw = sext<32>(Rs1_sdw<31:0>) * sext<32>(Rs2_or_imm13<31:0>);
+ Y = Rd_sdw<63:32>;
}});
- 0x0C: subc({{Rd.sdw = Rs1.sdw + (~Rs2_or_imm13) + 1 - Ccr<0:0>}});
+ 0x0C: subc({{Rd_sdw = Rs1_sdw + (~Rs2_or_imm13) + 1 - Ccr<0:0>}});
0x0D: udivx({{
if (Rs2_or_imm13 == 0)
fault = new DivisionByZero;
else
- Rd.udw = Rs1.udw / Rs2_or_imm13;
+ Rd_udw = Rs1_udw / Rs2_or_imm13;
}});
0x0E: udiv({{
if (Rs2_or_imm13 == 0) {
fault = new DivisionByZero;
} else {
- Rd.udw = ((Y << 32) | Rs1.udw<31:0>) / Rs2_or_imm13;
- if (Rd.udw >> 32 != 0)
- Rd.udw = 0xFFFFFFFF;
+ Rd_udw = ((Y << 32) | Rs1_udw<31:0>) / Rs2_or_imm13;
+ if (Rd_udw >> 32 != 0)
+ Rd_udw = 0xFFFFFFFF;
}
}});
0x0F: sdiv({{
- if (Rs2_or_imm13.sdw == 0) {
+ if (Rs2_or_imm13_sdw == 0) {
fault = new DivisionByZero;
} else {
- Rd.udw = ((int64_t)((Y << 32) |
- Rs1.sdw<31:0>)) / Rs2_or_imm13.sdw;
- if ((int64_t)Rd.udw >=
+ Rd_udw = ((int64_t)((Y << 32) |
+ Rs1_sdw<31:0>)) / Rs2_or_imm13_sdw;
+ if ((int64_t)Rd_udw >=
std::numeric_limits<int32_t>::max()) {
- Rd.udw = 0x7FFFFFFF;
- } else if ((int64_t)Rd.udw <=
+ Rd_udw = 0x7FFFFFFF;
+ } else if ((int64_t)Rd_udw <=
std::numeric_limits<int32_t>::min()) {
- Rd.udw = ULL(0xFFFFFFFF80000000);
+ Rd_udw = ULL(0xFFFFFFFF80000000);
}
}
}});
}});
0x1A: IntOpCcRes::umulcc({{
uint64_t resTemp;
- Rd = resTemp = Rs1.udw<31:0> * Rs2_or_imm13.udw<31:0>;
+ Rd = resTemp = Rs1_udw<31:0> * Rs2_or_imm13_udw<31:0>;
Y = resTemp<63:32>;}});
0x1B: IntOpCcRes::smulcc({{
int64_t resTemp;
- Rd = resTemp = sext<32>(Rs1.sdw<31:0>) * sext<32>(Rs2_or_imm13<31:0>);
+ Rd = resTemp = sext<32>(Rs1_sdw<31:0>) * sext<32>(Rs2_or_imm13<31:0>);
Y = resTemp<63:32>;}});
0x1C: subccc({{
int64_t res, op1 = Rs1, op2 = Rs2_or_imm13;
Rd = res = op1 - op2 - Ccr<0:>;
}}, sub=True);
0x1D: IntOpCcRes::udivxcc({{
- if (Rs2_or_imm13.udw == 0)
+ if (Rs2_or_imm13_udw == 0)
fault = new DivisionByZero;
else
- Rd = Rs1.udw / Rs2_or_imm13.udw;}});
+ Rd = Rs1_udw / Rs2_or_imm13_udw;}});
0x1E: IntOpCcRes::udivcc({{
uint64_t resTemp;
- uint32_t val2 = Rs2_or_imm13.udw;
+ uint32_t val2 = Rs2_or_imm13_udw;
int32_t overflow = 0;
if (val2 == 0) {
fault = new DivisionByZero;
} else {
- resTemp = (uint64_t)((Y << 32) | Rs1.udw<31:0>) / val2;
+ resTemp = (uint64_t)((Y << 32) | Rs1_udw<31:0>) / val2;
overflow = (resTemp<63:32> != 0);
if (overflow)
Rd = resTemp = 0xFFFFFFFF;
}
}}, iv={{overflow}});
0x1F: IntOpCcRes::sdivcc({{
- int64_t val2 = Rs2_or_imm13.sdw<31:0>;
+ int64_t val2 = Rs2_or_imm13_sdw<31:0>;
bool overflow = false, underflow = false;
if (val2 == 0) {
fault = new DivisionByZero;
} else {
- Rd = (int64_t)((Y << 32) | Rs1.sdw<31:0>) / val2;
+ Rd = (int64_t)((Y << 32) | Rs1_sdw<31:0>) / val2;
overflow = ((int64_t)Rd >= std::numeric_limits<int32_t>::max());
underflow = ((int64_t)Rd <= std::numeric_limits<int32_t>::min());
if (overflow)
0x1: sllx({{Rd = Rs1 << (I ? SHCNT64 : Rs2<5:0>);}});
}
0x26: decode X {
- 0x0: srl({{Rd = Rs1.uw >> (I ? SHCNT32 : Rs2<4:0>);}});
- 0x1: srlx({{Rd = Rs1.udw >> (I ? SHCNT64 : Rs2<5:0>);}});
+ 0x0: srl({{Rd = Rs1_uw >> (I ? SHCNT32 : Rs2<4:0>);}});
+ 0x1: srlx({{Rd = Rs1_udw >> (I ? SHCNT64 : Rs2<5:0>);}});
}
0x27: decode X {
- 0x0: sra({{Rd = Rs1.sw >> (I ? SHCNT32 : Rs2<4:0>);}});
- 0x1: srax({{Rd = Rs1.sdw >> (I ? SHCNT64 : Rs2<5:0>);}});
+ 0x0: sra({{Rd = Rs1_sw >> (I ? SHCNT32 : Rs2<4:0>);}});
+ 0x1: srax({{Rd = Rs1_sdw >> (I ? SHCNT64 : Rs2<5:0>);}});
}
0x28: decode RS1 {
0x00: NoPriv::rdy({{Rd = Y<31:0>;}});
}
}
0x2D: sdivx({{
- if (Rs2_or_imm13.sdw == 0)
+ if (Rs2_or_imm13_sdw == 0)
fault = new DivisionByZero;
else
- Rd.sdw = Rs1.sdw / Rs2_or_imm13.sdw;
+ Rd_sdw = Rs1_sdw / Rs2_or_imm13_sdw;
}});
0x2E: Trap::popc({{fault = new IllegalInstruction;}});
0x2F: decode RCOND3
{
- 0x1: movreq({{Rd = (Rs1.sdw == 0) ? Rs2_or_imm10 : Rd;}});
- 0x2: movrle({{Rd = (Rs1.sdw <= 0) ? Rs2_or_imm10 : Rd;}});
- 0x3: movrl({{Rd = (Rs1.sdw < 0) ? Rs2_or_imm10 : Rd;}});
- 0x5: movrne({{Rd = (Rs1.sdw != 0) ? Rs2_or_imm10 : Rd;}});
- 0x6: movrg({{Rd = (Rs1.sdw > 0) ? Rs2_or_imm10 : Rd;}});
- 0x7: movrge({{Rd = (Rs1.sdw >= 0) ? Rs2_or_imm10 : Rd;}});
+ 0x1: movreq({{Rd = (Rs1_sdw == 0) ? Rs2_or_imm10 : Rd;}});
+ 0x2: movrle({{Rd = (Rs1_sdw <= 0) ? Rs2_or_imm10 : Rd;}});
+ 0x3: movrl({{Rd = (Rs1_sdw < 0) ? Rs2_or_imm10 : Rd;}});
+ 0x5: movrne({{Rd = (Rs1_sdw != 0) ? Rs2_or_imm10 : Rd;}});
+ 0x6: movrg({{Rd = (Rs1_sdw > 0) ? Rs2_or_imm10 : Rd;}});
+ 0x7: movrge({{Rd = (Rs1_sdw >= 0) ? Rs2_or_imm10 : Rd;}});
}
0x30: decode RD {
0x00: NoPriv::wry({{Y = (Rs1 ^ Rs2_or_imm13)<31:0>;}});
}
0x34: decode OPF{
format FpBasic{
- 0x01: fmovs({{Frds.uw = Frs2s.uw;}});
- 0x02: fmovd({{Frd.udw = Frs2.udw;}});
+ 0x01: fmovs({{Frds_uw = Frs2s_uw;}});
+ 0x02: fmovd({{Frd_udw = Frs2_udw;}});
0x03: FpUnimpl::fmovq();
- 0x05: fnegs({{Frds.uw = Frs2s.uw ^ (1UL << 31);}});
- 0x06: fnegd({{Frd.udw = Frs2.udw ^ (1ULL << 63);}});
+ 0x05: fnegs({{Frds_uw = Frs2s_uw ^ (1UL << 31);}});
+ 0x06: fnegd({{Frd_udw = Frs2_udw ^ (1ULL << 63);}});
0x07: FpUnimpl::fnegq();
- 0x09: fabss({{Frds.uw = ((1UL << 31) - 1) & Frs2s.uw;}});
- 0x0A: fabsd({{Frd.udw = ((1ULL << 63) - 1) & Frs2.udw;}});
+ 0x09: fabss({{Frds_uw = ((1UL << 31) - 1) & Frs2s_uw;}});
+ 0x0A: fabsd({{Frd_udw = ((1ULL << 63) - 1) & Frs2_udw;}});
0x0B: FpUnimpl::fabsq();
- 0x29: fsqrts({{Frds.sf = std::sqrt(Frs2s.sf);}});
- 0x2A: fsqrtd({{Frd.df = std::sqrt(Frs2.df);}});
+ 0x29: fsqrts({{Frds_sf = std::sqrt(Frs2s_sf);}});
+ 0x2A: fsqrtd({{Frd_df = std::sqrt(Frs2_df);}});
0x2B: FpUnimpl::fsqrtq();
- 0x41: fadds({{Frds.sf = Frs1s.sf + Frs2s.sf;}});
- 0x42: faddd({{Frd.df = Frs1.df + Frs2.df;}});
+ 0x41: fadds({{Frds_sf = Frs1s_sf + Frs2s_sf;}});
+ 0x42: faddd({{Frd_df = Frs1_df + Frs2_df;}});
0x43: FpUnimpl::faddq();
- 0x45: fsubs({{Frds.sf = Frs1s.sf - Frs2s.sf;}});
- 0x46: fsubd({{Frd.df = Frs1.df - Frs2.df; }});
+ 0x45: fsubs({{Frds_sf = Frs1s_sf - Frs2s_sf;}});
+ 0x46: fsubd({{Frd_df = Frs1_df - Frs2_df; }});
0x47: FpUnimpl::fsubq();
- 0x49: fmuls({{Frds.sf = Frs1s.sf * Frs2s.sf;}});
- 0x4A: fmuld({{Frd.df = Frs1.df * Frs2.df;}});
+ 0x49: fmuls({{Frds_sf = Frs1s_sf * Frs2s_sf;}});
+ 0x4A: fmuld({{Frd_df = Frs1_df * Frs2_df;}});
0x4B: FpUnimpl::fmulq();
- 0x4D: fdivs({{Frds.sf = Frs1s.sf / Frs2s.sf;}});
- 0x4E: fdivd({{Frd.df = Frs1.df / Frs2.df;}});
+ 0x4D: fdivs({{Frds_sf = Frs1s_sf / Frs2s_sf;}});
+ 0x4E: fdivd({{Frd_df = Frs1_df / Frs2_df;}});
0x4F: FpUnimpl::fdivq();
- 0x69: fsmuld({{Frd.df = Frs1s.sf * Frs2s.sf;}});
+ 0x69: fsmuld({{Frd_df = Frs1s_sf * Frs2s_sf;}});
0x6E: FpUnimpl::fdmulq();
- 0x81: fstox({{Frd.sdw = static_cast<int64_t>(Frs2s.sf);}});
- 0x82: fdtox({{Frd.sdw = static_cast<int64_t>(Frs2.df);}});
+ 0x81: fstox({{Frd_sdw = static_cast<int64_t>(Frs2s_sf);}});
+ 0x82: fdtox({{Frd_sdw = static_cast<int64_t>(Frs2_df);}});
0x83: FpUnimpl::fqtox();
- 0x84: fxtos({{Frds.sf = static_cast<float>(Frs2.sdw);}});
- 0x88: fxtod({{Frd.df = static_cast<double>(Frs2.sdw);}});
+ 0x84: fxtos({{Frds_sf = static_cast<float>(Frs2_sdw);}});
+ 0x88: fxtod({{Frd_df = static_cast<double>(Frs2_sdw);}});
0x8C: FpUnimpl::fxtoq();
- 0xC4: fitos({{Frds.sf = static_cast<float>(Frs2s.sw);}});
- 0xC6: fdtos({{Frds.sf = Frs2.df;}});
+ 0xC4: fitos({{Frds_sf = static_cast<float>(Frs2s_sw);}});
+ 0xC6: fdtos({{Frds_sf = Frs2_df;}});
0xC7: FpUnimpl::fqtos();
- 0xC8: fitod({{Frd.df = static_cast<double>(Frs2s.sw);}});
- 0xC9: fstod({{Frd.df = Frs2s.sf;}});
+ 0xC8: fitod({{Frd_df = static_cast<double>(Frs2s_sw);}});
+ 0xC9: fstod({{Frd_df = Frs2s_sf;}});
0xCB: FpUnimpl::fqtod();
0xCC: FpUnimpl::fitoq();
0xCD: FpUnimpl::fstoq();
0xCE: FpUnimpl::fdtoq();
0xD1: fstoi({{
- Frds.sw = static_cast<int32_t>(Frs2s.sf);
- float t = Frds.sw;
- if (t != Frs2s.sf)
+ Frds_sw = static_cast<int32_t>(Frs2s_sf);
+ float t = Frds_sw;
+ if (t != Frs2s_sf)
Fsr = insertBits(Fsr, 4,0, 0x01);
}});
0xD2: fdtoi({{
- Frds.sw = static_cast<int32_t>(Frs2.df);
- double t = Frds.sw;
- if (t != Frs2.df)
+ Frds_sw = static_cast<int32_t>(Frs2_df);
+ double t = Frds_sw;
+ if (t != Frs2_df)
Fsr = insertBits(Fsr, 4,0, 0x01);
}});
0xD3: FpUnimpl::fqtoi();
0x3D: Trap::fpackfix({{fault = new IllegalInstruction;}});
0x3E: Trap::pdist({{fault = new IllegalInstruction;}});
0x48: BasicOperate::faligndata({{
- uint64_t msbX = Frs1.udw;
- uint64_t lsbX = Frs2.udw;
+ uint64_t msbX = Frs1_udw;
+ uint64_t lsbX = Frs2_udw;
// Some special cases need to be split out, first
// because they're the most likely to be used, and
// second because otherwise, we end up shifting by
// 64 bits, which is undefined according to the C standard.
switch (Gsr<2:0>) {
case 0:
- Frd.udw = msbX;
+ Frd_udw = msbX;
break;
case 8:
- Frd.udw = lsbX;
+ Frd_udw = lsbX;
break;
default:
uint64_t msbShift = Gsr<2:0> * 8;
uint64_t lsbShift = (8 - Gsr<2:0>) * 8;
uint64_t msbMask = ((uint64_t)(-1)) >> msbShift;
uint64_t lsbMask = ((uint64_t)(-1)) << lsbShift;
- Frd.udw = ((msbX & msbMask) << msbShift) |
+ Frd_udw = ((msbX & msbMask) << msbShift) |
((lsbX & lsbMask) >> lsbShift);
}
}});
0x55: FailUnimpl::fpsub16s();
0x56: FailUnimpl::fpsub32();
0x57: FailUnimpl::fpsub32s();
- 0x60: FpBasic::fzero({{Frd.df = 0;}});
- 0x61: FpBasic::fzeros({{Frds.sf = 0;}});
+ 0x60: FpBasic::fzero({{Frd_df = 0;}});
+ 0x61: FpBasic::fzeros({{Frds_sf = 0;}});
0x62: FailUnimpl::fnor();
0x63: FailUnimpl::fnors();
0x64: FailUnimpl::fandnot2();
0x65: FailUnimpl::fandnot2s();
0x66: FpBasic::fnot2({{
- Frd.df = (double)(~((uint64_t)Frs2.df));
+ Frd_df = (double)(~((uint64_t)Frs2_df));
}});
0x67: FpBasic::fnot2s({{
- Frds.sf = (float)(~((uint32_t)Frs2s.sf));
+ Frds_sf = (float)(~((uint32_t)Frs2s_sf));
}});
0x68: FailUnimpl::fandnot1();
0x69: FailUnimpl::fandnot1s();
0x6A: FpBasic::fnot1({{
- Frd.df = (double)(~((uint64_t)Frs1.df));
+ Frd_df = (double)(~((uint64_t)Frs1_df));
}});
0x6B: FpBasic::fnot1s({{
- Frds.sf = (float)(~((uint32_t)Frs1s.sf));
+ Frds_sf = (float)(~((uint32_t)Frs1s_sf));
}});
0x6C: FailUnimpl::fxor();
0x6D: FailUnimpl::fxors();
0x71: FailUnimpl::fands();
0x72: FailUnimpl::fxnor();
0x73: FailUnimpl::fxnors();
- 0x74: FpBasic::fsrc1({{Frd.udw = Frs1.udw;}});
- 0x75: FpBasic::fsrc1s({{Frds.uw = Frs1s.uw;}});
+ 0x74: FpBasic::fsrc1({{Frd_udw = Frs1_udw;}});
+ 0x75: FpBasic::fsrc1s({{Frds_uw = Frs1s_uw;}});
0x76: FailUnimpl::fornot2();
0x77: FailUnimpl::fornot2s();
- 0x78: FpBasic::fsrc2({{Frd.udw = Frs2.udw;}});
- 0x79: FpBasic::fsrc2s({{Frds.uw = Frs2s.uw;}});
+ 0x78: FpBasic::fsrc2({{Frd_udw = Frs2_udw;}});
+ 0x79: FpBasic::fsrc2s({{Frds_uw = Frs2s_uw;}});
0x7A: FailUnimpl::fornot1();
0x7B: FailUnimpl::fornot1s();
0x7C: FailUnimpl::for();
0x7D: FailUnimpl::fors();
- 0x7E: FpBasic::fone({{Frd.udw = std::numeric_limits<uint64_t>::max();}});
- 0x7F: FpBasic::fones({{Frds.uw = std::numeric_limits<uint32_t>::max();}});
+ 0x7E: FpBasic::fone({{Frd_udw = std::numeric_limits<uint64_t>::max();}});
+ 0x7F: FpBasic::fones({{Frds_uw = std::numeric_limits<uint32_t>::max();}});
0x80: Trap::shutdown({{fault = new IllegalInstruction;}});
0x81: FailUnimpl::siam();
}
}
0x3: decode OP3 {
format Load {
- 0x00: lduw({{Rd = Mem.uw;}});
- 0x01: ldub({{Rd = Mem.ub;}});
- 0x02: lduh({{Rd = Mem.uhw;}});
+ 0x00: lduw({{Rd = Mem_uw;}});
+ 0x01: ldub({{Rd = Mem_ub;}});
+ 0x02: lduh({{Rd = Mem_uhw;}});
0x03: ldtw({{
- RdLow = (Mem.tuw).a;
- RdHigh = (Mem.tuw).b;
+ RdLow = (Mem_tuw).a;
+ RdHigh = (Mem_tuw).b;
}});
}
format Store {
- 0x04: stw({{Mem.uw = Rd.sw;}});
- 0x05: stb({{Mem.ub = Rd.sb;}});
- 0x06: sth({{Mem.uhw = Rd.shw;}});
+ 0x04: stw({{Mem_uw = Rd_sw;}});
+ 0x05: stb({{Mem_ub = Rd_sb;}});
+ 0x06: sth({{Mem_uhw = Rd_shw;}});
0x07: sttw({{
// This temporary needs to be here so that the parser
// will correctly identify this instruction as a store.
Twin32_t temp;
temp.a = RdLow<31:0>;
temp.b = RdHigh<31:0>;
- Mem.tuw = temp;
+ Mem_tuw = temp;
}});
}
format Load {
- 0x08: ldsw({{Rd = Mem.sw;}});
- 0x09: ldsb({{Rd = Mem.sb;}});
- 0x0A: ldsh({{Rd = Mem.shw;}});
- 0x0B: ldx({{Rd = Mem.sdw;}});
+ 0x08: ldsw({{Rd = Mem_sw;}});
+ 0x09: ldsb({{Rd = Mem_sb;}});
+ 0x0A: ldsh({{Rd = Mem_shw;}});
+ 0x0B: ldx({{Rd = Mem_sdw;}});
}
- 0x0D: Swap::ldstub({{Mem.ub = 0xFF;}},
+ 0x0D: Swap::ldstub({{Mem_ub = 0xFF;}},
{{
uint8_t tmp = mem_data;
- Rd.ub = tmp;
+ Rd_ub = tmp;
}}, MEM_SWAP);
- 0x0E: Store::stx({{Mem.udw = Rd}});
- 0x0F: Swap::swap({{Mem.uw = Rd.uw}},
+ 0x0E: Store::stx({{Mem_udw = Rd}});
+ 0x0F: Swap::swap({{Mem_uw = Rd_uw}},
{{
uint32_t tmp = mem_data;
- Rd.uw = tmp;
+ Rd_uw = tmp;
}}, MEM_SWAP);
format LoadAlt {
- 0x10: lduwa({{Rd = Mem.uw;}});
- 0x11: lduba({{Rd = Mem.ub;}});
- 0x12: lduha({{Rd = Mem.uhw;}});
+ 0x10: lduwa({{Rd = Mem_uw;}});
+ 0x11: lduba({{Rd = Mem_ub;}});
+ 0x12: lduha({{Rd = Mem_uhw;}});
0x13: decode EXT_ASI {
// ASI_LDTD_AIUP
0x22: TwinLoad::ldtx_aiup(
- {{RdLow.udw = (Mem.tudw).a;
- RdHigh.udw = (Mem.tudw).b;}});
+ {{RdLow_udw = (Mem_tudw).a;
+ RdHigh_udw = (Mem_tudw).b;}});
// ASI_LDTD_AIUS
0x23: TwinLoad::ldtx_aius(
- {{RdLow.udw = (Mem.tudw).a;
- RdHigh.udw = (Mem.tudw).b;}});
+ {{RdLow_udw = (Mem_tudw).a;
+ RdHigh_udw = (Mem_tudw).b;}});
// ASI_QUAD_LDD
0x24: TwinLoad::ldtx_quad_ldd(
- {{RdLow.udw = (Mem.tudw).a;
- RdHigh.udw = (Mem.tudw).b;}});
+ {{RdLow_udw = (Mem_tudw).a;
+ RdHigh_udw = (Mem_tudw).b;}});
// ASI_LDTX_REAL
0x26: TwinLoad::ldtx_real(
- {{RdLow.udw = (Mem.tudw).a;
- RdHigh.udw = (Mem.tudw).b;}});
+ {{RdLow_udw = (Mem_tudw).a;
+ RdHigh_udw = (Mem_tudw).b;}});
// ASI_LDTX_N
0x27: TwinLoad::ldtx_n(
- {{RdLow.udw = (Mem.tudw).a;
- RdHigh.udw = (Mem.tudw).b;}});
+ {{RdLow_udw = (Mem_tudw).a;
+ RdHigh_udw = (Mem_tudw).b;}});
// ASI_LDTX_AIUP_L
0x2A: TwinLoad::ldtx_aiup_l(
- {{RdLow.udw = (Mem.tudw).a;
- RdHigh.udw = (Mem.tudw).b;}});
+ {{RdLow_udw = (Mem_tudw).a;
+ RdHigh_udw = (Mem_tudw).b;}});
// ASI_LDTX_AIUS_L
0x2B: TwinLoad::ldtx_aius_l(
- {{RdLow.udw = (Mem.tudw).a;
- RdHigh.udw = (Mem.tudw).b;}});
+ {{RdLow_udw = (Mem_tudw).a;
+ RdHigh_udw = (Mem_tudw).b;}});
// ASI_LDTX_L
0x2C: TwinLoad::ldtx_l(
- {{RdLow.udw = (Mem.tudw).a;
- RdHigh.udw = (Mem.tudw).b;}});
+ {{RdLow_udw = (Mem_tudw).a;
+ RdHigh_udw = (Mem_tudw).b;}});
// ASI_LDTX_REAL_L
0x2E: TwinLoad::ldtx_real_l(
- {{RdLow.udw = (Mem.tudw).a;
- RdHigh.udw = (Mem.tudw).b;}});
+ {{RdLow_udw = (Mem_tudw).a;
+ RdHigh_udw = (Mem_tudw).b;}});
// ASI_LDTX_N_L
0x2F: TwinLoad::ldtx_n_l(
- {{RdLow.udw = (Mem.tudw).a;
- RdHigh.udw = (Mem.tudw).b;}});
+ {{RdLow_udw = (Mem_tudw).a;
+ RdHigh_udw = (Mem_tudw).b;}});
// ASI_LDTX_P
0xE2: TwinLoad::ldtx_p(
- {{RdLow.udw = (Mem.tudw).a;
- RdHigh.udw = (Mem.tudw).b;}});
+ {{RdLow_udw = (Mem_tudw).a;
+ RdHigh_udw = (Mem_tudw).b;}});
// ASI_LDTX_S
0xE3: TwinLoad::ldtx_s(
- {{RdLow.udw = (Mem.tudw).a;
- RdHigh.udw = (Mem.tudw).b;}});
+ {{RdLow_udw = (Mem_tudw).a;
+ RdHigh_udw = (Mem_tudw).b;}});
// ASI_LDTX_PL
0xEA: TwinLoad::ldtx_pl(
- {{RdLow.udw = (Mem.tudw).a;
- RdHigh.udw = (Mem.tudw).b;}});
+ {{RdLow_udw = (Mem_tudw).a;
+ RdHigh_udw = (Mem_tudw).b;}});
// ASI_LDTX_SL
0xEB: TwinLoad::ldtx_sl(
- {{RdLow.udw = (Mem.tudw).a;
- RdHigh.udw = (Mem.tudw).b;}});
+ {{RdLow_udw = (Mem_tudw).a;
+ RdHigh_udw = (Mem_tudw).b;}});
default: ldtwa({{
- RdLow = (Mem.tuw).a;
- RdHigh = (Mem.tuw).b;}});
+ RdLow = (Mem_tuw).a;
+ RdHigh = (Mem_tuw).b;}});
}
}
format StoreAlt {
- 0x14: stwa({{Mem.uw = Rd;}});
- 0x15: stba({{Mem.ub = Rd;}});
- 0x16: stha({{Mem.uhw = Rd;}});
+ 0x14: stwa({{Mem_uw = Rd;}});
+ 0x15: stba({{Mem_ub = Rd;}});
+ 0x16: stha({{Mem_uhw = Rd;}});
0x17: sttwa({{
// This temporary needs to be here so that the parser
// will correctly identify this instruction as a store.
Twin32_t temp;
temp.a = RdLow<31:0>;
temp.b = RdHigh<31:0>;
- Mem.tuw = temp;
+ Mem_tuw = temp;
}});
}
format LoadAlt {
- 0x18: ldswa({{Rd = Mem.sw;}});
- 0x19: ldsba({{Rd = Mem.sb;}});
- 0x1A: ldsha({{Rd = Mem.shw;}});
- 0x1B: ldxa({{Rd = Mem.sdw;}});
+ 0x18: ldswa({{Rd = Mem_sw;}});
+ 0x19: ldsba({{Rd = Mem_sb;}});
+ 0x1A: ldsha({{Rd = Mem_shw;}});
+ 0x1B: ldxa({{Rd = Mem_sdw;}});
}
- 0x1D: SwapAlt::ldstuba({{Mem.ub = 0xFF;}},
+ 0x1D: SwapAlt::ldstuba({{Mem_ub = 0xFF;}},
{{
uint8_t tmp = mem_data;
- Rd.ub = tmp;
+ Rd_ub = tmp;
}}, MEM_SWAP);
- 0x1E: StoreAlt::stxa({{Mem.udw = Rd}});
- 0x1F: SwapAlt::swapa({{Mem.uw = Rd.uw}},
+ 0x1E: StoreAlt::stxa({{Mem_udw = Rd}});
+ 0x1F: SwapAlt::swapa({{Mem_uw = Rd_uw}},
{{
uint32_t tmp = mem_data;
- Rd.uw = tmp;
+ Rd_uw = tmp;
}}, MEM_SWAP);
format Trap {
- 0x20: Load::ldf({{Frds.uw = Mem.uw;}});
+ 0x20: Load::ldf({{Frds_uw = Mem_uw;}});
0x21: decode RD {
0x0: Load::ldfsr({{fault = checkFpEnableFault(xc);
if (fault)
return fault;
- Fsr = Mem.uw | Fsr<63:32>;}});
+ Fsr = Mem_uw | Fsr<63:32>;}});
0x1: Load::ldxfsr({{fault = checkFpEnableFault(xc);
if (fault)
return fault;
- Fsr = Mem.udw;}});
+ Fsr = Mem_udw;}});
default: FailUnimpl::ldfsrOther();
}
0x22: ldqf({{fault = new FpDisabled;}});
- 0x23: Load::lddf({{Frd.udw = Mem.udw;}});
- 0x24: Store::stf({{Mem.uw = Frds.uw;}});
+ 0x23: Load::lddf({{Frd_udw = Mem_udw;}});
+ 0x24: Store::stf({{Mem_uw = Frds_uw;}});
0x25: decode RD {
0x0: StoreFsr::stfsr({{fault = checkFpEnableFault(xc);
if (fault)
return fault;
- Mem.uw = Fsr<31:0>;}});
+ Mem_uw = Fsr<31:0>;}});
0x1: StoreFsr::stxfsr({{fault = checkFpEnableFault(xc);
if (fault)
return fault;
- Mem.udw = Fsr;}});
+ Mem_udw = Fsr;}});
default: FailUnimpl::stfsrOther();
}
0x26: stqf({{fault = new FpDisabled;}});
- 0x27: Store::stdf({{Mem.udw = Frd.udw;}});
+ 0x27: Store::stdf({{Mem_udw = Frd_udw;}});
0x2D: Nop::prefetch({{ }});
- 0x30: LoadAlt::ldfa({{Frds.uw = Mem.uw;}});
+ 0x30: LoadAlt::ldfa({{Frds_uw = Mem_uw;}});
0x32: ldqfa({{fault = new FpDisabled;}});
format LoadAlt {
0x33: decode EXT_ASI {
// ASI_BLOCK_AS_IF_USER_SECONDARY_LITTLE
0x1F: FailUnimpl::ldblockf_aiusl();
// ASI_BLOCK_PRIMARY
- 0xF0: ldblockf_p({{Frd_N.udw = Mem.udw;}});
+ 0xF0: ldblockf_p({{Frd_N_udw = Mem_udw;}});
// ASI_BLOCK_SECONDARY
0xF1: FailUnimpl::ldblockf_s();
// ASI_BLOCK_PRIMARY_LITTLE
{{fault = new DataAccessException;}});
}
}
- 0x34: Store::stfa({{Mem.uw = Frds.uw;}});
+ 0x34: Store::stfa({{Mem_uw = Frds_uw;}});
0x36: stqfa({{fault = new FpDisabled;}});
format StoreAlt {
0x37: decode EXT_ASI {
// ASI_BLOCK_AS_IF_USER_SECONDARY_LITTLE
0x1F: FailUnimpl::stblockf_aiusl();
// ASI_BLOCK_PRIMARY
- 0xF0: stblockf_p({{Mem.udw = Frd_N.udw;}});
+ 0xF0: stblockf_p({{Mem_udw = Frd_N_udw;}});
// ASI_BLOCK_SECONDARY
0xF1: FailUnimpl::stblockf_s();
// ASI_BLOCK_PRIMARY_LITTLE
}
}
0x3C: CasAlt::casa({{
- mem_data = htog(Rs2.uw);
- Mem.uw = Rd.uw;}},
+ mem_data = htog(Rs2_uw);
+ Mem_uw = Rd_uw;}},
{{
uint32_t tmp = mem_data;
- Rd.uw = tmp;
+ Rd_uw = tmp;
}}, MEM_SWAP_COND);
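// casa is the 32-bit compare-and-swap: mem_data carries the compare
// value (Rs2, converted to guest byte order with htog), Rd supplies
// the value to store, and MEM_SWAP_COND makes the write conditional
// on the comparison; the old memory word comes back through mem_data
// into Rd. casxa below is the 64-bit form of the same operation.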
0x3D: Nop::prefetcha({{ }});
0x3E: CasAlt::casxa({{mem_data = gtoh(Rs2);
- Mem.udw = Rd.udw; }},
- {{ Rd.udw = mem_data; }}, MEM_SWAP_COND);
+ Mem_udw = Rd_udw; }},
+ {{ Rd_udw = mem_data; }}, MEM_SWAP_COND);
}
}
}
SetStatus=False, dataSize="env.dataSize"):
super(Movfp, self).__init__(dest, src1, "InstRegIndex(0)", \
spm, SetStatus, dataSize)
- code = 'FpDestReg.uqw = FpSrcReg1.uqw;'
- else_code = 'FpDestReg.uqw = FpDestReg.uqw;'
+ code = 'FpDestReg_uqw = FpSrcReg1_uqw;'
+ else_code = 'FpDestReg_uqw = FpDestReg_uqw;'
cond_check = "checkCondition(ccFlagBits, src2)"
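# The else_code self-assignment is deliberate, not a typo: the ISA
# parser discovers source and destination operands by scanning these
# code strings, so writing FpDestReg_uqw to itself makes the
# destination a source as well and ensures the predicated move still
# produces a defined value when the condition fails. The same idiom
# recurs in the media-op bodies further down.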
class Xorfp(FpOp):
- code = 'FpDestReg.uqw = FpSrcReg1.uqw ^ FpSrcReg2.uqw;'
+ code = 'FpDestReg_uqw = FpSrcReg1_uqw ^ FpSrcReg2_uqw;'
class Sqrtfp(FpOp):
code = 'FpDestReg = sqrt(FpSrcReg2);'
defineMicroLoadOp('Ldstl', 'Data = merge(Data, Mem, dataSize);',
'Data = Mem & mask(dataSize * 8);',
'(StoreCheck << FlagShift) | Request::LOCKED')
- defineMicroLoadOp('Ldfp', 'FpData.uqw = Mem;', big = False)
+ defineMicroLoadOp('Ldfp', 'FpData_uqw = Mem;', big = False)
def defineMicroStoreOp(mnemonic, code, completeCode="", mem_flags="0"):
global header_output
defineMicroStoreOp('St', 'Mem = pick(Data, 2, dataSize);')
defineMicroStoreOp('Stul', 'Mem = pick(Data, 2, dataSize);',
mem_flags="Request::LOCKED")
- defineMicroStoreOp('Stfp', 'Mem = FpData.uqw;')
+ defineMicroStoreOp('Stfp', 'Mem = FpData_uqw;')
defineMicroStoreOp('Cda', 'Mem = 0;', mem_flags="Request::NO_ACCESS")
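# Two flags worth noting: Stul (paired with Ldstl above) carries
# Request::LOCKED so locked read-modify-write sequences stay atomic,
# and Cda's Request::NO_ACCESS exercises address translation and fault
# checks without moving any data; the 'Mem = 0' blurb is presumably
# there only so the parser classifies the microop as a store.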
iop = InstObjParams("lea", "Lea", 'X86ISA::LdStOp',
exec_output += MicroLimmOpExecute.subst(iop)
iop = InstObjParams("lfpimm", "Lfpimm", 'X86MicroopBase',
- {"code" : "FpDestReg.uqw = imm"})
+ {"code" : "FpDestReg_uqw = imm"})
header_output += MicroLimmOpDeclare.subst(iop)
decoder_output += MicroLimmOpConstructor.subst(iop)
decoder_output += MicroLimmOpDisassembly.subst(iop)
# If op2 is used anywhere, make register and immediate versions
# of this code.
- matcher = re.compile("(?<!\\w)(?P<prefix>s?)op2(?P<typeQual>\\.\\w+)?")
+ matcher = re.compile(r"(?<!\w)(?P<prefix>s?)op2(?P<typeQual>_[^\W_]+)?")
match = matcher.search(code)
if match:
typeQual = ""
# If op2 is used anywhere, make register and immediate versions
# of this code.
- matcher = re.compile("op2(?P<typeQual>\\.\\w+)?")
+ matcher = re.compile(r"op2(?P<typeQual>_[^\W_]+)?")
if matcher.search(code):
microopClasses[name + 'i'] = cls
return cls
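# A quick sanity check of the rewritten matcher (illustrative only,
# assuming nothing beyond the standard re module): the old pattern
# captured dotted qualifiers such as 'op2.uqw'; the raw-string
# replacement captures the new underscore spelling instead. Because
# '[^\W_]+' needs at least one word character that is not an
# underscore, a bare trailing '_' is not swallowed, and '(?<!\w)'
# still stops 'op2' from matching inside a longer identifier except
# via the optional 's' prefix.
#
#   >>> import re
#   >>> m = re.compile(r"(?<!\w)(?P<prefix>s?)op2(?P<typeQual>_[^\W_]+)?")
#   >>> m.search("DestReg = merge(DestReg, op2_uqw, dataSize);").group("typeQual")
#   '_uqw'
#   >>> m.search("result = sop2;").group("prefix")
#   's'
#   >>> print(m.search("myop2_uqw"))
#   None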
offset -= items;
if (offset >= 0 && offset < items) {
uint64_t fpSrcReg1 =
- bits(FpSrcReg1.uqw,
+ bits(FpSrcReg1_uqw,
(offset + 1) * srcSize * 8 - 1,
(offset + 0) * srcSize * 8);
DestReg = merge(0, fpSrcReg1, destSize);
offset -= items;
if (offset >= 0 && offset < items) {
uint64_t srcReg1 = pick(SrcReg1, 0, srcSize);
- FpDestReg.uqw =
- insertBits(FpDestReg.uqw,
+ FpDestReg_uqw =
+ insertBits(FpDestReg_uqw,
(offset + 1) * destSize * 8 - 1,
(offset + 0) * destSize * 8, srcReg1);
} else {
- FpDestReg.uqw = FpDestReg.uqw;
+ FpDestReg_uqw = FpDestReg_uqw;
}
'''
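# All of these MediaOp bodies follow one lane-wise pattern: start from
# the current FpDestReg_uqw, extract each packed lane with
# bits(value, hiIndex, loIndex), operate on it, and splice the result
# back with insertBits(result, hiIndex, loIndex, lane) so untouched
# lanes survive. bits() and insertBits() are the inclusive bit-range
# helpers from src/base/bitfield.hh.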
int offset = (ext & 0x1) ? items : 0;
for (int i = 0; i < items; i++) {
uint64_t picked =
- bits(FpSrcReg1.uqw, (i + 1) * 8 * srcSize - 1);
+ bits(FpSrcReg1_uqw, (i + 1) * 8 * srcSize - 1);
result = insertBits(result, i + offset, i + offset, picked);
}
DestReg = DestReg | result;
int size = srcSize;
int sizeBits = size * 8;
int items = numItems(size);
- uint64_t result = FpDestReg.uqw;
+ uint64_t result = FpDestReg_uqw;
for (int i = 0; i < items; i++) {
int hiIndex = (i + 1) * sizeBits - 1;
int loIndex = (i + 0) * sizeBits;
- uint64_t arg1Bits = bits(FpSrcReg1.uqw, hiIndex, loIndex);
- if (bits(FpSrcReg2.uqw, hiIndex))
+ uint64_t arg1Bits = bits(FpSrcReg1_uqw, hiIndex, loIndex);
+ if (bits(FpSrcReg2_uqw, hiIndex))
result = insertBits(result, hiIndex, loIndex, arg1Bits);
}
- FpDestReg.uqw = result;
+ FpDestReg_uqw = result;
'''
class shuffle(MediaOp):
uint8_t lsel = sel & mask(optionBits);
if (lsel * size >= sizeof(FloatRegBits)) {
lsel -= options / 2;
- resBits = bits(FpSrcReg2.uqw,
+ resBits = bits(FpSrcReg2_uqw,
(lsel + 1) * sizeBits - 1,
(lsel + 0) * sizeBits);
} else {
- resBits = bits(FpSrcReg1.uqw,
+ resBits = bits(FpSrcReg1_uqw,
(lsel + 1) * sizeBits - 1,
(lsel + 0) * sizeBits);
}
int loIndex = (i + 0) * sizeBits;
result = insertBits(result, hiIndex, loIndex, resBits);
}
- FpDestReg.uqw = result;
+ FpDestReg_uqw = result;
'''
class Unpack(MediaOp):
uint64_t result = 0;
for (int i = 0; i < items; i++) {
uint64_t pickedLow =
- bits(FpSrcReg1.uqw, (i + offset + 1) * 8 * size - 1,
+ bits(FpSrcReg1_uqw, (i + offset + 1) * 8 * size - 1,
(i + offset) * 8 * size);
result = insertBits(result,
(2 * i + 1) * 8 * size - 1,
(2 * i + 0) * 8 * size,
pickedLow);
uint64_t pickedHigh =
- bits(FpSrcReg2.uqw, (i + offset + 1) * 8 * size - 1,
+ bits(FpSrcReg2_uqw, (i + offset + 1) * 8 * size - 1,
(i + offset) * 8 * size);
result = insertBits(result,
(2 * i + 2) * 8 * size - 1,
(2 * i + 1) * 8 * size,
pickedHigh);
}
- FpDestReg.uqw = result;
+ FpDestReg_uqw = result;
'''
class Pack(MediaOp):
int i;
for (i = 0; i < items / 2; i++) {
uint64_t picked =
- bits(FpSrcReg1.uqw, (i + 1) * srcBits - 1,
+ bits(FpSrcReg1_uqw, (i + 1) * srcBits - 1,
(i + 0) * srcBits);
unsigned signBit = bits(picked, srcBits - 1);
uint64_t overflow = bits(picked, srcBits - 1, destBits - 1);
}
for (;i < items; i++) {
uint64_t picked =
- bits(FpSrcReg2.uqw, (i - items + 1) * srcBits - 1,
+ bits(FpSrcReg2_uqw, (i - items + 1) * srcBits - 1,
(i - items + 0) * srcBits);
unsigned signBit = bits(picked, srcBits - 1);
uint64_t overflow = bits(picked, srcBits - 1, destBits - 1);
(i + 0) * destBits,
picked);
}
- FpDestReg.uqw = result;
+ FpDestReg_uqw = result;
'''
class Mxor(MediaOp):
def __init__(self, dest, src1, src2):
super(Mxor, self).__init__(dest, src1, src2, 1)
code = '''
- FpDestReg.uqw = FpSrcReg1.uqw ^ FpSrcReg2.uqw;
+ FpDestReg_uqw = FpSrcReg1_uqw ^ FpSrcReg2_uqw;
'''
class Mor(MediaOp):
def __init__(self, dest, src1, src2):
super(Mor, self).__init__(dest, src1, src2, 1)
code = '''
- FpDestReg.uqw = FpSrcReg1.uqw | FpSrcReg2.uqw;
+ FpDestReg_uqw = FpSrcReg1_uqw | FpSrcReg2_uqw;
'''
class Mand(MediaOp):
def __init__(self, dest, src1, src2):
super(Mand, self).__init__(dest, src1, src2, 1)
code = '''
- FpDestReg.uqw = FpSrcReg1.uqw & FpSrcReg2.uqw;
+ FpDestReg_uqw = FpSrcReg1_uqw & FpSrcReg2_uqw;
'''
class Mandn(MediaOp):
def __init__(self, dest, src1, src2):
super(Mandn, self).__init__(dest, src1, src2, 1)
code = '''
- FpDestReg.uqw = ~FpSrcReg1.uqw & FpSrcReg2.uqw;
+ FpDestReg_uqw = ~FpSrcReg1_uqw & FpSrcReg2_uqw;
'''
class Mminf(MediaOp):
int sizeBits = size * 8;
assert(srcSize == 4 || srcSize == 8);
int items = numItems(size);
- uint64_t result = FpDestReg.uqw;
+ uint64_t result = FpDestReg_uqw;
for (int i = 0; i < items; i++) {
double arg1, arg2;
int hiIndex = (i + 1) * sizeBits - 1;
int loIndex = (i + 0) * sizeBits;
- uint64_t arg1Bits = bits(FpSrcReg1.uqw, hiIndex, loIndex);
- uint64_t arg2Bits = bits(FpSrcReg2.uqw, hiIndex, loIndex);
+ uint64_t arg1Bits = bits(FpSrcReg1_uqw, hiIndex, loIndex);
+ uint64_t arg2Bits = bits(FpSrcReg2_uqw, hiIndex, loIndex);
if (size == 4) {
floatInt fi;
result = insertBits(result, hiIndex, loIndex, arg2Bits);
}
}
- FpDestReg.uqw = result;
+ FpDestReg_uqw = result;
'''
class Mmaxf(MediaOp):
int sizeBits = size * 8;
assert(srcSize == 4 || srcSize == 8);
int items = numItems(size);
- uint64_t result = FpDestReg.uqw;
+ uint64_t result = FpDestReg_uqw;
for (int i = 0; i < items; i++) {
double arg1, arg2;
int hiIndex = (i + 1) * sizeBits - 1;
int loIndex = (i + 0) * sizeBits;
- uint64_t arg1Bits = bits(FpSrcReg1.uqw, hiIndex, loIndex);
- uint64_t arg2Bits = bits(FpSrcReg2.uqw, hiIndex, loIndex);
+ uint64_t arg1Bits = bits(FpSrcReg1_uqw, hiIndex, loIndex);
+ uint64_t arg2Bits = bits(FpSrcReg2_uqw, hiIndex, loIndex);
if (size == 4) {
floatInt fi;
result = insertBits(result, hiIndex, loIndex, arg2Bits);
}
}
- FpDestReg.uqw = result;
+ FpDestReg_uqw = result;
'''
class Mmini(MediaOp):
int size = srcSize;
int sizeBits = size * 8;
int items = numItems(size);
- uint64_t result = FpDestReg.uqw;
+ uint64_t result = FpDestReg_uqw;
for (int i = 0; i < items; i++) {
int hiIndex = (i + 1) * sizeBits - 1;
int loIndex = (i + 0) * sizeBits;
- uint64_t arg1Bits = bits(FpSrcReg1.uqw, hiIndex, loIndex);
+ uint64_t arg1Bits = bits(FpSrcReg1_uqw, hiIndex, loIndex);
int64_t arg1 = arg1Bits |
(0 - (arg1Bits & (ULL(1) << (sizeBits - 1))));
- uint64_t arg2Bits = bits(FpSrcReg2.uqw, hiIndex, loIndex);
+ uint64_t arg2Bits = bits(FpSrcReg2_uqw, hiIndex, loIndex);
int64_t arg2 = arg2Bits |
(0 - (arg2Bits & (ULL(1) << (sizeBits - 1))));
uint64_t resBits;
}
result = insertBits(result, hiIndex, loIndex, resBits);
}
- FpDestReg.uqw = result;
+ FpDestReg_uqw = result;
'''
class Mmaxi(MediaOp):
int size = srcSize;
int sizeBits = size * 8;
int items = numItems(size);
- uint64_t result = FpDestReg.uqw;
+ uint64_t result = FpDestReg_uqw;
for (int i = 0; i < items; i++) {
int hiIndex = (i + 1) * sizeBits - 1;
int loIndex = (i + 0) * sizeBits;
- uint64_t arg1Bits = bits(FpSrcReg1.uqw, hiIndex, loIndex);
+ uint64_t arg1Bits = bits(FpSrcReg1_uqw, hiIndex, loIndex);
int64_t arg1 = arg1Bits |
(0 - (arg1Bits & (ULL(1) << (sizeBits - 1))));
- uint64_t arg2Bits = bits(FpSrcReg2.uqw, hiIndex, loIndex);
+ uint64_t arg2Bits = bits(FpSrcReg2_uqw, hiIndex, loIndex);
int64_t arg2 = arg2Bits |
(0 - (arg2Bits & (ULL(1) << (sizeBits - 1))));
uint64_t resBits;
}
result = insertBits(result, hiIndex, loIndex, resBits);
}
- FpDestReg.uqw = result;
+ FpDestReg_uqw = result;
'''
class Msqrt(MediaOp):
int sizeBits = size * 8;
assert(srcSize == 4 || srcSize == 8);
int items = numItems(size);
- uint64_t result = FpDestReg.uqw;
+ uint64_t result = FpDestReg_uqw;
for (int i = 0; i < items; i++) {
int hiIndex = (i + 1) * sizeBits - 1;
int loIndex = (i + 0) * sizeBits;
- uint64_t argBits = bits(FpSrcReg1.uqw, hiIndex, loIndex);
+ uint64_t argBits = bits(FpSrcReg1_uqw, hiIndex, loIndex);
if (size == 4) {
floatInt fi;
}
result = insertBits(result, hiIndex, loIndex, argBits);
}
- FpDestReg.uqw = result;
+ FpDestReg_uqw = result;
'''
class Maddf(MediaOp):
int sizeBits = size * 8;
assert(srcSize == 4 || srcSize == 8);
int items = numItems(size);
- uint64_t result = FpDestReg.uqw;
+ uint64_t result = FpDestReg_uqw;
for (int i = 0; i < items; i++) {
int hiIndex = (i + 1) * sizeBits - 1;
int loIndex = (i + 0) * sizeBits;
- uint64_t arg1Bits = bits(FpSrcReg1.uqw, hiIndex, loIndex);
- uint64_t arg2Bits = bits(FpSrcReg2.uqw, hiIndex, loIndex);
+ uint64_t arg1Bits = bits(FpSrcReg1_uqw, hiIndex, loIndex);
+ uint64_t arg2Bits = bits(FpSrcReg2_uqw, hiIndex, loIndex);
uint64_t resBits;
if (size == 4) {
result = insertBits(result, hiIndex, loIndex, resBits);
}
- FpDestReg.uqw = result;
+ FpDestReg_uqw = result;
'''
class Msubf(MediaOp):
int sizeBits = size * 8;
assert(srcSize == 4 || srcSize == 8);
int items = numItems(size);
- uint64_t result = FpDestReg.uqw;
+ uint64_t result = FpDestReg_uqw;
for (int i = 0; i < items; i++) {
int hiIndex = (i + 1) * sizeBits - 1;
int loIndex = (i + 0) * sizeBits;
- uint64_t arg1Bits = bits(FpSrcReg1.uqw, hiIndex, loIndex);
- uint64_t arg2Bits = bits(FpSrcReg2.uqw, hiIndex, loIndex);
+ uint64_t arg1Bits = bits(FpSrcReg1_uqw, hiIndex, loIndex);
+ uint64_t arg2Bits = bits(FpSrcReg2_uqw, hiIndex, loIndex);
uint64_t resBits;
if (size == 4) {
result = insertBits(result, hiIndex, loIndex, resBits);
}
- FpDestReg.uqw = result;
+ FpDestReg_uqw = result;
'''
class Mmulf(MediaOp):
int sizeBits = size * 8;
assert(srcSize == 4 || srcSize == 8);
int items = numItems(size);
- uint64_t result = FpDestReg.uqw;
+ uint64_t result = FpDestReg_uqw;
for (int i = 0; i < items; i++) {
int hiIndex = (i + 1) * sizeBits - 1;
int loIndex = (i + 0) * sizeBits;
- uint64_t arg1Bits = bits(FpSrcReg1.uqw, hiIndex, loIndex);
- uint64_t arg2Bits = bits(FpSrcReg2.uqw, hiIndex, loIndex);
+ uint64_t arg1Bits = bits(FpSrcReg1_uqw, hiIndex, loIndex);
+ uint64_t arg2Bits = bits(FpSrcReg2_uqw, hiIndex, loIndex);
uint64_t resBits;
if (size == 4) {
result = insertBits(result, hiIndex, loIndex, resBits);
}
- FpDestReg.uqw = result;
+ FpDestReg_uqw = result;
'''
class Mdivf(MediaOp):
int sizeBits = size * 8;
assert(srcSize == 4 || srcSize == 8);
int items = numItems(size);
- uint64_t result = FpDestReg.uqw;
+ uint64_t result = FpDestReg_uqw;
for (int i = 0; i < items; i++) {
int hiIndex = (i + 1) * sizeBits - 1;
int loIndex = (i + 0) * sizeBits;
- uint64_t arg1Bits = bits(FpSrcReg1.uqw, hiIndex, loIndex);
- uint64_t arg2Bits = bits(FpSrcReg2.uqw, hiIndex, loIndex);
+ uint64_t arg1Bits = bits(FpSrcReg1_uqw, hiIndex, loIndex);
+ uint64_t arg2Bits = bits(FpSrcReg2_uqw, hiIndex, loIndex);
uint64_t resBits;
if (size == 4) {
result = insertBits(result, hiIndex, loIndex, resBits);
}
- FpDestReg.uqw = result;
+ FpDestReg_uqw = result;
'''
class Maddi(MediaOp):
int size = srcSize;
int sizeBits = size * 8;
int items = numItems(size);
- uint64_t result = FpDestReg.uqw;
+ uint64_t result = FpDestReg_uqw;
for (int i = 0; i < items; i++) {
int hiIndex = (i + 1) * sizeBits - 1;
int loIndex = (i + 0) * sizeBits;
- uint64_t arg1Bits = bits(FpSrcReg1.uqw, hiIndex, loIndex);
- uint64_t arg2Bits = bits(FpSrcReg2.uqw, hiIndex, loIndex);
+ uint64_t arg1Bits = bits(FpSrcReg1_uqw, hiIndex, loIndex);
+ uint64_t arg2Bits = bits(FpSrcReg2_uqw, hiIndex, loIndex);
uint64_t resBits = arg1Bits + arg2Bits;
if (ext & 0x2) {
result = insertBits(result, hiIndex, loIndex, resBits);
}
- FpDestReg.uqw = result;
+ FpDestReg_uqw = result;
'''
class Msubi(MediaOp):
int size = srcSize;
int sizeBits = size * 8;
int items = numItems(size);
- uint64_t result = FpDestReg.uqw;
+ uint64_t result = FpDestReg_uqw;
for (int i = 0; i < items; i++) {
int hiIndex = (i + 1) * sizeBits - 1;
int loIndex = (i + 0) * sizeBits;
- uint64_t arg1Bits = bits(FpSrcReg1.uqw, hiIndex, loIndex);
- uint64_t arg2Bits = bits(FpSrcReg2.uqw, hiIndex, loIndex);
+ uint64_t arg1Bits = bits(FpSrcReg1_uqw, hiIndex, loIndex);
+ uint64_t arg2Bits = bits(FpSrcReg2_uqw, hiIndex, loIndex);
uint64_t resBits = arg1Bits - arg2Bits;
if (ext & 0x2) {
result = insertBits(result, hiIndex, loIndex, resBits);
}
- FpDestReg.uqw = result;
+ FpDestReg_uqw = result;
'''
class Mmuli(MediaOp):
assert(destBits <= 64);
assert(destSize >= srcSize);
int items = numItems(destSize);
- uint64_t result = FpDestReg.uqw;
+ uint64_t result = FpDestReg_uqw;
for (int i = 0; i < items; i++) {
int offset = 0;
}
int srcHiIndex = (i + 1) * srcBits - 1 + offset;
int srcLoIndex = (i + 0) * srcBits + offset;
- uint64_t arg1Bits = bits(FpSrcReg1.uqw, srcHiIndex, srcLoIndex);
- uint64_t arg2Bits = bits(FpSrcReg2.uqw, srcHiIndex, srcLoIndex);
+ uint64_t arg1Bits = bits(FpSrcReg1_uqw, srcHiIndex, srcLoIndex);
+ uint64_t arg2Bits = bits(FpSrcReg2_uqw, srcHiIndex, srcLoIndex);
uint64_t resBits;
if (signedOp()) {
int destLoIndex = (i + 0) * destBits;
result = insertBits(result, destHiIndex, destLoIndex, resBits);
}
- FpDestReg.uqw = result;
+ FpDestReg_uqw = result;
'''
class Mavg(MediaOp):
int size = srcSize;
int sizeBits = size * 8;
int items = numItems(size);
- uint64_t result = FpDestReg.uqw;
+ uint64_t result = FpDestReg_uqw;
for (int i = 0; i < items; i++) {
int hiIndex = (i + 1) * sizeBits - 1;
int loIndex = (i + 0) * sizeBits;
- uint64_t arg1Bits = bits(FpSrcReg1.uqw, hiIndex, loIndex);
- uint64_t arg2Bits = bits(FpSrcReg2.uqw, hiIndex, loIndex);
+ uint64_t arg1Bits = bits(FpSrcReg1_uqw, hiIndex, loIndex);
+ uint64_t arg2Bits = bits(FpSrcReg2_uqw, hiIndex, loIndex);
uint64_t resBits = (arg1Bits + arg2Bits + 1) / 2;
result = insertBits(result, hiIndex, loIndex, resBits);
}
- FpDestReg.uqw = result;
+ FpDestReg_uqw = result;
'''
class Msad(MediaOp):
for (int i = 0; i < items; i++) {
int hiIndex = (i + 1) * srcBits - 1;
int loIndex = (i + 0) * srcBits;
- uint64_t arg1Bits = bits(FpSrcReg1.uqw, hiIndex, loIndex);
- uint64_t arg2Bits = bits(FpSrcReg2.uqw, hiIndex, loIndex);
+ uint64_t arg1Bits = bits(FpSrcReg1_uqw, hiIndex, loIndex);
+ uint64_t arg2Bits = bits(FpSrcReg2_uqw, hiIndex, loIndex);
int64_t resBits = arg1Bits - arg2Bits;
if (resBits < 0)
resBits = -resBits;
sum += resBits;
}
- FpDestReg.uqw = sum & mask(destSize * 8);
+ FpDestReg_uqw = sum & mask(destSize * 8);
'''
class Msrl(MediaOp):
int size = srcSize;
int sizeBits = size * 8;
int items = numItems(size);
- uint64_t shiftAmt = op2.uqw;
- uint64_t result = FpDestReg.uqw;
+ uint64_t shiftAmt = op2_uqw;
+ uint64_t result = FpDestReg_uqw;
for (int i = 0; i < items; i++) {
int hiIndex = (i + 1) * sizeBits - 1;
int loIndex = (i + 0) * sizeBits;
- uint64_t arg1Bits = bits(FpSrcReg1.uqw, hiIndex, loIndex);
+ uint64_t arg1Bits = bits(FpSrcReg1_uqw, hiIndex, loIndex);
uint64_t resBits;
if (shiftAmt >= sizeBits) {
resBits = 0;
result = insertBits(result, hiIndex, loIndex, resBits);
}
- FpDestReg.uqw = result;
+ FpDestReg_uqw = result;
'''
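# The shift amount here is op2_uqw: exactly the operand spelling that
# the op2 matcher (repeated below for this file) rewrites when it
# builds the register and immediate ('i'-suffixed) variants of each
# shift microop.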
class Msra(MediaOp):
int size = srcSize;
int sizeBits = size * 8;
int items = numItems(size);
- uint64_t shiftAmt = op2.uqw;
- uint64_t result = FpDestReg.uqw;
+ uint64_t shiftAmt = op2_uqw;
+ uint64_t result = FpDestReg_uqw;
for (int i = 0; i < items; i++) {
int hiIndex = (i + 1) * sizeBits - 1;
int loIndex = (i + 0) * sizeBits;
- uint64_t arg1Bits = bits(FpSrcReg1.uqw, hiIndex, loIndex);
+ uint64_t arg1Bits = bits(FpSrcReg1_uqw, hiIndex, loIndex);
uint64_t resBits;
if (shiftAmt >= sizeBits) {
if (bits(arg1Bits, sizeBits - 1))
result = insertBits(result, hiIndex, loIndex, resBits);
}
- FpDestReg.uqw = result;
+ FpDestReg_uqw = result;
'''
class Msll(MediaOp):
int size = srcSize;
int sizeBits = size * 8;
int items = numItems(size);
- uint64_t shiftAmt = op2.uqw;
- uint64_t result = FpDestReg.uqw;
+ uint64_t shiftAmt = op2_uqw;
+ uint64_t result = FpDestReg_uqw;
for (int i = 0; i < items; i++) {
int hiIndex = (i + 1) * sizeBits - 1;
int loIndex = (i + 0) * sizeBits;
- uint64_t arg1Bits = bits(FpSrcReg1.uqw, hiIndex, loIndex);
+ uint64_t arg1Bits = bits(FpSrcReg1_uqw, hiIndex, loIndex);
uint64_t resBits;
if (shiftAmt >= sizeBits) {
resBits = 0;
result = insertBits(result, hiIndex, loIndex, resBits);
}
- FpDestReg.uqw = result;
+ FpDestReg_uqw = result;
'''
class Cvtf2i(MediaOp):
} else {
items = numItems(destSize);
}
- uint64_t result = FpDestReg.uqw;
+ uint64_t result = FpDestReg_uqw;
for (int i = 0; i < items; i++) {
int srcHiIndex = srcStart + (i + 1) * srcSizeBits - 1;
int srcLoIndex = srcStart + (i + 0) * srcSizeBits;
- uint64_t argBits = bits(FpSrcReg1.uqw, srcHiIndex, srcLoIndex);
+ uint64_t argBits = bits(FpSrcReg1_uqw, srcHiIndex, srcLoIndex);
double arg;
if (srcSize == 4) {
int destLoIndex = destStart + (i + 0) * destSizeBits;
result = insertBits(result, destHiIndex, destLoIndex, argBits);
}
- FpDestReg.uqw = result;
+ FpDestReg_uqw = result;
'''
class Cvti2f(MediaOp):
} else {
items = numItems(destSize);
}
- uint64_t result = FpDestReg.uqw;
+ uint64_t result = FpDestReg_uqw;
for (int i = 0; i < items; i++) {
int srcHiIndex = srcStart + (i + 1) * srcSizeBits - 1;
int srcLoIndex = srcStart + (i + 0) * srcSizeBits;
- uint64_t argBits = bits(FpSrcReg1.uqw, srcHiIndex, srcLoIndex);
+ uint64_t argBits = bits(FpSrcReg1_uqw, srcHiIndex, srcLoIndex);
int64_t sArg = argBits |
(0 - (argBits & (ULL(1) << (srcSizeBits - 1))));
int destLoIndex = destStart + (i + 0) * destSizeBits;
result = insertBits(result, destHiIndex, destLoIndex, argBits);
}
- FpDestReg.uqw = result;
+ FpDestReg_uqw = result;
'''
class Cvtf2f(MediaOp):
} else {
items = numItems(destSize);
}
- uint64_t result = FpDestReg.uqw;
+ uint64_t result = FpDestReg_uqw;
for (int i = 0; i < items; i++) {
int srcHiIndex = srcStart + (i + 1) * srcSizeBits - 1;
int srcLoIndex = srcStart + (i + 0) * srcSizeBits;
- uint64_t argBits = bits(FpSrcReg1.uqw, srcHiIndex, srcLoIndex);
+ uint64_t argBits = bits(FpSrcReg1_uqw, srcHiIndex, srcLoIndex);
double arg;
if (srcSize == 4) {
int destLoIndex = destStart + (i + 0) * destSizeBits;
result = insertBits(result, destHiIndex, destLoIndex, argBits);
}
- FpDestReg.uqw = result;
+ FpDestReg_uqw = result;
'''
class Mcmpi2r(MediaOp):
int size = srcSize;
int sizeBits = size * 8;
int items = numItems(size);
- uint64_t result = FpDestReg.uqw;
+ uint64_t result = FpDestReg_uqw;
for (int i = 0; i < items; i++) {
int hiIndex = (i + 1) * sizeBits - 1;
int loIndex = (i + 0) * sizeBits;
- uint64_t arg1Bits = bits(FpSrcReg1.uqw, hiIndex, loIndex);
+ uint64_t arg1Bits = bits(FpSrcReg1_uqw, hiIndex, loIndex);
int64_t arg1 = arg1Bits |
(0 - (arg1Bits & (ULL(1) << (sizeBits - 1))));
- uint64_t arg2Bits = bits(FpSrcReg2.uqw, hiIndex, loIndex);
+ uint64_t arg2Bits = bits(FpSrcReg2_uqw, hiIndex, loIndex);
int64_t arg2 = arg2Bits |
(0 - (arg2Bits & (ULL(1) << (sizeBits - 1))));
result = insertBits(result, hiIndex, loIndex, resBits);
}
- FpDestReg.uqw = result;
+ FpDestReg_uqw = result;
'''
class Mcmpf2r(MediaOp):
int size = srcSize;
int sizeBits = size * 8;
int items = numItems(size);
- uint64_t result = FpDestReg.uqw;
+ uint64_t result = FpDestReg_uqw;
for (int i = 0; i < items; i++) {
int hiIndex = (i + 1) * sizeBits - 1;
int loIndex = (i + 0) * sizeBits;
- uint64_t arg1Bits = bits(FpSrcReg1.uqw, hiIndex, loIndex);
- uint64_t arg2Bits = bits(FpSrcReg2.uqw, hiIndex, loIndex);
+ uint64_t arg1Bits = bits(FpSrcReg1_uqw, hiIndex, loIndex);
+ uint64_t arg2Bits = bits(FpSrcReg2_uqw, hiIndex, loIndex);
double arg1, arg2;
if (size == 4) {
result = insertBits(result, hiIndex, loIndex, resBits);
}
- FpDestReg.uqw = result;
+ FpDestReg_uqw = result;
'''
class Mcmpf2rf(MediaOp):
int sizeBits = size * 8;
double arg1, arg2;
- uint64_t arg1Bits = bits(FpSrcReg1.uqw, sizeBits - 1, 0);
- uint64_t arg2Bits = bits(FpSrcReg2.uqw, sizeBits - 1, 0);
+ uint64_t arg1Bits = bits(FpSrcReg1_uqw, sizeBits - 1, 0);
+ uint64_t arg2Bits = bits(FpSrcReg2_uqw, sizeBits - 1, 0);
if (size == 4) {
floatInt fi;
fi.i = arg1Bits;
# If op2 is used anywhere, make register and immediate versions
# of this code.
- matcher = re.compile("(?<!\\w)(?P<prefix>s?)op2(?P<typeQual>\\.\\w+)?")
+ matcher = re.compile(r"(?<!\w)(?P<prefix>s?)op2(?P<typeQual>_[^\W_]+)?")
match = matcher.search(allCode + allBigCode)
if match:
typeQual = ""
# If op2 is used anywhere, make register and immediate versions
# of this code.
- matcher = re.compile("op2(?P<typeQual>\\.\\w+)?")
+ matcher = re.compile(r"op2(?P<typeQual>_[^\W_]+)?")
if matcher.search(allCode):
microopClasses[name + 'i'] = cls
return cls