:cache:av:::VS:VS:
:cache:av::vreg *:vS:VS:(cpu_registers(processor)->altivec.vr + VS)
-:cache:av::unsigned32:VS_BITMASK:VS:(1 << VS)
+:cache:av::uint32_t:VS_BITMASK:VS:(1 << VS)
:cache:av:::VA:VA:
:cache:av::vreg *:vA:VA:(cpu_registers(processor)->altivec.vr + VA)
-:cache:av::unsigned32:VA_BITMASK:VA:(1 << VA)
+:cache:av::uint32_t:VA_BITMASK:VA:(1 << VA)
:cache:av:::VB:VB:
:cache:av::vreg *:vB:VB:(cpu_registers(processor)->altivec.vr + VB)
-:cache:av::unsigned32:VB_BITMASK:VB:(1 << VB)
+:cache:av::uint32_t:VB_BITMASK:VB:(1 << VB)
:cache:av:::VC:VC:
:cache:av::vreg *:vC:VC:(cpu_registers(processor)->altivec.vr + VC)
-:cache:av::unsigned32:VC_BITMASK:VC:(1 << VC)
+:cache:av::uint32_t:VC_BITMASK:VC:(1 << VC)
# Flags for model.h
::model-macro:::
} while (0)
# Trace waiting for AltiVec registers to become available
-void::model-static::model_trace_altivec_busy_p:model_data *model_ptr, unsigned32 vr_busy
+void::model-static::model_trace_altivec_busy_p:model_data *model_ptr, uint32_t vr_busy
int i;
if (vr_busy) {
vr_busy &= model_ptr->vr_busy;
TRACE(trace_model, ("Waiting for VSCR\n"));
# Trace making AltiVec registers busy
-void::model-static::model_trace_altivec_make_busy:model_data *model_ptr, unsigned32 vr_mask, unsigned32 cr_mask
+void::model-static::model_trace_altivec_make_busy:model_data *model_ptr, uint32_t vr_mask, uint32_t cr_mask
int i;
if (vr_mask) {
for(i = 0; i < 32; i++) {
}
# Schedule an AltiVec instruction that takes integer input registers and produces output registers
-void::model-function::ppc_insn_int_vr:itable_index index, model_data *model_ptr, const unsigned32 out_mask, const unsigned32 in_mask, const unsigned32 out_vmask, const unsigned32 in_vmask
- const unsigned32 int_mask = out_mask | in_mask;
- const unsigned32 vr_mask = out_vmask | in_vmask;
+void::model-function::ppc_insn_int_vr:itable_index index, model_data *model_ptr, const uint32_t out_mask, const uint32_t in_mask, const uint32_t out_vmask, const uint32_t in_vmask
+ const uint32_t int_mask = out_mask | in_mask;
+ const uint32_t vr_mask = out_vmask | in_vmask;
model_busy *busy_ptr;
if ((model_ptr->int_busy & int_mask) != 0 || (model_ptr->vr_busy & vr_mask)) {
}
# Schedule an AltiVec instruction that takes vector input registers and produces vector output registers
-void::model-function::ppc_insn_vr:itable_index index, model_data *model_ptr, const unsigned32 out_vmask, const unsigned32 in_vmask
- const unsigned32 vr_mask = out_vmask | in_vmask;
+void::model-function::ppc_insn_vr:itable_index index, model_data *model_ptr, const uint32_t out_vmask, const uint32_t in_vmask
+ const uint32_t vr_mask = out_vmask | in_vmask;
model_busy *busy_ptr;
if (model_ptr->vr_busy & vr_mask) {
}
# Schedule an AltiVec instruction that takes vector input registers and produces vector output registers, touches CR
-void::model-function::ppc_insn_vr_cr:itable_index index, model_data *model_ptr, const unsigned32 out_vmask, const unsigned32 in_vmask, const unsigned32 cr_mask
- const unsigned32 vr_mask = out_vmask | in_vmask;
+void::model-function::ppc_insn_vr_cr:itable_index index, model_data *model_ptr, const uint32_t out_vmask, const uint32_t in_vmask, const uint32_t cr_mask
+ const uint32_t vr_mask = out_vmask | in_vmask;
model_busy *busy_ptr;
if ((model_ptr->vr_busy & vr_mask) || (model_ptr->cr_fpscr_busy & cr_mask)) {
model_trace_altivec_make_busy(model_ptr, vr_mask, cr_mask);
# Schedule an AltiVec instruction that takes vector input registers and produces vector output registers, touches VSCR
-void::model-function::ppc_insn_vr_vscr:itable_index index, model_data *model_ptr, const unsigned32 out_vmask, const unsigned32 in_vmask
- const unsigned32 vr_mask = out_vmask | in_vmask;
+void::model-function::ppc_insn_vr_vscr:itable_index index, model_data *model_ptr, const uint32_t out_vmask, const uint32_t in_vmask
+ const uint32_t vr_mask = out_vmask | in_vmask;
model_busy *busy_ptr;
if ((model_ptr->vr_busy & vr_mask) != 0 || model_ptr->vscr_busy != 0) {
model_trace_altivec_make_busy(model_ptr, vr_mask, 0);
# Schedule an MFVSCR instruction that takes the VSCR input register and produces an AltiVec output register
-void::model-function::ppc_insn_from_vscr:itable_index index, model_data *model_ptr, const unsigned32 vr_mask
+void::model-function::ppc_insn_from_vscr:itable_index index, model_data *model_ptr, const uint32_t vr_mask
model_busy *busy_ptr;
while ((model_ptr->vr_busy & vr_mask) != 0 || model_ptr->vscr_busy != 0) {
model_trace_altivec_make_busy(model_ptr, vr_mask, 0);
# Schedule an MTVSCR instruction that takes one AltiVec input register and produces a VSCR output register
-void::model-function::ppc_insn_to_vscr:itable_index index, model_data *model_ptr, const unsigned32 vr_mask
+void::model-function::ppc_insn_to_vscr:itable_index index, model_data *model_ptr, const uint32_t vr_mask
model_busy *busy_ptr;
while ((model_ptr->vr_busy & vr_mask) != 0 || model_ptr->vscr_busy != 0) {
# The following are AltiVec saturate operations
-signed8::model-function::altivec_signed_saturate_8:signed16 val, int *sat
- signed8 rv;
+int8_t::model-function::altivec_signed_saturate_8:int16_t val, int *sat
+ int8_t rv;
if (val > 127) {
rv = 127;
*sat = 1;
}
return rv;
-signed16::model-function::altivec_signed_saturate_16:signed32 val, int *sat
- signed16 rv;
+int16_t::model-function::altivec_signed_saturate_16:int32_t val, int *sat
+ int16_t rv;
if (val > 32767) {
rv = 32767;
*sat = 1;
}
return rv;
-signed32::model-function::altivec_signed_saturate_32:signed64 val, int *sat
- signed32 rv;
+int32_t::model-function::altivec_signed_saturate_32:int64_t val, int *sat
+ int32_t rv;
if (val > 2147483647) {
rv = 2147483647;
*sat = 1;
}
return rv;
-unsigned8::model-function::altivec_unsigned_saturate_8:signed16 val, int *sat
- unsigned8 rv;
+uint8_t::model-function::altivec_unsigned_saturate_8:int16_t val, int *sat
+ uint8_t rv;
if (val > 255) {
rv = 255;
*sat = 1;
}
return rv;
-unsigned16::model-function::altivec_unsigned_saturate_16:signed32 val, int *sat
- unsigned16 rv;
+uint16_t::model-function::altivec_unsigned_saturate_16:int32_t val, int *sat
+ uint16_t rv;
if (val > 65535) {
rv = 65535;
*sat = 1;
}
return rv;
-unsigned32::model-function::altivec_unsigned_saturate_32:signed64 val, int *sat
- unsigned32 rv;
+uint32_t::model-function::altivec_unsigned_saturate_32:int64_t val, int *sat
+ uint32_t rv;
if (val > 4294967295LL) {
rv = 4294967295LL;
*sat = 1;
#
# Per 32-bit word: vS.w[i] = carry-out bit of the unsigned add vA.w[i] + vB.w[i].
0.4,6.VS,11.VA,16.VB,21.384:VX:av:vaddcuw %VD, %VA, %VB:Vector Add Carryout Unsigned Word
- unsigned64 temp;
+ uint64_t temp;
int i;
for (i = 0; i < 4; i++) {
- temp = (unsigned64)(*vA).w[i] + (unsigned64)(*vB).w[i];
+ temp = (uint64_t)(*vA).w[i] + (uint64_t)(*vB).w[i];
/* the 64-bit sum places the unsigned carry in bit 32 */
(*vS).w[i] = temp >> 32;
}
PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
0.4,6.VS,11.VA,16.VB,21.10:VX:av:vaddfp %VD, %VA, %VB:Vector Add Floating Point
int i;
- unsigned32 f;
+ uint32_t f;
sim_fpu a, b, d;
for (i = 0; i < 4; i++) {
sim_fpu_32to (&a, (*vA).w[i]);
0.4,6.VS,11.VA,16.VB,21.768:VX:av:vaddsbs %VD, %VA, %VB:Vector Add Signed Byte Saturate
int i, sat, tempsat;
- signed16 temp;
+ int16_t temp;
for (i = 0; i < 16; i++) {
- temp = (signed16)(signed8)(*vA).b[i] + (signed16)(signed8)(*vB).b[i];
+ temp = (int16_t)(int8_t)(*vA).b[i] + (int16_t)(int8_t)(*vB).b[i];
(*vS).b[i] = altivec_signed_saturate_8(temp, &tempsat);
sat |= tempsat;
}
0.4,6.VS,11.VA,16.VB,21.832:VX:av:vaddshs %VD, %VA, %VB:Vector Add Signed Half Word Saturate
int i, sat, tempsat;
- signed32 temp, a, b;
+ int32_t temp, a, b;
for (i = 0; i < 8; i++) {
- a = (signed32)(signed16)(*vA).h[i];
- b = (signed32)(signed16)(*vB).h[i];
+ a = (int32_t)(int16_t)(*vA).h[i];
+ b = (int32_t)(int16_t)(*vB).h[i];
temp = a + b;
(*vS).h[i] = altivec_signed_saturate_16(temp, &tempsat);
sat |= tempsat;
0.4,6.VS,11.VA,16.VB,21.896:VX:av:vaddsws %VD, %VA, %VB:Vector Add Signed Word Saturate
int i, sat, tempsat;
- signed64 temp;
+ int64_t temp;
for (i = 0; i < 4; i++) {
- temp = (signed64)(signed32)(*vA).w[i] + (signed64)(signed32)(*vB).w[i];
+ temp = (int64_t)(int32_t)(*vA).w[i] + (int64_t)(int32_t)(*vB).w[i];
(*vS).w[i] = altivec_signed_saturate_32(temp, &tempsat);
sat |= tempsat;
}
0.4,6.VS,11.VA,16.VB,21.512:VX:av:vaddubs %VD, %VA, %VB:Vector Add Unsigned Byte Saturate
int i, sat, tempsat;
- signed16 temp;
+ int16_t temp;
sat = 0;
for (i = 0; i < 16; i++) {
- temp = (signed16)(unsigned8)(*vA).b[i] + (signed16)(unsigned8)(*vB).b[i];
+ temp = (int16_t)(uint8_t)(*vA).b[i] + (int16_t)(uint8_t)(*vB).b[i];
(*vS).b[i] = altivec_unsigned_saturate_8(temp, &tempsat);
sat |= tempsat;
}
0.4,6.VS,11.VA,16.VB,21.576:VX:av:vadduhs %VD, %VA, %VB:Vector Add Unsigned Half Word Saturate
int i, sat, tempsat;
- signed32 temp;
+ int32_t temp;
for (i = 0; i < 8; i++) {
- temp = (signed32)(unsigned16)(*vA).h[i] + (signed32)(unsigned16)(*vB).h[i];
+ temp = (int32_t)(uint16_t)(*vA).h[i] + (int32_t)(uint16_t)(*vB).h[i];
(*vS).h[i] = altivec_unsigned_saturate_16(temp, &tempsat);
sat |= tempsat;
}
0.4,6.VS,11.VA,16.VB,21.640:VX:av:vadduws %VD, %VA, %VB:Vector Add Unsigned Word Saturate
int i, sat, tempsat;
- signed64 temp;
+ int64_t temp;
for (i = 0; i < 4; i++) {
- temp = (signed64)(unsigned32)(*vA).w[i] + (signed64)(unsigned32)(*vB).w[i];
+ temp = (int64_t)(uint32_t)(*vA).w[i] + (int64_t)(uint32_t)(*vB).w[i];
(*vS).w[i] = altivec_unsigned_saturate_32(temp, &tempsat);
sat |= tempsat;
}
0.4,6.VS,11.VA,16.VB,21.1282:VX:av:vavgsb %VD, %VA, %VB:Vector Average Signed Byte
int i;
- signed16 temp, a, b;
+ int16_t temp, a, b;
for (i = 0; i < 16; i++) {
- a = (signed16)(signed8)(*vA).b[i];
- b = (signed16)(signed8)(*vB).b[i];
+ a = (int16_t)(int8_t)(*vA).b[i];
+ b = (int16_t)(int8_t)(*vB).b[i];
temp = a + b + 1;
(*vS).b[i] = (temp >> 1) & 0xff;
}
0.4,6.VS,11.VA,16.VB,21.1346:VX:av:vavgsh %VD, %VA, %VB:Vector Average Signed Half Word
int i;
- signed32 temp, a, b;
+ int32_t temp, a, b;
for (i = 0; i < 8; i++) {
- a = (signed32)(signed16)(*vA).h[i];
- b = (signed32)(signed16)(*vB).h[i];
+ a = (int32_t)(int16_t)(*vA).h[i];
+ b = (int32_t)(int16_t)(*vB).h[i];
temp = a + b + 1;
(*vS).h[i] = (temp >> 1) & 0xffff;
}
0.4,6.VS,11.VA,16.VB,21.1410:VX:av:vavgsw %VD, %VA, %VB:Vector Average Signed Word
int i;
- signed64 temp, a, b;
+ int64_t temp, a, b;
for (i = 0; i < 4; i++) {
- a = (signed64)(signed32)(*vA).w[i];
- b = (signed64)(signed32)(*vB).w[i];
+ a = (int64_t)(int32_t)(*vA).w[i];
+ b = (int64_t)(int32_t)(*vB).w[i];
temp = a + b + 1;
(*vS).w[i] = (temp >> 1) & 0xffffffff;
}
0.4,6.VS,11.VA,16.VB,21.1026:VX:av:vavgub %VD, %VA, %VB:Vector Average Unsigned Byte
int i;
- unsigned16 temp, a, b;
+ uint16_t temp, a, b;
for (i = 0; i < 16; i++) {
a = (*vA).b[i];
b = (*vB).b[i];
0.4,6.VS,11.VA,16.VB,21.1090:VX:av:vavguh %VD, %VA, %VB:Vector Average Unsigned Half Word
int i;
- unsigned32 temp, a, b;
+ uint32_t temp, a, b;
for (i = 0; i < 8; i++) {
a = (*vA).h[i];
b = (*vB).h[i];
0.4,6.VS,11.VA,16.VB,21.1154:VX:av:vavguw %VD, %VA, %VB:Vector Average Unsigned Word
int i;
- unsigned64 temp, a, b;
+ uint64_t temp, a, b;
for (i = 0; i < 4; i++) {
a = (*vA).w[i];
b = (*vB).w[i];
0.4,6.VS,11.UIMM,16.VB,21.842:VX:av:vcfsx %VD, %VB, %UIMM:Vector Convert From Signed Fixed-Point Word
int i;
- unsigned32 f;
+ uint32_t f;
sim_fpu b, div, d;
for (i = 0; i < 4; i++) {
sim_fpu_32to (&b, (*vB).w[i]);
0.4,6.VS,11.UIMM,16.VB,21.778:VX:av:vcfux %VD, %VA, %UIMM:Vector Convert From Unsigned Fixed-Point Word
int i;
- unsigned32 f;
+ uint32_t f;
sim_fpu b, d, div;
for (i = 0; i < 4; i++) {
sim_fpu_32to (&b, (*vB).w[i]);
0.4,6.VS,11.VA,16.VB,21.RC,22.774:VXR:av:vcmpgtsbx %VD, %VA, %VB:Vector Compare Greater-Than Signed Byte
int i;
- signed8 a, b;
+ int8_t a, b;
for (i = 0; i < 16; i++) {
a = (*vA).b[i];
b = (*vB).b[i];
0.4,6.VS,11.VA,16.VB,21.RC,22.838:VXR:av:vcmpgtshx %VD, %VA, %VB:Vector Compare Greater-Than Signed Half Word
int i;
- signed16 a, b;
+ int16_t a, b;
for (i = 0; i < 8; i++) {
a = (*vA).h[i];
b = (*vB).h[i];
0.4,6.VS,11.VA,16.VB,21.RC,22.902:VXR:av:vcmpgtswx %VD, %VA, %VB:Vector Compare Greater-Than Signed Word
int i;
- signed32 a, b;
+ int32_t a, b;
for (i = 0; i < 4; i++) {
a = (*vA).w[i];
b = (*vB).w[i];
0.4,6.VS,11.VA,16.VB,21.RC,22.518:VXR:av:vcmpgtubx %VD, %VA, %VB:Vector Compare Greater-Than Unsigned Byte
int i;
- unsigned8 a, b;
+ uint8_t a, b;
for (i = 0; i < 16; i++) {
a = (*vA).b[i];
b = (*vB).b[i];
0.4,6.VS,11.VA,16.VB,21.RC,22.582:VXR:av:vcmpgtuhx %VD, %VA, %VB:Vector Compare Greater-Than Unsigned Half Word
int i;
- unsigned16 a, b;
+ uint16_t a, b;
for (i = 0; i < 8; i++) {
a = (*vA).h[i];
b = (*vB).h[i];
0.4,6.VS,11.VA,16.VB,21.RC,22.646:VXR:av:vcmpgtuwx %VD, %VA, %VB:Vector Compare Greater-Than Unsigned Word
int i;
- unsigned32 a, b;
+ uint32_t a, b;
for (i = 0; i < 4; i++) {
a = (*vA).w[i];
b = (*vB).w[i];
0.4,6.VS,11.UIMM,16.VB,21.970:VX:av:vctsxs %VD, %VB, %UIMM:Vector Convert to Signed Fixed-Point Word Saturate
int i, sat, tempsat;
- signed64 temp;
+ int64_t temp;
sim_fpu a, b, m;
sat = 0;
for (i = 0; i < 4; i++) {
0.4,6.VS,11.UIMM,16.VB,21.906:VX:av:vctuxs %VD, %VB, %UIMM:Vector Convert to Unsigned Fixed-Point Word Saturate
int i, sat, tempsat;
- signed64 temp;
+ int64_t temp;
sim_fpu a, b, m;
sat = 0;
for (i = 0; i < 4; i++) {
0.4,6.VS,11.0,16.VB,21.394:VX:av:vexptefp %VD, %VB:Vector 2 Raised to the Exponent Estimate Floating Point
int i;
- unsigned32 f;
- signed32 bi;
+ uint32_t f;
+ int32_t bi;
sim_fpu b, d;
for (i = 0; i < 4; i++) {
/*HACK!*/
0.4,6.VS,11.0,16.VB,21.458:VX:av:vlogefp %VD, %VB:Vector Log2 Estimate Floating Point
int i;
- unsigned32 c, u, f;
+ uint32_t c, u, f;
sim_fpu b, cfpu, d;
for (i = 0; i < 4; i++) {
/*HACK!*/
0.4,6.VS,11.VA,16.VB,21.VC,26.46:VAX:av:vmaddfp %VD, %VA, %VB, %VC:Vector Multiply Add Floating Point
int i;
- unsigned32 f;
+ uint32_t f;
sim_fpu a, b, c, d, e;
for (i = 0; i < 4; i++) {
sim_fpu_32to (&a, (*vA).w[i]);
0.4,6.VS,11.VA,16.VB,21.1034:VX:av:vmaxfp %VD, %VA, %VB:Vector Maximum Floating Point
int i;
- unsigned32 f;
+ uint32_t f;
sim_fpu a, b, d;
for (i = 0; i < 4; i++) {
sim_fpu_32to (&a, (*vA).w[i]);
0.4,6.VS,11.VA,16.VB,21.258:VX:av:vmaxsb %VD, %VA, %VB:Vector Maximum Signed Byte
int i;
- signed8 a, b;
+ int8_t a, b;
for (i = 0; i < 16; i++) {
a = (*vA).b[i];
b = (*vB).b[i];
0.4,6.VS,11.VA,16.VB,21.322:VX:av:vmaxsh %VD, %VA, %VB:Vector Maximum Signed Half Word
int i;
- signed16 a, b;
+ int16_t a, b;
for (i = 0; i < 8; i++) {
a = (*vA).h[i];
b = (*vB).h[i];
0.4,6.VS,11.VA,16.VB,21.386:VX:av:vmaxsw %VD, %VA, %VB:Vector Maximum Signed Word
int i;
- signed32 a, b;
+ int32_t a, b;
for (i = 0; i < 4; i++) {
a = (*vA).w[i];
b = (*vB).w[i];
0.4,6.VS,11.VA,16.VB,21.2:VX:av:vmaxub %VD, %VA, %VB:Vector Maximum Unsigned Byte
int i;
- unsigned8 a, b;
+ uint8_t a, b;
for (i = 0; i < 16; i++) {
a = (*vA).b[i];
b = (*vB).b[i];
0.4,6.VS,11.VA,16.VB,21.66:VX:av:vmaxus %VD, %VA, %VB:Vector Maximum Unsigned Half Word
int i;
- unsigned16 a, b;
+ uint16_t a, b;
for (i = 0; i < 8; i++) {
a = (*vA).h[i];
b = (*vB).h[i];
0.4,6.VS,11.VA,16.VB,21.130:VX:av:vmaxuw %VD, %VA, %VB:Vector Maximum Unsigned Word
int i;
- unsigned32 a, b;
+ uint32_t a, b;
for (i = 0; i < 4; i++) {
a = (*vA).w[i];
b = (*vB).w[i];
0.4,6.VS,11.VA,16.VB,21.VC,26.32:VAX:av:vmhaddshs %VD, %VA, %VB, %VC:Vector Multiple High and Add Signed Half Word Saturate
int i, sat, tempsat;
- signed16 a, b;
- signed32 prod, temp, c;
+ int16_t a, b;
+ int32_t prod, temp, c;
for (i = 0; i < 8; i++) {
a = (*vA).h[i];
b = (*vB).h[i];
- c = (signed32)(signed16)(*vC).h[i];
- prod = (signed32)a * (signed32)b;
+ c = (int32_t)(int16_t)(*vC).h[i];
+ prod = (int32_t)a * (int32_t)b;
temp = (prod >> 15) + c;
(*vS).h[i] = altivec_signed_saturate_16(temp, &tempsat);
sat |= tempsat;
0.4,6.VS,11.VA,16.VB,21.VC,26.33:VAX:av:vmhraddshs %VD, %VA, %VB, %VC:Vector Multiple High Round and Add Signed Half Word Saturate
int i, sat, tempsat;
- signed16 a, b;
- signed32 prod, temp, c;
+ int16_t a, b;
+ int32_t prod, temp, c;
for (i = 0; i < 8; i++) {
a = (*vA).h[i];
b = (*vB).h[i];
- c = (signed32)(signed16)(*vC).h[i];
- prod = (signed32)a * (signed32)b;
+ c = (int32_t)(int16_t)(*vC).h[i];
+ prod = (int32_t)a * (int32_t)b;
prod += 0x4000;
temp = (prod >> 15) + c;
(*vS).h[i] = altivec_signed_saturate_16(temp, &tempsat);
0.4,6.VS,11.VA,16.VB,21.1098:VX:av:vminfp %VD, %VA, %VB:Vector Minimum Floating Point
int i;
- unsigned32 f;
+ uint32_t f;
sim_fpu a, b, d;
for (i = 0; i < 4; i++) {
sim_fpu_32to (&a, (*vA).w[i]);
0.4,6.VS,11.VA,16.VB,21.770:VX:av:vminsb %VD, %VA, %VB:Vector Minimum Signed Byte
int i;
- signed8 a, b;
+ int8_t a, b;
for (i = 0; i < 16; i++) {
a = (*vA).b[i];
b = (*vB).b[i];
0.4,6.VS,11.VA,16.VB,21.834:VX:av:vminsh %VD, %VA, %VB:Vector Minimum Signed Half Word
int i;
- signed16 a, b;
+ int16_t a, b;
for (i = 0; i < 8; i++) {
a = (*vA).h[i];
b = (*vB).h[i];
0.4,6.VS,11.VA,16.VB,21.898:VX:av:vminsw %VD, %VA, %VB:Vector Minimum Signed Word
int i;
- signed32 a, b;
+ int32_t a, b;
for (i = 0; i < 4; i++) {
a = (*vA).w[i];
b = (*vB).w[i];
0.4,6.VS,11.VA,16.VB,21.514:VX:av:vminub %VD, %VA, %VB:Vector Minimum Unsigned Byte
int i;
- unsigned8 a, b;
+ uint8_t a, b;
for (i = 0; i < 16; i++) {
a = (*vA).b[i];
b = (*vB).b[i];
0.4,6.VS,11.VA,16.VB,21.578:VX:av:vminuh %VD, %VA, %VB:Vector Minimum Unsigned Half Word
int i;
- unsigned16 a, b;
+ uint16_t a, b;
for (i = 0; i < 8; i++) {
a = (*vA).h[i];
b = (*vB).h[i];
0.4,6.VS,11.VA,16.VB,21.642:VX:av:vminuw %VD, %VA, %VB:Vector Minimum Unsigned Word
int i;
- unsigned32 a, b;
+ uint32_t a, b;
for (i = 0; i < 4; i++) {
a = (*vA).w[i];
b = (*vB).w[i];
# Per halfword: vS.h[i] = low 16 bits of (vA.h[i] * vB.h[i] + vC.h[i]), modulo (no saturation).
0.4,6.VS,11.VA,16.VB,21.VC,26.34:VAX:av:vmladduhm %VD, %VA, %VB, %VC:Vector Multiply Low and Add Unsigned Half Word Modulo
int i;
- unsigned16 a, b, c;
- unsigned32 prod;
+ uint16_t a, b, c;
+ uint32_t prod;
for (i = 0; i < 8; i++) {
a = (*vA).h[i];
b = (*vB).h[i];
c = (*vC).h[i];
/* widen to 32 bits so the 16x16 product doesn't overflow before truncation */
- prod = (unsigned32)a * (unsigned32)b;
+ prod = (uint32_t)a * (uint32_t)b;
(*vS).h[i] = (prod + c) & 0xffff;
}
PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK | VC_BITMASK);
0.4,6.VS,11.VA,16.VB,21.VC,26.37:VAX:av:vmsummbm %VD, %VA, %VB, %VC:Vector Multiply Sum Mixed-Sign Byte Modulo
int i, j;
- signed32 temp;
- signed16 prod, a;
- unsigned16 b;
+ int32_t temp;
+ int16_t prod, a;
+ uint16_t b;
for (i = 0; i < 4; i++) {
temp = (*vC).w[i];
for (j = 0; j < 4; j++) {
- a = (signed16)(signed8)(*vA).b[i*4+j];
+ a = (int16_t)(int8_t)(*vA).b[i*4+j];
b = (*vB).b[i*4+j];
prod = a * b;
- temp += (signed32)prod;
+ temp += (int32_t)prod;
}
(*vS).w[i] = temp;
}
0.4,6.VS,11.VA,16.VB,21.VC,26.40:VAX:av:vmsumshm %VD, %VA, %VB, %VC:Vector Multiply Sum Signed Half Word Modulo
int i, j;
- signed32 temp, prod, a, b;
+ int32_t temp, prod, a, b;
for (i = 0; i < 4; i++) {
temp = (*vC).w[i];
for (j = 0; j < 2; j++) {
- a = (signed32)(signed16)(*vA).h[i*2+j];
- b = (signed32)(signed16)(*vB).h[i*2+j];
+ a = (int32_t)(int16_t)(*vA).h[i*2+j];
+ b = (int32_t)(int16_t)(*vB).h[i*2+j];
prod = a * b;
temp += prod;
}
0.4,6.VS,11.VA,16.VB,21.VC,26.41:VAX:av:vmsumshs %VD, %VA, %VB, %VC:Vector Multiply Sum Signed Half Word Saturate
int i, j, sat, tempsat;
- signed64 temp;
- signed32 prod, a, b;
+ int64_t temp;
+ int32_t prod, a, b;
sat = 0;
for (i = 0; i < 4; i++) {
- temp = (signed64)(signed32)(*vC).w[i];
+ temp = (int64_t)(int32_t)(*vC).w[i];
for (j = 0; j < 2; j++) {
- a = (signed32)(signed16)(*vA).h[i*2+j];
- b = (signed32)(signed16)(*vB).h[i*2+j];
+ a = (int32_t)(int16_t)(*vA).h[i*2+j];
+ b = (int32_t)(int16_t)(*vB).h[i*2+j];
prod = a * b;
- temp += (signed64)prod;
+ temp += (int64_t)prod;
}
(*vS).w[i] = altivec_signed_saturate_32(temp, &tempsat);
sat |= tempsat;
0.4,6.VS,11.VA,16.VB,21.VC,26.36:VAX:av:vmsumubm %VD, %VA, %VB, %VC:Vector Multiply Sum Unsigned Byte Modulo
int i, j;
- unsigned32 temp;
- unsigned16 prod, a, b;
+ uint32_t temp;
+ uint16_t prod, a, b;
for (i = 0; i < 4; i++) {
temp = (*vC).w[i];
for (j = 0; j < 4; j++) {
0.4,6.VS,11.VA,16.VB,21.VC,26.38:VAX:av:vmsumuhm %VD, %VA, %VB, %VC:Vector Multiply Sum Unsigned Half Word Modulo
int i, j;
- unsigned32 temp, prod, a, b;
+ uint32_t temp, prod, a, b;
for (i = 0; i < 4; i++) {
temp = (*vC).w[i];
for (j = 0; j < 2; j++) {
0.4,6.VS,11.VA,16.VB,21.VC,26.39:VAX:av:vmsumuhs %VD, %VA, %VB, %VC:Vector Multiply Sum Unsigned Half Word Saturate
int i, j, sat, tempsat;
- unsigned32 temp, prod, a, b;
+ uint32_t temp, prod, a, b;
sat = 0;
for (i = 0; i < 4; i++) {
temp = (*vC).w[i];
0.4,6.VS,11.VA,16.VB,21.776:VX:av:vmulesb %VD, %VA, %VB:Vector Multiply Even Signed Byte
int i;
- signed8 a, b;
- signed16 prod;
+ int8_t a, b;
+ int16_t prod;
for (i = 0; i < 8; i++) {
a = (*vA).b[AV_BINDEX(i*2)];
b = (*vB).b[AV_BINDEX(i*2)];
0.4,6.VS,11.VA,16.VB,21.840:VX:av:vmulesh %VD, %VA, %VB:Vector Multiply Even Signed Half Word
int i;
- signed16 a, b;
- signed32 prod;
+ int16_t a, b;
+ int32_t prod;
for (i = 0; i < 4; i++) {
a = (*vA).h[AV_HINDEX(i*2)];
b = (*vB).h[AV_HINDEX(i*2)];
0.4,6.VS,11.VA,16.VB,21.520:VX:av:vmuleub %VD, %VA, %VB:Vector Multiply Even Unsigned Byte
int i;
- unsigned8 a, b;
- unsigned16 prod;
+ uint8_t a, b;
+ uint16_t prod;
for (i = 0; i < 8; i++) {
a = (*vA).b[AV_BINDEX(i*2)];
b = (*vB).b[AV_BINDEX(i*2)];
0.4,6.VS,11.VA,16.VB,21.584:VX:av:vmuleuh %VD, %VA, %VB:Vector Multiply Even Unsigned Half Word
int i;
- unsigned16 a, b;
- unsigned32 prod;
+ uint16_t a, b;
+ uint32_t prod;
for (i = 0; i < 4; i++) {
a = (*vA).h[AV_HINDEX(i*2)];
b = (*vB).h[AV_HINDEX(i*2)];
0.4,6.VS,11.VA,16.VB,21.264:VX:av:vmulosb %VD, %VA, %VB:Vector Multiply Odd Signed Byte
int i;
- signed8 a, b;
- signed16 prod;
+ int8_t a, b;
+ int16_t prod;
for (i = 0; i < 8; i++) {
a = (*vA).b[AV_BINDEX((i*2)+1)];
b = (*vB).b[AV_BINDEX((i*2)+1)];
0.4,6.VS,11.VA,16.VB,21.328:VX:av:vmulosh %VD, %VA, %VB:Vector Multiply Odd Signed Half Word
int i;
- signed16 a, b;
- signed32 prod;
+ int16_t a, b;
+ int32_t prod;
for (i = 0; i < 4; i++) {
a = (*vA).h[AV_HINDEX((i*2)+1)];
b = (*vB).h[AV_HINDEX((i*2)+1)];
0.4,6.VS,11.VA,16.VB,21.8:VX:av:vmuloub %VD, %VA, %VB:Vector Multiply Odd Unsigned Byte
int i;
- unsigned8 a, b;
- unsigned16 prod;
+ uint8_t a, b;
+ uint16_t prod;
for (i = 0; i < 8; i++) {
a = (*vA).b[AV_BINDEX((i*2)+1)];
b = (*vB).b[AV_BINDEX((i*2)+1)];
0.4,6.VS,11.VA,16.VB,21.72:VX:av:vmulouh %VD, %VA, %VB:Vector Multiply Odd Unsigned Half Word
int i;
- unsigned16 a, b;
- unsigned32 prod;
+ uint16_t a, b;
+ uint32_t prod;
for (i = 0; i < 4; i++) {
a = (*vA).h[AV_HINDEX((i*2)+1)];
b = (*vB).h[AV_HINDEX((i*2)+1)];
0.4,6.VS,11.VA,16.VB,21.VC,26.47:VX:av:vnmsubfp %VD, %VA, %VB, %VC:Vector Negative Multiply-Subtract Floating Point
int i;
- unsigned32 f;
+ uint32_t f;
sim_fpu a, b, c, d, i1, i2;
for (i = 0; i < 4; i++) {
sim_fpu_32to (&a, (*vA).w[i]);
0.4,6.VS,11.VA,16.VB,21.398:VX:av:vpkshss %VD, %VA, %VB:Vector Pack Signed Half Word Signed Saturate
int i, sat, tempsat;
- signed16 temp;
+ int16_t temp;
sat = 0;
for (i = 0; i < 16; i++) {
if (i < 8)
0.4,6.VS,11.VA,16.VB,21.270:VX:av:vpkshus %VD, %VA, %VB:Vector Pack Signed Half Word Unsigned Saturate
int i, sat, tempsat;
- signed16 temp;
+ int16_t temp;
sat = 0;
for (i = 0; i < 16; i++) {
if (i < 8)
0.4,6.VS,11.VA,16.VB,21.462:VX:av:vpkswss %VD, %VA, %VB:Vector Pack Signed Word Signed Saturate
int i, sat, tempsat;
- signed32 temp;
+ int32_t temp;
sat = 0;
for (i = 0; i < 8; i++) {
if (i < 4)
0.4,6.VS,11.VA,16.VB,21.334:VX:av:vpkswus %VD, %VA, %VB:Vector Pack Signed Word Unsigned Saturate
int i, sat, tempsat;
- signed32 temp;
+ int32_t temp;
sat = 0;
for (i = 0; i < 8; i++) {
if (i < 4)
0.4,6.VS,11.VA,16.VB,21.142:VX:av:vpkuhus %VD, %VA, %VB:Vector Pack Unsigned Half Word Unsigned Saturate
int i, sat, tempsat;
- signed16 temp;
+ int16_t temp;
sat = 0;
for (i = 0; i < 16; i++) {
if (i < 8)
temp = (*vA).h[AV_HINDEX(i)];
else
temp = (*vB).h[AV_HINDEX(i-8)];
- /* force positive in signed16, ok as we'll toss the bit away anyway */
+ /* force positive in int16_t, ok as we'll toss the bit away anyway */
temp &= ~0x8000;
(*vS).b[AV_BINDEX(i)] = altivec_unsigned_saturate_8(temp, &tempsat);
sat |= tempsat;
0.4,6.VS,11.VA,16.VB,21.206:VX:av:vpkuwus %VD, %VA, %VB:Vector Pack Unsigned Word Unsigned Saturate
int i, sat, tempsat;
- signed32 temp;
+ int32_t temp;
sat = 0;
for (i = 0; i < 8; i++) {
if (i < 4)
temp = (*vA).w[i];
else
temp = (*vB).w[i-4];
- /* force positive in signed32, ok as we'll toss the bit away anyway */
+ /* force positive in int32_t, ok as we'll toss the bit away anyway */
temp &= ~0x80000000;
(*vS).h[AV_HINDEX(i)] = altivec_unsigned_saturate_16(temp, &tempsat);
sat |= tempsat;
0.4,6.VS,11.0,16.VB,21.266:VX:av:vrefp %VD, %VB:Vector Reciprocal Estimate Floating Point
int i;
- unsigned32 f;
+ uint32_t f;
sim_fpu op, d;
for (i = 0; i < 4; i++) {
sim_fpu_32to (&op, (*vB).w[i]);
0.4,6.VS,11.0,16.VB,21.330:VX:av:vrsqrtefp %VD, %VB:Vector Reciprocal Square Root Estimate Floating Point
int i;
- unsigned32 f;
+ uint32_t f;
sim_fpu op, i1, one, d;
for (i = 0; i < 4; i++) {
sim_fpu_32to (&op, (*vB).w[i]);
0.4,6.VS,11.0,16.VB,21.714:VX:av:vrfim %VD, %VB:Vector Round to Floating-Point Integer towards Minus Infinity
int i;
- unsigned32 f;
+ uint32_t f;
sim_fpu op;
for (i = 0; i < 4; i++) {
sim_fpu_32to (&op, (*vB).w[i]);
0.4,6.VS,11.0,16.VB,21.522:VX:av:vrfin %VD, %VB:Vector Round to Floating-Point Integer Nearest
int i;
- unsigned32 f;
+ uint32_t f;
sim_fpu op;
for (i = 0; i < 4; i++) {
sim_fpu_32to (&op, (*vB).w[i]);
0.4,6.VS,11.0,16.VB,21.650:VX:av:vrfip %VD, %VB:Vector Round to Floating-Point Integer towards Plus Infinity
int i;
- unsigned32 f;
+ uint32_t f;
sim_fpu op;
for (i = 0; i < 4; i++) {
sim_fpu_32to (&op, (*vB).w[i]);
0.4,6.VS,11.0,16.VB,21.586:VX:av:vrfiz %VD, %VB:Vector Round to Floating-Point Integer towards Zero
int i;
- unsigned32 f;
+ uint32_t f;
sim_fpu op;
for (i = 0; i < 4; i++) {
sim_fpu_32to (&op, (*vB).w[i]);
# Rotate each byte of vA left by the low 3 bits of the corresponding byte of vB.
0.4,6.VS,11.VA,16.VB,21.4:VX:av:vrlb %VD, %VA, %VB:Vector Rotate Left Integer Byte
int i;
- unsigned16 temp;
+ uint16_t temp;
for (i = 0; i < 16; i++) {
- temp = (unsigned16)(*vA).b[i] << (((*vB).b[i]) & 7);
+ temp = (uint16_t)(*vA).b[i] << (((*vB).b[i]) & 7);
/* 16-bit temp holds the shifted-out high bits; OR them back in as the rotate wrap-around */
(*vS).b[i] = (temp & 0xff) | ((temp >> 8) & 0xff);
}
PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
# Rotate each halfword of vA left by the low 4 bits of the corresponding halfword of vB.
0.4,6.VS,11.VA,16.VB,21.68:VX:av:vrlh %VD, %VA, %VB:Vector Rotate Left Integer Half Word
int i;
- unsigned32 temp;
+ uint32_t temp;
for (i = 0; i < 8; i++) {
- temp = (unsigned32)(*vA).h[i] << (((*vB).h[i]) & 0xf);
+ temp = (uint32_t)(*vA).h[i] << (((*vB).h[i]) & 0xf);
/* 32-bit temp holds the shifted-out high bits; OR them back in as the rotate wrap-around */
(*vS).h[i] = (temp & 0xffff) | ((temp >> 16) & 0xffff);
}
PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
# Rotate each word of vA left by the low 5 bits of the corresponding word of vB.
0.4,6.VS,11.VA,16.VB,21.132:VX:av:vrlw %VD, %VA, %VB:Vector Rotate Left Integer Word
int i;
- unsigned64 temp;
+ uint64_t temp;
for (i = 0; i < 4; i++) {
- temp = (unsigned64)(*vA).w[i] << (((*vB).w[i]) & 0x1f);
+ temp = (uint64_t)(*vA).w[i] << (((*vB).w[i]) & 0x1f);
/* 64-bit temp holds the shifted-out high bits; OR them back in as the rotate wrap-around */
(*vS).w[i] = (temp & 0xffffffff) | ((temp >> 32) & 0xffffffff);
}
PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
0.4,6.VS,11.VA,16.VB,21.VC,26.42:VAX:av:vsel %VD, %VA, %VB, %VC:Vector Conditional Select
int i;
- unsigned32 c;
+ uint32_t c;
for (i = 0; i < 4; i++) {
c = (*vC).w[i];
(*vS).w[i] = ((*vB).w[i] & c) | ((*vA).w[i] & ~c);
0.4,6.VS,11.UIMM,16.VB,21.524:VX:av:vspltb %VD, %VB, %UIMM:Vector Splat Byte
int i;
- unsigned8 b;
+ uint8_t b;
b = (*vB).b[AV_BINDEX(UIMM & 0xf)];
for (i = 0; i < 16; i++)
(*vS).b[i] = b;
0.4,6.VS,11.UIMM,16.VB,21.588:VX:av:vsplth %VD, %VB, %UIMM:Vector Splat Half Word
int i;
- unsigned16 h;
+ uint16_t h;
h = (*vB).h[AV_HINDEX(UIMM & 0x7)];
for (i = 0; i < 8; i++)
(*vS).h[i] = h;
0.4,6.VS,11.SIMM,16.0,21.780:VX:av:vspltisb %VD, %SIMM:Vector Splat Immediate Signed Byte
int i;
- signed8 b = SIMM;
+ int8_t b = SIMM;
/* manual 5-bit signed extension */
if (b & 0x10)
b -= 0x20;
0.4,6.VS,11.SIMM,16.0,21.844:VX:av:vspltish %VD, %SIMM:Vector Splat Immediate Signed Half Word
int i;
- signed16 h = SIMM;
+ int16_t h = SIMM;
/* manual 5-bit signed extension */
if (h & 0x10)
h -= 0x20;
0.4,6.VS,11.SIMM,16.0,21.908:VX:av:vspltisw %VD, %SIMM:Vector Splat Immediate Signed Word
int i;
- signed32 w = SIMM;
+ int32_t w = SIMM;
/* manual 5-bit signed extension */
if (w & 0x10)
w -= 0x20;
0.4,6.VS,11.UIMM,16.VB,21.652:VX:av:vspltw %VD, %VB, %UIMM:Vector Splat Word
int i;
- unsigned32 w;
+ uint32_t w;
w = (*vB).w[UIMM & 0x3];
for (i = 0; i < 4; i++)
(*vS).w[i] = w;
# Arithmetic right shift of each byte of vA by the low 3 bits of the corresponding byte of vB.
0.4,6.VS,11.VA,16.VB,21.772:VX:av:vsrab %VD, %VA, %VB:Vector Shift Right Algebraic Byte
int i, sh;
- signed16 a;
+ int16_t a;
for (i = 0; i < 16; i++) {
sh = ((*vB).b[i]) & 7;
/* sign-extend the byte so >> replicates the sign bit */
- a = (signed16)(signed8)(*vA).b[i];
+ a = (int16_t)(int8_t)(*vA).b[i];
(*vS).b[i] = (a >> sh) & 0xff;
}
PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
# Arithmetic right shift of each halfword of vA by the low 4 bits of the corresponding halfword of vB.
0.4,6.VS,11.VA,16.VB,21.836:VX:av:vsrah %VD, %VA, %VB:Vector Shift Right Algebraic Half Word
int i, sh;
- signed32 a;
+ int32_t a;
for (i = 0; i < 8; i++) {
sh = ((*vB).h[i]) & 0xf;
/* sign-extend the halfword so >> replicates the sign bit */
- a = (signed32)(signed16)(*vA).h[i];
+ a = (int32_t)(int16_t)(*vA).h[i];
(*vS).h[i] = (a >> sh) & 0xffff;
}
PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
# Arithmetic right shift of each word of vA by the low 5 bits of the
# corresponding word of vB.
# FIX: the shift-amount mask was 0xf (4 bits), silently truncating shift
# counts 16-31.  A word shift takes a 5-bit count -- compare vsrab (& 7)
# and vsrah (& 0xf) above -- so mask with 0x1f per the AltiVec PEM.
0.4,6.VS,11.VA,16.VB,21.900:VX:av:vsraw %VD, %VA, %VB:Vector Shift Right Algebraic Word
int i, sh;
- signed64 a;
+ int64_t a;
for (i = 0; i < 4; i++) {
sh = ((*vB).w[i]) & 0x1f;
/* sign-extend the word so >> replicates the sign bit */
- a = (signed64)(signed32)(*vA).w[i];
+ a = (int64_t)(int32_t)(*vA).w[i];
(*vS).w[i] = (a >> sh) & 0xffffffff;
}
PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
0.4,6.VS,11.VA,16.VB,21.1408:VX:av:vsubcuw %VD, %VA, %VB:Vector Subtract Carryout Unsigned Word
int i;
- signed64 temp, a, b;
+ int64_t temp, a, b;
for (i = 0; i < 4; i++) {
- a = (signed64)(unsigned32)(*vA).w[i];
- b = (signed64)(unsigned32)(*vB).w[i];
+ a = (int64_t)(uint32_t)(*vA).w[i];
+ b = (int64_t)(uint32_t)(*vB).w[i];
temp = a - b;
(*vS).w[i] = ~(temp >> 32) & 1;
}
0.4,6.VS,11.VA,16.VB,21.74:VX:av:vsubfp %VD, %VA, %VB:Vector Subtract Floating Point
int i;
- unsigned32 f;
+ uint32_t f;
sim_fpu a, b, d;
for (i = 0; i < 4; i++) {
sim_fpu_32to (&a, (*vA).w[i]);
0.4,6.VS,11.VA,16.VB,21.1792:VX:av:vsubsbs %VD, %VA, %VB:Vector Subtract Signed Byte Saturate
int i, sat, tempsat;
- signed16 temp;
+ int16_t temp;
sat = 0;
for (i = 0; i < 16; i++) {
- temp = (signed16)(signed8)(*vA).b[i] - (signed16)(signed8)(*vB).b[i];
+ temp = (int16_t)(int8_t)(*vA).b[i] - (int16_t)(int8_t)(*vB).b[i];
(*vS).b[i] = altivec_signed_saturate_8(temp, &tempsat);
sat |= tempsat;
}
0.4,6.VS,11.VA,16.VB,21.1856:VX:av:vsubshs %VD, %VA, %VB:Vector Subtract Signed Half Word Saturate
int i, sat, tempsat;
- signed32 temp;
+ int32_t temp;
sat = 0;
for (i = 0; i < 8; i++) {
- temp = (signed32)(signed16)(*vA).h[i] - (signed32)(signed16)(*vB).h[i];
+ temp = (int32_t)(int16_t)(*vA).h[i] - (int32_t)(int16_t)(*vB).h[i];
(*vS).h[i] = altivec_signed_saturate_16(temp, &tempsat);
sat |= tempsat;
}
0.4,6.VS,11.VA,16.VB,21.1920:VX:av:vsubsws %VD, %VA, %VB:Vector Subtract Signed Word Saturate
int i, sat, tempsat;
- signed64 temp;
+ int64_t temp;
sat = 0;
for (i = 0; i < 4; i++) {
- temp = (signed64)(signed32)(*vA).w[i] - (signed64)(signed32)(*vB).w[i];
+ temp = (int64_t)(int32_t)(*vA).w[i] - (int64_t)(int32_t)(*vB).w[i];
(*vS).w[i] = altivec_signed_saturate_32(temp, &tempsat);
sat |= tempsat;
}
0.4,6.VS,11.VA,16.VB,21.1536:VX:av:vsububs %VD, %VA, %VB:Vector Subtract Unsigned Byte Saturate
int i, sat, tempsat;
- signed16 temp;
+ int16_t temp;
sat = 0;
for (i = 0; i < 16; i++) {
- temp = (signed16)(unsigned8)(*vA).b[i] - (signed16)(unsigned8)(*vB).b[i];
+ temp = (int16_t)(uint8_t)(*vA).b[i] - (int16_t)(uint8_t)(*vB).b[i];
(*vS).b[i] = altivec_unsigned_saturate_8(temp, &tempsat);
sat |= tempsat;
}
0.4,6.VS,11.VA,16.VB,21.1600:VX:av:vsubuhs %VD, %VA, %VB:Vector Subtract Unsigned Half Word Saturate
int i, sat, tempsat;
- signed32 temp;
+ int32_t temp;
for (i = 0; i < 8; i++) {
- temp = (signed32)(unsigned16)(*vA).h[i] - (signed32)(unsigned16)(*vB).h[i];
+ temp = (int32_t)(uint16_t)(*vA).h[i] - (int32_t)(uint16_t)(*vB).h[i];
(*vS).h[i] = altivec_unsigned_saturate_16(temp, &tempsat);
sat |= tempsat;
}
0.4,6.VS,11.VA,16.VB,21.1664:VX:av:vsubuws %VD, %VA, %VB:Vector Subtract Unsigned Word Saturate
int i, sat, tempsat;
- signed64 temp;
+ int64_t temp;
for (i = 0; i < 4; i++) {
- temp = (signed64)(unsigned32)(*vA).w[i] - (signed64)(unsigned32)(*vB).w[i];
+ temp = (int64_t)(uint32_t)(*vA).w[i] - (int64_t)(uint32_t)(*vB).w[i];
(*vS).w[i] = altivec_unsigned_saturate_32(temp, &tempsat);
sat |= tempsat;
}
0.4,6.VS,11.VA,16.VB,21.1928:VX:av:vsumsws %VD, %VA, %VB:Vector Sum Across Signed Word Saturate
int i, sat;
- signed64 temp;
- temp = (signed64)(signed32)(*vB).w[3];
+ int64_t temp;
+ temp = (int64_t)(int32_t)(*vB).w[3];
for (i = 0; i < 4; i++)
- temp += (signed64)(signed32)(*vA).w[i];
+ temp += (int64_t)(int32_t)(*vA).w[i];
(*vS).w[3] = altivec_signed_saturate_32(temp, &sat);
(*vS).w[0] = (*vS).w[1] = (*vS).w[2] = 0;
ALTIVEC_SET_SAT(sat);
0.4,6.VS,11.VA,16.VB,21.1672:VX:av:vsum2sws %VD, %VA, %VB:Vector Sum Across Partial (1/2) Signed Word Saturate
int i, j, sat, tempsat;
- signed64 temp;
+ int64_t temp;
for (j = 0; j < 4; j += 2) {
- temp = (signed64)(signed32)(*vB).w[j+1];
- temp += (signed64)(signed32)(*vA).w[j] + (signed64)(signed32)(*vA).w[j+1];
+ temp = (int64_t)(int32_t)(*vB).w[j+1];
+ temp += (int64_t)(int32_t)(*vA).w[j] + (int64_t)(int32_t)(*vA).w[j+1];
(*vS).w[j+1] = altivec_signed_saturate_32(temp, &tempsat);
sat |= tempsat;
}
0.4,6.VS,11.VA,16.VB,21.1800:VX:av:vsum4sbs %VD, %VA, %VB:Vector Sum Across Partial (1/4) Signed Byte Saturate
int i, j, sat, tempsat;
- signed64 temp;
+ int64_t temp;
for (j = 0; j < 4; j++) {
- temp = (signed64)(signed32)(*vB).w[j];
+ temp = (int64_t)(int32_t)(*vB).w[j];
for (i = 0; i < 4; i++)
- temp += (signed64)(signed8)(*vA).b[i+(j*4)];
+ temp += (int64_t)(int8_t)(*vA).b[i+(j*4)];
(*vS).w[j] = altivec_signed_saturate_32(temp, &tempsat);
sat |= tempsat;
}
0.4,6.VS,11.VA,16.VB,21.1608:VX:av:vsum4shs %VD, %VA, %VB:Vector Sum Across Partial (1/4) Signed Half Word Saturate
int i, j, sat, tempsat;
- signed64 temp;
+ int64_t temp;
for (j = 0; j < 4; j++) {
- temp = (signed64)(signed32)(*vB).w[j];
+ temp = (int64_t)(int32_t)(*vB).w[j];
for (i = 0; i < 2; i++)
- temp += (signed64)(signed16)(*vA).h[i+(j*2)];
+ temp += (int64_t)(int16_t)(*vA).h[i+(j*2)];
(*vS).w[j] = altivec_signed_saturate_32(temp, &tempsat);
sat |= tempsat;
}
0.4,6.VS,11.VA,16.VB,21.1544:VX:av:vsum4ubs %VD, %VA, %VB:Vector Sum Across Partial (1/4) Unsigned Byte Saturate
int i, j, sat, tempsat;
- signed64 utemp;
- signed64 temp;
+ int64_t utemp;
+ int64_t temp;
for (j = 0; j < 4; j++) {
- utemp = (signed64)(unsigned32)(*vB).w[j];
+ utemp = (int64_t)(uint32_t)(*vB).w[j];
for (i = 0; i < 4; i++)
- utemp += (signed64)(unsigned16)(*vA).b[i+(j*4)];
+ utemp += (int64_t)(uint16_t)(*vA).b[i+(j*4)];
temp = utemp;
(*vS).w[j] = altivec_unsigned_saturate_32(temp, &tempsat);
sat |= tempsat;
0.4,6.VS,11.0,16.VB,21.846:VX:av:vupkhpx %VD, %VB:Vector Unpack High Pixel16
int i;
- unsigned16 h;
+ uint16_t h;
for (i = 0; i < 4; i++) {
h = (*vB).h[AV_HINDEX(i)];
(*vS).w[i] = ((h & 0x8000) ? 0xff000000 : 0)
0.4,6.VS,11.0,16.VB,21.526:VX:av:vupkhsb %VD, %VB:Vector Unpack High Signed Byte
int i;
for (i = 0; i < 8; i++)
- (*vS).h[AV_HINDEX(i)] = (signed16)(signed8)(*vB).b[AV_BINDEX(i)];
+ (*vS).h[AV_HINDEX(i)] = (int16_t)(int8_t)(*vB).b[AV_BINDEX(i)];
PPC_INSN_VR(VS_BITMASK, VB_BITMASK);
0.4,6.VS,11.0,16.VB,21.590:VX:av:vupkhsh %VD, %VB:Vector Unpack High Signed Half Word
int i;
for (i = 0; i < 4; i++)
- (*vS).w[i] = (signed32)(signed16)(*vB).h[AV_HINDEX(i)];
+ (*vS).w[i] = (int32_t)(int16_t)(*vB).h[AV_HINDEX(i)];
PPC_INSN_VR(VS_BITMASK, VB_BITMASK);
0.4,6.VS,11.0,16.VB,21.974:VX:av:vupklpx %VD, %VB:Vector Unpack Low Pixel16
int i;
- unsigned16 h;
+ uint16_t h;
for (i = 0; i < 4; i++) {
h = (*vB).h[AV_HINDEX(i + 4)];
(*vS).w[i] = ((h & 0x8000) ? 0xff000000 : 0)
0.4,6.VS,11.0,16.VB,21.654:VX:av:vupklsb %VD, %VB:Vector Unpack Low Signed Byte
int i;
for (i = 0; i < 8; i++)
- (*vS).h[AV_HINDEX(i)] = (signed16)(signed8)(*vB).b[AV_BINDEX(i + 8)];
+ (*vS).h[AV_HINDEX(i)] = (int16_t)(int8_t)(*vB).b[AV_BINDEX(i + 8)];
PPC_INSN_VR(VS_BITMASK, VB_BITMASK);
0.4,6.VS,11.0,16.VB,21.718:VX:av:vupklsh %VD, %VB:Vector Unpack Low Signed Half Word
int i;
for (i = 0; i < 4; i++)
- (*vS).w[i] = (signed32)(signed16)(*vB).h[AV_HINDEX(i + 4)];
+ (*vS).w[i] = (int32_t)(int16_t)(*vB).h[AV_HINDEX(i + 4)];
PPC_INSN_VR(VS_BITMASK, VB_BITMASK);
} while (0)
# Schedule an instruction that takes 2 integer register and produces a special purpose output register plus an integer output register
-void::model-function::ppc_insn_int_spr:itable_index index, model_data *model_ptr, const unsigned32 out_mask, const unsigned32 in_mask, const unsigned nSPR
- const unsigned32 int_mask = out_mask | in_mask;
+void::model-function::ppc_insn_int_spr:itable_index index, model_data *model_ptr, const uint32_t out_mask, const uint32_t in_mask, const unsigned nSPR
+ const uint32_t int_mask = out_mask | in_mask;
model_busy *busy_ptr;
while ((model_ptr->int_busy & int_mask) != 0 || model_ptr->spr_busy[nSPR] != 0) {
#
# SPE Modulo Fractional Multiplication handling support
#
-:function:e500::unsigned64:ev_multiply16_smf:signed16 a, signed16 b, int *sat
- signed32 a32 = a, b32 = b, rv32;
+:function:e500::uint64_t:ev_multiply16_smf:int16_t a, int16_t b, int *sat
+ int32_t a32 = a, b32 = b, rv32;
rv32 = a * b;
*sat = (rv32 & (3<<30)) == (3<<30);
- return (signed64)rv32 << 1;
+ return (int64_t)rv32 << 1;
-:function:e500::unsigned64:ev_multiply32_smf:signed32 a, signed32 b, int *sat
- signed64 rv64, a64 = a, b64 = b;
+:function:e500::uint64_t:ev_multiply32_smf:int32_t a, int32_t b, int *sat
+ int64_t rv64, a64 = a, b64 = b;
rv64 = a64 * b64;
- *sat = (rv64 & ((signed64)3<<62)) == ((signed64)3<<62);
+ *sat = (rv64 & ((int64_t)3<<62)) == ((int64_t)3<<62);
/* Loses top sign bit. */
return rv64 << 1;
#
# SPE Saturation handling support
#
-:function:e500::signed32:ev_multiply16_ssf:signed16 a, signed16 b, int *sat
- signed32 rv32;
+:function:e500::int32_t:ev_multiply16_ssf:int16_t a, int16_t b, int *sat
+ int32_t rv32;
if (a == 0xffff8000 && b == 0xffff8000)
{
rv32 = 0x7fffffffL;
}
else
{
- signed32 a32 = a, b32 = b;
+ int32_t a32 = a, b32 = b;
rv32 = a * b;
* sat = (rv32 & (3<<30)) == (3<<30);
- return (signed64)rv32 << 1;
+ return (int64_t)rv32 << 1;
}
-:function:e500::signed64:ev_multiply32_ssf:signed32 a, signed32 b, int *sat
- signed64 rv64;
+:function:e500::int64_t:ev_multiply32_ssf:int32_t a, int32_t b, int *sat
+ int64_t rv64;
if (a == 0x80000000 && b == 0x80000000)
{
rv64 = 0x7fffffffffffffffLL;
}
else
{
- signed64 a64 = a, b64 = b;
+ int64_t a64 = a, b64 = b;
rv64 = a64 * b64;
- *sat = (rv64 & ((signed64)3<<62)) == ((signed64)3<<62);
+ *sat = (rv64 & ((int64_t)3<<62)) == ((int64_t)3<<62);
/* Loses top sign bit. */
return rv64 << 1;
}
#
:function:e500::void:ev_check_guard:sim_fpu *a, int fg, int fx, cpu *processor
- unsigned64 guard;
+ uint64_t guard;
guard = sim_fpu_guard(a, 0);
if (guard & 1)
EV_SET_SPEFSCR_BITS(fg);
if (guard & ~1)
EV_SET_SPEFSCR_BITS(fx);
-:function:e500::void:booke_sim_fpu_32to:sim_fpu *dst, unsigned32 packed
+:function:e500::void:booke_sim_fpu_32to:sim_fpu *dst, uint32_t packed
sim_fpu_32to (dst, packed);
/* Set normally unused fields to allow booke arithmetic. */
if (dst->class == sim_fpu_class_infinity)
{
dst->normal_exp = 128;
- dst->fraction = ((unsigned64)1 << 60);
+ dst->fraction = ((uint64_t)1 << 60);
}
else if (dst->class == sim_fpu_class_qnan
|| dst->class == sim_fpu_class_snan)
dst->normal_exp = 128;
/* This is set, but without the implicit bit, so we have to or
in the implicit bit. */
- dst->fraction |= ((unsigned64)1 << 60);
+ dst->fraction |= ((uint64_t)1 << 60);
}
:function:e500::int:booke_sim_fpu_add:sim_fpu *d, sim_fpu *a, sim_fpu *b, int inv, int over, int under, cpu *processor
return invalid_operand || overflow_result || underflow_result;
-:function:e500::unsigned32:ev_fs_add:unsigned32 aa, unsigned32 bb, int inv, int over, int under, int fg, int fx, cpu *processor
+:function:e500::uint32_t:ev_fs_add:uint32_t aa, uint32_t bb, int inv, int over, int under, int fg, int fx, cpu *processor
sim_fpu a, b, d;
- unsigned32 w;
+ uint32_t w;
int exception;
booke_sim_fpu_32to (&a, aa);
ev_check_guard(&d, fg, fx, processor);
return w;
-:function:e500::unsigned32:ev_fs_sub:unsigned32 aa, unsigned32 bb, int inv, int over, int under, int fg, int fx, cpu *processor
+:function:e500::uint32_t:ev_fs_sub:uint32_t aa, uint32_t bb, int inv, int over, int under, int fg, int fx, cpu *processor
sim_fpu a, b, d;
- unsigned32 w;
+ uint32_t w;
int exception;
booke_sim_fpu_32to (&a, aa);
# sim_fpu_exp leaves the normal_exp field undefined for Inf and NaN.
# The booke algorithms require exp values, so we fake them here.
# fixme: It also apparently does the same for zero, but should not.
-:function:e500::unsigned32:booke_sim_fpu_exp:sim_fpu *x
+:function:e500::uint32_t:booke_sim_fpu_exp:sim_fpu *x
int y = sim_fpu_is (x);
if (y == SIM_FPU_IS_PZERO || y == SIM_FPU_IS_NZERO)
return 0;
else
return sim_fpu_exp (x);
-:function:e500::unsigned32:ev_fs_mul:unsigned32 aa, unsigned32 bb, int inv, int over, int under, int fg, int fx, cpu *processor
+:function:e500::uint32_t:ev_fs_mul:uint32_t aa, uint32_t bb, int inv, int over, int under, int fg, int fx, cpu *processor
sim_fpu a, b, d;
- unsigned32 w;
+ uint32_t w;
int sa, sb, ea, eb, ei;
sim_fpu_32to (&a, aa);
sim_fpu_32to (&b, bb);
}
return w;
-:function:e500::unsigned32:ev_fs_div:unsigned32 aa, unsigned32 bb, int inv, int over, int under, int dbz, int fg, int fx, cpu *processor
+:function:e500::uint32_t:ev_fs_div:uint32_t aa, uint32_t bb, int inv, int over, int under, int dbz, int fg, int fx, cpu *processor
sim_fpu a, b, d;
- unsigned32 w;
+ uint32_t w;
int sa, sb, ea, eb, ei;
sim_fpu_32to (&a, aa);
#
0.4,6.RS,11.RA,16.RB,21.512:X:e500:evaddw %RS,%RA,%RB:Vector Add Word
- unsigned32 w1, w2;
+ uint32_t w1, w2;
w1 = *rBh + *rAh;
w2 = *rB + *rA;
EV_SET_REG2(*rSh, *rS, w1, w2);
PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
0.4,6.RS,11.IMM,16.RB,21.514:X:e500:evaddiw %RS,%RB,%IMM:Vector Add Immediate Word
- unsigned32 w1, w2;
+ uint32_t w1, w2;
w1 = *rBh + IMM;
w2 = *rB + IMM;
EV_SET_REG2(*rSh, *rS, w1, w2);
PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
0.4,6.RS,11.RA,16.RB,21.516:X:e500:evsubfw %RS,%RA,%RB:Vector Subtract from Word
- unsigned32 w1, w2;
+ uint32_t w1, w2;
w1 = *rBh - *rAh;
w2 = *rB - *rA;
EV_SET_REG2(*rSh, *rS, w1, w2);
PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
0.4,6.RS,11.IMM,16.RB,21.518:X:e500:evsubifw %RS,%RB,%IMM:Vector Subtract Immediate from Word
- unsigned32 w1, w2;
+ uint32_t w1, w2;
w1 = *rBh - IMM;
w2 = *rB - IMM;
EV_SET_REG2(*rSh, *rS, w1, w2);
PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
0.4,6.RS,11.RA,16.0,21.520:X:e500:evabs %RS,%RA:Vector Absolute Value
- signed32 w1, w2;
+ int32_t w1, w2;
w1 = *rAh;
if (w1 < 0 && w1 != 0x80000000)
w1 = -w1;
PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
0.4,6.RS,11.RA,16.0,21.521:X:e500:evneg %RS,%RA:Vector Negate
- signed32 w1, w2;
+ int32_t w1, w2;
w1 = *rAh;
/* the negative most negative number is the most negative number */
if (w1 != 0x80000000)
PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
0.4,6.RS,11.RA,16.0,21.522:X:e500:evextsb %RS,%RA:Vector Extend Signed Byte
- unsigned64 w1, w2;
+ uint64_t w1, w2;
w1 = *rAh & 0xff;
if (w1 & 0x80)
w1 |= 0xffffff00;
PPC_INSN_INT(RS_BITMASK, RA_BITMASK , 0);
0.4,6.RS,11.RA,16.0,21.523:X:e500:evextsb %RS,%RA:Vector Extend Signed Half Word
- unsigned64 w1, w2;
+ uint64_t w1, w2;
w1 = *rAh & 0xffff;
if (w1 & 0x8000)
w1 |= 0xffff0000;
PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
0.4,6.RS,11.RA,16.RB,21.529:X:e500:evand %RS,%RA,%RB:Vector AND
- unsigned32 w1, w2;
+ uint32_t w1, w2;
w1 = *rBh & *rAh;
w2 = *rB & *rA;
EV_SET_REG2(*rSh, *rS, w1, w2);
PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.RB,21.535:X:e500:evor %RS,%RA,%RB:Vector OR
- unsigned32 w1, w2;
+ uint32_t w1, w2;
w1 = *rBh | *rAh;
w2 = *rB | *rA;
EV_SET_REG2(*rSh, *rS, w1, w2);
PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.RB,21.534:X:e500:evxor %RS,%RA,%RB:Vector XOR
- unsigned32 w1, w2;
+ uint32_t w1, w2;
w1 = *rBh ^ *rAh;
w2 = *rB ^ *rA;
EV_SET_REG2(*rSh, *rS, w1, w2);
PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.RB,21.542:X:e500:evnand %RS,%RA,%RB:Vector NAND
- unsigned32 w1, w2;
+ uint32_t w1, w2;
w1 = ~(*rBh & *rAh);
w2 = ~(*rB & *rA);
EV_SET_REG2(*rSh, *rS, w1, w2);
PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.RB,21.536:X:e500:evnor %RS,%RA,%RB:Vector NOR
- unsigned32 w1, w2;
+ uint32_t w1, w2;
w1 = ~(*rBh | *rAh);
w2 = ~(*rB | *rA);
EV_SET_REG2(*rSh, *rS, w1, w2);
PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.RB,21.537:X:e500:eveqv %RS,%RA,%RB:Vector Equivalent
- unsigned32 w1, w2;
+ uint32_t w1, w2;
w1 = (~*rBh) ^ *rAh;
w2 = (~*rB) ^ *rA;
EV_SET_REG2(*rSh, *rS, w1, w2);
PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.RB,21.530:X:e500:evandc %RS,%RA,%RB:Vector AND with Compliment
- unsigned32 w1, w2;
+ uint32_t w1, w2;
w1 = (~*rBh) & *rAh;
w2 = (~*rB) & *rA;
EV_SET_REG2(*rSh, *rS, w1, w2);
PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.RB,21.539:X:e500:evorc %RS,%RA,%RB:Vector OR with Compliment
- unsigned32 w1, w2;
+ uint32_t w1, w2;
w1 = (~*rBh) | *rAh;
w2 = (~*rB) | *rA;
EV_SET_REG2(*rSh, *rS, w1, w2);
PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.RB,21.552:X:e500:evrlw %RS,%RA,%RB:Vector Rotate Left Word
- unsigned32 nh, nl, w1, w2;
+ uint32_t nh, nl, w1, w2;
nh = *rBh & 0x1f;
nl = *rB & 0x1f;
- w1 = ((unsigned32)*rAh) << nh | ((unsigned32)*rAh) >> (32 - nh);
- w2 = ((unsigned32)*rA) << nl | ((unsigned32)*rA) >> (32 - nl);
+ w1 = ((uint32_t)*rAh) << nh | ((uint32_t)*rAh) >> (32 - nh);
+ w2 = ((uint32_t)*rA) << nl | ((uint32_t)*rA) >> (32 - nl);
EV_SET_REG2(*rSh, *rS, w1, w2);
//printf("evrlw: nh %d nl %d *rSh = %08x; *rS = %08x\n", nh, nl, *rSh, *rS);
PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.UIMM,21.554:X:e500:evrlwi %RS,%RA,%UIMM:Vector Rotate Left Word Immediate
- unsigned32 w1, w2, imm;
- imm = (unsigned32)UIMM;
- w1 = ((unsigned32)*rAh) << imm | ((unsigned32)*rAh) >> (32 - imm);
- w2 = ((unsigned32)*rA) << imm | ((unsigned32)*rA) >> (32 - imm);
+ uint32_t w1, w2, imm;
+ imm = (uint32_t)UIMM;
+ w1 = ((uint32_t)*rAh) << imm | ((uint32_t)*rAh) >> (32 - imm);
+ w2 = ((uint32_t)*rA) << imm | ((uint32_t)*rA) >> (32 - imm);
EV_SET_REG2(*rSh, *rS, w1, w2);
PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
0.4,6.RS,11.RA,16.RB,21.548:X:e500:evslw %RS,%RA,%RB:Vector Shift Left Word
- unsigned32 nh, nl, w1, w2;
+ uint32_t nh, nl, w1, w2;
nh = *rBh & 0x1f;
nl = *rB & 0x1f;
- w1 = ((unsigned32)*rAh) << nh;
- w2 = ((unsigned32)*rA) << nl;
+ w1 = ((uint32_t)*rAh) << nh;
+ w2 = ((uint32_t)*rA) << nl;
EV_SET_REG2(*rSh, *rS, w1, w2);
PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.UIMM,21.550:X:e500:evslwi %RS,%RA,%UIMM:Vector Shift Left Word Immediate
- unsigned32 w1, w2, imm = UIMM;
- w1 = ((unsigned32)*rAh) << imm;
- w2 = ((unsigned32)*rA) << imm;
+ uint32_t w1, w2, imm = UIMM;
+ w1 = ((uint32_t)*rAh) << imm;
+ w2 = ((uint32_t)*rA) << imm;
EV_SET_REG2(*rSh, *rS, w1, w2);
PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
0.4,6.RS,11.RA,16.RB,21.545:X:e500:evsrws %RS,%RA,%RB:Vector Shift Right Word Signed
- signed32 w1, w2;
- unsigned32 nh, nl;
+ int32_t w1, w2;
+ uint32_t nh, nl;
nh = *rBh & 0x1f;
nl = *rB & 0x1f;
- w1 = ((signed32)*rAh) >> nh;
- w2 = ((signed32)*rA) >> nl;
+ w1 = ((int32_t)*rAh) >> nh;
+ w2 = ((int32_t)*rA) >> nl;
EV_SET_REG2(*rSh, *rS, w1, w2);
//printf("evsrws: nh %d nl %d *rSh = %08x; *rS = %08x\n", nh, nl, *rSh, *rS);
PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.RB,21.544:X:e500:evsrwu %RS,%RA,%RB:Vector Shift Right Word Unsigned
- unsigned32 w1, w2, nh, nl;
+ uint32_t w1, w2, nh, nl;
nh = *rBh & 0x1f;
nl = *rB & 0x1f;
- w1 = ((unsigned32)*rAh) >> nh;
- w2 = ((unsigned32)*rA) >> nl;
+ w1 = ((uint32_t)*rAh) >> nh;
+ w2 = ((uint32_t)*rA) >> nl;
EV_SET_REG2(*rSh, *rS, w1, w2);
PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.UIMM,21.547:X:e500:evsrwis %RS,%RA,%UIMM:Vector Shift Right Word Immediate Signed
- signed32 w1, w2;
- unsigned32 imm = UIMM;
- w1 = ((signed32)*rAh) >> imm;
- w2 = ((signed32)*rA) >> imm;
+ int32_t w1, w2;
+ uint32_t imm = UIMM;
+ w1 = ((int32_t)*rAh) >> imm;
+ w2 = ((int32_t)*rA) >> imm;
EV_SET_REG2(*rSh, *rS, w1, w2);
PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
0.4,6.RS,11.RA,16.UIMM,21.546:X:e500:evsrwiu %RS,%RA,%UIMM:Vector Shift Right Word Immediate Unsigned
- unsigned32 w1, w2, imm = UIMM;
- w1 = ((unsigned32)*rAh) >> imm;
- w2 = ((unsigned32)*rA) >> imm;
+ uint32_t w1, w2, imm = UIMM;
+ w1 = ((uint32_t)*rAh) >> imm;
+ w2 = ((uint32_t)*rA) >> imm;
EV_SET_REG2(*rSh, *rS, w1, w2);
PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
0.4,6.RS,11.RA,16.0,21.525:X:e500:evcntlzw %RS,%RA:Vector Count Leading Zeros Word
- unsigned32 w1, w2, mask, c1, c2;
+ uint32_t w1, w2, mask, c1, c2;
for (c1 = 0, mask = 0x80000000, w1 = *rAh;
!(w1 & mask) && mask != 0; mask >>= 1)
c1++;
PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
0.4,6.RS,11.RA,16.0,21.526:X:e500:evcntlsw %RS,%RA:Vector Count Leading Sign Bits Word
- unsigned32 w1, w2, mask, sign_bit, c1, c2;
+ uint32_t w1, w2, mask, sign_bit, c1, c2;
for (c1 = 0, mask = 0x80000000, w1 = *rAh, sign_bit = w1 & mask;
((w1 & mask) == sign_bit) && mask != 0;
mask >>= 1, sign_bit >>= 1)
PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
0.4,6.RS,11.RA,16.0,21.524:X:e500:evrndw %RS,%RA:Vector Round Word
- unsigned32 w1, w2;
- w1 = ((unsigned32)*rAh + 0x8000) & 0xffff0000;
- w2 = ((unsigned32)*rA + 0x8000) & 0xffff0000;
+ uint32_t w1, w2;
+ w1 = ((uint32_t)*rAh + 0x8000) & 0xffff0000;
+ w2 = ((uint32_t)*rA + 0x8000) & 0xffff0000;
EV_SET_REG2(*rSh, *rS, w1, w2);
//printf("evrndw: *rSh = %08x; *rS = %08x\n", *rSh, *rS);
PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
0.4,6.RS,11.RA,16.RB,21.556:X:e500:evmergehi %RS,%RA,%RB:Vector Merge Hi
- unsigned32 w1, w2;
+ uint32_t w1, w2;
w1 = *rAh;
w2 = *rBh;
EV_SET_REG2(*rSh, *rS, w1, w2);
PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.RB,21.557:X:e500:evmergelo %RS,%RA,%RB:Vector Merge Low
- unsigned32 w1, w2;
+ uint32_t w1, w2;
w1 = *rA;
w2 = *rB;
EV_SET_REG2(*rSh, *rS, w1, w2);
PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.RB,21.559:X:e500:evmergelohi %RS,%RA,%RB:Vector Merge Low Hi
- unsigned32 w1, w2;
+ uint32_t w1, w2;
w1 = *rA;
w2 = *rBh;
EV_SET_REG2(*rSh, *rS, w1, w2);
PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.RB,21.558:X:e500:evmergehilo %RS,%RA,%RB:Vector Merge Hi Low
- unsigned32 w1, w2;
+ uint32_t w1, w2;
w1 = *rAh;
w2 = *rB;
EV_SET_REG2(*rSh, *rS, w1, w2);
PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
0.4,6.RS,11.SIMM,16.0,21.553:X:e500:evsplati %RS,%SIMM:Vector Splat Immediate
- unsigned32 w;
+ uint32_t w;
w = SIMM & 0x1f;
if (w & 0x10)
w |= 0xffffffe0;
PPC_INSN_INT(RS_BITMASK, 0, 0);
0.4,6.RS,11.SIMM,16.0,21.555:X:e500:evsplatfi %RS,%SIMM:Vector Splat Fractional Immediate
- unsigned32 w;
+ uint32_t w;
w = SIMM << 27;
EV_SET_REG2(*rSh, *rS, w, w);
PPC_INSN_INT(RS_BITMASK, 0, 0);
0.4,6.BF,9.0,11.RA,16.RB,21.561:X:e500:evcmpgts %BF,%RA,%RB:Vector Compare Greater Than Signed
- signed32 ah, al, bh, bl;
+ int32_t ah, al, bh, bl;
int w, ch, cl;
ah = *rAh;
al = *rA;
PPC_INSN_INT_CR(0, RA_BITMASK | RB_BITMASK, BF_BITMASK);
0.4,6.BF,9.0,11.RA,16.RB,21.560:X:e500:evcmpgtu %BF,%RA,%RB:Vector Compare Greater Than Unsigned
- unsigned32 ah, al, bh, bl;
+ uint32_t ah, al, bh, bl;
int w, ch, cl;
ah = *rAh;
al = *rA;
PPC_INSN_INT_CR(0, RA_BITMASK | RB_BITMASK, BF_BITMASK);
0.4,6.BF,9.0,11.RA,16.RB,21.563:X:e500:evcmplts %BF,%RA,%RB:Vector Compare Less Than Signed
- signed32 ah, al, bh, bl;
+ int32_t ah, al, bh, bl;
int w, ch, cl;
ah = *rAh;
al = *rA;
PPC_INSN_INT_CR(0, RA_BITMASK | RB_BITMASK, BF_BITMASK);
0.4,6.BF,9.0,11.RA,16.RB,21.562:X:e500:evcmpltu %BF,%RA,%RB:Vector Compare Less Than Unsigned
- unsigned32 ah, al, bh, bl;
+ uint32_t ah, al, bh, bl;
int w, ch, cl;
ah = *rAh;
al = *rA;
PPC_INSN_INT_CR(0, RA_BITMASK | RB_BITMASK, BF_BITMASK);
0.4,6.BF,9.0,11.RA,16.RB,21.564:X:e500:evcmpeq %BF,%RA,%RB:Vector Compare Equal
- unsigned32 ah, al, bh, bl;
+ uint32_t ah, al, bh, bl;
int w, ch, cl;
ah = *rAh;
al = *rA;
PPC_INSN_INT_CR(0, RA_BITMASK | RB_BITMASK, BF_BITMASK);
0.4,6.RS,11.RA,16.RB,21.79,29.CRFS:X:e500:evsel %RS,%RA,%RB,%CRFS:Vector Select
- unsigned32 w1, w2;
+ uint32_t w1, w2;
int cr;
cr = CR_FIELD(CRFS);
if (cr & 8)
PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.RB,21.527:X:e500:brinc %RS,%RA,%RB:Bit Reversed Increment
- unsigned32 w1, w2, a, d, mask;
+ uint32_t w1, w2, a, d, mask;
mask = (*rB) & 0xffff;
a = (*rA) & 0xffff;
d = EV_BITREVERSE16(1 + EV_BITREVERSE16(a | ~mask));
#
0.4,6.RS,11.RA,16.RB,21.1031:EVX:e500:evmhossf %RS,%RA,%RB:Vector Multiply Half Words Odd Signed Saturate Fractional
- signed16 al, ah, bl, bh;
- signed32 tl, th;
+ int16_t al, ah, bl, bh;
+ int32_t tl, th;
int movl, movh;
- al = (signed16) EV_LOHALF (*rA);
- ah = (signed16) EV_LOHALF (*rAh);
- bl = (signed16) EV_LOHALF (*rB);
- bh = (signed16) EV_LOHALF (*rBh);
+ al = (int16_t) EV_LOHALF (*rA);
+ ah = (int16_t) EV_LOHALF (*rAh);
+ bl = (int16_t) EV_LOHALF (*rB);
+ bh = (int16_t) EV_LOHALF (*rBh);
tl = ev_multiply16_ssf (al, bl, &movl);
th = ev_multiply16_ssf (ah, bh, &movh);
EV_SET_REG2 (*rSh, *rS, EV_SATURATE (movh, 0x7fffffff, th),
PPC_INSN_INT_SPR (RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
0.4,6.RS,11.RA,16.RB,21.1063:EVX:e500:evmhossfa %RS,%RA,%RB:Vector Multiply Half Words Odd Signed Saturate Fractional Accumulate
- signed16 al, ah, bl, bh;
- signed32 tl, th;
+ int16_t al, ah, bl, bh;
+ int32_t tl, th;
int movl, movh;
- al = (signed16) EV_LOHALF (*rA);
- ah = (signed16) EV_LOHALF (*rAh);
- bl = (signed16) EV_LOHALF (*rB);
- bh = (signed16) EV_LOHALF (*rBh);
+ al = (int16_t) EV_LOHALF (*rA);
+ ah = (int16_t) EV_LOHALF (*rAh);
+ bl = (int16_t) EV_LOHALF (*rB);
+ bh = (int16_t) EV_LOHALF (*rBh);
tl = ev_multiply16_ssf (al, bl, &movl);
th = ev_multiply16_ssf (ah, bh, &movh);
EV_SET_REG2 (*rSh, *rS, EV_SATURATE (movh, 0x7fffffff, th),
PPC_INSN_INT_SPR (RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
0.4,6.RS,11.RA,16.RB,21.1039:EVX:e500:evmhosmf %RS,%RA,%RB:Vector Multiply Half Words Odd Signed Modulo Fractional
- signed16 al, ah, bl, bh;
- signed32 tl, th;
+ int16_t al, ah, bl, bh;
+ int32_t tl, th;
int dummy;
- al = (signed16) EV_LOHALF (*rA);
- ah = (signed16) EV_LOHALF (*rAh);
- bl = (signed16) EV_LOHALF (*rB);
- bh = (signed16) EV_LOHALF (*rBh);
+ al = (int16_t) EV_LOHALF (*rA);
+ ah = (int16_t) EV_LOHALF (*rAh);
+ bl = (int16_t) EV_LOHALF (*rB);
+ bh = (int16_t) EV_LOHALF (*rBh);
tl = ev_multiply16_smf (al, bl, & dummy);
th = ev_multiply16_smf (ah, bh, & dummy);
EV_SET_REG2 (*rSh, *rS, th, tl);
PPC_INSN_INT (RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.RB,21.1071:EVX:e500:evmhosmfa %RS,%RA,%RB:Vector Multiply Half Words Odd Signed Modulo Fractional Accumulate
- signed32 al, ah, bl, bh;
- signed32 tl, th;
+ int32_t al, ah, bl, bh;
+ int32_t tl, th;
int dummy;
- al = (signed16) EV_LOHALF (*rA);
- ah = (signed16) EV_LOHALF (*rAh);
- bl = (signed16) EV_LOHALF (*rB);
- bh = (signed16) EV_LOHALF (*rBh);
+ al = (int16_t) EV_LOHALF (*rA);
+ ah = (int16_t) EV_LOHALF (*rAh);
+ bl = (int16_t) EV_LOHALF (*rB);
+ bh = (int16_t) EV_LOHALF (*rBh);
tl = ev_multiply16_smf (al, bl, & dummy);
th = ev_multiply16_smf (ah, bh, & dummy);
EV_SET_REG2_ACC (*rSh, *rS, th, tl);
PPC_INSN_INT (RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.RB,21.1037:EVX:e500:evmhosmi %RS,%RA,%RB:Vector Multiply Half Words Odd Signed Modulo Integer
- signed32 al, ah, bl, bh, tl, th;
- al = (signed32)(signed16)EV_LOHALF(*rA);
- ah = (signed32)(signed16)EV_LOHALF(*rAh);
- bl = (signed32)(signed16)EV_LOHALF(*rB);
- bh = (signed32)(signed16)EV_LOHALF(*rBh);
+ int32_t al, ah, bl, bh, tl, th;
+ al = (int32_t)(int16_t)EV_LOHALF(*rA);
+ ah = (int32_t)(int16_t)EV_LOHALF(*rAh);
+ bl = (int32_t)(int16_t)EV_LOHALF(*rB);
+ bh = (int32_t)(int16_t)EV_LOHALF(*rBh);
tl = al * bl;
th = ah * bh;
EV_SET_REG2(*rSh, *rS, th, tl);
PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.RB,21.1069:EVX:e500:evmhosmia %RS,%RA,%RB:Vector Multiply Half Words Odd Signed Modulo Integer Accumulate
- signed32 al, ah, bl, bh, tl, th;
- al = (signed32)(signed16)EV_LOHALF(*rA);
- ah = (signed32)(signed16)EV_LOHALF(*rAh);
- bl = (signed32)(signed16)EV_LOHALF(*rB);
- bh = (signed32)(signed16)EV_LOHALF(*rBh);
+ int32_t al, ah, bl, bh, tl, th;
+ al = (int32_t)(int16_t)EV_LOHALF(*rA);
+ ah = (int32_t)(int16_t)EV_LOHALF(*rAh);
+ bl = (int32_t)(int16_t)EV_LOHALF(*rB);
+ bh = (int32_t)(int16_t)EV_LOHALF(*rBh);
tl = al * bl;
th = ah * bh;
EV_SET_REG2_ACC(*rSh, *rS, th, tl);
PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.RB,21.1036:EVX:e500:evmhoumi %RS,%RA,%RB:Vector Multiply Half Words Odd Unsigned Modulo Integer
- unsigned32 al, ah, bl, bh, tl, th;
- al = (unsigned32)(unsigned16)EV_LOHALF(*rA);
- ah = (unsigned32)(unsigned16)EV_LOHALF(*rAh);
- bl = (unsigned32)(unsigned16)EV_LOHALF(*rB);
- bh = (unsigned32)(unsigned16)EV_LOHALF(*rBh);
+ uint32_t al, ah, bl, bh, tl, th;
+ al = (uint32_t)(uint16_t)EV_LOHALF(*rA);
+ ah = (uint32_t)(uint16_t)EV_LOHALF(*rAh);
+ bl = (uint32_t)(uint16_t)EV_LOHALF(*rB);
+ bh = (uint32_t)(uint16_t)EV_LOHALF(*rBh);
tl = al * bl;
th = ah * bh;
EV_SET_REG2(*rSh, *rS, th, tl);
PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.RB,21.1068:EVX:e500:evmhoumia %RS,%RA,%RB:Vector Multiply Half Words Odd Unsigned Modulo Integer Accumulate
- unsigned32 al, ah, bl, bh, tl, th;
- al = (unsigned32)(unsigned16)EV_LOHALF(*rA);
- ah = (unsigned32)(unsigned16)EV_LOHALF(*rAh);
- bl = (unsigned32)(unsigned16)EV_LOHALF(*rB);
- bh = (unsigned32)(unsigned16)EV_LOHALF(*rBh);
+ uint32_t al, ah, bl, bh, tl, th;
+ al = (uint32_t)(uint16_t)EV_LOHALF(*rA);
+ ah = (uint32_t)(uint16_t)EV_LOHALF(*rAh);
+ bl = (uint32_t)(uint16_t)EV_LOHALF(*rB);
+ bh = (uint32_t)(uint16_t)EV_LOHALF(*rBh);
tl = al * bl;
th = ah * bh;
EV_SET_REG2_ACC(*rSh, *rS, th, tl);
PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.RB,21.1027:EVX:e500:evmhessf %RS,%RA,%RB:Vector Multiply Half Words Even Signed Saturate Fractional
- signed16 al, ah, bl, bh;
- signed32 tl, th;
+ int16_t al, ah, bl, bh;
+ int32_t tl, th;
int movl, movh;
- al = (signed16) EV_HIHALF (*rA);
- ah = (signed16) EV_HIHALF (*rAh);
- bl = (signed16) EV_HIHALF (*rB);
- bh = (signed16) EV_HIHALF (*rBh);
+ al = (int16_t) EV_HIHALF (*rA);
+ ah = (int16_t) EV_HIHALF (*rAh);
+ bl = (int16_t) EV_HIHALF (*rB);
+ bh = (int16_t) EV_HIHALF (*rBh);
tl = ev_multiply16_ssf (al, bl, &movl);
th = ev_multiply16_ssf (ah, bh, &movh);
EV_SET_REG2 (*rSh, *rS, EV_SATURATE (movh, 0x7fffffff, th),
PPC_INSN_INT_SPR (RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
0.4,6.RS,11.RA,16.RB,21.1059:EVX:e500:evmhessfa %RS,%RA,%RB:Vector Multiply Half Words Even Signed Saturate Fractional Accumulate
- signed16 al, ah, bl, bh;
- signed32 tl, th;
+ int16_t al, ah, bl, bh;
+ int32_t tl, th;
int movl, movh;
- al = (signed16) EV_HIHALF (*rA);
- ah = (signed16) EV_HIHALF (*rAh);
- bl = (signed16) EV_HIHALF (*rB);
- bh = (signed16) EV_HIHALF (*rBh);
+ al = (int16_t) EV_HIHALF (*rA);
+ ah = (int16_t) EV_HIHALF (*rAh);
+ bl = (int16_t) EV_HIHALF (*rB);
+ bh = (int16_t) EV_HIHALF (*rBh);
tl = ev_multiply16_ssf (al, bl, &movl);
th = ev_multiply16_ssf (ah, bh, &movh);
EV_SET_REG2_ACC (*rSh, *rS, EV_SATURATE (movh, 0x7fffffff, th),
PPC_INSN_INT_SPR (RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
0.4,6.RS,11.RA,16.RB,21.1035:EVX:e500:evmhesmf %RS,%RA,%RB:Vector Multiply Half Words Even Signed Modulo Fractional
- signed16 al, ah, bl, bh;
- signed64 tl, th;
+ int16_t al, ah, bl, bh;
+ int64_t tl, th;
int movl, movh;
- al = (signed16) EV_HIHALF (*rA);
- ah = (signed16) EV_HIHALF (*rAh);
- bl = (signed16) EV_HIHALF (*rB);
- bh = (signed16) EV_HIHALF (*rBh);
+ al = (int16_t) EV_HIHALF (*rA);
+ ah = (int16_t) EV_HIHALF (*rAh);
+ bl = (int16_t) EV_HIHALF (*rB);
+ bh = (int16_t) EV_HIHALF (*rBh);
tl = ev_multiply16_smf (al, bl, &movl);
th = ev_multiply16_smf (ah, bh, &movh);
EV_SET_REG2 (*rSh, *rS, th, tl);
PPC_INSN_INT_SPR (RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
0.4,6.RS,11.RA,16.RB,21.1067:EVX:e500:evmhesmfa %RS,%RA,%RB:Vector Multiply Half Words Even Signed Modulo Fractional Accumulate
- signed16 al, ah, bl, bh;
- signed32 tl, th;
+ int16_t al, ah, bl, bh;
+ int32_t tl, th;
int dummy;
- al = (signed16) EV_HIHALF (*rA);
- ah = (signed16) EV_HIHALF (*rAh);
- bl = (signed16) EV_HIHALF (*rB);
- bh = (signed16) EV_HIHALF (*rBh);
+ al = (int16_t) EV_HIHALF (*rA);
+ ah = (int16_t) EV_HIHALF (*rAh);
+ bl = (int16_t) EV_HIHALF (*rB);
+ bh = (int16_t) EV_HIHALF (*rBh);
tl = ev_multiply16_smf (al, bl, & dummy);
th = ev_multiply16_smf (ah, bh, & dummy);
EV_SET_REG2_ACC (*rSh, *rS, th, tl);
PPC_INSN_INT (RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.RB,21.1033:EVX:e500:evmhesmi %RS,%RA,%RB:Vector Multiply Half Words Even Signed Modulo Integer
- signed16 al, ah, bl, bh;
- signed32 tl, th;
+ int16_t al, ah, bl, bh;
+ int32_t tl, th;
- al = (signed16) EV_HIHALF (*rA);
- ah = (signed16) EV_HIHALF (*rAh);
- bl = (signed16) EV_HIHALF (*rB);
- bh = (signed16) EV_HIHALF (*rBh);
+ al = (int16_t) EV_HIHALF (*rA);
+ ah = (int16_t) EV_HIHALF (*rAh);
+ bl = (int16_t) EV_HIHALF (*rB);
+ bh = (int16_t) EV_HIHALF (*rBh);
tl = al * bl;
th = ah * bh;
EV_SET_REG2 (*rSh, *rS, th, tl);
PPC_INSN_INT (RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.RB,21.1065:EVX:e500:evmhesmia %RS,%RA,%RB:Vector Multiply Half Words Even Signed Modulo Integer Accumulate
- signed32 al, ah, bl, bh, tl, th;
- al = (signed32)(signed16)EV_HIHALF(*rA);
- ah = (signed32)(signed16)EV_HIHALF(*rAh);
- bl = (signed32)(signed16)EV_HIHALF(*rB);
- bh = (signed32)(signed16)EV_HIHALF(*rBh);
+ int32_t al, ah, bl, bh, tl, th;
+ al = (int32_t)(int16_t)EV_HIHALF(*rA);
+ ah = (int32_t)(int16_t)EV_HIHALF(*rAh);
+ bl = (int32_t)(int16_t)EV_HIHALF(*rB);
+ bh = (int32_t)(int16_t)EV_HIHALF(*rBh);
tl = al * bl;
th = ah * bh;
EV_SET_REG2_ACC(*rSh, *rS, th, tl);
PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.RB,21.1032:EVX:e500:evmheumi %RS,%RA,%RB:Vector Multiply Half Words Even Unsigned Modulo Integer
- unsigned32 al, ah, bl, bh, tl, th;
- al = (unsigned32)(unsigned16)EV_HIHALF(*rA);
- ah = (unsigned32)(unsigned16)EV_HIHALF(*rAh);
- bl = (unsigned32)(unsigned16)EV_HIHALF(*rB);
- bh = (unsigned32)(unsigned16)EV_HIHALF(*rBh);
+ uint32_t al, ah, bl, bh, tl, th;
+ al = (uint32_t)(uint16_t)EV_HIHALF(*rA);
+ ah = (uint32_t)(uint16_t)EV_HIHALF(*rAh);
+ bl = (uint32_t)(uint16_t)EV_HIHALF(*rB);
+ bh = (uint32_t)(uint16_t)EV_HIHALF(*rBh);
tl = al * bl;
th = ah * bh;
EV_SET_REG2(*rSh, *rS, th, tl);
PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.RB,21.1064:EVX:e500:evmheumia %RS,%RA,%RB:Vector Multiply Half Words Even Unsigned Modulo Integer Accumulate
- unsigned32 al, ah, bl, bh, tl, th;
- al = (unsigned32)(unsigned16)EV_HIHALF(*rA);
- ah = (unsigned32)(unsigned16)EV_HIHALF(*rAh);
- bl = (unsigned32)(unsigned16)EV_HIHALF(*rB);
- bh = (unsigned32)(unsigned16)EV_HIHALF(*rBh);
+ uint32_t al, ah, bl, bh, tl, th;
+ al = (uint32_t)(uint16_t)EV_HIHALF(*rA);
+ ah = (uint32_t)(uint16_t)EV_HIHALF(*rAh);
+ bl = (uint32_t)(uint16_t)EV_HIHALF(*rB);
+ bh = (uint32_t)(uint16_t)EV_HIHALF(*rBh);
tl = al * bl;
th = ah * bh;
EV_SET_REG2_ACC(*rSh, *rS, th, tl);
PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.RB,21.1287:EVX:e500:evmhossfaaw %RS,%RA,%RB:Vector Multiply Half Words Odd Signed Saturate Fractional and Accumulate into Words
- signed16 al, ah, bl, bh;
- signed32 t1, t2;
- signed64 tl, th;
+ int16_t al, ah, bl, bh;
+ int32_t t1, t2;
+ int64_t tl, th;
int movl, movh, ovl, ovh;
- al = (signed16) EV_LOHALF (*rA);
- ah = (signed16) EV_LOHALF (*rAh);
- bl = (signed16) EV_LOHALF (*rB);
- bh = (signed16) EV_LOHALF (*rBh);
+ al = (int16_t) EV_LOHALF (*rA);
+ ah = (int16_t) EV_LOHALF (*rAh);
+ bl = (int16_t) EV_LOHALF (*rB);
+ bh = (int16_t) EV_LOHALF (*rBh);
t1 = ev_multiply16_ssf (ah, bh, &movh);
t2 = ev_multiply16_ssf (al, bl, &movl);
th = EV_ACCHIGH + EV_SATURATE (movh, 0x7fffffff, t1);
PPC_INSN_INT_SPR (RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
0.4,6.RS,11.RA,16.RB,21.1285:EVX:e500:evmhossiaaw %RS,%RA,%RB:Vector Multiply Half Words Odd Signed Saturate Integer and Accumulate into Words
- signed32 al, ah, bl, bh;
- signed64 t1, t2, tl, th;
+ int32_t al, ah, bl, bh;
+ int64_t t1, t2, tl, th;
int ovl, ovh;
- al = (signed32)(signed16)EV_LOHALF(*rA);
- ah = (signed32)(signed16)EV_LOHALF(*rAh);
- bl = (signed32)(signed16)EV_LOHALF(*rB);
- bh = (signed32)(signed16)EV_LOHALF(*rBh);
+ al = (int32_t)(int16_t)EV_LOHALF(*rA);
+ ah = (int32_t)(int16_t)EV_LOHALF(*rAh);
+ bl = (int32_t)(int16_t)EV_LOHALF(*rB);
+ bh = (int32_t)(int16_t)EV_LOHALF(*rBh);
t1 = ah * bh;
t2 = al * bl;
th = EV_ACCHIGH + t1;
PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
0.4,6.RS,11.RA,16.RB,21.1295:EVX:e500:evmhosmfaaw %RS,%RA,%RB:Vector Multiply Half Words Odd Signed Modulo Fractional and Accumulate into Words
- signed32 al, ah, bl, bh;
- signed64 t1, t2, tl, th;
- al = (signed32)(signed16)EV_LOHALF(*rA);
- ah = (signed32)(signed16)EV_LOHALF(*rAh);
- bl = (signed32)(signed16)EV_LOHALF(*rB);
- bh = (signed32)(signed16)EV_LOHALF(*rBh);
- t1 = ((signed64)ah * bh) << 1;
- t2 = ((signed64)al * bl) << 1;
+ int32_t al, ah, bl, bh;
+ int64_t t1, t2, tl, th;
+ al = (int32_t)(int16_t)EV_LOHALF(*rA);
+ ah = (int32_t)(int16_t)EV_LOHALF(*rAh);
+ bl = (int32_t)(int16_t)EV_LOHALF(*rB);
+ bh = (int32_t)(int16_t)EV_LOHALF(*rBh);
+ t1 = ((int64_t)ah * bh) << 1;
+ t2 = ((int64_t)al * bl) << 1;
th = EV_ACCHIGH + (t1 & 0xffffffff);
tl = EV_ACCLOW + (t2 & 0xffffffff);
EV_SET_REG2_ACC(*rSh, *rS, th & 0xffffffff, tl & 0xffffffff);
PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.RB,21.1293:EVX:e500:evmhosmiaaw %RS,%RA,%RB:Vector Multiply Half Words Odd Signed Modulo Integer and Accumulate into Words
- signed32 al, ah, bl, bh;
- signed64 t1, t2, tl, th;
- al = (signed32)(signed16)EV_LOHALF(*rA);
- ah = (signed32)(signed16)EV_LOHALF(*rAh);
- bl = (signed32)(signed16)EV_LOHALF(*rB);
- bh = (signed32)(signed16)EV_LOHALF(*rBh);
+ int32_t al, ah, bl, bh;
+ int64_t t1, t2, tl, th;
+ al = (int32_t)(int16_t)EV_LOHALF(*rA);
+ ah = (int32_t)(int16_t)EV_LOHALF(*rAh);
+ bl = (int32_t)(int16_t)EV_LOHALF(*rB);
+ bh = (int32_t)(int16_t)EV_LOHALF(*rBh);
t1 = ah * bh;
t2 = al * bl;
th = EV_ACCHIGH + t1;
PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.RB,21.1284:EVX:e500:evmhousiaaw %RS,%RA,%RB:Vector Multiply Half Words Odd Unsigned Saturate Integer and Accumulate into Words
- unsigned32 al, ah, bl, bh;
- unsigned64 t1, t2;
- signed64 tl, th;
+ uint32_t al, ah, bl, bh;
+ uint64_t t1, t2;
+ int64_t tl, th;
int ovl, ovh;
- al = (unsigned32)(unsigned16)EV_LOHALF(*rA);
- ah = (unsigned32)(unsigned16)EV_LOHALF(*rAh);
- bl = (unsigned32)(unsigned16)EV_LOHALF(*rB);
- bh = (unsigned32)(unsigned16)EV_LOHALF(*rBh);
+ al = (uint32_t)(uint16_t)EV_LOHALF(*rA);
+ ah = (uint32_t)(uint16_t)EV_LOHALF(*rAh);
+ bl = (uint32_t)(uint16_t)EV_LOHALF(*rB);
+ bh = (uint32_t)(uint16_t)EV_LOHALF(*rBh);
t1 = ah * bh;
t2 = al * bl;
- th = (signed64)EV_ACCHIGH + (signed64)t1;
- tl = (signed64)EV_ACCLOW + (signed64)t2;
+ th = (int64_t)EV_ACCHIGH + (int64_t)t1;
+ tl = (int64_t)EV_ACCLOW + (int64_t)t2;
ovh = EV_SAT_P_U32(th);
ovl = EV_SAT_P_U32(tl);
EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE_ACC(ovh, th, 0, 0xffffffff, th),
PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
0.4,6.RS,11.RA,16.RB,21.1292:EVX:e500:evmhoumiaaw %RS,%RA,%RB:Vector Multiply Half Words Odd Unsigned Modulo Integer and Accumulate into Words
- unsigned32 al, ah, bl, bh;
- unsigned32 t1, t2;
- signed64 tl, th;
- al = (unsigned32)(unsigned16)EV_LOHALF(*rA);
- ah = (unsigned32)(unsigned16)EV_LOHALF(*rAh);
- bl = (unsigned32)(unsigned16)EV_LOHALF(*rB);
- bh = (unsigned32)(unsigned16)EV_LOHALF(*rBh);
+ uint32_t al, ah, bl, bh;
+ uint32_t t1, t2;
+ int64_t tl, th;
+ al = (uint32_t)(uint16_t)EV_LOHALF(*rA);
+ ah = (uint32_t)(uint16_t)EV_LOHALF(*rAh);
+ bl = (uint32_t)(uint16_t)EV_LOHALF(*rB);
+ bh = (uint32_t)(uint16_t)EV_LOHALF(*rBh);
t1 = ah * bh;
t2 = al * bl;
th = EV_ACCHIGH + t1;
PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.RB,21.1283:EVX:e500:evmhessfaaw %RS,%RA,%RB:Vector Multiply Half Words Even Signed Saturate Fractional and Accumulate into Words
- signed16 al, ah, bl, bh;
- signed32 t1, t2;
- signed64 tl, th;
+ int16_t al, ah, bl, bh;
+ int32_t t1, t2;
+ int64_t tl, th;
int movl, movh, ovl, ovh;
- al = (signed16) EV_HIHALF (*rA);
- ah = (signed16) EV_HIHALF (*rAh);
- bl = (signed16) EV_HIHALF (*rB);
- bh = (signed16) EV_HIHALF (*rBh);
+ al = (int16_t) EV_HIHALF (*rA);
+ ah = (int16_t) EV_HIHALF (*rAh);
+ bl = (int16_t) EV_HIHALF (*rB);
+ bh = (int16_t) EV_HIHALF (*rBh);
t1 = ev_multiply16_ssf (ah, bh, &movh);
t2 = ev_multiply16_ssf (al, bl, &movl);
th = EV_ACCHIGH + EV_SATURATE (movh, 0x7fffffff, t1);
PPC_INSN_INT_SPR (RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
0.4,6.RS,11.RA,16.RB,21.1281:EVX:e500:evmhessiaaw %RS,%RA,%RB:Vector Multiply Half Words Even Signed Saturate Integer and Accumulate into Words
- signed32 al, ah, bl, bh;
- signed64 t1, t2, tl, th;
+ int32_t al, ah, bl, bh;
+ int64_t t1, t2, tl, th;
int ovl, ovh;
- al = (signed32)(signed16)EV_HIHALF(*rA);
- ah = (signed32)(signed16)EV_HIHALF(*rAh);
- bl = (signed32)(signed16)EV_HIHALF(*rB);
- bh = (signed32)(signed16)EV_HIHALF(*rBh);
+ al = (int32_t)(int16_t)EV_HIHALF(*rA);
+ ah = (int32_t)(int16_t)EV_HIHALF(*rAh);
+ bl = (int32_t)(int16_t)EV_HIHALF(*rB);
+ bh = (int32_t)(int16_t)EV_HIHALF(*rBh);
t1 = ah * bh;
t2 = al * bl;
th = EV_ACCHIGH + t1;
PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
0.4,6.RS,11.RA,16.RB,21.1291:EVX:e500:evmhesmfaaw %RS,%RA,%RB:Vector Multiply Half Words Even Signed Modulo Fractional and Accumulate into Words
- signed16 al, ah, bl, bh;
- signed32 t1, t2, th, tl;
+ int16_t al, ah, bl, bh;
+ int32_t t1, t2, th, tl;
int dummy;
- al = (signed16)EV_HIHALF(*rA);
- ah = (signed16)EV_HIHALF(*rAh);
- bl = (signed16)EV_HIHALF(*rB);
- bh = (signed16)EV_HIHALF(*rBh);
+ al = (int16_t)EV_HIHALF(*rA);
+ ah = (int16_t)EV_HIHALF(*rAh);
+ bl = (int16_t)EV_HIHALF(*rB);
+ bh = (int16_t)EV_HIHALF(*rBh);
t1 = ev_multiply16_smf (ah, bh, &dummy);
t2 = ev_multiply16_smf (al, bl, &dummy);
th = EV_ACCHIGH + t1;
PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.RB,21.1289:EVX:e500:evmhesmiaaw %RS,%RA,%RB:Vector Multiply Half Words Even Signed Modulo Integer and Accumulate into Words
- signed32 al, ah, bl, bh;
- signed64 t1, t2, tl, th;
- al = (signed32)(signed16)EV_HIHALF(*rA);
- ah = (signed32)(signed16)EV_HIHALF(*rAh);
- bl = (signed32)(signed16)EV_HIHALF(*rB);
- bh = (signed32)(signed16)EV_HIHALF(*rBh);
+ int32_t al, ah, bl, bh;
+ int64_t t1, t2, tl, th;
+ al = (int32_t)(int16_t)EV_HIHALF(*rA);
+ ah = (int32_t)(int16_t)EV_HIHALF(*rAh);
+ bl = (int32_t)(int16_t)EV_HIHALF(*rB);
+ bh = (int32_t)(int16_t)EV_HIHALF(*rBh);
t1 = ah * bh;
t2 = al * bl;
th = EV_ACCHIGH + t1;
PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.RB,21.1280:EVX:e500:evmheusiaaw %RS,%RA,%RB:Vector Multiply Half Words Even Unsigned Saturate Integer and Accumulate into Words
- unsigned32 al, ah, bl, bh;
- unsigned64 t1, t2;
- signed64 tl, th;
+ uint32_t al, ah, bl, bh;
+ uint64_t t1, t2;
+ int64_t tl, th;
int ovl, ovh;
- al = (unsigned32)(unsigned16)EV_HIHALF(*rA);
- ah = (unsigned32)(unsigned16)EV_HIHALF(*rAh);
- bl = (unsigned32)(unsigned16)EV_HIHALF(*rB);
- bh = (unsigned32)(unsigned16)EV_HIHALF(*rBh);
+ al = (uint32_t)(uint16_t)EV_HIHALF(*rA);
+ ah = (uint32_t)(uint16_t)EV_HIHALF(*rAh);
+ bl = (uint32_t)(uint16_t)EV_HIHALF(*rB);
+ bh = (uint32_t)(uint16_t)EV_HIHALF(*rBh);
t1 = ah * bh;
t2 = al * bl;
- th = (signed64)EV_ACCHIGH + (signed64)t1;
- tl = (signed64)EV_ACCLOW + (signed64)t2;
+ th = (int64_t)EV_ACCHIGH + (int64_t)t1;
+ tl = (int64_t)EV_ACCLOW + (int64_t)t2;
ovh = EV_SAT_P_U32(th);
ovl = EV_SAT_P_U32(tl);
EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE_ACC(ovh, th, 0, 0xffffffff, th),
PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
0.4,6.RS,11.RA,16.RB,21.1288:EVX:e500:evmheumiaaw %RS,%RA,%RB:Vector Multiply Half Words Even Unsigned Modulo Integer and Accumulate into Words
- unsigned32 al, ah, bl, bh;
- unsigned32 t1, t2;
- unsigned64 tl, th;
- al = (unsigned32)(unsigned16)EV_HIHALF(*rA);
- ah = (unsigned32)(unsigned16)EV_HIHALF(*rAh);
- bl = (unsigned32)(unsigned16)EV_HIHALF(*rB);
- bh = (unsigned32)(unsigned16)EV_HIHALF(*rBh);
+ uint32_t al, ah, bl, bh;
+ uint32_t t1, t2;
+ uint64_t tl, th;
+ al = (uint32_t)(uint16_t)EV_HIHALF(*rA);
+ ah = (uint32_t)(uint16_t)EV_HIHALF(*rAh);
+ bl = (uint32_t)(uint16_t)EV_HIHALF(*rB);
+ bh = (uint32_t)(uint16_t)EV_HIHALF(*rBh);
t1 = ah * bh;
t2 = al * bl;
th = EV_ACCHIGH + t1;
0.4,6.RS,11.RA,16.RB,21.1415:EVX:e500:evmhossfanw %RS,%RA,%RB:Vector Multiply Half Words Odd Signed Saturate Fractional and Accumulate Negative into Words
- signed16 al, ah, bl, bh;
- signed32 t1, t2;
- signed64 tl, th;
+ int16_t al, ah, bl, bh;
+ int32_t t1, t2;
+ int64_t tl, th;
int movl, movh, ovl, ovh;
- al = (signed16) EV_LOHALF (*rA);
- ah = (signed16) EV_LOHALF (*rAh);
- bl = (signed16) EV_LOHALF (*rB);
- bh = (signed16) EV_LOHALF (*rBh);
+ al = (int16_t) EV_LOHALF (*rA);
+ ah = (int16_t) EV_LOHALF (*rAh);
+ bl = (int16_t) EV_LOHALF (*rB);
+ bh = (int16_t) EV_LOHALF (*rBh);
t1 = ev_multiply16_ssf (ah, bh, &movh);
t2 = ev_multiply16_ssf (al, bl, &movl);
th = EV_ACCHIGH - EV_SATURATE (movh, 0x7fffffff, t1);
PPC_INSN_INT_SPR (RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
0.4,6.RS,11.RA,16.RB,21.1413:EVX:e500:evmhossianw %RS,%RA,%RB:Vector Multiply Half Words Odd Signed Saturate Integer and Accumulate Negative into Words
- signed32 al, ah, bl, bh;
- signed64 t1, t2, tl, th;
+ int32_t al, ah, bl, bh;
+ int64_t t1, t2, tl, th;
int ovl, ovh;
- al = (signed32)(signed16)EV_LOHALF(*rA);
- ah = (signed32)(signed16)EV_LOHALF(*rAh);
- bl = (signed32)(signed16)EV_LOHALF(*rB);
- bh = (signed32)(signed16)EV_LOHALF(*rBh);
+ al = (int32_t)(int16_t)EV_LOHALF(*rA);
+ ah = (int32_t)(int16_t)EV_LOHALF(*rAh);
+ bl = (int32_t)(int16_t)EV_LOHALF(*rB);
+ bh = (int32_t)(int16_t)EV_LOHALF(*rBh);
t1 = ah * bh;
t2 = al * bl;
th = EV_ACCHIGH - t1;
PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
0.4,6.RS,11.RA,16.RB,21.1423:EVX:e500:evmhosmfanw %RS,%RA,%RB:Vector Multiply Half Words Odd Signed Modulo Fractional and Accumulate Negative into Words
- signed32 al, ah, bl, bh;
- signed64 t1, t2, tl, th;
- al = (signed32)(signed16)EV_LOHALF(*rA);
- ah = (signed32)(signed16)EV_LOHALF(*rAh);
- bl = (signed32)(signed16)EV_LOHALF(*rB);
- bh = (signed32)(signed16)EV_LOHALF(*rBh);
- t1 = ((signed64)ah * bh) << 1;
- t2 = ((signed64)al * bl) << 1;
+ int32_t al, ah, bl, bh;
+ int64_t t1, t2, tl, th;
+ al = (int32_t)(int16_t)EV_LOHALF(*rA);
+ ah = (int32_t)(int16_t)EV_LOHALF(*rAh);
+ bl = (int32_t)(int16_t)EV_LOHALF(*rB);
+ bh = (int32_t)(int16_t)EV_LOHALF(*rBh);
+ t1 = ((int64_t)ah * bh) << 1;
+ t2 = ((int64_t)al * bl) << 1;
th = EV_ACCHIGH - (t1 & 0xffffffff);
tl = EV_ACCLOW - (t2 & 0xffffffff);
EV_SET_REG2_ACC(*rSh, *rS, th & 0xffffffff, tl & 0xffffffff);
PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.RB,21.1421:EVX:e500:evmhosmianw %RS,%RA,%RB:Vector Multiply Half Words Odd Signed Modulo Integer and Accumulate Negative into Words
- signed32 al, ah, bl, bh;
- signed64 t1, t2, tl, th;
- al = (signed32)(signed16)EV_LOHALF(*rA);
- ah = (signed32)(signed16)EV_LOHALF(*rAh);
- bl = (signed32)(signed16)EV_LOHALF(*rB);
- bh = (signed32)(signed16)EV_LOHALF(*rBh);
+ int32_t al, ah, bl, bh;
+ int64_t t1, t2, tl, th;
+ al = (int32_t)(int16_t)EV_LOHALF(*rA);
+ ah = (int32_t)(int16_t)EV_LOHALF(*rAh);
+ bl = (int32_t)(int16_t)EV_LOHALF(*rB);
+ bh = (int32_t)(int16_t)EV_LOHALF(*rBh);
t1 = ah * bh;
t2 = al * bl;
th = EV_ACCHIGH - t1;
PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.RB,21.1412:EVX:e500:evmhousianw %RS,%RA,%RB:Vector Multiply Half Words Odd Unsigned Saturate Integer and Accumulate Negative into Words
- unsigned32 al, ah, bl, bh;
- unsigned64 t1, t2;
- signed64 tl, th;
+ uint32_t al, ah, bl, bh;
+ uint64_t t1, t2;
+ int64_t tl, th;
int ovl, ovh;
- al = (unsigned32)(unsigned16)EV_LOHALF(*rA);
- ah = (unsigned32)(unsigned16)EV_LOHALF(*rAh);
- bl = (unsigned32)(unsigned16)EV_LOHALF(*rB);
- bh = (unsigned32)(unsigned16)EV_LOHALF(*rBh);
+ al = (uint32_t)(uint16_t)EV_LOHALF(*rA);
+ ah = (uint32_t)(uint16_t)EV_LOHALF(*rAh);
+ bl = (uint32_t)(uint16_t)EV_LOHALF(*rB);
+ bh = (uint32_t)(uint16_t)EV_LOHALF(*rBh);
t1 = ah * bh;
t2 = al * bl;
- th = (signed64)EV_ACCHIGH - (signed64)t1;
- tl = (signed64)EV_ACCLOW - (signed64)t2;
+ th = (int64_t)EV_ACCHIGH - (int64_t)t1;
+ tl = (int64_t)EV_ACCLOW - (int64_t)t2;
ovl = EV_SAT_P_U32(tl);
ovh = EV_SAT_P_U32(th);
EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE_ACC(ovh, th, 0, 0xffffffff, th),
PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
0.4,6.RS,11.RA,16.RB,21.1420:EVX:e500:evmhoumianw %RS,%RA,%RB:Vector Multiply Half Words Odd Unsigned Modulo Integer and Accumulate Negative into Words
- unsigned32 al, ah, bl, bh;
- unsigned32 t1, t2;
- unsigned64 tl, th;
- al = (unsigned32)(unsigned16)EV_LOHALF(*rA);
- ah = (unsigned32)(unsigned16)EV_LOHALF(*rAh);
- bl = (unsigned32)(unsigned16)EV_LOHALF(*rB);
- bh = (unsigned32)(unsigned16)EV_LOHALF(*rBh);
+ uint32_t al, ah, bl, bh;
+ uint32_t t1, t2;
+ uint64_t tl, th;
+ al = (uint32_t)(uint16_t)EV_LOHALF(*rA);
+ ah = (uint32_t)(uint16_t)EV_LOHALF(*rAh);
+ bl = (uint32_t)(uint16_t)EV_LOHALF(*rB);
+ bh = (uint32_t)(uint16_t)EV_LOHALF(*rBh);
t1 = ah * bh;
t2 = al * bl;
th = EV_ACCHIGH - t1;
PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.RB,21.1411:EVX:e500:evmhessfanw %RS,%RA,%RB:Vector Multiply Half Words Even Signed Saturate Fractional and Accumulate Negative into Words
- signed16 al, ah, bl, bh;
- signed32 t1, t2;
- signed64 tl, th;
+ int16_t al, ah, bl, bh;
+ int32_t t1, t2;
+ int64_t tl, th;
int movl, movh, ovl, ovh;
- al = (signed16) EV_HIHALF (*rA);
- ah = (signed16) EV_HIHALF (*rAh);
- bl = (signed16) EV_HIHALF (*rB);
- bh = (signed16) EV_HIHALF (*rBh);
+ al = (int16_t) EV_HIHALF (*rA);
+ ah = (int16_t) EV_HIHALF (*rAh);
+ bl = (int16_t) EV_HIHALF (*rB);
+ bh = (int16_t) EV_HIHALF (*rBh);
t1 = ev_multiply16_ssf (ah, bh, &movh);
t2 = ev_multiply16_ssf (al, bl, &movl);
th = EV_ACCHIGH - EV_SATURATE (movh, 0x7fffffff, t1);
PPC_INSN_INT_SPR (RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
0.4,6.RS,11.RA,16.RB,21.1409:EVX:e500:evmhessianw %RS,%RA,%RB:Vector Multiply Half Words Even Signed Saturate Integer and Accumulate Negative into Words
- signed32 al, ah, bl, bh;
- signed64 t1, t2, tl, th;
+ int32_t al, ah, bl, bh;
+ int64_t t1, t2, tl, th;
int ovl, ovh;
- al = (signed32)(signed16)EV_HIHALF(*rA);
- ah = (signed32)(signed16)EV_HIHALF(*rAh);
- bl = (signed32)(signed16)EV_HIHALF(*rB);
- bh = (signed32)(signed16)EV_HIHALF(*rBh);
+ al = (int32_t)(int16_t)EV_HIHALF(*rA);
+ ah = (int32_t)(int16_t)EV_HIHALF(*rAh);
+ bl = (int32_t)(int16_t)EV_HIHALF(*rB);
+ bh = (int32_t)(int16_t)EV_HIHALF(*rBh);
t1 = ah * bh;
t2 = al * bl;
th = EV_ACCHIGH - t1;
PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
0.4,6.RS,11.RA,16.RB,21.1419:EVX:e500:evmhesmfanw %RS,%RA,%RB:Vector Multiply Half Words Even Signed Modulo Fractional and Accumulate Negative into Words
- signed32 al, ah, bl, bh;
- signed64 t1, t2, tl, th;
- al = (unsigned32)(unsigned16)EV_HIHALF(*rA);
- ah = (unsigned32)(unsigned16)EV_HIHALF(*rAh);
- bl = (unsigned32)(unsigned16)EV_HIHALF(*rB);
- bh = (unsigned32)(unsigned16)EV_HIHALF(*rBh);
- t1 = ((signed64)ah * bh) << 1;
- t2 = ((signed64)al * bl) << 1;
+ int32_t al, ah, bl, bh;
+ int64_t t1, t2, tl, th;
+ al = (int32_t)(int16_t)EV_HIHALF(*rA);
+ ah = (int32_t)(int16_t)EV_HIHALF(*rAh);
+ bl = (int32_t)(int16_t)EV_HIHALF(*rB);
+ bh = (int32_t)(int16_t)EV_HIHALF(*rBh);
+ t1 = ((int64_t)ah * bh) << 1;
+ t2 = ((int64_t)al * bl) << 1;
th = EV_ACCHIGH - (t1 & 0xffffffff);
tl = EV_ACCLOW - (t2 & 0xffffffff);
EV_SET_REG2_ACC(*rSh, *rS, th & 0xffffffff, tl & 0xffffffff);
PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.RB,21.1417:EVX:e500:evmhesmianw %RS,%RA,%RB:Vector Multiply Half Words Even Signed Modulo Integer and Accumulate Negative into Words
- signed32 al, ah, bl, bh;
- signed64 t1, t2, tl, th;
- al = (signed32)(signed16)EV_HIHALF(*rA);
- ah = (signed32)(signed16)EV_HIHALF(*rAh);
- bl = (signed32)(signed16)EV_HIHALF(*rB);
- bh = (signed32)(signed16)EV_HIHALF(*rBh);
+ int32_t al, ah, bl, bh;
+ int64_t t1, t2, tl, th;
+ al = (int32_t)(int16_t)EV_HIHALF(*rA);
+ ah = (int32_t)(int16_t)EV_HIHALF(*rAh);
+ bl = (int32_t)(int16_t)EV_HIHALF(*rB);
+ bh = (int32_t)(int16_t)EV_HIHALF(*rBh);
t1 = ah * bh;
t2 = al * bl;
th = EV_ACCHIGH - t1;
PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.RB,21.1408:EVX:e500:evmheusianw %RS,%RA,%RB:Vector Multiply Half Words Even Unsigned Saturate Integer and Accumulate Negative into Words
- unsigned32 al, ah, bl, bh;
- unsigned64 t1, t2;
- signed64 tl, th;
+ uint32_t al, ah, bl, bh;
+ uint64_t t1, t2;
+ int64_t tl, th;
int ovl, ovh;
- al = (unsigned32)(unsigned16)EV_HIHALF(*rA);
- ah = (unsigned32)(unsigned16)EV_HIHALF(*rAh);
- bl = (unsigned32)(unsigned16)EV_HIHALF(*rB);
- bh = (unsigned32)(unsigned16)EV_HIHALF(*rBh);
+ al = (uint32_t)(uint16_t)EV_HIHALF(*rA);
+ ah = (uint32_t)(uint16_t)EV_HIHALF(*rAh);
+ bl = (uint32_t)(uint16_t)EV_HIHALF(*rB);
+ bh = (uint32_t)(uint16_t)EV_HIHALF(*rBh);
t1 = ah * bh;
t2 = al * bl;
- th = (signed64)EV_ACCHIGH - (signed64)t1;
- tl = (signed64)EV_ACCLOW - (signed64)t2;
+ th = (int64_t)EV_ACCHIGH - (int64_t)t1;
+ tl = (int64_t)EV_ACCLOW - (int64_t)t2;
ovl = EV_SAT_P_U32(tl);
ovh = EV_SAT_P_U32(th);
EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE_ACC(ovh, th, 0, 0xffffffff, th),
PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
0.4,6.RS,11.RA,16.RB,21.1416:EVX:e500:evmheumianw %RS,%RA,%RB:Vector Multiply Half Words Even Unsigned Modulo Integer and Accumulate Negative into Words
- unsigned32 al, ah, bl, bh;
- unsigned32 t1, t2;
- unsigned64 tl, th;
- al = (unsigned32)(unsigned16)EV_HIHALF(*rA);
- ah = (unsigned32)(unsigned16)EV_HIHALF(*rAh);
- bl = (unsigned32)(unsigned16)EV_HIHALF(*rB);
- bh = (unsigned32)(unsigned16)EV_HIHALF(*rBh);
+ uint32_t al, ah, bl, bh;
+ uint32_t t1, t2;
+ uint64_t tl, th;
+ al = (uint32_t)(uint16_t)EV_HIHALF(*rA);
+ ah = (uint32_t)(uint16_t)EV_HIHALF(*rAh);
+ bl = (uint32_t)(uint16_t)EV_HIHALF(*rB);
+ bh = (uint32_t)(uint16_t)EV_HIHALF(*rBh);
t1 = ah * bh;
t2 = al * bl;
th = EV_ACCHIGH - t1;
PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.RB,21.1327:EVX:e500:evmhogsmfaa %RS,%RA,%RB:Multiply Half Words Odd Guarded Signed Modulo Fractional and Accumulate
- signed32 a, b;
- signed64 t1, t2;
- a = (signed32)(signed16)EV_LOHALF(*rA);
- b = (signed32)(signed16)EV_LOHALF(*rB);
+ int32_t a, b;
+ int64_t t1, t2;
+ a = (int32_t)(int16_t)EV_LOHALF(*rA);
+ b = (int32_t)(int16_t)EV_LOHALF(*rB);
t1 = EV_MUL16_SSF(a, b);
- if (t1 & ((unsigned64)1 << 32))
+ if (t1 & ((uint64_t)1 << 32))
t1 |= 0xfffffffe00000000;
t2 = ACC + t1;
EV_SET_REG1_ACC(*rSh, *rS, t2);
PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.RB,21.1325:EVX:e500:evmhogsmiaa %RS,%RA,%RB:Multiply Half Words Odd Guarded Signed Modulo Integer and Accumulate
- signed32 a, b;
- signed64 t1, t2;
- a = (signed32)(signed16)EV_LOHALF(*rA);
- b = (signed32)(signed16)EV_LOHALF(*rB);
- t1 = (signed64)a * (signed64)b;
- t2 = (signed64)ACC + t1;
+ int32_t a, b;
+ int64_t t1, t2;
+ a = (int32_t)(int16_t)EV_LOHALF(*rA);
+ b = (int32_t)(int16_t)EV_LOHALF(*rB);
+ t1 = (int64_t)a * (int64_t)b;
+ t2 = (int64_t)ACC + t1;
EV_SET_REG1_ACC(*rSh, *rS, t2);
//printf("evmhogsmiaa: a %d b %d t1 %qd t2 %qd\n", a, b, t1, t2);
//printf("evmhogsmiaa: ACC = %08x.%08x; *rSh = %08x; *rS = %08x\n", (int)(ACC >> 32), (int)ACC, *rSh, *rS);
PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.RB,21.1324:EVX:e500:evmhogumiaa %RS,%RA,%RB:Multiply Half Words Odd Guarded Unsigned Modulo Integer and Accumulate
- unsigned32 a, b;
- unsigned64 t1, t2;
- a = (unsigned32)(unsigned16)EV_LOHALF(*rA);
- b = (unsigned32)(unsigned16)EV_LOHALF(*rB);
+ uint32_t a, b;
+ uint64_t t1, t2;
+ a = (uint32_t)(uint16_t)EV_LOHALF(*rA);
+ b = (uint32_t)(uint16_t)EV_LOHALF(*rB);
t1 = a * b;
t2 = ACC + t1;
EV_SET_REG1_ACC(*rSh, *rS, t2);
PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.RB,21.1323:EVX:e500:evmhegsmfaa %RS,%RA,%RB:Multiply Half Words Even Guarded Signed Modulo Fractional and Accumulate
- signed32 a, b;
- signed64 t1, t2;
- a = (signed32)(signed16)EV_HIHALF(*rA);
- b = (signed32)(signed16)EV_HIHALF(*rB);
+ int32_t a, b;
+ int64_t t1, t2;
+ a = (int32_t)(int16_t)EV_HIHALF(*rA);
+ b = (int32_t)(int16_t)EV_HIHALF(*rB);
t1 = EV_MUL16_SSF(a, b);
- if (t1 & ((unsigned64)1 << 32))
+ if (t1 & ((uint64_t)1 << 32))
t1 |= 0xfffffffe00000000;
t2 = ACC + t1;
EV_SET_REG1_ACC(*rSh, *rS, t2);
PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.RB,21.1321:EVX:e500:evmhegsmiaa %RS,%RA,%RB:Multiply Half Words Even Guarded Signed Modulo Integer and Accumulate
- signed32 a, b;
- signed64 t1, t2;
- a = (signed32)(signed16)EV_HIHALF(*rA);
- b = (signed32)(signed16)EV_HIHALF(*rB);
- t1 = (signed64)(a * b);
+ int32_t a, b;
+ int64_t t1, t2;
+ a = (int32_t)(int16_t)EV_HIHALF(*rA);
+ b = (int32_t)(int16_t)EV_HIHALF(*rB);
+ t1 = (int64_t)a * (int64_t)b;
t2 = ACC + t1;
EV_SET_REG1_ACC(*rSh, *rS, t2);
PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.RB,21.1320:EVX:e500:evmhegumiaa %RS,%RA,%RB:Multiply Half Words Even Guarded Unsigned Modulo Integer and Accumulate
- unsigned32 a, b;
- unsigned64 t1, t2;
- a = (unsigned32)(unsigned16)EV_HIHALF(*rA);
- b = (unsigned32)(unsigned16)EV_HIHALF(*rB);
+ uint32_t a, b;
+ uint64_t t1, t2;
+ a = (uint32_t)(uint16_t)EV_HIHALF(*rA);
+ b = (uint32_t)(uint16_t)EV_HIHALF(*rB);
t1 = a * b;
t2 = ACC + t1;
EV_SET_REG1_ACC(*rSh, *rS, t2);
0.4,6.RS,11.RA,16.RB,21.1455:EVX:e500:evmhogsmfan %RS,%RA,%RB:Multiply Half Words Odd Guarded Signed Modulo Fractional and Accumulate Negative
- signed32 a, b;
- signed64 t1, t2;
- a = (signed32)(signed16)EV_LOHALF(*rA);
- b = (signed32)(signed16)EV_LOHALF(*rB);
+ int32_t a, b;
+ int64_t t1, t2;
+ a = (int32_t)(int16_t)EV_LOHALF(*rA);
+ b = (int32_t)(int16_t)EV_LOHALF(*rB);
t1 = EV_MUL16_SSF(a, b);
- if (t1 & ((unsigned64)1 << 32))
+ if (t1 & ((uint64_t)1 << 32))
t1 |= 0xfffffffe00000000;
t2 = ACC - t1;
EV_SET_REG1_ACC(*rSh, *rS, t2);
PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.RB,21.1453:EVX:e500:evmhogsmian %RS,%RA,%RB:Multiply Half Words Odd Guarded Signed Modulo Integer and Accumulate Negative
- signed32 a, b;
- signed64 t1, t2;
- a = (signed32)(signed16)EV_LOHALF(*rA);
- b = (signed32)(signed16)EV_LOHALF(*rB);
- t1 = (signed64)a * (signed64)b;
+ int32_t a, b;
+ int64_t t1, t2;
+ a = (int32_t)(int16_t)EV_LOHALF(*rA);
+ b = (int32_t)(int16_t)EV_LOHALF(*rB);
+ t1 = (int64_t)a * (int64_t)b;
t2 = ACC - t1;
EV_SET_REG1_ACC(*rSh, *rS, t2);
//printf("evmhogsmian: a %d b %d t1 %qd t2 %qd\n", a, b, t1, t2);
PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.RB,21.1452:EVX:e500:evmhogumian %RS,%RA,%RB:Multiply Half Words Odd Guarded Unsigned Modulo Integer and Accumulate Negative
- unsigned32 a, b;
- unsigned64 t1, t2;
- a = (unsigned32)(unsigned16)EV_LOHALF(*rA);
- b = (unsigned32)(unsigned16)EV_LOHALF(*rB);
- t1 = (unsigned64)a * (unsigned64)b;
+ uint32_t a, b;
+ uint64_t t1, t2;
+ a = (uint32_t)(uint16_t)EV_LOHALF(*rA);
+ b = (uint32_t)(uint16_t)EV_LOHALF(*rB);
+ t1 = (uint64_t)a * (uint64_t)b;
t2 = ACC - t1;
EV_SET_REG1_ACC(*rSh, *rS, t2);
PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.RB,21.1451:EVX:e500:evmhegsmfan %RS,%RA,%RB:Multiply Half Words Even Guarded Signed Modulo Fractional and Accumulate Negative
- signed32 a, b;
- signed64 t1, t2;
- a = (signed32)(signed16)EV_HIHALF(*rA);
- b = (signed32)(signed16)EV_HIHALF(*rB);
+ int32_t a, b;
+ int64_t t1, t2;
+ a = (int32_t)(int16_t)EV_HIHALF(*rA);
+ b = (int32_t)(int16_t)EV_HIHALF(*rB);
t1 = EV_MUL16_SSF(a, b);
- if (t1 & ((unsigned64)1 << 32))
+ if (t1 & ((uint64_t)1 << 32))
t1 |= 0xfffffffe00000000;
t2 = ACC - t1;
EV_SET_REG1_ACC(*rSh, *rS, t2);
PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.RB,21.1449:EVX:e500:evmhegsmian %RS,%RA,%RB:Multiply Half Words Even Guarded Signed Modulo Integer and Accumulate Negative
- signed32 a, b;
- signed64 t1, t2;
- a = (signed32)(signed16)EV_HIHALF(*rA);
- b = (signed32)(signed16)EV_HIHALF(*rB);
- t1 = (signed64)a * (signed64)b;
+ int32_t a, b;
+ int64_t t1, t2;
+ a = (int32_t)(int16_t)EV_HIHALF(*rA);
+ b = (int32_t)(int16_t)EV_HIHALF(*rB);
+ t1 = (int64_t)a * (int64_t)b;
t2 = ACC - t1;
EV_SET_REG1_ACC(*rSh, *rS, t2);
PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.RB,21.1448:EVX:e500:evmhegumian %RS,%RA,%RB:Multiply Half Words Even Guarded Unsigned Modulo Integer and Accumulate Negative
- unsigned32 a, b;
- unsigned64 t1, t2;
- a = (unsigned32)(unsigned16)EV_HIHALF(*rA);
- b = (unsigned32)(unsigned16)EV_HIHALF(*rB);
- t1 = (unsigned64)a * (unsigned64)b;
+ uint32_t a, b;
+ uint64_t t1, t2;
+ a = (uint32_t)(uint16_t)EV_HIHALF(*rA);
+ b = (uint32_t)(uint16_t)EV_HIHALF(*rB);
+ t1 = (uint64_t)a * (uint64_t)b;
t2 = ACC - t1;
EV_SET_REG1_ACC(*rSh, *rS, t2);
PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.RB,21.1095:EVX:e500:evmwhssf %RS,%RA,%RB:Vector Multiply Word High Signed Saturate Fractional
- signed32 al, ah, bl, bh;
- signed64 t1, t2;
+ int32_t al, ah, bl, bh;
+ int64_t t1, t2;
int movl, movh;
al = *rA;
ah = *rAh;
PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
0.4,6.RS,11.RA,16.RB,21.1127:EVX:e500:evmwhssfa %RS,%RA,%RB:Vector Multiply Word High Signed Saturate Fractional and Accumulate
- signed32 al, ah, bl, bh;
- signed64 t1, t2;
+ int32_t al, ah, bl, bh;
+ int64_t t1, t2;
int movl, movh;
al = *rA;
ah = *rAh;
PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
0.4,6.RS,11.RA,16.RB,21.1103:EVX:e500:evmwhsmf %RS,%RA,%RB:Vector Multiply Word High Signed Modulo Fractional
- signed32 al, ah, bl, bh;
- signed64 t1, t2;
+ int32_t al, ah, bl, bh;
+ int64_t t1, t2;
al = *rA;
ah = *rAh;
bl = *rB;
PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.RB,21.1135:EVX:e500:evmwhsmfa %RS,%RA,%RB:Vector Multiply Word High Signed Modulo Fractional and Accumulate
- signed32 al, ah, bl, bh;
- signed64 t1, t2;
+ int32_t al, ah, bl, bh;
+ int64_t t1, t2;
al = *rA;
ah = *rAh;
bl = *rB;
PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.RB,21.1101:EVX:e500:evmwhsmi %RS,%RA,%RB:Vector Multiply Word High Signed Modulo Integer
- signed32 al, ah, bl, bh;
- signed64 t1, t2;
+ int32_t al, ah, bl, bh;
+ int64_t t1, t2;
al = *rA;
ah = *rAh;
bl = *rB;
bh = *rBh;
- t1 = (signed64)al * (signed64)bl;
- t2 = (signed64)ah * (signed64)bh;
+ t1 = (int64_t)al * (int64_t)bl;
+ t2 = (int64_t)ah * (int64_t)bh;
EV_SET_REG2(*rSh, *rS, t2 >> 32, t1 >> 32);
PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.RB,21.1133:EVX:e500:evmwhsmia %RS,%RA,%RB:Vector Multiply Word High Signed Modulo Integer and Accumulate
- signed32 al, ah, bl, bh;
- signed64 t1, t2;
+ int32_t al, ah, bl, bh;
+ int64_t t1, t2;
al = *rA;
ah = *rAh;
bl = *rB;
bh = *rBh;
- t1 = (signed64)al * (signed64)bl;
- t2 = (signed64)ah * (signed64)bh;
+ t1 = (int64_t)al * (int64_t)bl;
+ t2 = (int64_t)ah * (int64_t)bh;
EV_SET_REG2_ACC(*rSh, *rS, t2 >> 32, t1 >> 32);
PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.RB,21.1100:EVX:e500:evmwhumi %RS,%RA,%RB:Vector Multiply Word High Unsigned Modulo Integer
- unsigned32 al, ah, bl, bh;
- unsigned64 t1, t2;
+ uint32_t al, ah, bl, bh;
+ uint64_t t1, t2;
al = *rA;
ah = *rAh;
bl = *rB;
bh = *rBh;
- t1 = (unsigned64)al * (unsigned64)bl;
- t2 = (unsigned64)ah * (unsigned64)bh;
+ t1 = (uint64_t)al * (uint64_t)bl;
+ t2 = (uint64_t)ah * (uint64_t)bh;
EV_SET_REG2(*rSh, *rS, t2 >> 32, t1 >> 32);
PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.RB,21.1132:EVX:e500:evmwhumia %RS,%RA,%RB:Vector Multiply Word High Unsigned Modulo Integer and Accumulate
- unsigned32 al, ah, bl, bh;
- unsigned64 t1, t2;
+ uint32_t al, ah, bl, bh;
+ uint64_t t1, t2;
al = *rA;
ah = *rAh;
bl = *rB;
bh = *rBh;
- t1 = (unsigned64)al * (unsigned64)bl;
- t2 = (unsigned64)ah * (unsigned64)bh;
+ t1 = (uint64_t)al * (uint64_t)bl;
+ t2 = (uint64_t)ah * (uint64_t)bh;
EV_SET_REG2_ACC(*rSh, *rS, t2 >> 32, t1 >> 32);
PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.RB,21.1091:EVX:e500:evmwlssf %RS,%RA,%RB:Vector Multiply Word Low Signed Saturate Fractional
- signed32 al, ah, bl, bh;
- signed64 t1, t2;
+ int32_t al, ah, bl, bh;
+ int64_t t1, t2;
int movl, movh;
al = *rA;
ah = *rAh;
PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
0.4,6.RS,11.RA,16.RB,21.1123:EVX:e500:evmwlssfa %RS,%RA,%RB:Vector Multiply Word Low Signed Saturate Fractional and Accumulate
- signed32 al, ah, bl, bh;
- signed64 t1, t2;
+ int32_t al, ah, bl, bh;
+ int64_t t1, t2;
int movl, movh;
al = *rA;
ah = *rAh;
PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
0.4,6.RS,11.RA,16.RB,21.1099:EVX:e500:evmwlsmf %RS,%RA,%RB:Vector Multiply Word Low Signed Modulo Fractional
- signed32 al, ah, bl, bh;
- signed64 t1, t2;
+ int32_t al, ah, bl, bh;
+ int64_t t1, t2;
al = *rA;
ah = *rAh;
bl = *rB;
PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.RB,21.1131:EVX:e500:evmwlsmfa %RS,%RA,%RB:Vector Multiply Word Low Signed Modulo Fractional and Accumulate
- signed32 al, ah, bl, bh;
- signed64 t1, t2;
+ int32_t al, ah, bl, bh;
+ int64_t t1, t2;
al = *rA;
ah = *rAh;
bl = *rB;
PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.RB,21.1096:EVX:e500:evmwlumi %RS,%RA,%RB:Vector Multiply Word Low Unsigned Modulo Integer
- unsigned32 al, ah, bl, bh;
- unsigned64 t1, t2;
+ uint32_t al, ah, bl, bh;
+ uint64_t t1, t2;
al = *rA;
ah = *rAh;
bl = *rB;
bh = *rBh;
- t1 = (unsigned64)al * (unsigned64)bl;
- t2 = (unsigned64)ah * (unsigned64)bh;
+ t1 = (uint64_t)al * (uint64_t)bl;
+ t2 = (uint64_t)ah * (uint64_t)bh;
EV_SET_REG2(*rSh, *rS, t2, t1);
PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.RB,21.1128:EVX:e500:evmwlumia %RS,%RA,%RB:Vector Multiply Word Low Unsigned Modulo Integer and Accumulate
- unsigned32 al, ah, bl, bh;
- unsigned64 t1, t2;
+ uint32_t al, ah, bl, bh;
+ uint64_t t1, t2;
al = *rA;
ah = *rAh;
bl = *rB;
bh = *rBh;
- t1 = (unsigned64)al * (unsigned64)bl;
- t2 = (unsigned64)ah * (unsigned64)bh;
+ t1 = (uint64_t)al * (uint64_t)bl;
+ t2 = (uint64_t)ah * (uint64_t)bh;
EV_SET_REG2_ACC(*rSh, *rS, t2, t1);
PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.RB,21.1347:EVX:e500:evmwlssfaaw %RS,%RA,%RB:Vector Multiply Word Low Signed Saturate Fractional and Accumulate in Words
- signed32 al, ah, bl, bh;
- signed64 t1, t2, tl, th;
+ int32_t al, ah, bl, bh;
+ int64_t t1, t2, tl, th;
int movl, movh, ovl, ovh;
al = *rA;
ah = *rAh;
PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
0.4,6.RS,11.RA,16.RB,21.1345:EVX:e500:evmwlssiaaw %RS,%RA,%RB:Vector Multiply Word Low Signed Saturate Integer and Accumulate in Words
- signed32 al, ah, bl, bh;
- signed64 t1, t2, tl, th;
+ int32_t al, ah, bl, bh;
+ int64_t t1, t2, tl, th;
int ovl, ovh;
al = *rA;
ah = *rAh;
bl = *rB;
bh = *rBh;
- t1 = (signed64)ah * (signed64)bh;
- t2 = (signed64)al * (signed64)bl;
+ t1 = (int64_t)ah * (int64_t)bh;
+ t2 = (int64_t)al * (int64_t)bl;
th = EV_ACCHIGH + (t1 & 0xffffffff);
tl = EV_ACCLOW + (t2 & 0xffffffff);
ovh = EV_SAT_P_S32(th);
PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
0.4,6.RS,11.RA,16.RB,21.1355:EVX:e500:evmwlsmfaaw %RS,%RA,%RB:Vector Multiply Word Low Signed Modulo Fractional and Accumulate in Words
- signed32 al, ah, bl, bh;
- signed64 t1, t2;
+ int32_t al, ah, bl, bh;
+ int64_t t1, t2;
int mov;
al = *rA;
ah = *rAh;
PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.RB,21.1353:EVX:e500:evmwlsmiaaw %RS,%RA,%RB:Vector Multiply Word Low Signed Modulo Integer and Accumulate in Words
- signed32 al, ah, bl, bh;
- signed64 t1, t2;
+ int32_t al, ah, bl, bh;
+ int64_t t1, t2;
al = *rA;
ah = *rAh;
bl = *rB;
bh = *rBh;
- t1 = (signed64)ah * (signed64)bh;
- t2 = (signed64)al * (signed64)bl;
+ t1 = (int64_t)ah * (int64_t)bh;
+ t2 = (int64_t)al * (int64_t)bl;
EV_SET_REG2_ACC(*rSh, *rS, EV_ACCHIGH + (t1 & 0xffffffff),
EV_ACCLOW + (t2 & 0xffffffff));
//printf("evmwlsmiaaw: al %d ah %d bl %d bh %d t1 %qd t2 %qd\n", al, ah, bl, bh, t1, t2);
PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.RB,21.1344:EVX:e500:evmwlusiaaw %RS,%RA,%RB:Vector Multiply Word Low Unsigned Saturate Integer and Accumulate in Words
- unsigned32 al, ah, bl, bh;
- unsigned64 t1, t2, tl, th;
+ uint32_t al, ah, bl, bh;
+ uint64_t t1, t2, tl, th;
int ovl, ovh;
al = *rA;
ah = *rAh;
bl = *rB;
bh = *rBh;
- t1 = (unsigned64)ah * (unsigned64)bh;
- t2 = (unsigned64)al * (unsigned64)bl;
+ t1 = (uint64_t)ah * (uint64_t)bh;
+ t2 = (uint64_t)al * (uint64_t)bl;
th = EV_ACCHIGH + (t1 & 0xffffffff);
tl = EV_ACCLOW + (t2 & 0xffffffff);
ovh = (th >> 32);
PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
0.4,6.RS,11.RA,16.RB,21.1352:EVX:e500:evmwlumiaaw %RS,%RA,%RB:Vector Multiply Word Low Unsigned Modulo Integer and Accumulate in Words
- unsigned32 al, ah, bl, bh;
- unsigned64 t1, t2;
+ uint32_t al, ah, bl, bh;
+ uint64_t t1, t2;
al = *rA;
ah = *rAh;
bl = *rB;
bh = *rBh;
- t1 = (unsigned64)ah * (unsigned64)bh;
- t2 = (unsigned64)al * (unsigned64)bl;
+ t1 = (uint64_t)ah * (uint64_t)bh;
+ t2 = (uint64_t)al * (uint64_t)bl;
EV_SET_REG2_ACC(*rSh, *rS, EV_ACCHIGH + (t1 & 0xffffffff),
EV_ACCLOW + (t2 & 0xffffffff));
PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.RB,21.1475:EVX:e500:evmwlssfanw %RS,%RA,%RB:Vector Multiply Word Low Signed Saturate Fractional and Accumulate Negative in Words
- signed32 al, ah, bl, bh;
- signed64 t1, t2, tl, th;
+ int32_t al, ah, bl, bh;
+ int64_t t1, t2, tl, th;
int movl, movh, ovl, ovh;
al = *rA;
ah = *rAh;
PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
0.4,6.RS,11.RA,16.RB,21.1473:EVX:e500:evmwlssianw %RS,%RA,%RB:Vector Multiply Word Low Signed Saturate Integer and Accumulate Negative in Words
- signed32 al, ah, bl, bh;
- signed64 t1, t2, tl, th;
+ int32_t al, ah, bl, bh;
+ int64_t t1, t2, tl, th;
int ovl, ovh;
al = *rA;
ah = *rAh;
bl = *rB;
bh = *rBh;
- t1 = (signed64)ah * (signed64)bh;
- t2 = (signed64)al * (signed64)bl;
+ t1 = (int64_t)ah * (int64_t)bh;
+ t2 = (int64_t)al * (int64_t)bl;
th = EV_ACCHIGH - (t1 & 0xffffffff);
tl = EV_ACCLOW - (t2 & 0xffffffff);
ovh = EV_SAT_P_S32(th);
PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
0.4,6.RS,11.RA,16.RB,21.1483:EVX:e500:evmwlsmfanw %RS,%RA,%RB:Vector Multiply Word Low Signed Modulo Fractional and Accumulate Negative in Words
- signed32 al, ah, bl, bh;
- signed64 t1, t2;
+ int32_t al, ah, bl, bh;
+ int64_t t1, t2;
int mov;
al = *rA;
ah = *rAh;
PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.RB,21.1481:EVX:e500:evmwlsmianw %RS,%RA,%RB:Vector Multiply Word Low Signed Modulo Integer and Accumulate Negative in Words
- signed32 al, ah, bl, bh;
- signed64 t1, t2;
+ int32_t al, ah, bl, bh;
+ int64_t t1, t2;
al = *rA;
ah = *rAh;
bl = *rB;
bh = *rBh;
- t1 = (signed64)ah * (signed64)bh;
- t2 = (signed64)al * (signed64)bl;
+ t1 = (int64_t)ah * (int64_t)bh;
+ t2 = (int64_t)al * (int64_t)bl;
EV_SET_REG2_ACC(*rSh, *rS, EV_ACCHIGH - (t1 & 0xffffffff),
EV_ACCLOW - (t2 & 0xffffffff));
PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.RB,21.1472:EVX:e500:evmwlusianw %RS,%RA,%RB:Vector Multiply Word Low Unsigned Saturate Integer and Accumulate Negative in Words
- unsigned32 al, ah, bl, bh;
- unsigned64 t1, t2, tl, th;
+ uint32_t al, ah, bl, bh;
+ uint64_t t1, t2, tl, th;
int ovl, ovh;
al = *rA;
ah = *rAh;
bl = *rB;
bh = *rBh;
- t1 = (unsigned64)ah * (unsigned64)bh;
- t2 = (unsigned64)al * (unsigned64)bl;
+ t1 = (uint64_t)ah * (uint64_t)bh;
+ t2 = (uint64_t)al * (uint64_t)bl;
th = EV_ACCHIGH - (t1 & 0xffffffff);
tl = EV_ACCLOW - (t2 & 0xffffffff);
ovh = (th >> 32);
PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
0.4,6.RS,11.RA,16.RB,21.1480:EVX:e500:evmwlumianw %RS,%RA,%RB:Vector Multiply Word Low Unsigned Modulo Integer and Accumulate Negative in Words
- unsigned32 al, ah, bl, bh;
- unsigned64 t1, t2;
+ uint32_t al, ah, bl, bh;
+ uint64_t t1, t2;
al = *rA;
ah = *rAh;
bl = *rB;
bh = *rBh;
- t1 = (unsigned64)ah * (unsigned64)bh;
- t2 = (unsigned64)al * (unsigned64)bl;
+ t1 = (uint64_t)ah * (uint64_t)bh;
+ t2 = (uint64_t)al * (uint64_t)bl;
EV_SET_REG2_ACC(*rSh, *rS, EV_ACCHIGH - (t1 & 0xffffffff),
EV_ACCLOW - (t2 & 0xffffffff));
PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.RB,21.1107:EVX:e500:evmwssf %RS,%RA,%RB:Vector Multiply Word Signed Saturate Fractional
- signed32 a, b;
- signed64 t;
+ int32_t a, b;
+ int64_t t;
int movl;
a = *rA;
b = *rB;
PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
0.4,6.RS,11.RA,16.RB,21.1139:EVX:e500:evmwssfa %RS,%RA,%RB:Vector Multiply Word Signed Saturate Fractional and Accumulate
- signed32 a, b;
- signed64 t;
+ int32_t a, b;
+ int64_t t;
int movl;
a = *rA;
b = *rB;
PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
0.4,6.RS,11.RA,16.RB,21.1115:EVX:e500:evmwsmf %RS,%RA,%RB:Vector Multiply Word Signed Modulo Fractional
- signed32 a, b;
- signed64 t;
+ int32_t a, b;
+ int64_t t;
int movl;
a = *rA;
b = *rB;
PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.RB,21.1147:EVX:e500:evmwsmfa %RS,%RA,%RB:Vector Multiply Word Signed Modulo Fractional and Accumulate
- signed32 a, b;
- signed64 t;
+ int32_t a, b;
+ int64_t t;
int movl;
a = *rA;
b = *rB;
PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.RB,21.1113:EVX:e500:evmwsmi %RS,%RA,%RB:Vector Multiply Word Signed Modulo Integer
- signed32 a, b;
- signed64 t;
+ int32_t a, b;
+ int64_t t;
int movl;
a = *rA;
b = *rB;
- t = (signed64)a * (signed64)b;
+ t = (int64_t)a * (int64_t)b;
EV_SET_REG1(*rSh, *rS, t);
PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.RB,21.1145:EVX:e500:evmwsmia %RS,%RA,%RB:Vector Multiply Word Signed Modulo Integer and Accumulate
- signed32 a, b;
- signed64 t;
+ int32_t a, b;
+ int64_t t;
int movl;
a = *rA;
b = *rB;
- t = (signed64)a * (signed64)b;
+ t = (int64_t)a * (int64_t)b;
EV_SET_REG1_ACC(*rSh, *rS, t);
PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.RB,21.1112:EVX:e500:evmwumi %RS,%RA,%RB:Vector Multiply Word Unsigned Modulo Integer
- unsigned32 a, b;
- unsigned64 t;
+ uint32_t a, b;
+ uint64_t t;
int movl;
a = *rA;
b = *rB;
- t = (signed64)a * (signed64)b;
+ t = (int64_t)a * (int64_t)b;
EV_SET_REG1(*rSh, *rS, t);
PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.RB,21.1144:EVX:e500:evmwumia %RS,%RA,%RB:Vector Multiply Word Unsigned Modulo Integer and Accumulate
- unsigned32 a, b;
- unsigned64 t;
+ uint32_t a, b;
+ uint64_t t;
int movl;
a = *rA;
b = *rB;
- t = (signed64)a * (signed64)b;
+ t = (int64_t)a * (int64_t)b;
EV_SET_REG1_ACC(*rSh, *rS, t);
PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.RB,21.1363:EVX:e500:evmwssfaa %RS,%RA,%RB:Vector Multiply Word Signed Saturate Fractional Add and Accumulate
- signed64 t1, t2;
- signed32 a, b;
+ int64_t t1, t2;
+ int32_t a, b;
int movl;
a = *rA;
b = *rB;
PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
0.4,6.RS,11.RA,16.RB,21.1371:EVX:e500:evmwsmfaa %RS,%RA,%RB:Vector Multiply Word Signed Modulo Fractional Add and Accumulate
- signed64 t1, t2;
- signed32 a, b;
+ int64_t t1, t2;
+ int32_t a, b;
int movl;
a = *rA;
b = *rB;
PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.RB,21.1369:EVX:e500:evmwsmiaa %RS,%RA,%RB:Vector Multiply Word Signed Modulo Integer And and Accumulate
- signed64 t1, t2;
- signed32 a, b;
+ int64_t t1, t2;
+ int32_t a, b;
a = *rA;
b = *rB;
- t1 = (signed64)a * (signed64)b;
+ t1 = (int64_t)a * (int64_t)b;
t2 = ACC + t1;
EV_SET_REG1_ACC(*rSh, *rS, t2);
PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.RB,21.1368:EVX:e500:evmwumiaa %RS,%RA,%RB:Vector Multiply Word Unsigned Modulo Integer Add and Accumulate
- unsigned64 t1, t2;
- unsigned32 a, b;
+ uint64_t t1, t2;
+ uint32_t a, b;
a = *rA;
b = *rB;
- t1 = (unsigned64)a * (unsigned64)b;
+ t1 = (uint64_t)a * (uint64_t)b;
t2 = ACC + t1;
EV_SET_REG1_ACC(*rSh, *rS, t2);
PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.RB,21.1491:EVX:e500:evmwssfan %RS,%RA,%RB:Vector Multiply Word Signed Saturate Fractional and Accumulate Negative
- signed64 t1, t2;
- signed32 a, b;
+ int64_t t1, t2;
+ int32_t a, b;
int movl;
a = *rA;
b = *rB;
PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
0.4,6.RS,11.RA,16.RB,21.1499:EVX:e500:evmwsmfan %RS,%RA,%RB:Vector Multiply Word Signed Modulo Fractional and Accumulate Negative
- signed64 t1, t2;
- signed32 a, b;
+ int64_t t1, t2;
+ int32_t a, b;
int movl;
a = *rA;
b = *rB;
PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.RB,21.1497:EVX:e500:evmwsmian %RS,%RA,%RB:Vector Multiply Word Signed Modulo Integer and Accumulate Negative
- signed64 t1, t2;
- signed32 a, b;
+ int64_t t1, t2;
+ int32_t a, b;
a = *rA;
b = *rB;
- t1 = (signed64)a * (signed64)b;
+ t1 = (int64_t)a * (int64_t)b;
t2 = ACC - t1;
EV_SET_REG1_ACC(*rSh, *rS, t2);
PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.RB,21.1496:EVX:e500:evmwumian %RS,%RA,%RB:Vector Multiply Word Unsigned Modulo Integer and Accumulate Negative
- unsigned64 t1, t2;
- unsigned32 a, b;
+ uint64_t t1, t2;
+ uint32_t a, b;
a = *rA;
b = *rB;
- t1 = (unsigned64)a * (unsigned64)b;
+ t1 = (uint64_t)a * (uint64_t)b;
t2 = ACC - t1;
EV_SET_REG1_ACC(*rSh, *rS, t2);
PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.0,21.1217:EVX:e500:evaddssiaaw %RS,%RA:Vector Add Signed Saturate Integer to Accumulator Word
- signed64 t1, t2;
- signed32 al, ah;
+ int64_t t1, t2;
+ int32_t al, ah;
int ovl, ovh;
al = *rA;
ah = *rAh;
- t1 = (signed64)EV_ACCHIGH + (signed64)ah;
- t2 = (signed64)EV_ACCLOW + (signed64)al;
+ t1 = (int64_t)EV_ACCHIGH + (int64_t)ah;
+ t2 = (int64_t)EV_ACCLOW + (int64_t)al;
ovh = EV_SAT_P_S32(t1);
ovl = EV_SAT_P_S32(t2);
- EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE_ACC(ovh, t1 & ((unsigned64)1 << 32), 0x80000000, 0x7fffffff, t1),
- EV_SATURATE_ACC(ovl, t2 & ((unsigned64)1 << 32), 0x80000000, 0x7fffffff, t2));
+ EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE_ACC(ovh, t1 & ((uint64_t)1 << 32), 0x80000000, 0x7fffffff, t1),
+ EV_SATURATE_ACC(ovl, t2 & ((uint64_t)1 << 32), 0x80000000, 0x7fffffff, t2));
EV_SET_SPEFSCR_OV(ovl, ovh);
PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK, spr_spefscr);
0.4,6.RS,11.RA,16.0,21.1225:EVX:e500:evaddsmiaaw %RS,%RA:Vector Add Signed Modulo Integer to Accumulator Word
- signed64 t1, t2;
- signed32 al, ah;
+ int64_t t1, t2;
+ int32_t al, ah;
al = *rA;
ah = *rAh;
- t1 = (signed64)EV_ACCHIGH + (signed64)ah;
- t2 = (signed64)EV_ACCLOW + (signed64)al;
+ t1 = (int64_t)EV_ACCHIGH + (int64_t)ah;
+ t2 = (int64_t)EV_ACCLOW + (int64_t)al;
EV_SET_REG2_ACC(*rSh, *rS, t1, t2);
//printf("evaddsmiaaw: al %d ah %d t1 %qd t2 %qd\n", al, ah, t1, t2);
//printf("evaddsmiaaw: ACC = %08x.%08x; *rSh = %08x; *rS = %08x\n", (int)(ACC >> 32), (int)ACC, *rSh, *rS);
PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
0.4,6.RS,11.RA,16.0,21.1216:EVX:e500:evaddusiaaw %RS,%RA:Vector Add Unsigned Saturate Integer to Accumulator Word
- signed64 t1, t2;
- unsigned32 al, ah;
+ int64_t t1, t2;
+ uint32_t al, ah;
int ovl, ovh;
al = *rA;
ah = *rAh;
- t1 = (signed64)EV_ACCHIGH + (signed64)ah;
- t2 = (signed64)EV_ACCLOW + (signed64)al;
+ t1 = (int64_t)EV_ACCHIGH + (int64_t)ah;
+ t2 = (int64_t)EV_ACCLOW + (int64_t)al;
ovh = EV_SAT_P_U32(t1);
ovl = EV_SAT_P_U32(t2);
EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE(ovh, 0xffffffff, t1),
PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK, spr_spefscr);
0.4,6.RS,11.RA,16.0,21.1224:EVX:e500:evaddumiaaw %RS,%RA:Vector Add Unsigned Modulo Integer to Accumulator Word
- unsigned64 t1, t2;
- unsigned32 al, ah;
+ uint64_t t1, t2;
+ uint32_t al, ah;
al = *rA;
ah = *rAh;
- t1 = (unsigned64)EV_ACCHIGH + (unsigned64)ah;
+ t1 = (uint64_t)EV_ACCHIGH + (uint64_t)ah;
t2 = EV_ACCLOW + al;
EV_SET_REG2_ACC(*rSh, *rS, t1, t2);
PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
0.4,6.RS,11.RA,16.0,21.1219:EVX:e500:evsubfssiaaw %RS,%RA:Vector Subtract Signed Saturate Integer to Accumulator Word
- signed64 t1, t2;
- signed32 al, ah;
+ int64_t t1, t2;
+ int32_t al, ah;
int ovl, ovh;
al = *rA;
ah = *rAh;
- t1 = (signed64)EV_ACCHIGH - (signed64)ah;
- t2 = (signed64)EV_ACCLOW - (signed64)al;
+ t1 = (int64_t)EV_ACCHIGH - (int64_t)ah;
+ t2 = (int64_t)EV_ACCLOW - (int64_t)al;
ovh = EV_SAT_P_S32(t1);
ovl = EV_SAT_P_S32(t2);
EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE_ACC(ovh, t1, 0x80000000, 0x7fffffff, t1),
PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK, spr_spefscr);
0.4,6.RS,11.RA,16.0,21.1227:EVX:e500:evsubfsmiaaw %RS,%RA:Vector Subtract Signed Modulo Integer to Accumulator Word
- signed64 t1, t2;
- signed32 al, ah;
+ int64_t t1, t2;
+ int32_t al, ah;
al = *rA;
ah = *rAh;
- t1 = (signed64)EV_ACCHIGH - (signed64)ah;
- t2 = (signed64)EV_ACCLOW - (signed64)al;
+ t1 = (int64_t)EV_ACCHIGH - (int64_t)ah;
+ t2 = (int64_t)EV_ACCLOW - (int64_t)al;
EV_SET_REG2_ACC(*rSh, *rS, t1, t2);
PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
0.4,6.RS,11.RA,16.0,21.1218:EVX:e500:evsubfusiaaw %RS,%RA:Vector Subtract Unsigned Saturate Integer to Accumulator Word
- signed64 t1, t2;
- unsigned32 al, ah;
+ int64_t t1, t2;
+ uint32_t al, ah;
int ovl, ovh;
al = *rA;
ah = *rAh;
- t1 = (signed64)EV_ACCHIGH - (signed64)ah;
- t2 = (signed64)EV_ACCLOW - (signed64)al;
+ t1 = (int64_t)EV_ACCHIGH - (int64_t)ah;
+ t2 = (int64_t)EV_ACCLOW - (int64_t)al;
ovh = EV_SAT_P_U32(t1);
ovl = EV_SAT_P_U32(t2);
EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE(ovh, 0, t1),
PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK, spr_spefscr);
0.4,6.RS,11.RA,16.0,21.1226:EVX:e500:evsubfumiaaw %RS,%RA:Vector Subtract Unsigned Modulo Integer to Accumulator Word
- unsigned64 t1, t2;
- unsigned32 al, ah;
+ uint64_t t1, t2;
+ uint32_t al, ah;
al = *rA;
ah = *rAh;
- t1 = (unsigned64)EV_ACCHIGH - (unsigned64)ah;
- t2 = (unsigned64)EV_ACCLOW - (unsigned64)al;
+ t1 = (uint64_t)EV_ACCHIGH - (uint64_t)ah;
+ t2 = (uint64_t)EV_ACCLOW - (uint64_t)al;
EV_SET_REG2_ACC(*rSh, *rS, t1, t2);
PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
0.4,6.RS,11.RA,16.RB,21.1222:EVX:e500:evdivws %RS,%RA,%RB:Vector Divide Word Signed
- signed32 dividendh, dividendl, divisorh, divisorl;
- signed32 w1, w2;
+ int32_t dividendh, dividendl, divisorh, divisorl;
+ int32_t w1, w2;
int ovh, ovl;
dividendh = *rAh;
dividendl = *rA;
0.4,6.RS,11.RA,16.RB,21.1223:EVX:e500:evdivwu %RS,%RA,%RB:Vector Divide Word Unsigned
- unsigned32 dividendh, dividendl, divisorh, divisorl;
- unsigned32 w1, w2;
+ uint32_t dividendh, dividendl, divisorh, divisorl;
+ uint32_t w1, w2;
int ovh, ovl;
dividendh = *rAh;
dividendl = *rA;
#
0.4,6.RS,11.RA,16.0,21.644:EVX:e500:evfsabs %RS,%RA:Vector Floating-Point Absolute Value
- unsigned32 w1, w2;
+ uint32_t w1, w2;
w1 = *rAh & 0x7fffffff;
w2 = *rA & 0x7fffffff;
EV_SET_REG2(*rSh, *rS, w1, w2);
PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
0.4,6.RS,11.RA,16.0,21.645:EVX:e500:evfsnabs %RS,%RA:Vector Floating-Point Negative Absolute Value
- unsigned32 w1, w2;
+ uint32_t w1, w2;
w1 = *rAh | 0x80000000;
w2 = *rA | 0x80000000;
EV_SET_REG2(*rSh, *rS, w1, w2);
PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
0.4,6.RS,11.RA,16.0,21.646:EVX:e500:evfsneg %RS,%RA:Vector Floating-Point Negate
- unsigned32 w1, w2;
+ uint32_t w1, w2;
w1 = *rAh;
w2 = *rA;
w1 = (w1 & 0x7fffffff) | ((~w1) & 0x80000000);
PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
0.4,6.RS,11.RA,16.RB,21.640:EVX:e500:evfsadd %RS,%RA,%RB:Vector Floating-Point Add
- unsigned32 w1, w2;
+ uint32_t w1, w2;
w1 = ev_fs_add (*rAh, *rBh, spefscr_finvh, spefscr_fovfh, spefscr_funfh, spefscr_fgh, spefscr_fxh, processor);
w2 = ev_fs_add (*rA, *rB, spefscr_finv, spefscr_fovf, spefscr_funf, spefscr_fgh, spefscr_fxh, processor);
EV_SET_REG2(*rSh, *rS, w1, w2);
PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
0.4,6.RS,11.RA,16.RB,21.641:EVX:e500:evfssub %RS,%RA,%RB:Vector Floating-Point Subtract
- unsigned32 w1, w2;
+ uint32_t w1, w2;
w1 = ev_fs_sub (*rAh, *rBh, spefscr_finvh, spefscr_fovfh, spefscr_funfh, spefscr_fgh, spefscr_fxh, processor);
w2 = ev_fs_sub (*rA, *rB, spefscr_finv, spefscr_fovf, spefscr_funf, spefscr_fgh, spefscr_fxh, processor);
EV_SET_REG2(*rSh, *rS, w1, w2);
PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
0.4,6.RS,11.RA,16.RB,21.648:EVX:e500:evfsmul %RS,%RA,%RB:Vector Floating-Point Multiply
- unsigned32 w1, w2;
+ uint32_t w1, w2;
w1 = ev_fs_mul (*rAh, *rBh, spefscr_finvh, spefscr_fovfh, spefscr_funfh, spefscr_fgh, spefscr_fxh, processor);
w2 = ev_fs_mul (*rA, *rB, spefscr_finv, spefscr_fovf, spefscr_funf, spefscr_fgh, spefscr_fxh, processor);
EV_SET_REG2(*rSh, *rS, w1, w2);
PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
0.4,6.RS,11.RA,16.RB,21.649:EVX:e500:evfsdiv %RS,%RA,%RB:Vector Floating-Point Divide
- signed32 w1, w2;
+ int32_t w1, w2;
w1 = ev_fs_div (*rAh, *rBh, spefscr_finvh, spefscr_fovfh, spefscr_funfh, spefscr_fdbzh, spefscr_fgh, spefscr_fxh, processor);
w2 = ev_fs_div (*rA, *rB, spefscr_finv, spefscr_fovf, spefscr_funf, spefscr_fdbz, spefscr_fg, spefscr_fx, processor);
EV_SET_REG2(*rSh, *rS, w1, w2);
PPC_INSN_INT_CR(0, RA_BITMASK | RB_BITMASK, BF_BITMASK);
0.4,6.RS,11.0,16.RB,21.656:EVX:e500:evfscfui %RS,%RB:Vector Convert Floating-Point from Unsigned Integer
- unsigned32 f, w1, w2;
+ uint32_t f, w1, w2;
sim_fpu b;
sim_fpu_u32to (&b, *rBh, sim_fpu_round_default);
PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
0.4,6.RS,11.0,16.RB,21.664:EVX:e500:evfsctuiz %RS,%RB:Vector Convert Floating-Point to Unsigned Integer with Round toward Zero
- unsigned32 w1, w2;
+ uint32_t w1, w2;
sim_fpu b;
sim_fpu_32to (&b, *rBh);
PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
0.4,6.RS,11.0,16.RB,21.657:EVX:e500:evfscfsi %RS,%RB:Vector Convert Floating-Point from Signed Integer
- signed32 w1, w2;
+ int32_t w1, w2;
sim_fpu b, x, y;
sim_fpu_i32to (&b, *rBh, sim_fpu_round_default);
PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
0.4,6.RS,11.0,16.RB,21.658:EVX:e500:evfscfuf %RS,%RB:Vector Convert Floating-Point from Unsigned Fraction
- unsigned32 w1, w2, bh, bl;
+ uint32_t w1, w2, bh, bl;
sim_fpu b, x, y;
bh = *rBh;
if (bh == 0xffffffff)
PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
0.4,6.RS,11.0,16.RB,21.659:EVX:e500:evfscfsf %RS,%RB:Vector Convert Floating-Point from Signed Fraction
- unsigned32 w1, w2;
+ uint32_t w1, w2;
sim_fpu b, x, y;
sim_fpu_u32to (&x, 0x80000000, sim_fpu_round_default);
PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
0.4,6.RS,11.0,16.RB,21.660:EVX:e500:evfsctui %RS,%RB:Vector Convert Floating-Point to Unsigned Integer
- unsigned32 w1, w2;
+ uint32_t w1, w2;
sim_fpu b;
sim_fpu_32to (&b, *rBh);
PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
0.4,6.RS,11.0,16.RB,21.661:EVX:e500:evfsctsi %RS,%RB:Vector Convert Floating-Point to Signed Integer
- signed32 w1, w2;
+ int32_t w1, w2;
sim_fpu b;
sim_fpu_32to (&b, *rBh);
PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
0.4,6.RS,11.0,16.RB,21.666:EVX:e500:evfsctsiz %RS,%RB:Vector Convert Floating-Point to Signed Integer with Round toward Zero
- signed32 w1, w2;
+ int32_t w1, w2;
sim_fpu b;
sim_fpu_32to (&b, *rBh);
PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
0.4,6.RS,11.0,16.RB,21.662:EVX:e500:evfsctuf %RS,%RB:Vector Convert Floating-Point to Unsigned Fraction
- unsigned32 w1, w2;
+ uint32_t w1, w2;
sim_fpu b, x, y;
sim_fpu_u64to (&x, 0x100000000, sim_fpu_round_default);
PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
0.4,6.RS,11.0,16.RB,21.663:EVX:e500:evfsctsf %RS,%RB:Vector Convert Floating-Point to Signed Fraction
- signed32 w1, w2;
+ int32_t w1, w2;
sim_fpu b, x, y;
sim_fpu_32to (&y, *rBh);
0.4,6.RS,11.RA,16.0,21.708:EVX:e500:efsabs %RS,%RA:Floating-Point Absolute Value
- unsigned32 w1, w2;
+ uint32_t w1, w2;
w1 = *rSh;
w2 = *rA & 0x7fffffff;
EV_SET_REG2(*rSh, *rS, w1, w2);
PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
0.4,6.RS,11.RA,16.0,21.709:EVX:e500:efsnabs %RS,%RA:Floating-Point Negative Absolute Value
- unsigned32 w1, w2;
+ uint32_t w1, w2;
w1 = *rSh;
w2 = *rA | 0x80000000;
EV_SET_REG2(*rSh, *rS, w1, w2);
PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
0.4,6.RS,11.RA,16.0,21.710:EVX:e500:efsneg %RS,%RA:Floating-Point Negate
- unsigned32 w1, w2;
+ uint32_t w1, w2;
w1 = *rSh;
w2 = (*rA & 0x7fffffff) | ((~*rA) & 0x80000000);
EV_SET_REG2(*rSh, *rS, w1, w2);
PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
0.4,6.RS,11.RA,16.RB,21.704:EVX:e500:efsadd %RS,%RA,%RB:Floating-Point Add
- unsigned32 w;
+ uint32_t w;
w = ev_fs_add (*rA, *rB, spefscr_finv, spefscr_fovf, spefscr_funf, spefscr_fgh, spefscr_fxh, processor);
EV_SET_REG(*rS, w);
PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
0.4,6.RS,11.RA,16.RB,21.705:EVX:e500:efssub %RS,%RA,%RB:Floating-Point Subtract
- unsigned32 w;
+ uint32_t w;
w = ev_fs_sub (*rA, *rB, spefscr_finv, spefscr_fovf, spefscr_funf, spefscr_fgh, spefscr_fxh, processor);
EV_SET_REG(*rS, w);
PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
0.4,6.RS,11.RA,16.RB,21.712:EVX:e500:efsmul %RS,%RA,%RB:Floating-Point Multiply
- unsigned32 w;
+ uint32_t w;
w = ev_fs_mul (*rA, *rB, spefscr_finv, spefscr_fovf, spefscr_funf, spefscr_fgh, spefscr_fxh, processor);
EV_SET_REG(*rS, w);
PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
0.4,6.RS,11.RA,16.RB,21.713:EVX:e500:efsdiv %RS,%RA,%RB:Floating-Point Divide
- unsigned32 w;
+ uint32_t w;
w = ev_fs_div (*rA, *rB, spefscr_finv, spefscr_fovf, spefscr_funf, spefscr_fdbz, spefscr_fg, spefscr_fx, processor);
EV_SET_REG(*rS, w);
PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
PPC_INSN_INT_CR(0, RA_BITMASK | RB_BITMASK, BF_BITMASK);
0.4,6.RS,11.0,16.RB,21.721:EVX:e500:efscfsi %RS,%RB:Convert Floating-Point from Signed Integer
- signed32 f, w1, w2;
+ int32_t f, w1, w2;
sim_fpu b;
w1 = *rSh;
sim_fpu_i32to (&b, *rB, sim_fpu_round_default);
PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
0.4,6.RS,11.0,16.RB,21.720:EVX:e500:efscfui %RS,%RB:Convert Floating-Point from Unsigned Integer
- unsigned32 w1, w2;
+ uint32_t w1, w2;
sim_fpu b;
w1 = *rSh;
sim_fpu_u32to (&b, *rB, sim_fpu_round_default);
PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
0.4,6.RS,11.0,16.RB,21.723:EVX:e500:efscfsf %RS,%RB:Convert Floating-Point from Signed Fraction
- unsigned32 w1, w2;
+ uint32_t w1, w2;
sim_fpu b, x, y;
w1 = *rSh;
sim_fpu_u32to (&x, 0x80000000, sim_fpu_round_default);
PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
0.4,6.RS,11.0,16.RB,21.722:EVX:e500:efscfuf %RS,%RB:Convert Floating-Point from Unsigned Fraction
- unsigned32 w1, w2, bl;
+ uint32_t w1, w2, bl;
sim_fpu b, x, y;
w1 = *rSh;
bl = *rB;
PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
0.4,6.RS,11.0,16.RB,21.725:EVX:e500:efsctsi %RS,%RB:Convert Floating-Point to Signed Integer
- signed64 temp;
- signed32 w1, w2;
+ int64_t temp;
+ int32_t w1, w2;
sim_fpu b;
w1 = *rSh;
sim_fpu_32to (&b, *rB);
PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
0.4,6.RS,11.0,16.RB,21.730:EVX:e500:efsctsiz %RS,%RB:Convert Floating-Point to Signed Integer with Round toward Zero
- signed64 temp;
- signed32 w1, w2;
+ int64_t temp;
+ int32_t w1, w2;
sim_fpu b;
w1 = *rSh;
sim_fpu_32to (&b, *rB);
PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
0.4,6.RS,11.0,16.RB,21.724:EVX:e500:efsctui %RS,%RB:Convert Floating-Point to Unsigned Integer
- unsigned64 temp;
- signed32 w1, w2;
+ uint64_t temp;
+ int32_t w1, w2;
sim_fpu b;
w1 = *rSh;
sim_fpu_32to (&b, *rB);
PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
0.4,6.RS,11.0,16.RB,21.728:EVX:e500:efsctuiz %RS,%RB:Convert Floating-Point to Unsigned Integer with Round toward Zero
- unsigned64 temp;
- signed32 w1, w2;
+ uint64_t temp;
+ int32_t w1, w2;
sim_fpu b;
w1 = *rSh;
sim_fpu_32to (&b, *rB);
PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
0.4,6.RS,11.0,16.RB,21.727:EVX:e500:efsctsf %RS,%RB:Convert Floating-Point to Signed Fraction
- unsigned32 w1, w2;
+ uint32_t w1, w2;
sim_fpu b, x, y;
w1 = *rSh;
sim_fpu_32to (&y, *rB);
PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
0.4,6.RS,11.0,16.RB,21.726:EVX:e500:efsctuf %RS,%RB:Convert Floating-Point to Unsigned Fraction
- unsigned32 w1, w2;
+ uint32_t w1, w2;
sim_fpu b, x, y;
w1 = *rSh;
sim_fpu_u64to (&x, 0x100000000, sim_fpu_round_default);
#
0.4,6.RS,11.RA,16.UIMM,21.769:EVX:e500:evldd %RS,%RA,%UIMM:Vector Load Double Word into Double Word
- unsigned64 m;
+ uint64_t m;
unsigned_word b;
unsigned_word EA;
if (RA_is_0) b = 0;
PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
0.4,6.RS,11.RA,16.RB,21.768:EVX:e500:evlddx %RS,%RA,%RB:Vector Load Double Word into Double Word Indexed
- unsigned64 m;
+ uint64_t m;
unsigned_word b;
unsigned_word EA;
if (RA_is_0) b = 0;
0.4,6.RS,11.RA,16.UIMM,21.771:EVX:e500:evldw %RS,%RA,%UIMM:Vector Load Double into Two Words
unsigned_word b;
unsigned_word EA;
- unsigned32 w1, w2;
+ uint32_t w1, w2;
if (RA_is_0) b = 0;
else b = *rA;
EA = b + (UIMM << 3);
0.4,6.RS,11.RA,16.RB,21.770:EVX:e500:evldwx %RS,%RA,%RB:Vector Load Double into Two Words Indexed
unsigned_word b;
unsigned_word EA;
- unsigned32 w1, w2;
+ uint32_t w1, w2;
if (RA_is_0) b = 0;
else b = *rA;
EA = b + *rB;
0.4,6.RS,11.RA,16.UIMM,21.773:EVX:e500:evldh %RS,%RA,%UIMM:Vector Load Double into 4 Half Words
unsigned_word b;
unsigned_word EA;
- unsigned16 h1, h2, h3, h4;
+ uint16_t h1, h2, h3, h4;
if (RA_is_0) b = 0;
else b = *rA;
EA = b + (UIMM << 3);
0.4,6.RS,11.RA,16.RB,21.772:EVX:e500:evldhx %RS,%RA,%RB:Vector Load Double into 4 Half Words Indexed
unsigned_word b;
unsigned_word EA;
- unsigned16 h1, h2, h3, h4;
+ uint16_t h1, h2, h3, h4;
if (RA_is_0) b = 0;
else b = *rA;
EA = b + *rB;
0.4,6.RS,11.RA,16.UIMM,21.785:EVX:e500:evlwhe %RS,%RA,%UIMM:Vector Load Word into Two Half Words Even
unsigned_word b;
unsigned_word EA;
- unsigned16 h1, h2, h3, h4;
+ uint16_t h1, h2, h3, h4;
if (RA_is_0) b = 0;
else b = *rA;
EA = b + (UIMM << 2);
0.4,6.RS,11.RA,16.RB,21.784:EVX:e500:evlwhex %RS,%RA,%RB:Vector Load Word into Two Half Words Even Indexed
unsigned_word b;
unsigned_word EA;
- unsigned16 h1, h2, h3, h4;
+ uint16_t h1, h2, h3, h4;
if (RA_is_0) b = 0;
else b = *rA;
EA = b + *rB;
0.4,6.RS,11.RA,16.UIMM,21.789:EVX:e500:evlwhou %RS,%RA,%UIMM:Vector Load Word into Two Half Words Odd Unsigned zero-extended
unsigned_word b;
unsigned_word EA;
- unsigned16 h1, h2, h3, h4;
+ uint16_t h1, h2, h3, h4;
if (RA_is_0) b = 0;
else b = *rA;
EA = b + (UIMM << 2);
0.4,6.RS,11.RA,16.RB,21.788:EVX:e500:evlwhoux %RS,%RA,%RB:Vector Load Word into Two Half Words Odd Unsigned Indexed zero-extended
unsigned_word b;
unsigned_word EA;
- unsigned16 h1, h2, h3, h4;
+ uint16_t h1, h2, h3, h4;
if (RA_is_0) b = 0;
else b = *rA;
EA = b + *rB;
0.4,6.RS,11.RA,16.UIMM,21.791:EVX:e500:evlwhos %RS,%RA,%UIMM:Vector Load Word into Half Words Odd Signed with sign extension
unsigned_word b;
unsigned_word EA;
- unsigned16 h1, h2, h3, h4;
+ uint16_t h1, h2, h3, h4;
if (RA_is_0) b = 0;
else b = *rA;
EA = b + (UIMM << 2);
0.4,6.RS,11.RA,16.RB,21.790:EVX:e500:evlwhosx %RS,%RA,%RB:Vector Load Word into Half Words Odd Signed Indexed with sign extension
unsigned_word b;
unsigned_word EA;
- unsigned16 h1, h2, h3, h4;
+ uint16_t h1, h2, h3, h4;
if (RA_is_0) b = 0;
else b = *rA;
EA = b + *rB;
0.4,6.RS,11.RA,16.UIMM,21.793:EVX:e500:evlwwsplat %RS,%RA,%UIMM:Vector Load Word into Word and Splat
unsigned_word b;
unsigned_word EA;
- unsigned32 w1;
+ uint32_t w1;
if (RA_is_0) b = 0;
else b = *rA;
EA = b + (UIMM << 2);
0.4,6.RS,11.RA,16.RB,21.792:EVX:e500:evlwwsplatx %RS,%RA,%RB:Vector Load Word into Word and Splat Indexed
unsigned_word b;
unsigned_word EA;
- unsigned32 w1;
+ uint32_t w1;
if (RA_is_0) b = 0;
else b = *rA;
EA = b + *rB;
0.4,6.RS,11.RA,16.UIMM,21.797:EVX:e500:evlwhsplat %RS,%RA,%UIMM:Vector Load Word into 2 Half Words and Splat
unsigned_word b;
unsigned_word EA;
- unsigned16 h1, h2;
+ uint16_t h1, h2;
if (RA_is_0) b = 0;
else b = *rA;
EA = b + (UIMM << 2);
0.4,6.RS,11.RA,16.RB,21.796:EVX:e500:evlwhsplatx %RS,%RA,%RB:Vector Load Word into 2 Half Words and Splat Indexed
unsigned_word b;
unsigned_word EA;
- unsigned16 h1, h2;
+ uint16_t h1, h2;
if (RA_is_0) b = 0;
else b = *rA;
EA = b + *rB;
0.4,6.RS,11.RA,16.UIMM,21.777:EVX:e500:evlhhesplat %RS,%RA,%UIMM:Vector Load Half Word into Half Words Even and Splat
unsigned_word b;
unsigned_word EA;
- unsigned16 h;
+ uint16_t h;
if (RA_is_0) b = 0;
else b = *rA;
EA = b + (UIMM << 1);
0.4,6.RS,11.RA,16.RB,21.776:EVX:e500:evlhhesplatx %RS,%RA,%RB:Vector Load Half Word into Half Words Even and Splat Indexed
unsigned_word b;
unsigned_word EA;
- unsigned16 h;
+ uint16_t h;
if (RA_is_0) b = 0;
else b = *rA;
EA = b + *rB;
0.4,6.RS,11.RA,16.UIMM,21.781:EVX:e500:evlhhousplat %RS,%RA,%UIMM:Vector Load Half Word into Half Word Odd Unsigned and Splat
unsigned_word b;
unsigned_word EA;
- unsigned16 h;
+ uint16_t h;
if (RA_is_0) b = 0;
else b = *rA;
EA = b + (UIMM << 1);
0.4,6.RS,11.RA,16.RB,21.780:EVX:e500:evlhhousplatx %RS,%RA,%RB:Vector Load Half Word into Half Word Odd Unsigned and Splat Indexed
unsigned_word b;
unsigned_word EA;
- unsigned16 h;
+ uint16_t h;
if (RA_is_0) b = 0;
else b = *rA;
EA = b + *rB;
0.4,6.RS,11.RA,16.UIMM,21.783:EVX:e500:evlhhossplat %RS,%RA,%UIMM:Vector Load Half Word into Half Word Odd Signed and Splat
unsigned_word b;
unsigned_word EA;
- unsigned16 h1, h2;
+ uint16_t h1, h2;
if (RA_is_0) b = 0;
else b = *rA;
EA = b + (UIMM << 1);
0.4,6.RS,11.RA,16.RB,21.782:EVX:e500:evlhhossplatx %RS,%RA,%RB:Vector Load Half Word into Half Word Odd Signed and Splat Indexed
unsigned_word b;
unsigned_word EA;
- unsigned16 h1, h2;
+ uint16_t h1, h2;
if (RA_is_0) b = 0;
else b = *rA;
EA = b + *rB;
0.4,6.RS,11.RA,16.UIMM,21.803:EVX:e500:evstdw %RS,%RA,%UIMM:Vector Store Double of Two Words
unsigned_word b;
unsigned_word EA;
- unsigned32 w1, w2;
+ uint32_t w1, w2;
if (RA_is_0) b = 0;
else b = *rA;
EA = b + (UIMM << 3);
0.4,6.RS,11.RA,16.RB,21.802:EVX:e500:evstdwx %RS,%RA,%RB:Vector Store Double of Two Words Indexed
unsigned_word b;
unsigned_word EA;
- unsigned32 w1, w2;
+ uint32_t w1, w2;
if (RA_is_0) b = 0;
else b = *rA;
EA = b + *rB;
0.4,6.RS,11.RA,16.UIMM,21.805:EVX:e500:evstdh %RS,%RA,%UIMM:Vector Store Double of Four Half Words
unsigned_word b;
unsigned_word EA;
- unsigned16 h1, h2, h3, h4;
+ uint16_t h1, h2, h3, h4;
if (RA_is_0) b = 0;
else b = *rA;
EA = b + (UIMM << 3);
0.4,6.RS,11.RA,16.RB,21.804:EVX:e500:evstdhx %RS,%RA,%RB:Vector Store Double of Four Half Words Indexed
unsigned_word b;
unsigned_word EA;
- unsigned16 h1, h2, h3, h4;
+ uint16_t h1, h2, h3, h4;
if (RA_is_0) b = 0;
else b = *rA;
EA = b + *rB;
0.4,6.RS,11.RA,16.UIMM,21.825:EVX:e500:evstwwe %RS,%RA,%UIMM:Vector Store Word of Word from Even
unsigned_word b;
unsigned_word EA;
- unsigned32 w;
+ uint32_t w;
if (RA_is_0) b = 0;
else b = *rA;
EA = b + (UIMM << 3);
0.4,6.RS,11.RA,16.RB,21.824:EVX:e500:evstwwex %RS,%RA,%RB:Vector Store Word of Word from Even Indexed
unsigned_word b;
unsigned_word EA;
- unsigned32 w;
+ uint32_t w;
if (RA_is_0) b = 0;
else b = *rA;
EA = b + *rB;
0.4,6.RS,11.RA,16.UIMM,21.829:EVX:e500:evstwwo %RS,%RA,%UIMM:Vector Store Word of Word from Odd
unsigned_word b;
unsigned_word EA;
- unsigned32 w;
+ uint32_t w;
if (RA_is_0) b = 0;
else b = *rA;
EA = b + (UIMM << 3);
0.4,6.RS,11.RA,16.RB,21.828:EVX:e500:evstwwox %RS,%RA,%RB:Vector Store Word of Word from Odd Indexed
unsigned_word b;
unsigned_word EA;
- unsigned32 w;
+ uint32_t w;
if (RA_is_0) b = 0;
else b = *rA;
EA = b + *rB;
0.4,6.RS,11.RA,16.UIMM,21.817:EVX:e500:evstwhe %RS,%RA,%UIMM:Vector Store Word of Two Half Words from Even
unsigned_word b;
unsigned_word EA;
- unsigned16 h1, h2;
+ uint16_t h1, h2;
if (RA_is_0) b = 0;
else b = *rA;
EA = b + (UIMM << 3);
0.4,6.RS,11.RA,16.RB,21.816:EVX:e500:evstwhex %RS,%RA,%RB:Vector Store Word of Two Half Words from Even Indexed
unsigned_word b;
unsigned_word EA;
- unsigned16 h1, h2;
+ uint16_t h1, h2;
if (RA_is_0) b = 0;
else b = *rA;
EA = b + *rB;
0.4,6.RS,11.RA,16.UIMM,21.821:EVX:e500:evstwho %RS,%RA,%UIMM:Vector Store Word of Two Half Words from Odd
unsigned_word b;
unsigned_word EA;
- unsigned16 h1, h2;
+ uint16_t h1, h2;
if (RA_is_0) b = 0;
else b = *rA;
EA = b + (UIMM << 3);
0.4,6.RS,11.RA,16.RB,21.820:EVX:e500:evstwhox %RS,%RA,%RB:Vector Store Word of Two Half Words from Odd Indexed
unsigned_word b;
unsigned_word EA;
- unsigned16 h1, h2;
+ uint16_t h1, h2;
if (RA_is_0) b = 0;
else b = *rA;
EA = b + *rB;
:cache::::RA:RA:
:cache:::signed_word *:rA:RA:(cpu_registers(processor)->gpr + RA)
-:cache:::unsigned32:RA_BITMASK:RA:(1 << RA)
+:cache:::uint32_t:RA_BITMASK:RA:(1 << RA)
:compute:::int:RA_is_0:RA:(RA == 0)
:cache::::RT:RT:
:cache:::signed_word *:rT:RT:(cpu_registers(processor)->gpr + RT)
-:cache:::unsigned32:RT_BITMASK:RT:(1 << RT)
+:cache:::uint32_t:RT_BITMASK:RT:(1 << RT)
:cache::::RS:RS:
:cache:::signed_word *:rS:RS:(cpu_registers(processor)->gpr + RS)
-:cache:::unsigned32:RS_BITMASK:RS:(1 << RS)
+:cache:::uint32_t:RS_BITMASK:RS:(1 << RS)
:cache::::RB:RB:
:cache:::signed_word *:rB:RB:(cpu_registers(processor)->gpr + RB)
-:cache:::unsigned32:RB_BITMASK:RB:(1 << RB)
+:cache:::uint32_t:RB_BITMASK:RB:(1 << RB)
:scratch::::FRA:FRA:
-:cache:::unsigned64 *:frA:FRA:(cpu_registers(processor)->fpr + FRA)
-:cache:::unsigned32:FRA_BITMASK:FRA:(1 << FRA)
+:cache:::uint64_t *:frA:FRA:(cpu_registers(processor)->fpr + FRA)
+:cache:::uint32_t:FRA_BITMASK:FRA:(1 << FRA)
:scratch::::FRB:FRB:
-:cache:::unsigned64 *:frB:FRB:(cpu_registers(processor)->fpr + FRB)
-:cache:::unsigned32:FRB_BITMASK:FRB:(1 << FRB)
+:cache:::uint64_t *:frB:FRB:(cpu_registers(processor)->fpr + FRB)
+:cache:::uint32_t:FRB_BITMASK:FRB:(1 << FRB)
:scratch::::FRC:FRC:
-:cache:::unsigned64 *:frC:FRC:(cpu_registers(processor)->fpr + FRC)
-:cache:::unsigned32:FRC_BITMASK:FRC:(1 << FRC)
+:cache:::uint64_t *:frC:FRC:(cpu_registers(processor)->fpr + FRC)
+:cache:::uint32_t:FRC_BITMASK:FRC:(1 << FRC)
:scratch::::FRS:FRS:
-:cache:::unsigned64 *:frS:FRS:(cpu_registers(processor)->fpr + FRS)
-:cache:::unsigned32:FRS_BITMASK:FRS:(1 << FRS)
+:cache:::uint64_t *:frS:FRS:(cpu_registers(processor)->fpr + FRS)
+:cache:::uint32_t:FRS_BITMASK:FRS:(1 << FRS)
:scratch::::FRT:FRT:
-:cache:::unsigned64 *:frT:FRT:(cpu_registers(processor)->fpr + FRT)
-:cache:::unsigned32:FRT_BITMASK:FRT:(1 << FRT)
-:cache:::unsigned_word:EXTS_SI:SI:((signed_word)(signed16)instruction)
+:cache:::uint64_t *:frT:FRT:(cpu_registers(processor)->fpr + FRT)
+:cache:::uint32_t:FRT_BITMASK:FRT:(1 << FRT)
+:cache:::unsigned_word:EXTS_SI:SI:((signed_word)(int16_t)instruction)
:scratch::::BI:BI:
:cache::::BIT32_BI:BI:BIT32(BI)
:cache::::BF:BF:
-:cache:::unsigned32:BF_BITMASK:BF:(1 << BF)
+:cache:::uint32_t:BF_BITMASK:BF:(1 << BF)
:scratch::::BA:BA:
:cache::::BIT32_BA:BA:BIT32(BA)
-:cache:::unsigned32:BA_BITMASK:BA:(1 << BA)
+:cache:::uint32_t:BA_BITMASK:BA:(1 << BA)
:scratch::::BB:BB:
:cache::::BIT32_BB:BB:BIT32(BB)
-:cache:::unsigned32:BB_BITMASK:BB:(1 << BB)
+:cache:::uint32_t:BB_BITMASK:BB:(1 << BB)
:cache::::BT:BT:
-:cache:::unsigned32:BT_BITMASK:BT:(1 << BT)
-:cache:::unsigned_word:EXTS_BD_0b00:BD:(((signed_word)(signed16)instruction) & ~3)
-:cache:::unsigned_word:EXTS_LI_0b00:LI:((((signed_word)(signed32)(instruction << 6)) >> 6) & ~0x3)
-:cache:::unsigned_word:EXTS_D:D:((signed_word)(signed16)(instruction))
-:cache:::unsigned_word:EXTS_DS_0b00:DS:(((signed_word)(signed16)instruction) & ~0x3)
+:cache:::uint32_t:BT_BITMASK:BT:(1 << BT)
+:cache:::unsigned_word:EXTS_BD_0b00:BD:(((signed_word)(int16_t)instruction) & ~3)
+:cache:::unsigned_word:EXTS_LI_0b00:LI:((((signed_word)(int32_t)(instruction << 6)) >> 6) & ~0x3)
+:cache:::unsigned_word:EXTS_D:D:((signed_word)(int16_t)(instruction))
+:cache:::unsigned_word:EXTS_DS_0b00:DS:(((signed_word)(int16_t)instruction) & ~0x3)
#:compute:::int:SPR_is_256:SPR:(SPR == 256)
\f
# PowerPC models
struct _model_time {
ppc_function_unit first_unit; /* first functional unit this insn could use */
ppc_function_unit second_unit; /* second functional unit this insn could use */
- signed16 issue; /* # cycles before function unit can process other insns */
- signed16 done; /* # cycles before insn is done */
- unsigned32 flags; /* any flags that are needed */
+ int16_t issue; /* # cycles before function unit can process other insns */
+ int16_t done; /* # cycles before insn is done */
+ uint32_t flags; /* any flags that are needed */
};
/* Register mappings in status masks */
struct _model_busy {
model_busy *next; /* next function unit */
ppc_function_unit unit; /* function unit name */
- unsigned32 int_busy; /* int registers that are busy */
- unsigned32 fp_busy; /* floating point registers that are busy */
- unsigned32 cr_fpscr_busy; /* CR/FPSCR registers that are busy */
- signed16 spr_busy; /* SPR register that is busy or PPC_NO_SPR */
- unsigned32 vr_busy; /* AltiVec registers that are busy */
- signed16 vscr_busy; /* AltiVec status register busy */
- signed16 issue; /* # of cycles until unit can accept another insn */
- signed16 done; /* # of cycles until insn is done */
- signed16 nr_writebacks; /* # of registers this unit writes back */
+ uint32_t int_busy; /* int registers that are busy */
+ uint32_t fp_busy; /* floating point registers that are busy */
+ uint32_t cr_fpscr_busy; /* CR/FPSCR registers that are busy */
+ int16_t spr_busy; /* SPR register that is busy or PPC_NO_SPR */
+ uint32_t vr_busy; /* AltiVec registers that are busy */
+ int16_t vscr_busy; /* AltiVec status register busy */
+ int16_t issue; /* # of cycles until unit can accept another insn */
+ int16_t done; /* # of cycles until insn is done */
+ int16_t nr_writebacks; /* # of registers this unit writes back */
};
/* Structure to hold the current state information for the simulated CPU model */
count_type nr_stalls_writeback; /* # of stalls waiting for a writeback slot */
count_type nr_units[nr_ppc_function_units]; /* function unit counts */
int max_nr_writebacks; /* max # of writeback slots available */
- unsigned32 int_busy; /* int registers that are busy */
- unsigned32 fp_busy; /* floating point registers that are busy */
- unsigned32 cr_fpscr_busy; /* CR/FPSCR registers that are busy */
- unsigned8 spr_busy[nr_of_sprs]; /* SPR registers that are busy */
- unsigned32 vr_busy; /* AltiVec registers that are busy */
- unsigned8 vscr_busy; /* AltiVec SC register busy */
- unsigned8 busy[nr_ppc_function_units]; /* whether a function is busy or not */
+ uint32_t int_busy; /* int registers that are busy */
+ uint32_t fp_busy; /* floating point registers that are busy */
+ uint32_t cr_fpscr_busy; /* CR/FPSCR registers that are busy */
+ uint8_t spr_busy[nr_of_sprs]; /* SPR registers that are busy */
+ uint32_t vr_busy; /* AltiVec registers that are busy */
+ uint8_t vscr_busy; /* AltiVec SC register busy */
+ uint8_t busy[nr_ppc_function_units]; /* whether a function is busy or not */
};
static const char *const ppc_function_unit_name[ (int)nr_ppc_function_units ] = {
TRACE(trace_model, ("VSCR Register %s is now available.\n", spr_name(busy->spr_busy)));
# Trace making registers busy
-void::model-static::model_trace_make_busy:model_data *model_ptr, unsigned32 int_mask, unsigned32 fp_mask, unsigned32 cr_mask
+void::model-static::model_trace_make_busy:model_data *model_ptr, uint32_t int_mask, uint32_t fp_mask, uint32_t cr_mask
int i;
if (int_mask) {
for(i = 0; i < 32; i++) {
}
# Trace waiting for registers to become available
-void::model-static::model_trace_busy_p:model_data *model_ptr, unsigned32 int_busy, unsigned32 fp_busy, unsigned32 cr_or_fpscr_busy, int spr_busy
+void::model-static::model_trace_busy_p:model_data *model_ptr, uint32_t int_busy, uint32_t fp_busy, uint32_t cr_or_fpscr_busy, int spr_busy
int i;
if (int_busy) {
int_busy &= model_ptr->int_busy;
# Wait for a CR to become unbusy
void::model-function::model_wait_for_cr:model_data *model_ptr, unsigned CRBIT
unsigned u;
- unsigned32 cr_mask;
+ uint32_t cr_mask;
int cr_var = 0;
for (u = 0xc0000000; (u != 0) && (CRBIT & u) == 0; u >>= 4 )
cr_var++;
}
# Schedule an instruction that takes integer input registers and produces output registers
-void::model-function::ppc_insn_int:itable_index index, model_data *model_ptr, const unsigned32 out_mask, const unsigned32 in_mask
- const unsigned32 int_mask = out_mask | in_mask;
+void::model-function::ppc_insn_int:itable_index index, model_data *model_ptr, const uint32_t out_mask, const uint32_t in_mask
+ const uint32_t int_mask = out_mask | in_mask;
model_busy *busy_ptr;
if ((model_ptr->int_busy & int_mask) != 0) {
model_trace_make_busy(model_ptr, out_mask, 0, 0);
# Schedule an instruction that takes integer input registers and produces output registers & sets a CR register
-void::model-function::ppc_insn_int_cr:itable_index index, model_data *model_ptr, const unsigned32 out_mask, const unsigned32 in_mask, const unsigned32 cr_mask
- const unsigned32 int_mask = out_mask | in_mask;
+void::model-function::ppc_insn_int_cr:itable_index index, model_data *model_ptr, const uint32_t out_mask, const uint32_t in_mask, const uint32_t cr_mask
+ const uint32_t int_mask = out_mask | in_mask;
model_busy *busy_ptr;
if ((model_ptr->int_busy & int_mask) || (model_ptr->cr_fpscr_busy & cr_mask)) {
# Schedule an instruction that takes CR input registers and produces output CR registers
-void::model-function::ppc_insn_cr:itable_index index, model_data *model_ptr, const unsigned32 out_mask, const unsigned32 in_mask
- const unsigned32 cr_mask = out_mask | in_mask;
+void::model-function::ppc_insn_cr:itable_index index, model_data *model_ptr, const uint32_t out_mask, const uint32_t in_mask
+ const uint32_t cr_mask = out_mask | in_mask;
model_busy *busy_ptr;
if ((model_ptr->cr_fpscr_busy & cr_mask) != 0) {
# Schedule an instruction that takes floating point input registers and produces an output fp register
-void::model-function::ppc_insn_float:itable_index index, model_data *model_ptr, const unsigned32 out_mask, const unsigned32 in_mask
- const unsigned32 fp_mask = out_mask | in_mask;
+void::model-function::ppc_insn_float:itable_index index, model_data *model_ptr, const uint32_t out_mask, const uint32_t in_mask
+ const uint32_t fp_mask = out_mask | in_mask;
model_busy *busy_ptr;
if ((model_ptr->fp_busy & fp_mask) != 0) {
# Schedule an instruction that takes floating point input registers and produces an output fp register & sets a CR reg
-void::model-function::ppc_insn_float_cr:itable_index index, model_data *model_ptr, const unsigned32 out_mask, const unsigned32 in_mask, const unsigned32 cr_mask
- const unsigned32 fp_mask = out_mask | in_mask;
+void::model-function::ppc_insn_float_cr:itable_index index, model_data *model_ptr, const uint32_t out_mask, const uint32_t in_mask, const uint32_t cr_mask
+ const uint32_t fp_mask = out_mask | in_mask;
model_busy *busy_ptr;
if ((model_ptr->fp_busy & fp_mask) || (model_ptr->cr_fpscr_busy & cr_mask)) {
# Schedule an instruction that takes both int/float input registers and produces output int/float registers
-void::model-function::ppc_insn_int_float:itable_index index, model_data *model_ptr, const unsigned32 out_int_mask, const unsigned32 out_fp_mask, const unsigned32 in_int_mask, const unsigned32 in_fp_mask
- const unsigned32 int_mask = out_int_mask | in_int_mask;
- const unsigned32 fp_mask = out_fp_mask | in_fp_mask;
+void::model-function::ppc_insn_int_float:itable_index index, model_data *model_ptr, const uint32_t out_int_mask, const uint32_t out_fp_mask, const uint32_t in_int_mask, const uint32_t in_fp_mask
+ const uint32_t int_mask = out_int_mask | in_int_mask;
+ const uint32_t fp_mask = out_fp_mask | in_fp_mask;
model_busy *busy_ptr;
if ((model_ptr->int_busy & int_mask) || (model_ptr->fp_busy & fp_mask)) {
}
# Schedule an MFSPR instruction that takes 1 special purpose register and produces an integer output register
-void::model-function::ppc_insn_from_spr:itable_index index, model_data *model_ptr, const unsigned32 int_mask, const unsigned nSPR
+void::model-function::ppc_insn_from_spr:itable_index index, model_data *model_ptr, const uint32_t int_mask, const unsigned nSPR
model_busy *busy_ptr;
while ((model_ptr->int_busy & int_mask) != 0 || model_ptr->spr_busy[nSPR] != 0) {
model_trace_make_busy(model_ptr, int_mask, 0, 0);
# Schedule an MTSPR instruction that takes 1 integer register and produces a special purpose output register
-void::model-function::ppc_insn_to_spr:itable_index index, model_data *model_ptr, const unsigned32 int_mask, const unsigned nSPR
+void::model-function::ppc_insn_to_spr:itable_index index, model_data *model_ptr, const uint32_t int_mask, const unsigned nSPR
model_busy *busy_ptr;
while ((model_ptr->int_busy & int_mask) != 0 || model_ptr->spr_busy[nSPR] != 0) {
TRACE(trace_model,("Making register %s busy.\n", spr_name(nSPR)));
# Schedule a MFCR instruction that moves the CR into an integer register
-void::model-function::ppc_insn_mfcr:itable_index index, model_data *model_ptr, unsigned32 int_mask
- const unsigned32 cr_mask = 0xff;
+void::model-function::ppc_insn_mfcr:itable_index index, model_data *model_ptr, uint32_t int_mask
+ const uint32_t cr_mask = 0xff;
model_busy *busy_ptr;
while (((model_ptr->int_busy & int_mask) | (model_ptr->cr_fpscr_busy & cr_mask)) != 0) {
model_trace_make_busy(model_ptr, int_mask, 0, 0);
# Schedule a MTCR instruction that moves an integer register into the CR
-void::model-function::ppc_insn_mtcr:itable_index index, model_data *model_ptr, unsigned32 int_mask, unsigned FXM
+void::model-function::ppc_insn_mtcr:itable_index index, model_data *model_ptr, uint32_t int_mask, unsigned FXM
int f;
int nr_crs = 0;
- unsigned32 cr_mask = 0;
+ uint32_t cr_mask = 0;
const model_time *normal_time = &model_ptr->timing[index];
static const model_time ppc604_1bit_time = { PPC_UNIT_SCIU1, PPC_UNIT_SCIU2, 1, 1, 0 };
model_busy *busy_ptr;
#
# Convert 32bit single to 64bit double
-unsigned64::function::DOUBLE:unsigned32 WORD
- unsigned64 FRT;
+uint64_t::function::DOUBLE:uint32_t WORD
+ uint64_t FRT;
if (EXTRACTED32(WORD, 1, 8) > 0
&& EXTRACTED32(WORD, 1, 8) < 255) {
/* normalized operand */
/* denormalized operand */
int sign = EXTRACTED32(WORD, 0, 0);
int exp = -126;
- unsigned64 frac = INSERTED64(EXTRACTED32(WORD, 9, 31), 1, (52 - 29));
+ uint64_t frac = INSERTED64(EXTRACTED32(WORD, 9, 31), 1, (52 - 29));
/* normalize the operand */
while (MASKED64(frac, 0, 0) == 0) {
frac <<= 1;
return FRT;
# Convert 64bit single to 32bit double
-unsigned32::function::SINGLE:unsigned64 FRS
- unsigned32 WORD;
+uint32_t::function::SINGLE:uint64_t FRS
+ uint32_t WORD;
if (EXTRACTED64(FRS, 1, 11) > 896
|| EXTRACTED64(FRS, 1, 63) == 0) {
/* no denormalization required (includes Zero/Infinity/NaN) */
/* denormalization required */
int sign = EXTRACTED64(FRS, 0, 0);
int exp = EXTRACTED64(FRS, 1, 11) - 1023;
- unsigned64 frac = (BIT64(0)
+ uint64_t frac = (BIT64(0)
| INSERTED64(EXTRACTED64(FRS, 12, 63), 1, 52));
/* denormalize the operand */
while (exp < -126) {
# round 64bit double to 64bit but single
-void::function::Round_Single:cpu *processor, int sign, int *exp, unsigned64 *frac_grx
+void::function::Round_Single:cpu *processor, int sign, int *exp, uint64_t *frac_grx
/* comparisons ignore u bits */
- unsigned64 out;
+ uint64_t out;
int inc = 0;
int lsb = EXTRACTED64(*frac_grx, 23, 23);
int gbit = EXTRACTED64(*frac_grx, 24, 24);
#
-void::function::Round_Integer:cpu *processor, int sign, unsigned64 *frac, int *frac64, int gbit, int rbit, int xbit, fpscreg round_mode
+void::function::Round_Integer:cpu *processor, int sign, uint64_t *frac, int *frac64, int gbit, int rbit, int xbit, fpscreg round_mode
int inc = 0;
if (round_mode == fpscr_rn_round_to_nearest) {
if (*frac64 == 1 && gbit == 1) inc = 1;
FPSCR_SET_FI(gbit | rbit | xbit);
-void::function::Round_Float:cpu *processor, int sign, int *exp, unsigned64 *frac, fpscreg round_mode
+void::function::Round_Float:cpu *processor, int sign, int *exp, uint64_t *frac, fpscreg round_mode
int carry_out;
int inc = 0;
int lsb = EXTRACTED64(*frac, 52, 52);
# conversion of FP to integer
-void::function::convert_to_integer:cpu *processor, unsigned_word cia, unsigned64 *frt, unsigned64 frb, fpscreg round_mode, int tgt_precision
+void::function::convert_to_integer:cpu *processor, unsigned_word cia, uint64_t *frt, uint64_t frb, fpscreg round_mode, int tgt_precision
int i;
int exp = 0;
- unsigned64 frac = 0;
+ uint64_t frac = 0;
int frac64 = 0;
int gbit = 0;
int rbit = 0;
frac64 = (frac64 + 1) & 0x1;
}
if (tgt_precision == 32 /* can ignore frac64 in compare */
- && (signed64)frac > (signed64)MASK64(33+1, 63)/*2^31-1 >>1*/)
+ && (int64_t)frac > (int64_t)MASK64(33+1, 63)/*2^31-1 >>1*/)
GOTO(Large_Operand);
if (tgt_precision == 64 /* can ignore frac64 in compare */
- && (signed64)frac > (signed64)MASK64(1+1, 63)/*2^63-1 >>1*/)
+ && (int64_t)frac > (int64_t)MASK64(1+1, 63)/*2^63-1 >>1*/)
GOTO(Large_Operand);
if (tgt_precision == 32 /* can ignore frac64 in compare */
- && (signed64)frac < (signed64)MASK64(0, 32+1)/*-2^31 >>1*/)
+ && (int64_t)frac < (int64_t)MASK64(0, 32+1)/*-2^31 >>1*/)
GOTO(Large_Operand);
if (tgt_precision == 64 /* can ignore frac64 in compare */
- && (signed64)frac < (signed64)MASK64(0, 0+1)/*-2^63 >>1*/)
+ && (int64_t)frac < (int64_t)MASK64(0, 0+1)/*-2^63 >>1*/)
GOTO(Large_Operand);
FPSCR_SET_XX(FPSCR & fpscr_fi);
if (tgt_precision == 32)
# extract out raw fields of a FP number
-int::function::sign:unsigned64 FRS
+int::function::sign:uint64_t FRS
return (MASKED64(FRS, 0, 0)
? -1
: 1);
-int::function::biased_exp:unsigned64 frs, int single
+int::function::biased_exp:uint64_t frs, int single
if (single)
return EXTRACTED64(frs, 1, 8);
else
return EXTRACTED64(frs, 1, 11);
-unsigned64::function::fraction:unsigned64 frs, int single
+uint64_t::function::fraction:uint64_t frs, int single
if (single)
return EXTRACTED64(frs, 9, 31);
else
# a number?, each of the below return +1 or -1 (based on sign bit)
# if true.
-int::function::is_nor:unsigned64 frs, int single
+int::function::is_nor:uint64_t frs, int single
int exp = biased_exp(frs, single);
return (exp >= 1
&& exp <= (single ? 254 : 2046));
-int::function::is_zero:unsigned64 FRS
+int::function::is_zero:uint64_t FRS
return (MASKED64(FRS, 1, 63) == 0
? sign(FRS)
: 0);
-int::function::is_den:unsigned64 frs, int single
+int::function::is_den:uint64_t frs, int single
int exp = biased_exp(frs, single);
- unsigned64 frac = fraction(frs, single);
+ uint64_t frac = fraction(frs, single);
return (exp == 0 && frac != 0
? sign(frs)
: 0);
-int::function::is_inf:unsigned64 frs, int single
+int::function::is_inf:uint64_t frs, int single
int exp = biased_exp(frs, single);
- unsigned64 frac = fraction(frs, single);
+ uint64_t frac = fraction(frs, single);
return (exp == (single ? 255 : 2047) && frac == 0
? sign(frs)
: 0);
-int::function::is_NaN:unsigned64 frs, int single
+int::function::is_NaN:uint64_t frs, int single
int exp = biased_exp(frs, single);
- unsigned64 frac = fraction(frs, single);
+ uint64_t frac = fraction(frs, single);
return (exp == (single ? 255 : 2047) && frac != 0
? sign(frs)
: 0);
-int::function::is_SNaN:unsigned64 frs, int single
+int::function::is_SNaN:uint64_t frs, int single
return (is_NaN(frs, single)
&& !(frs & (single ? MASK64(9, 9) : MASK64(12, 12)))
? sign(frs)
: 0);
-int::function::is_QNaN:unsigned64 frs, int single
+int::function::is_QNaN:uint64_t frs, int single
return (is_NaN(frs, single) && !is_SNaN(frs, single));
-int::function::is_less_than:unsigned64 *fra, unsigned64 *frb
+int::function::is_less_than:uint64_t *fra, uint64_t *frb
return *(double*)fra < *(double*)frb;
-int::function::is_greater_than:unsigned64 *fra, unsigned64 *frb
+int::function::is_greater_than:uint64_t *fra, uint64_t *frb
return *(double*)fra > *(double*)frb;
-int::function::is_equan_to:unsigned64 *fra, unsigned64 *frb
+int::function::is_equan_to:uint64_t *fra, uint64_t *frb
return *(double*)fra == *(double*)frb;
# which quiet nan should become the result
-unsigned64::function::select_qnan:unsigned64 fra, unsigned64 frb, unsigned64 frc, int instruction_is_frsp, int generate_qnan, int single
- unsigned64 frt = 0;
+uint64_t::function::select_qnan:uint64_t fra, uint64_t frb, uint64_t frc, int instruction_is_frsp, int generate_qnan, int single
+ uint64_t frt = 0;
if (is_NaN(fra, single))
frt = fra;
else if (is_NaN(frb, single))
# detect invalid operation
-int::function::is_invalid_operation:cpu *processor, unsigned_word cia, unsigned64 fra, unsigned64 frb, fpscreg check, int single, int negate
+int::function::is_invalid_operation:cpu *processor, unsigned_word cia, uint64_t fra, uint64_t frb, fpscreg check, int single, int negate
int fail = 0;
if ((check & fpscr_vxsnan)
&& (is_SNaN(fra, single) || is_SNaN(frb, single))) {
# handle case of invalid operation
-void::function::invalid_arithemetic_operation:cpu *processor, unsigned_word cia, unsigned64 *frt, unsigned64 fra, unsigned64 frb, unsigned64 frc, int instruction_is_frsp, int instruction_is_convert_to_64bit, int instruction_is_convert_to_32bit, int single
+void::function::invalid_arithemetic_operation:cpu *processor, unsigned_word cia, uint64_t *frt, uint64_t fra, uint64_t frb, uint64_t frc, int instruction_is_frsp, int instruction_is_convert_to_64bit, int instruction_is_convert_to_32bit, int single
if (FPSCR & fpscr_ve) {
/* invalid operation exception enabled */
/* FRT unchaged */
# detect divide by zero
-int::function::is_invalid_zero_divide:cpu *processor, unsigned_word cia, unsigned64 fra, unsigned64 frb, int single
+int::function::is_invalid_zero_divide:cpu *processor, unsigned_word cia, uint64_t fra, uint64_t frb, int single
int fail = 0;
if (is_zero (frb)) {
FPSCR_SET_ZX (1);
# handle case of invalid operation
-void::function::invalid_zero_divide_operation:cpu *processor, unsigned_word cia, unsigned64 *frt, unsigned64 fra, unsigned64 frb, int single
+void::function::invalid_zero_divide_operation:cpu *processor, unsigned_word cia, uint64_t *frt, uint64_t fra, uint64_t frb, int single
if (FPSCR & fpscr_ze) {
/* zero-divide exception enabled */
/* FRT unchaged */
*603: PPC_UNIT_IU, PPC_UNIT_IU, 5, 5, 0
*603e:PPC_UNIT_IU, PPC_UNIT_IU, 5, 5, 0
*604: PPC_UNIT_MCIU, PPC_UNIT_MCIU, 4, 4, 0
- signed64 a = (signed32)(*rA);
- signed64 b = (signed32)(*rB);
- signed64 prod = a * b;
+ int64_t a = (int32_t)(*rA);
+ int64_t b = (int32_t)(*rB);
+ int64_t prod = a * b;
signed_word t = prod;
*rT = *rA * *rB;
if (t != prod && OE)
*603: PPC_UNIT_IU, PPC_UNIT_IU, 5, 5, 0
*603e:PPC_UNIT_IU, PPC_UNIT_IU, 5, 5, 0
*604: PPC_UNIT_MCIU, PPC_UNIT_MCIU, 4, 4, 0
- signed64 a = (signed32)(*rA);
- signed64 b = (signed32)(*rB);
- signed64 prod = a * b;
+ int64_t a = (int32_t)(*rA);
+ int64_t b = (int32_t)(*rB);
+ int64_t prod = a * b;
signed_word t = EXTRACTED64(prod, 0, 31);
*rT = t;
CR0_COMPARE(t, 0, Rc);
*603: PPC_UNIT_IU, PPC_UNIT_IU, 6, 6, 0
*603e:PPC_UNIT_IU, PPC_UNIT_IU, 6, 6, 0
*604: PPC_UNIT_MCIU, PPC_UNIT_MCIU, 4, 4, 0
- unsigned64 a = (unsigned32)(*rA);
- unsigned64 b = (unsigned32)(*rB);
- unsigned64 prod = a * b;
+ uint64_t a = (uint32_t)(*rA);
+ uint64_t b = (uint32_t)(*rB);
+ uint64_t prod = a * b;
signed_word t = EXTRACTED64(prod, 0, 31);
*rT = t;
CR0_COMPARE(t, 0, Rc);
*603: PPC_UNIT_IU, PPC_UNIT_IU, 37, 37, 0
*603e:PPC_UNIT_IU, PPC_UNIT_IU, 37, 37, 0
*604: PPC_UNIT_MCIU, PPC_UNIT_MCIU, 20, 20, 0
- signed64 dividend = (signed32)(*rA);
- signed64 divisor = (signed32)(*rB);
+ int64_t dividend = (int32_t)(*rA);
+ int64_t divisor = (int32_t)(*rB);
if (divisor == 0 /* nb 0x8000..0 is sign extended */
|| (dividend == 0x80000000 && divisor == -1)) {
if (OE)
CR0_COMPARE(0, 0, Rc);
}
else {
- signed64 quotent = dividend / divisor;
+ int64_t quotent = dividend / divisor;
*rT = quotent;
CR0_COMPARE((signed_word)quotent, 0, Rc);
}
*603: PPC_UNIT_IU, PPC_UNIT_IU, 37, 37, 0
*603e:PPC_UNIT_IU, PPC_UNIT_IU, 37, 37, 0
*604: PPC_UNIT_MCIU, PPC_UNIT_MCIU, 20, 20, 0
- unsigned64 dividend = (unsigned32)(*rA);
- unsigned64 divisor = (unsigned32)(*rB);
+ uint64_t dividend = (uint32_t)(*rA);
+ uint64_t divisor = (uint32_t)(*rB);
if (divisor == 0) {
if (OE)
XER |= (xer_overflow | xer_summary_overflow);
CR0_COMPARE(0, 0, Rc);
}
else {
- unsigned64 quotent = dividend / divisor;
+ uint64_t quotent = dividend / divisor;
*rT = quotent;
CR0_COMPARE((signed_word)quotent, 0, Rc);
}
*603: PPC_UNIT_IU, PPC_UNIT_IU, 1, 1, 0
*603e:PPC_UNIT_IU, PPC_UNIT_IU, 1, 1, 0
*604: PPC_UNIT_SCIU1, PPC_UNIT_SCIU2, 1, 1, 0
- *rA = (signed_word)(signed8)*rS;
+ *rA = (signed_word)(int8_t)*rS;
CR0_COMPARE(*rA, 0, Rc);
ITRACE(trace_alu, (" Result = %ld (0x%lx)\n", (long)*rA, (long)*rA));
PPC_INSN_INT(RA_BITMASK, RS_BITMASK, Rc);
*603: PPC_UNIT_IU, PPC_UNIT_IU, 1, 1, 0
*603e:PPC_UNIT_IU, PPC_UNIT_IU, 1, 1, 0
*604: PPC_UNIT_SCIU1, PPC_UNIT_SCIU2, 1, 1, 0
- *rA = (signed_word)(signed16)*rS;
+ *rA = (signed_word)(int16_t)*rS;
CR0_COMPARE(*rA, 0, Rc);
ITRACE(trace_alu, (" Result = %ld (0x%lx)\n", (long)*rA, (long)*rA));
PPC_INSN_INT(RA_BITMASK, RS_BITMASK, Rc);
*603: PPC_UNIT_IU, PPC_UNIT_IU, 1, 1, 0
*603e:PPC_UNIT_IU, PPC_UNIT_IU, 1, 1, 0
*604: PPC_UNIT_SCIU1, PPC_UNIT_SCIU2, 1, 1, 0
-# *rA = (signed_word)(signed32)*rS;
+# *rA = (signed_word)(int32_t)*rS;
# CR0_COMPARE(*rA, 0, Rc);
0.31,6.RS,11.RA,16./,21.58,31.Rc:X:64::Count Leading Zeros Doubleword
# int count = 0;
-# unsigned64 mask = BIT64(0);
-# unsigned64 source = *rS;
+# uint64_t mask = BIT64(0);
+# uint64_t source = *rS;
# while (!(source & mask) && mask != 0) {
# mask >>= 1;
# count++;
*603e:PPC_UNIT_IU, PPC_UNIT_IU, 1, 1, 0
*604: PPC_UNIT_SCIU1, PPC_UNIT_SCIU2, 1, 1, 0
int count = 0;
- unsigned32 mask = BIT32(0);
- unsigned32 source = *rS;
+ uint32_t mask = BIT32(0);
+ uint32_t source = *rS;
while (!(source & mask) && mask != 0) {
mask >>= 1;
count++;
*603e:PPC_UNIT_IU, PPC_UNIT_IU, 1, 1, 0
*604: PPC_UNIT_SCIU1, PPC_UNIT_SCIU2, 1, 1, 0
long n = SH;
- unsigned32 s = *rS;
- unsigned32 r = ROTL32(s, n);
- unsigned32 m = MASK(MB+32, ME+32);
+ uint32_t s = *rS;
+ uint32_t r = ROTL32(s, n);
+ uint32_t m = MASK(MB+32, ME+32);
signed_word result = r & m;
*rA = result;
CR0_COMPARE(result, 0, Rc);
0.23,6.RS,11.RA,16.RB,21.MB,26.ME,31.Rc:M:::Rotate Left Word then AND with Mask
long n = MASKED(*rB, 59, 63);
- unsigned32 r = ROTL32(*rS, n);
- unsigned32 m = MASK(MB+32, ME+32);
+ uint32_t r = ROTL32(*rS, n);
+ uint32_t m = MASK(MB+32, ME+32);
signed_word result = r & m;
*rA = result;
CR0_COMPARE(result, 0, Rc);
*603e:PPC_UNIT_IU, PPC_UNIT_IU, 1, 1, 0
*604: PPC_UNIT_SCIU1, PPC_UNIT_SCIU2, 1, 1, 0
long n = SH;
- unsigned32 r = ROTL32(*rS, n);
- unsigned32 m = MASK(MB+32, ME+32);
+ uint32_t r = ROTL32(*rS, n);
+ uint32_t m = MASK(MB+32, ME+32);
signed_word result = (r & m) | (*rA & ~m);
*rA = result;
ITRACE(trace_alu, (": n=%ld *rS=0x%lx r=0x%lx m=0x%lx result=0x%lx\n",
*603e:PPC_UNIT_IU, PPC_UNIT_IU, 1, 1, 0
*604: PPC_UNIT_SCIU1, PPC_UNIT_SCIU2, 1, 1, 0
int n = MASKED(*rB, 58, 63);
- unsigned32 source = *rS;
+ uint32_t source = *rS;
signed_word shifted;
if (n < 32)
shifted = (source << n);
*603e:PPC_UNIT_IU, PPC_UNIT_IU, 1, 1, 0
*604: PPC_UNIT_SCIU1, PPC_UNIT_SCIU2, 1, 1, 0
int n = MASKED(*rB, 58, 63);
- unsigned32 source = *rS;
+ uint32_t source = *rS;
signed_word shifted;
if (n < 32)
shifted = (source >> n);
*603: PPC_UNIT_IU, PPC_UNIT_IU, 1, 1, 0
*603e:PPC_UNIT_IU, PPC_UNIT_IU, 1, 1, 0
*604: PPC_UNIT_SCIU1, PPC_UNIT_SCIU2, 1, 1, 0
- unsigned64 mask;
+ uint64_t mask;
int n = MASKED(*rB, 59, 63);
- signed32 source = (signed32)*rS; /* signed to keep sign bit */
+ int32_t source = (int32_t)*rS; /* signed to keep sign bit */
int S = (MASKED(*rS,32,32) != 0);
- signed64 r = ((unsigned64) source);
- r = ((unsigned64) source) << 32 | (unsigned32) source;
+ int64_t r = ((uint64_t) source);
+ r = ((uint64_t) source) << 32 | (uint32_t) source;
r = ROTL64(r,64-n);
if (MASKED(*rB,58,58) == 0)
- mask = (unsigned64) MASK64(n+32,63);
+ mask = (uint64_t) MASK64(n+32,63);
else
- mask = (unsigned64) 0;
- *rA = (signed_word) ((r & mask) | (((signed64) -1*S) & ~mask)); /* if 64bit will sign extend */
+ mask = (uint64_t) 0;
+ *rA = (signed_word) ((r & mask) | (((int64_t) -1*S) & ~mask)); /* if 64bit will sign extend */
if (S && (MASKED(r & ~mask,32,63)!=0))
XER |= xer_carry;
else
*603: PPC_UNIT_SRU, PPC_UNIT_SRU, 1, 1, 0
*603e:PPC_UNIT_SRU, PPC_UNIT_SRU, 1, 1, 0
*604: PPC_UNIT_MCIU, PPC_UNIT_MCIU, 3, 3, 0
- *rT = (unsigned32)CR;
+ *rT = (uint32_t)CR;
PPC_INSN_MFCR(RT_BITMASK);
#
fpscr_vxsnan | fpscr_vximz,
0, /*single?*/
0) /*negate?*/) {
- union { double d; unsigned64 u; } tmp;
+ union { double d; uint64_t u; } tmp;
invalid_arithemetic_operation(processor, cia,
&tmp.u, *frA, 0, *frC,
0, /*instruction_is_frsp*/
fpscr_vxsnan | fpscr_vximz,
1, /*single?*/
0) /*negate?*/) {
- union { double d; unsigned64 u; } tmp;
+ union { double d; uint64_t u; } tmp;
invalid_arithemetic_operation(processor, cia,
&tmp.u, *frA, 0, *frC,
0, /*instruction_is_frsp*/
fpscr_vxsnan | fpscr_vximz,
0, /*single?*/
0) /*negate?*/) {
- union { double d; unsigned64 u; } tmp;
+ union { double d; uint64_t u; } tmp;
invalid_arithemetic_operation(processor, cia,
&tmp.u, *frA, 0, *frC,
0, /*instruction_is_frsp*/
fpscr_vxsnan | fpscr_vximz,
1, /*single?*/
0) /*negate?*/) {
- union { double d; unsigned64 u; } tmp;
+ union { double d; uint64_t u; } tmp;
invalid_arithemetic_operation(processor, cia,
&tmp.u, *frA, 0, *frC,
0, /*instruction_is_frsp*/
fpscr_vxsnan | fpscr_vximz,
0, /*single?*/
0) /*negate?*/) {
- union { double d; unsigned64 u; } tmp;
+ union { double d; uint64_t u; } tmp;
invalid_arithemetic_operation(processor, cia,
&tmp.u, *frA, 0, *frC,
0, /*instruction_is_frsp*/
fpscr_vxsnan | fpscr_vximz,
1, /*single?*/
0) /*negate?*/) {
- union { double d; unsigned64 u; } tmp;
+ union { double d; uint64_t u; } tmp;
invalid_arithemetic_operation(processor, cia,
&tmp.u, *frA, 0, *frC,
0, /*instruction_is_frsp*/
fpscr_vxsnan | fpscr_vximz,
0, /*single?*/
0) /*negate?*/) {
- union { double d; unsigned64 u; } tmp;
+ union { double d; uint64_t u; } tmp;
invalid_arithemetic_operation(processor, cia,
&tmp.u, *frA, 0, *frC,
0, /*instruction_is_frsp*/
fpscr_vxsnan | fpscr_vximz,
1, /*single?*/
0) /*negate?*/) {
- union { double d; unsigned64 u; } tmp;
+ union { double d; uint64_t u; } tmp;
invalid_arithemetic_operation(processor, cia,
&tmp.u, *frA, 0, *frC,
0, /*instruction_is_frsp*/
*604: PPC_UNIT_FPU, PPC_UNIT_FPU, 1, 3, 0
int sign;
int exp;
- unsigned64 frac_grx;
+ uint64_t frac_grx;
/***/
/* split off cases for what to do */
if (EXTRACTED64(*frB, 1, 11) < 897
0.63,6.FRT,11./,16.FRB,21.846,31.Rc:X:64,f::Floating Convert from Integer Doubleword
int sign = EXTRACTED64(*frB, 0, 0);
int exp = 63;
- unsigned64 frac = *frB;
+ uint64_t frac = *frB;
/***/
if (frac == 0) GOTO(Zero_Operand);
if (sign == 1) frac = ~frac + 1;
0.63,6.BT,11./,16./,21.70,31.Rc:X:f::Move To FPSCR Bit 0
FPSCR_BEGIN;
- unsigned32 bit = BIT32(BT);
+ uint32_t bit = BIT32(BT);
FPSCR &= ~bit;
FPSCR_END(Rc);
0.63,6.BT,11./,16./,21.38,31.Rc:X:f::Move To FPSCR Bit 1
FPSCR_BEGIN;
- unsigned32 bit = BIT32(BT);
+ uint32_t bit = BIT32(BT);
if (bit & fpscr_fi)
bit |= fpscr_xx;
if ((bit & fpscr_vx_bits))
if (CURRENT_MODEL == MODEL_ppc601) {
program_interrupt(processor, cia, optional_instruction_program_interrupt);
} else {
- unsigned64 zero = 0;
+ uint64_t zero = 0;
FPSCR_BEGIN;
if (is_NaN(*frA, 0) || is_less_than (frA, &zero)) *frT = *frB;
else *frT = *frC;