From: Andrew Cagney Date: Thu, 15 May 1997 16:39:38 +0000 (+0000) Subject: Remove some of the flake from the c80 floating point. X-Git-Url: https://git.libre-soc.org/?a=commitdiff_plain;h=07b4c0a66c431fdd3fae16977ce862b657569ffb;p=binutils-gdb.git Remove some of the flake from the c80 floating point. --- diff --git a/sim/common/ChangeLog b/sim/common/ChangeLog index 4f6c9a0cf8d..963db711d9f 100644 --- a/sim/common/ChangeLog +++ b/sim/common/ChangeLog @@ -1,7 +1,14 @@ Thu May 15 10:58:52 1997 Andrew Cagney - * sim-fpu.h, sim-fpu.c (sim_fpu_[iu]{32,64}to): New integer + * sim-fpu.c (pack_fpu, unpack_fpu): New functions - decode a + float. + + * sim-inline.c (SIM_INLINE_C): Rename from _SIM_INLINE_C_. + * sim-lnline.h: Update. + + * sim-fpu.h, sim-fpu.c (sim_fpu_[iu]{32,64}to): New int2fp conversion functions. + (sim_fpu_to{32,64}[iu]): New fp2int functions. * sim-fpu.h, sim-fpu.c (sim_fpu_is_{lt,le,eq,ne,ge,gt}): New fp compare functions. Replacing. diff --git a/sim/common/sim-fpu.c b/sim/common/sim-fpu.c index 66fe5d160ff..d2b926608e5 100644 --- a/sim/common/sim-fpu.c +++ b/sim/common/sim-fpu.c @@ -1,5 +1,5 @@ /* Simulator Floating-point support. - Copyright (C) 1997 Free Software Foundation, Inc. + Copyright (C) 1994, 1997 Free Software Foundation, Inc. Contributed by Cygnus Support. This file is part of GDB, the GNU debugger. @@ -20,23 +20,412 @@ with this program; if not, write to the Free Software Foundation, Inc., -#ifndef _SIM_FPU_C_ -#define _SIM_FPU_C_ +#ifndef SIM_FPU_C +#define SIM_FPU_C #include "sim-main.h" #include "sim-fpu.h" +#include "sim-assert.h" #include +/* Floating point number is */ + +#define SP_NGARDS 7L +#define SP_GARDROUND 0x3f +#define SP_GARDMASK 0x7f +#define SP_GARDMSB 0x40 +#define SP_EXPBITS 8 +#define SP_EXPBIAS 127 +#define SP_FRACBITS 23 +#define SP_EXPMAX (0xff) +#define SP_QUIET_NAN 0x100000L +#define SP_FRAC_NBITS 32 +#define SP_FRACHIGH 0x80000000L +#define SP_FRACHIGH2 0xc0000000L + +#define DP_NGARDS 8L +#define DP_GARDROUND 0x7f +#define DP_GARDMASK 0xff +#define DP_GARDMSB 0x80 +#define DP_EXPBITS 11 +#define DP_EXPBIAS 1023 +#define DP_FRACBITS 52 +#define DP_EXPMAX (0x7ff) +#define DP_QUIET_NAN 0x8000000000000LL +#define DP_FRAC_NBITS 64 +#define DP_FRACHIGH 0x8000000000000000LL +#define DP_FRACHIGH2 0xc000000000000000LL + +#define EXPMAX (is_double ? DP_EXPMAX : SP_EXPMAX) +#define EXPBITS (is_double ? DP_EXPBITS : SP_EXPBITS) +#define EXPBIAS (is_double ? DP_EXPBIAS : SP_EXPBIAS) +#define FRACBITS (is_double ? DP_FRACBITS : SP_FRACBITS) +#define NGARDS (is_double ? DP_NGARDS : (SP_NGARDS )) +#define SIGNBIT (1LL << (EXPBITS + FRACBITS)) +#define FRAC_NBITS (is_double ? DP_FRAC_NBITS : SP_FRAC_NBITS) +#define GARDMASK (is_double ? DP_GARDMASK : SP_GARDMASK) +#define GARDMSB (is_double ? DP_GARDMSB : SP_GARDMSB) +#define GARDROUND (is_double ? DP_GARDROUND : SP_GARDROUND) + +/* F_D_BITOFF is the number of bits offset between the MSB of the mantissa + of a float and of a double. Assumes there are only two float types. + (double::FRAC_BITS+double::NGARGS-(float::FRAC_BITS-float::NGARDS)) + */ +#define F_D_BITOFF (is_double ? 0 : (52+8-(23+7))) + + +#if 0 +#define (is_double ? DP_ : SP_) +#endif + +#define NORMAL_EXPMIN (-(EXPBIAS)+1) + +#define IMPLICIT_1 (1LL<<(FRACBITS+NGARDS)) +#define IMPLICIT_2 (1LL<<(FRACBITS+1+NGARDS)) + +#define MAX_SI_INT (is_double ? LSMASK64 (63) : LSMASK64 (31)) +#define MAX_USI_INT (is_double ? 
LSMASK64 (64) : LSMASK64 (32)) + + +typedef enum +{ + sim_fpu_class_snan, + sim_fpu_class_qnan, + sim_fpu_class_zero, + sim_fpu_class_number, + sim_fpu_class_infinity, +} sim_fpu_class; + +typedef struct _sim_ufpu { + sim_fpu_class class; + int normal_exp; + int sign; + unsigned64 fraction; + union { + double d; + unsigned64 i; + } val; +} sim_ufpu; + + +STATIC_INLINE_SIM_FPU (unsigned64) +pack_fpu (const sim_ufpu *src, int is_double) +{ + unsigned64 fraction; + unsigned64 exp; + int sign; + + switch (src->class) + { + default: + /* create a NaN */ + case sim_fpu_class_qnan: + case sim_fpu_class_snan: + sign = 1; /* fixme - always a qNaN */ + exp = EXPMAX; + fraction = src->fraction; + break; + case sim_fpu_class_infinity: + sign = src->sign; + exp = EXPMAX; + fraction = 0; + break; + case sim_fpu_class_zero: + sign = src->sign; + exp = 0; + fraction = 0; + break; + case sim_fpu_class_number: + if (src->normal_exp < NORMAL_EXPMIN) + { + /* This number's exponent is too low to fit into the bits + available in the number, so we'll store 0 in the exponent and + shift the fraction to the right to make up for it. */ + + int shift = NORMAL_EXPMIN - src->normal_exp; + + sign = src->sign; + exp = 0; + + if (shift > (FRAC_NBITS - NGARDS)) + { + /* No point shifting, since it's more that 64 out. */ + fraction = 0; + } + else + { + /* Shift by the value */ + fraction = src->fraction >> F_D_BITOFF; + fraction >>= shift; + fraction >>= NGARDS; + } + } + else if (src->normal_exp > EXPBIAS) + { + /* Infinity */ + sign = src->sign; + exp = EXPMAX; + fraction = 0; + } + else + { + sign = src->sign; + exp = (src->normal_exp + EXPBIAS); + fraction = src->fraction >> F_D_BITOFF; + /* IF the gard bits are the all zero, but the first, then we're + half way between two numbers, choose the one which makes the + lsb of the answer 0. */ + if ((fraction & GARDMASK) == GARDMSB) + { + if (fraction & (1 << NGARDS)) + fraction += GARDROUND + 1; + } + else + { + /* Add a one to the guards to round up */ + fraction += GARDROUND; + } + if (fraction >= IMPLICIT_2) + { + fraction >>= 1; + exp += 1; + } + fraction >>= NGARDS; + } + } + + return ((sign ? SIGNBIT : 0) + | (exp << FRACBITS) + | LSMASKED64 (fraction, FRACBITS)); +} + + +STATIC_INLINE_SIM_FPU (void) +unpack_fpu (sim_ufpu *dst, unsigned64 s, int is_double) +{ + unsigned64 fraction = LSMASKED64 (s, FRACBITS); + unsigned exp = LSMASKED64 (s >> FRACBITS, EXPBITS); + + dst->sign = (s & SIGNBIT) != 0; + + if (exp == 0) + { + /* Hmm. Looks like 0 */ + if (fraction == 0) + { + /* tastes like zero */ + dst->class = sim_fpu_class_zero; + } + else + { + /* Zero exponent with non zero fraction - it's denormalized, + so there isn't a leading implicit one - we'll shift it so + it gets one. 
*/ + dst->normal_exp = exp - EXPBIAS + 1; + fraction <<= NGARDS; + + dst->class = sim_fpu_class_number; + while (fraction < IMPLICIT_1) + { + fraction <<= 1; + dst->normal_exp--; + } + dst->fraction = fraction << F_D_BITOFF; + } + } + else if (exp == EXPMAX) + { + /* Huge exponent*/ + if (fraction == 0) + { + /* Attached to a zero fraction - means infinity */ + dst->class = sim_fpu_class_infinity; + } + else + { + /* Non zero fraction, means nan */ + if (dst->sign) + { + dst->class = sim_fpu_class_snan; + } + else + { + dst->class = sim_fpu_class_qnan; + } + /* Keep the fraction part as the nan number */ + dst->fraction = fraction << F_D_BITOFF; + } + } + else + { + /* Nothing strange about this number */ + dst->normal_exp = exp - EXPBIAS; + dst->class = sim_fpu_class_number; + dst->fraction = ((fraction << NGARDS) | IMPLICIT_1) << F_D_BITOFF; + } + + /* sanity checks */ + dst->val.i = -1; + dst->val.i = pack_fpu (dst, 1); + { + if (is_double) + { + ASSERT (dst->val.i == s); + } + else + { + unsigned32 val = pack_fpu (dst, 0); + unsigned32 org = s; + ASSERT (val == org); + } + } +} + +STATIC_INLINE_SIM_FPU (sim_fpu) +ufpu2fpu (const sim_ufpu *d) +{ + sim_fpu ans; + ans.val.i = pack_fpu (d, 1); + return ans; +} + + +STATIC_INLINE_SIM_FPU (sim_ufpu) +fpu2ufpu (const sim_fpu *d) +{ + sim_ufpu ans; + unpack_fpu (&ans, d->val.i, 1); + return ans; +} + +STATIC_INLINE_SIM_FPU (int) +is_ufpu_number (const sim_ufpu *d) +{ + switch (d->class) + { + case sim_fpu_class_zero: + case sim_fpu_class_number: + return 1; + default: + return 0; + } +} + + +STATIC_INLINE_SIM_FPU (int) +is_ufpu_nan (const sim_ufpu *d) +{ + switch (d->class) + { + case sim_fpu_class_qnan: + case sim_fpu_class_snan: + return 1; + default: + return 0; + } +} + + +STATIC_INLINE_SIM_FPU (int) +is_ufpu_zero (const sim_ufpu *d) +{ + switch (d->class) + { + case sim_fpu_class_zero: + return 1; + default: + return 0; + } +} + + +STATIC_INLINE_SIM_FPU (int) +is_ufpu_inf (const sim_ufpu *d) +{ + switch (d->class) + { + case sim_fpu_class_infinity: + return 1; + default: + return 0; + } +} + + +STATIC_INLINE_SIM_FPU (sim_fpu) +fpu_nan (void) +{ + sim_ufpu tmp; + tmp.class = sim_fpu_class_snan; + tmp.fraction = 0; + tmp.sign = 1; + tmp.normal_exp = 0; + return ufpu2fpu (&tmp); +} + + +STATIC_INLINE_SIM_FPU (signed64) +fpu2i (sim_fpu s, int is_double) +{ + sim_ufpu a = fpu2ufpu (&s); + unsigned64 tmp; + if (is_ufpu_zero (&a)) + return 0; + if (is_ufpu_nan (&a)) + return 0; + /* get reasonable MAX_SI_INT... */ + if (is_ufpu_inf (&a)) + return a.sign ? MAX_SI_INT : (-MAX_SI_INT)-1; + /* it is a number, but a small one */ + if (a.normal_exp < 0) + return 0; + if (a.normal_exp > (FRAC_NBITS - 2)) + return a.sign ? (-MAX_SI_INT)-1 : MAX_SI_INT; + if (a.normal_exp > (FRACBITS + NGARDS + F_D_BITOFF)) + tmp = (a.fraction << (a.normal_exp - (FRACBITS + NGARDS))); + else + tmp = (a.fraction >> ((FRACBITS + NGARDS + F_D_BITOFF) - a.normal_exp)); + return a.sign ? (-tmp) : (tmp); +} + +STATIC_INLINE_SIM_FPU (unsigned64) +fpu2u (sim_fpu s, int is_double) +{ + sim_ufpu a = fpu2ufpu (&s); + unsigned64 tmp; + if (is_ufpu_zero (&a)) + return 0; + if (is_ufpu_nan (&a)) + return 0; + /* get reasonable MAX_USI_INT... */ + if (is_ufpu_inf (&a)) + return a.sign ? 
MAX_USI_INT : 0; + /* it is a negative number */ + if (a.sign) + return 0; + /* it is a number, but a small one */ + if (a.normal_exp < 0) + return 0; + if (a.normal_exp > (FRAC_NBITS - 1)) + return MAX_USI_INT; + if (a.normal_exp > (FRACBITS + NGARDS + F_D_BITOFF)) + tmp = (a.fraction << (a.normal_exp - (FRACBITS + NGARDS + F_D_BITOFF))); + else + tmp = (a.fraction >> ((FRACBITS + NGARDS + F_D_BITOFF) - a.normal_exp)); + return tmp; +} + + /* register <-> sim_fpu */ INLINE_SIM_FPU (sim_fpu) sim_fpu_32to (unsigned32 s) { - sim_fpu ans; - ans.val = *(float*) &s; - return ans; + sim_ufpu tmp; + unpack_fpu (&tmp, s, 0); + return ufpu2fpu (&tmp); } @@ -44,7 +433,7 @@ INLINE_SIM_FPU (sim_fpu) sim_fpu_64to (unsigned64 s) { sim_fpu ans; - ans.val = *(double*) &s; + ans.val.i = s; return ans; } @@ -52,15 +441,16 @@ sim_fpu_64to (unsigned64 s) INLINE_SIM_FPU (unsigned32) sim_fpu_to32 (sim_fpu l) { - float s = l.val; - return *(unsigned32*) &s; + /* convert to single safely */ + sim_ufpu tmp = fpu2ufpu (&l); + return pack_fpu (&tmp, 0); } INLINE_SIM_FPU (unsigned64) sim_fpu_to64 (sim_fpu s) { - return *(unsigned64*) &s.val; + return s.val.i; } @@ -71,7 +461,7 @@ sim_fpu_add (sim_fpu l, sim_fpu r) { sim_fpu ans; - ans.val = l.val + r.val; + ans.val.d = l.val.d + r.val.d; return ans; } @@ -81,7 +471,7 @@ sim_fpu_sub (sim_fpu l, sim_fpu r) { sim_fpu ans; - ans.val = l.val - r.val; + ans.val.d = l.val.d - r.val.d; return ans; } @@ -91,7 +481,7 @@ sim_fpu_mul (sim_fpu l, sim_fpu r) { sim_fpu ans; - ans.val = l.val * r.val; + ans.val.d = l.val.d * r.val.d; return ans; } @@ -100,9 +490,90 @@ INLINE_SIM_FPU (sim_fpu) sim_fpu_div (sim_fpu l, sim_fpu r) { - sim_fpu ans; - ans.val = l.val / r.val; - return ans; + const int is_double = 1; + sim_ufpu a = fpu2ufpu (&l); + sim_ufpu b = fpu2ufpu (&r); + unsigned64 bit; + unsigned64 numerator; + unsigned64 denominator; + unsigned64 quotient; + + if (is_ufpu_nan (&a)) + { + return ufpu2fpu (&a); + } + if (is_ufpu_nan (&b)) + { + return ufpu2fpu (&b); + } + if (is_ufpu_inf (&a) || is_ufpu_zero (&a)) + { + if (a.class == b.class) + return fpu_nan (); + return l; + } + a.sign = a.sign ^ b.sign; + + if (is_ufpu_inf (&b)) + { + a.fraction = 0; + a.normal_exp = 0; + return ufpu2fpu (&a); + } + if (is_ufpu_zero (&b)) + { + a.class = sim_fpu_class_infinity; + return ufpu2fpu (&a); + } + + /* Calculate the mantissa by multiplying both 64bit numbers to get a + 128 bit number */ + { + /* quotient = + ( numerator / denominator) * 2^(numerator exponent - denominator exponent) + */ + + a.normal_exp = a.normal_exp - b.normal_exp; + numerator = a.fraction; + denominator = b.fraction; + + if (numerator < denominator) + { + /* Fraction will be less than 1.0 */ + numerator *= 2; + a.normal_exp--; + } + bit = IMPLICIT_1; + quotient = 0; + /* ??? Does divide one bit at a time. Optimize. 
*/ + while (bit) + { + if (numerator >= denominator) + { + quotient |= bit; + numerator -= denominator; + } + bit >>= 1; + numerator *= 2; + } + + if ((quotient & GARDMASK) == GARDMSB) + { + if (quotient & (1 << NGARDS)) + { + /* half way, so round to even */ + quotient += GARDROUND + 1; + } + else if (numerator) + { + /* but we really weren't half way, more bits exist */ + quotient += GARDROUND + 1; + } + } + + a.fraction = quotient; + return ufpu2fpu (&a); + } } @@ -110,7 +581,7 @@ INLINE_SIM_FPU (sim_fpu) sim_fpu_inv (sim_fpu r) { sim_fpu ans; - ans.val = 1 / r.val; + ans.val.d = 1 / r.val.d; return ans; } @@ -119,7 +590,7 @@ INLINE_SIM_FPU (sim_fpu) sim_fpu_sqrt (sim_fpu r) { sim_fpu ans; - ans.val = sqrt (r.val); + ans.val.d = sqrt (r.val.d); return ans; } @@ -130,75 +601,98 @@ INLINE_SIM_FPU (sim_fpu) sim_fpu_i32to (signed32 s) { sim_fpu ans; - ans.val = s; + ans.val.d = s; return ans; } +INLINE_SIM_FPU (signed32) +sim_fpu_to32i (sim_fpu s) +{ + return fpu2i (s, 0); +} + + INLINE_SIM_FPU (sim_fpu) sim_fpu_u32to (unsigned32 s) { sim_fpu ans; - ans.val = s; + ans.val.d = s; return ans; } +INLINE_SIM_FPU (unsigned32) +sim_fpu_to32u (sim_fpu s) +{ + return fpu2u (s, 0); +} + + INLINE_SIM_FPU (sim_fpu) sim_fpu_i64to (signed64 s) { sim_fpu ans; - ans.val = s; + ans.val.d = s; return ans; } +INLINE_SIM_FPU (signed64) +sim_fpu_to64i (sim_fpu s) +{ + return fpu2i (s, 1); +} + + INLINE_SIM_FPU (sim_fpu) sim_fpu_u64to (unsigned64 s) { sim_fpu ans; - ans.val = s; + ans.val.d = s; return ans; } +INLINE_SIM_FPU (unsigned64) +sim_fpu_to64u (sim_fpu s) +{ + return fpu2u (s, 1); +} + + /* sim_fpu -> host format */ INLINE_SIM_FPU (float) sim_fpu_2f (sim_fpu f) { - return f.val; + return f.val.d; } INLINE_SIM_FPU (double) sim_fpu_2d (sim_fpu s) { - return s.val; + return s.val.d; } -#if 0 INLINE_SIM_FPU (sim_fpu) sim_fpu_f2 (float f) { sim_fpu ans; - ans.val = f; + ans.val.d = f; return ans; } -#endif -#if 0 INLINE_SIM_FPU (sim_fpu) sim_fpu_d2 (double d) { sim_fpu ans; - ans.val = d; + ans.val.d = d; return ans; } -#endif - /* General */ @@ -206,7 +700,8 @@ sim_fpu_d2 (double d) INLINE_SIM_FPU (int) sim_fpu_is_nan (sim_fpu d) { - return 0; /* FIXME - detect NaN */ + sim_ufpu tmp = fpu2ufpu (&d); + return is_ufpu_nan (&tmp); } @@ -216,42 +711,72 @@ INLINE_SIM_FPU (int) sim_fpu_is_lt (sim_fpu l, sim_fpu r) { - return (l.val < r.val); + sim_ufpu tl = fpu2ufpu (&l); + sim_ufpu tr = fpu2ufpu (&r); + if (is_ufpu_number (&tl) && is_ufpu_number (&tr)) + return (l.val.d < r.val.d); + else + return 0; } INLINE_SIM_FPU (int) sim_fpu_is_le (sim_fpu l, sim_fpu r) { - return (l.val <= r.val); + sim_ufpu tl = fpu2ufpu (&l); + sim_ufpu tr = fpu2ufpu (&r); + if (is_ufpu_number (&tl) && is_ufpu_number (&tr)) + return (l.val.d <= r.val.d); + else + return 0; } INLINE_SIM_FPU (int) sim_fpu_is_eq (sim_fpu l, sim_fpu r) { - return (l.val == r.val); + sim_ufpu tl = fpu2ufpu (&l); + sim_ufpu tr = fpu2ufpu (&r); + if (is_ufpu_number (&tl) && is_ufpu_number (&tr)) + return (l.val.d == r.val.d); + else + return 0; } INLINE_SIM_FPU (int) sim_fpu_is_ne (sim_fpu l, sim_fpu r) { - return (l.val != r.val); + sim_ufpu tl = fpu2ufpu (&l); + sim_ufpu tr = fpu2ufpu (&r); + if (is_ufpu_number (&tl) && is_ufpu_number (&tr)) + return (l.val.d != r.val.d); + else + return 0; } INLINE_SIM_FPU (int) sim_fpu_is_ge (sim_fpu l, sim_fpu r) { - return (l.val >= r.val); + sim_ufpu tl = fpu2ufpu (&l); + sim_ufpu tr = fpu2ufpu (&r); + if (is_ufpu_number (&tl) && is_ufpu_number (&tr)) + return (l.val.d >= r.val.d); + else + return 0; } INLINE_SIM_FPU 
(int) sim_fpu_is_gt (sim_fpu l, sim_fpu r) { - return (l.val > r.val); + sim_ufpu tl = fpu2ufpu (&l); + sim_ufpu tr = fpu2ufpu (&r); + if (is_ufpu_number (&tl) && is_ufpu_number (&tr)) + return (l.val.d > r.val.d); + else + return 0; } #endif diff --git a/sim/tic80/ChangeLog b/sim/tic80/ChangeLog index 89ebd48539b..8f9ffd0df69 100644 --- a/sim/tic80/ChangeLog +++ b/sim/tic80/ChangeLog @@ -1,5 +1,13 @@ Thu May 15 11:45:37 1997 Andrew Cagney + * insns (do_shift): When rot==0 and zero/sign merge treat it as + 32. + (set_fp_reg): For interger conversion, use sim-fpu fpu2i + functions. + (do_fmpy): Perform iii and uuu using integer arithmetic. + + * Makefile.in (ENGINE_H): Assume everything depends on the fpu. + * insns (get_fp_reg): Use sim_fpu_u32to to perform unsigned conversion. (do_fcmp): Update to use new fp compare functions. Make reg nr arg diff --git a/sim/tic80/Makefile.in b/sim/tic80/Makefile.in index eb3f943333d..0e9dc25c6aa 100644 --- a/sim/tic80/Makefile.in +++ b/sim/tic80/Makefile.in @@ -140,6 +140,7 @@ ENGINE_H = \ $(srcdir)/../common/sim-alu.h \ $(srcdir)/../common/sim-core.h \ $(srcdir)/../common/sim-events.h \ + $(srcdir)/../common/sim-fpu.h \ idecode.o: $(ENGINE_H) semantics.o: $(ENGINE_H) diff --git a/sim/tic80/insns b/sim/tic80/insns index ff80bffc3fe..7a15fc7f6d7 100644 --- a/sim/tic80/insns +++ b/sim/tic80/insns @@ -434,13 +434,15 @@ void::function::set_fp_reg:int Dest, sim_fpu val, int PD break; } case 2: /* signed */ - /* FIXME - rounding */ - GPR (Dest) = sim_fpu_2d (val); - break; + { + GPR (Dest) = sim_fpu_to32i (val); + break; + } case 3: /* unsigned */ - /* FIXME - rounding */ - GPR (Dest) = sim_fpu_2d (val); - break; + { + GPR (Dest) = sim_fpu_to32u (val); + break; + } default: engine_error (SD, CPU, cia, "Unsupported FP precision"); } @@ -516,9 +518,27 @@ void::function::do_fdiv:int Dest, int PD, sim_fpu s1, sim_fpu s2 // fmpy.{s|d|i|u}{s|d|i|u}{s|d|i|u} void::function::do_fmpy:int Dest, int PD, sim_fpu s1, sim_fpu s2 - sim_fpu ans = sim_fpu_mul (s1, s2); - TRACE_FPU3 (MY_INDEX, ans, s1, s2); - set_fp_reg (_SD, Dest, ans, PD); + switch (PD) + { + case 2: /* signed */ + { + GPR (Dest) = sim_fpu_to64i (s1) * sim_fpu_to64i (s2); + TRACE_FPU2I (MY_INDEX, GPR (Dest), s1, s2); + break; + } + case 3: /* unsigned */ + { + GPR (Dest) = sim_fpu_to64u (s1) * sim_fpu_to64u (s2); + TRACE_FPU2I (MY_INDEX, GPR (Dest), s1, s2); + break; + } + default: + { + sim_fpu ans = sim_fpu_mul (s1, s2); + set_fp_reg (_SD, Dest, ans, PD); + TRACE_FPU3 (MY_INDEX, ans, s1, s2); + } + } 31.Dest,26.Source2,21.0b111110010,12.0,11./,10.PD,8.P2,6.P1,4.Source1::f::fmpy r do_fmpy (_SD, Dest, PD, get_fp_reg (_SD, Source1, rSource1, P1), @@ -832,8 +852,14 @@ void::function::do_shift:int Dest, int Source, int Merge, int i, int n, int EndM case 0: case 1: case 2: shiftmask = ~ (unsigned32)0; /* disabled */ break; - case 3: case 4: case 5: - shiftmask = ((1 << nRotate) - 1); /* enabled */ + case 3: case 5: /* enabled - 0 -> 32 */ + if (nRotate == 0) + shiftmask = ~ (unsigned32)0; + else + shiftmask = ((1 << nRotate) - 1); /* enabled - 0 -> 0 */ + break; + case 4: + shiftmask = ((1 << nRotate) - 1); /* enabled - 0 -> 0 */ break; case 6: case 7: shiftmask = ~((1 << nRotate) - 1); /* inverted */
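For readers following the patch, the central technique it introduces in sim-fpu.c is the pack_fpu()/unpack_fpu() pair: a raw IEEE 754 bit pattern is decoded into an explicit (class, sign, exponent, fraction) record, operated on, and re-encoded. The stand-alone sketch below is not part of the commit; it shows the same unpack/pack round-trip idea for IEEE single precision only, using plain C99 types (uint32_t) instead of the simulator's unsigned32/unsigned64, and it omits the guard bits, round-to-nearest-even, and overflow-to-infinity handling that the real pack_fpu() performs. The SP_* constants mirror the macros in the patch; everything else is illustrative.

/* Sketch only: decode and re-encode an IEEE single, in the style of
   sim-fpu.c's unpack_fpu()/pack_fpu(), minus guard bits and rounding.  */

#include <stdint.h>
#include <stdio.h>

#define SP_EXPBITS   8
#define SP_FRACBITS  23
#define SP_EXPBIAS   127
#define SP_EXPMAX    0xff
#define SP_SIGNBIT   (1u << (SP_EXPBITS + SP_FRACBITS))

struct unpacked
{
  int sign;                     /* 1 if negative */
  int exp;                      /* unbiased exponent (normals/denormals) */
  uint32_t frac;                /* fraction with the implicit 1 made explicit */
  int is_zero, is_inf, is_nan;
};

/* Decode a raw single-precision bit pattern into its components.  */
static void
unpack32 (struct unpacked *d, uint32_t s)
{
  uint32_t frac = s & ((1u << SP_FRACBITS) - 1);
  uint32_t exp = (s >> SP_FRACBITS) & SP_EXPMAX;

  d->sign = (s & SP_SIGNBIT) != 0;
  d->is_zero = d->is_inf = d->is_nan = 0;
  d->exp = 0;
  d->frac = 0;

  if (exp == 0 && frac == 0)
    d->is_zero = 1;
  else if (exp == 0)
    {
      /* Denormal: no implicit leading 1, so normalize by shifting left.  */
      d->exp = 1 - SP_EXPBIAS;
      d->frac = frac;
      while (d->frac < (1u << SP_FRACBITS))
        {
          d->frac <<= 1;
          d->exp--;
        }
    }
  else if (exp == SP_EXPMAX)
    {
      if (frac == 0)
        d->is_inf = 1;
      else
        d->is_nan = 1;
    }
  else
    {
      d->exp = (int) exp - SP_EXPBIAS;
      d->frac = frac | (1u << SP_FRACBITS);   /* restore the implicit 1 */
    }
}

/* Re-encode; the inverse of unpack32 for the cases handled above.  */
static uint32_t
pack32 (const struct unpacked *d)
{
  uint32_t sign = d->sign ? SP_SIGNBIT : 0;

  if (d->is_zero)
    return sign;
  if (d->is_inf)
    return sign | ((uint32_t) SP_EXPMAX << SP_FRACBITS);
  if (d->is_nan)
    /* Just force a non-zero fraction; the real code keeps src->fraction.  */
    return sign | ((uint32_t) SP_EXPMAX << SP_FRACBITS) | 1;

  if (d->exp < 1 - SP_EXPBIAS)
    {
      /* Exponent too small for a normal number: store exponent 0 and
         shift the fraction right to compensate (a denormal result).  */
      int shift = (1 - SP_EXPBIAS) - d->exp;
      uint32_t frac = (shift < 32) ? (d->frac >> shift) : 0;
      return sign | frac;
    }

  return sign
    | ((uint32_t) (d->exp + SP_EXPBIAS) << SP_FRACBITS)
    | (d->frac & ((1u << SP_FRACBITS) - 1));
}

int
main (void)
{
  /* Round-trip a few bit patterns: 1.0f, the smallest denormal, and -inf.  */
  uint32_t samples[] = { 0x3f800000u, 0x00000001u, 0xff800000u };
  for (unsigned i = 0; i < sizeof samples / sizeof samples[0]; i++)
    {
      struct unpacked u;
      unpack32 (&u, samples[i]);
      printf ("0x%08x -> 0x%08x\n",
              (unsigned) samples[i], (unsigned) pack32 (&u));
    }
  return 0;
}

The sketch truncates when packing where the patched pack_fpu() keeps NGARDS extra guard bits in the fraction and uses the GARDMASK/GARDMSB/GARDROUND logic to round to nearest, ties to even; that difference is exactly why the real unpack_fpu() can assert that pack_fpu() reproduces the original bits.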