1 /* This is a software floating point library which can be used instead
2 of the floating point routines in libgcc1.c for targets without
3 hardware floating point. */
4
5 /* Copyright 1994-2021 Free Software Foundation, Inc.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 /* As a special exception, if you link this library with other files,
21 some of which are compiled with GCC, to produce an executable,
22 this library does not by itself cause the resulting executable
23 to be covered by the GNU General Public License.
24 This exception does not however invalidate any other reasons why
25 the executable file might be covered by the GNU General Public License. */
26
27 /* This implements IEEE 754 format arithmetic, but does not provide a
28 mechanism for setting the rounding mode, or for generating or handling
29 exceptions.
30
31 The original code by Steve Chamberlain, hacked by Mark Eichin and Jim
32 Wilson, all of Cygnus Support. */
33
34
35 #ifndef SIM_FPU_C
36 #define SIM_FPU_C
37
38 #include "sim-basics.h"
39 #include "sim-fpu.h"
40
41 #include "sim-io.h"
42 #include "sim-assert.h"
43
44 #include <stdlib.h>
45
46 /* Debugging support.
47 If digits is -1, then print all digits. */
48
49 static void
50 print_bits (unsigned64 x,
51 int msbit,
52 int digits,
53 sim_fpu_print_func print,
54 void *arg)
55 {
56 unsigned64 bit = LSBIT64 (msbit);
57 int i = 4;
58 while (bit && digits)
59 {
60 if (i == 0)
61 print (arg, ",");
62
63 if ((x & bit))
64 print (arg, "1");
65 else
66 print (arg, "0");
67 bit >>= 1;
68
69 if (digits > 0)
70 digits--;
71 i = (i + 1) % 4;
72 }
73 }
74
75
76
77 /* Quick and dirty conversion between a host double and host 64bit int. */
78
79 typedef union
80 {
81 double d;
82 unsigned64 i;
83 } sim_fpu_map;
84
85
86 /* A packed IEEE floating point number.
87
88 Form is <SIGN:1><BIASEDEXP:NR_EXPBITS><FRAC:NR_FRACBITS> for both
89 32 and 64 bit numbers. This number is interpreted as:
90
91 Normalized (0 < BIASEDEXP && BIASEDEXP < EXPMAX):
92 (sign ? '-' : '+') 1.<FRAC> x 2 ^ (BIASEDEXP - EXPBIAS)
93
94 Denormalized (0 == BIASEDEXP && FRAC != 0):
 95       (sign ? "-" : "+") 0.<FRAC> x 2 ^ (1 - EXPBIAS)
96
97 Zero (0 == BIASEDEXP && FRAC == 0):
98 (sign ? "-" : "+") 0.0
99
100 Infinity (BIASEDEXP == EXPMAX && FRAC == 0):
101 (sign ? "-" : "+") "infinity"
102
103 SignalingNaN (BIASEDEXP == EXPMAX && FRAC > 0 && FRAC < QUIET_NAN):
104 SNaN.FRAC
105
 106    QuietNaN (BIASEDEXP == EXPMAX && FRAC >= QUIET_NAN):
107 QNaN.FRAC
108
109 */
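
/* Worked example (illustrative): in the 32 bit format the value 1.0
   has SIGN 0, BIASEDEXP 127 and FRAC 0 and so packs as 0x3F800000,
   while -2.0 packs as 0xC0000000 (SIGN 1, BIASEDEXP 128, FRAC 0).
   In the 64 bit format 1.0 packs as 0x3FF0000000000000
   (BIASEDEXP 1023).  */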
110
111 #define NR_EXPBITS (is_double ? 11 : 8)
112 #define NR_FRACBITS (is_double ? 52 : 23)
113 #define SIGNBIT (is_double ? MSBIT64 (0) : MSBIT64 (32))
114
115 #define EXPMAX32 (255)
 116 #define EXPMAX64 (2047)
 117 #define EXPMAX ((unsigned) (is_double ? EXPMAX64 : EXPMAX32))
118
119 #define EXPBIAS32 (127)
120 #define EXPBIAS64 (1023)
121 #define EXPBIAS (is_double ? EXPBIAS64 : EXPBIAS32)
122
123 #define QUIET_NAN LSBIT64 (NR_FRACBITS - 1)
124
125
126
127 /* An unpacked floating point number.
128
129 When unpacked, the fraction of both a 32 and 64 bit floating point
130 number is stored using the same format:
131
132 64 bit - <IMPLICIT_1:1><FRACBITS:52><GUARDS:8><PAD:00>
133 32 bit - <IMPLICIT_1:1><FRACBITS:23><GUARDS:7><PAD:30> */
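
/* For example (illustrative), unpacking the 64 bit value 1.0 leaves
   NORMAL_EXP at 0 and FRACTION equal to IMPLICIT_1 (only bit
   NR_FRAC_GUARD set), with all guard and pad bits clear; 1.5 also
   sets the bit immediately below IMPLICIT_1.  */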
134
135 #define NR_PAD32 (30)
136 #define NR_PAD64 (0)
137 #define NR_PAD (is_double ? NR_PAD64 : NR_PAD32)
138 #define PADMASK (is_double ? 0 : LSMASK64 (NR_PAD32 - 1, 0))
139
140 #define NR_GUARDS32 (7 + NR_PAD32)
141 #define NR_GUARDS64 (8 + NR_PAD64)
142 #define NR_GUARDS (is_double ? NR_GUARDS64 : NR_GUARDS32)
143 #define GUARDMASK LSMASK64 (NR_GUARDS - 1, 0)
144
145 #define GUARDMSB LSBIT64 (NR_GUARDS - 1)
146 #define GUARDLSB LSBIT64 (NR_PAD)
147 #define GUARDROUND LSMASK64 (NR_GUARDS - 2, 0)
148
149 #define NR_FRAC_GUARD (60)
150 #define IMPLICIT_1 LSBIT64 (NR_FRAC_GUARD)
151 #define IMPLICIT_2 LSBIT64 (NR_FRAC_GUARD + 1)
152 #define IMPLICIT_4 LSBIT64 (NR_FRAC_GUARD + 2)
153 #define NR_SPARE 2
154
155 #define FRAC32MASK LSMASK64 (63, NR_FRAC_GUARD - 32 + 1)
156
157 #define NORMAL_EXPMIN (-(EXPBIAS)+1)
158
159 #define NORMAL_EXPMAX32 (EXPBIAS32)
160 #define NORMAL_EXPMAX64 (EXPBIAS64)
161 #define NORMAL_EXPMAX (EXPBIAS)
162
163
164 /* Integer constants */
165
166 #define MAX_INT32 ((signed64) LSMASK64 (30, 0))
167 #define MAX_UINT32 LSMASK64 (31, 0)
168 #define MIN_INT32 ((signed64) LSMASK64 (63, 31))
169
170 #define MAX_INT64 ((signed64) LSMASK64 (62, 0))
171 #define MAX_UINT64 LSMASK64 (63, 0)
172 #define MIN_INT64 ((signed64) LSMASK64 (63, 63))
173
174 #define MAX_INT (is_64bit ? MAX_INT64 : MAX_INT32)
175 #define MIN_INT (is_64bit ? MIN_INT64 : MIN_INT32)
176 #define MAX_UINT (is_64bit ? MAX_UINT64 : MAX_UINT32)
177 #define NR_INTBITS (is_64bit ? 64 : 32)
178
179 /* Squeeze an unpacked sim_fpu struct into a 32/64 bit integer. */
180 STATIC_INLINE_SIM_FPU (unsigned64)
181 pack_fpu (const sim_fpu *src,
182 int is_double)
183 {
184 int sign;
185 unsigned64 exp;
186 unsigned64 fraction;
187 unsigned64 packed;
188
189 switch (src->class)
190 {
191 /* Create a NaN. */
192 case sim_fpu_class_qnan:
193 sign = src->sign;
194 exp = EXPMAX;
195 /* Force fraction to correct class. */
196 fraction = src->fraction;
197 fraction >>= NR_GUARDS;
198 #ifdef SIM_QUIET_NAN_NEGATED
199 fraction |= QUIET_NAN - 1;
200 #else
201 fraction |= QUIET_NAN;
202 #endif
203 break;
204 case sim_fpu_class_snan:
205 sign = src->sign;
206 exp = EXPMAX;
207 /* Force fraction to correct class. */
208 fraction = src->fraction;
209 fraction >>= NR_GUARDS;
210 #ifdef SIM_QUIET_NAN_NEGATED
211 fraction |= QUIET_NAN;
212 #else
213 fraction &= ~QUIET_NAN;
214 #endif
215 break;
216 case sim_fpu_class_infinity:
217 sign = src->sign;
218 exp = EXPMAX;
219 fraction = 0;
220 break;
221 case sim_fpu_class_zero:
222 sign = src->sign;
223 exp = 0;
224 fraction = 0;
225 break;
226 case sim_fpu_class_number:
227 case sim_fpu_class_denorm:
228 ASSERT (src->fraction >= IMPLICIT_1);
229 ASSERT (src->fraction < IMPLICIT_2);
230 if (src->normal_exp < NORMAL_EXPMIN)
231 {
232 /* This number's exponent is too low to fit into the bits
 233          available in the number.  We'll denormalize the number by
 234          storing zero in the exponent and shifting the fraction to
235 the right to make up for it. */
236 int nr_shift = NORMAL_EXPMIN - src->normal_exp;
237 if (nr_shift > NR_FRACBITS)
238 {
239 /* Underflow, just make the number zero. */
240 sign = src->sign;
241 exp = 0;
242 fraction = 0;
243 }
244 else
245 {
246 sign = src->sign;
247 exp = 0;
248 /* Shift by the value. */
249 fraction = src->fraction;
250 fraction >>= NR_GUARDS;
251 fraction >>= nr_shift;
252 }
253 }
254 else if (src->normal_exp > NORMAL_EXPMAX)
255 {
256 /* Infinity */
257 sign = src->sign;
258 exp = EXPMAX;
259 fraction = 0;
260 }
261 else
262 {
263 exp = (src->normal_exp + EXPBIAS);
264 sign = src->sign;
265 fraction = src->fraction;
266 /* FIXME: Need to round according to WITH_SIM_FPU_ROUNDING
267 or some such. */
 268          /* Round to nearest: if only the most significant guard bit
 269             is set (all lower guard bits are zero), we're exactly half
 270             way between two numbers; choose the one which makes the lsb of the answer 0. */
271 if ((fraction & GUARDMASK) == GUARDMSB)
272 {
273 if ((fraction & (GUARDMSB << 1)))
274 fraction += (GUARDMSB << 1);
275 }
276 else
277 {
278 /* Add a one to the guards to force round to nearest. */
279 fraction += GUARDROUND;
280 }
281 if ((fraction & IMPLICIT_2)) /* Rounding resulted in carry. */
282 {
283 exp += 1;
284 fraction >>= 1;
285 }
286 fraction >>= NR_GUARDS;
287 /* When exp == EXPMAX (overflow from carry) fraction must
288 have been made zero. */
289 ASSERT ((exp == EXPMAX) <= ((fraction & ~IMPLICIT_1) == 0));
290 }
291 break;
292 default:
293 abort ();
294 }
295
296 packed = ((sign ? SIGNBIT : 0)
297 | (exp << NR_FRACBITS)
298 | LSMASKED64 (fraction, NR_FRACBITS - 1, 0));
299
300 /* Trace operation. */
301 #if 0
302 if (is_double)
303 {
304 }
305 else
306 {
307 printf ("pack_fpu: ");
308 printf ("-> %c%0lX.%06lX\n",
309 LSMASKED32 (packed, 31, 31) ? '8' : '0',
310 (long) LSEXTRACTED32 (packed, 30, 23),
311 (long) LSEXTRACTED32 (packed, 23 - 1, 0));
312 }
313 #endif
314
315 return packed;
316 }
317
318
319 /* Unpack a 32/64 bit integer into a sim_fpu structure. */
320 STATIC_INLINE_SIM_FPU (void)
321 unpack_fpu (sim_fpu *dst, unsigned64 packed, int is_double)
322 {
323 unsigned64 fraction = LSMASKED64 (packed, NR_FRACBITS - 1, 0);
324 unsigned exp = LSEXTRACTED64 (packed, NR_EXPBITS + NR_FRACBITS - 1, NR_FRACBITS);
325 int sign = (packed & SIGNBIT) != 0;
326
327 if (exp == 0)
328 {
329 /* Hmm. Looks like 0 */
330 if (fraction == 0)
331 {
332 /* Tastes like zero. */
333 dst->class = sim_fpu_class_zero;
334 dst->sign = sign;
335 dst->normal_exp = 0;
336 }
337 else
338 {
339 /* Zero exponent with non zero fraction - it's denormalized,
340 so there isn't a leading implicit one - we'll shift it so
341 it gets one. */
342 dst->normal_exp = exp - EXPBIAS + 1;
343 dst->class = sim_fpu_class_denorm;
344 dst->sign = sign;
345 fraction <<= NR_GUARDS;
346 while (fraction < IMPLICIT_1)
347 {
348 fraction <<= 1;
349 dst->normal_exp--;
350 }
351 dst->fraction = fraction;
352 }
353 }
354 else if (exp == EXPMAX)
355 {
 356       /* Huge exponent. */
357 if (fraction == 0)
358 {
359 /* Attached to a zero fraction - means infinity. */
360 dst->class = sim_fpu_class_infinity;
361 dst->sign = sign;
362 /* dst->normal_exp = EXPBIAS; */
363 /* dst->fraction = 0; */
364 }
365 else
366 {
367 int qnan;
368
369 /* Non zero fraction, means NaN. */
370 dst->sign = sign;
371 dst->fraction = (fraction << NR_GUARDS);
372 #ifdef SIM_QUIET_NAN_NEGATED
373 qnan = (fraction & QUIET_NAN) == 0;
374 #else
375 qnan = fraction >= QUIET_NAN;
376 #endif
377 if (qnan)
378 dst->class = sim_fpu_class_qnan;
379 else
380 dst->class = sim_fpu_class_snan;
381 }
382 }
383 else
384 {
385 /* Nothing strange about this number. */
386 dst->class = sim_fpu_class_number;
387 dst->sign = sign;
388 dst->fraction = ((fraction << NR_GUARDS) | IMPLICIT_1);
389 dst->normal_exp = exp - EXPBIAS;
390 }
391
392 /* Trace operation. */
393 #if 0
394 if (is_double)
395 {
396 }
397 else
398 {
399 printf ("unpack_fpu: %c%02lX.%06lX ->\n",
400 LSMASKED32 (packed, 31, 31) ? '8' : '0',
401 (long) LSEXTRACTED32 (packed, 30, 23),
402 (long) LSEXTRACTED32 (packed, 23 - 1, 0));
403 }
404 #endif
405
406 /* sanity checks */
407 {
408 sim_fpu_map val;
409 val.i = pack_fpu (dst, 1);
410 if (is_double)
411 {
412 ASSERT (val.i == packed);
413 }
414 else
415 {
416 unsigned32 val = pack_fpu (dst, 0);
417 unsigned32 org = packed;
418 ASSERT (val == org);
419 }
420 }
421 }
422
423
424 /* Convert a floating point into an integer. */
425 STATIC_INLINE_SIM_FPU (int)
426 fpu2i (signed64 *i,
427 const sim_fpu *s,
428 int is_64bit,
429 sim_fpu_round round)
430 {
431 unsigned64 tmp;
432 int shift;
433 int status = 0;
434 if (sim_fpu_is_zero (s))
435 {
436 *i = 0;
437 return 0;
438 }
439 if (sim_fpu_is_snan (s))
440 {
441 *i = MIN_INT; /* FIXME */
442 return sim_fpu_status_invalid_cvi;
443 }
444 if (sim_fpu_is_qnan (s))
445 {
446 *i = MIN_INT; /* FIXME */
447 return sim_fpu_status_invalid_cvi;
448 }
449 /* Map infinity onto MAX_INT... */
450 if (sim_fpu_is_infinity (s))
451 {
452 *i = s->sign ? MIN_INT : MAX_INT;
453 return sim_fpu_status_invalid_cvi;
454 }
455 /* It is a number, but a small one. */
456 if (s->normal_exp < 0)
457 {
458 *i = 0;
459 return sim_fpu_status_inexact;
460 }
461 /* Is the floating point MIN_INT or just close? */
462 if (s->sign && s->normal_exp == (NR_INTBITS - 1))
463 {
464 *i = MIN_INT;
465 ASSERT (s->fraction >= IMPLICIT_1);
466 if (s->fraction == IMPLICIT_1)
467 return 0; /* exact */
468 if (is_64bit) /* can't round */
469 return sim_fpu_status_invalid_cvi; /* must be overflow */
 470       /* For a 32bit with MIN_INT, rounding is possible. */
471 switch (round)
472 {
473 case sim_fpu_round_default:
474 abort ();
475 case sim_fpu_round_zero:
476 if ((s->fraction & FRAC32MASK) != IMPLICIT_1)
477 return sim_fpu_status_invalid_cvi;
478 else
479 return sim_fpu_status_inexact;
480 break;
481 case sim_fpu_round_near:
482 {
483 if ((s->fraction & FRAC32MASK) != IMPLICIT_1)
484 return sim_fpu_status_invalid_cvi;
 485            else if ((s->fraction & ~FRAC32MASK) >= (~FRAC32MASK >> 1))
486 return sim_fpu_status_invalid_cvi;
487 else
488 return sim_fpu_status_inexact;
489 }
490 case sim_fpu_round_up:
491 if ((s->fraction & FRAC32MASK) == IMPLICIT_1)
492 return sim_fpu_status_inexact;
493 else
494 return sim_fpu_status_invalid_cvi;
495 case sim_fpu_round_down:
496 return sim_fpu_status_invalid_cvi;
497 }
498 }
499 /* Would right shifting result in the FRAC being shifted into
500 (through) the integer's sign bit? */
501 if (s->normal_exp > (NR_INTBITS - 2))
502 {
503 *i = s->sign ? MIN_INT : MAX_INT;
504 return sim_fpu_status_invalid_cvi;
505 }
506 /* Normal number, shift it into place. */
507 tmp = s->fraction;
508 shift = (s->normal_exp - (NR_FRAC_GUARD));
509 if (shift > 0)
510 {
511 tmp <<= shift;
512 }
513 else
514 {
515 shift = -shift;
516 if (tmp & ((SIGNED64 (1) << shift) - 1))
517 status |= sim_fpu_status_inexact;
518 tmp >>= shift;
519 }
520 *i = s->sign ? (-tmp) : (tmp);
521 return status;
522 }
523
524 /* Convert an integer into a floating point. */
525 STATIC_INLINE_SIM_FPU (int)
526 i2fpu (sim_fpu *f, signed64 i, int is_64bit)
527 {
528 int status = 0;
529 if (i == 0)
530 {
531 f->class = sim_fpu_class_zero;
532 f->sign = 0;
533 f->normal_exp = 0;
534 }
535 else
536 {
537 f->class = sim_fpu_class_number;
538 f->sign = (i < 0);
539 f->normal_exp = NR_FRAC_GUARD;
540
541 if (f->sign)
542 {
543 /* Special case for minint, since there is no corresponding
544 +ve integer representation for it. */
545 if (i == MIN_INT)
546 {
547 f->fraction = IMPLICIT_1;
548 f->normal_exp = NR_INTBITS - 1;
549 }
550 else
551 f->fraction = (-i);
552 }
553 else
554 f->fraction = i;
555
556 if (f->fraction >= IMPLICIT_2)
557 {
558 do
559 {
560 f->fraction = (f->fraction >> 1) | (f->fraction & 1);
561 f->normal_exp += 1;
562 }
563 while (f->fraction >= IMPLICIT_2);
564 }
565 else if (f->fraction < IMPLICIT_1)
566 {
567 do
568 {
569 f->fraction <<= 1;
570 f->normal_exp -= 1;
571 }
572 while (f->fraction < IMPLICIT_1);
573 }
574 }
575
576 /* trace operation */
577 #if 0
578 {
579 printf ("i2fpu: 0x%08lX ->\n", (long) i);
580 }
581 #endif
582
583 /* sanity check */
584 {
585 signed64 val;
586 fpu2i (&val, f, is_64bit, sim_fpu_round_zero);
587 if (i >= MIN_INT32 && i <= MAX_INT32)
588 {
589 ASSERT (val == i);
590 }
591 }
592
593 return status;
594 }
595
596
597 /* Convert a floating point into an integer. */
598 STATIC_INLINE_SIM_FPU (int)
599 fpu2u (unsigned64 *u, const sim_fpu *s, int is_64bit)
600 {
601 const int is_double = 1;
602 unsigned64 tmp;
603 int shift;
604 if (sim_fpu_is_zero (s))
605 {
606 *u = 0;
607 return 0;
608 }
609 if (sim_fpu_is_nan (s))
610 {
611 *u = 0;
612 return 0;
613 }
614 /* It is a negative number. */
615 if (s->sign)
616 {
617 *u = 0;
618 return 0;
619 }
 620   /* Map infinity onto MAX_UINT... */
621 if (sim_fpu_is_infinity (s))
622 {
623 *u = MAX_UINT;
624 return 0;
625 }
626 /* It is a number, but a small one. */
627 if (s->normal_exp < 0)
628 {
629 *u = 0;
630 return 0;
631 }
632 /* overflow */
633 if (s->normal_exp > (NR_INTBITS - 1))
634 {
635 *u = MAX_UINT;
636 return 0;
637 }
638 /* normal number */
639 tmp = (s->fraction & ~PADMASK);
640 shift = (s->normal_exp - (NR_FRACBITS + NR_GUARDS));
641 if (shift > 0)
642 {
643 tmp <<= shift;
644 }
645 else
646 {
647 shift = -shift;
648 tmp >>= shift;
649 }
650 *u = tmp;
651 return 0;
652 }
653
654 /* Convert an unsigned integer into a floating point. */
655 STATIC_INLINE_SIM_FPU (int)
656 u2fpu (sim_fpu *f, unsigned64 u, int is_64bit)
657 {
658 if (u == 0)
659 {
660 f->class = sim_fpu_class_zero;
661 f->sign = 0;
662 f->normal_exp = 0;
663 }
664 else
665 {
666 f->class = sim_fpu_class_number;
667 f->sign = 0;
668 f->normal_exp = NR_FRAC_GUARD;
669 f->fraction = u;
670
671 while (f->fraction < IMPLICIT_1)
672 {
673 f->fraction <<= 1;
674 f->normal_exp -= 1;
675 }
676 }
677 return 0;
678 }
679
680
681 /* register <-> sim_fpu */
682
683 INLINE_SIM_FPU (void)
684 sim_fpu_32to (sim_fpu *f, unsigned32 s)
685 {
686 unpack_fpu (f, s, 0);
687 }
688
689
690 INLINE_SIM_FPU (void)
691 sim_fpu_232to (sim_fpu *f, unsigned32 h, unsigned32 l)
692 {
693 unsigned64 s = h;
694 s = (s << 32) | l;
695 unpack_fpu (f, s, 1);
696 }
697
698
699 INLINE_SIM_FPU (void)
700 sim_fpu_64to (sim_fpu *f, unsigned64 s)
701 {
702 unpack_fpu (f, s, 1);
703 }
704
705
706 INLINE_SIM_FPU (void)
707 sim_fpu_to32 (unsigned32 *s,
708 const sim_fpu *f)
709 {
710 *s = pack_fpu (f, 0);
711 }
712
713
714 INLINE_SIM_FPU (void)
715 sim_fpu_to232 (unsigned32 *h, unsigned32 *l,
716 const sim_fpu *f)
717 {
718 unsigned64 s = pack_fpu (f, 1);
719 *l = s;
720 *h = (s >> 32);
721 }
722
723
724 INLINE_SIM_FPU (void)
725 sim_fpu_to64 (unsigned64 *u,
726 const sim_fpu *f)
727 {
728 *u = pack_fpu (f, 1);
729 }
730
731
732 INLINE_SIM_FPU (void)
733 sim_fpu_fractionto (sim_fpu *f,
734 int sign,
735 int normal_exp,
736 unsigned64 fraction,
737 int precision)
738 {
739 int shift = (NR_FRAC_GUARD - precision);
740 f->class = sim_fpu_class_number;
741 f->sign = sign;
742 f->normal_exp = normal_exp;
743 /* Shift the fraction to where sim-fpu expects it. */
744 if (shift >= 0)
745 f->fraction = (fraction << shift);
746 else
747 f->fraction = (fraction >> -shift);
748 f->fraction |= IMPLICIT_1;
749 }
750
751
752 INLINE_SIM_FPU (unsigned64)
753 sim_fpu_tofraction (const sim_fpu *d,
754 int precision)
755 {
756 /* We have NR_FRAC_GUARD bits, we want only PRECISION bits. */
757 int shift = (NR_FRAC_GUARD - precision);
758 unsigned64 fraction = (d->fraction & ~IMPLICIT_1);
759 if (shift >= 0)
760 return fraction >> shift;
761 else
762 return fraction << -shift;
763 }
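
/* Illustrative note: with PRECISION equal to the target's fraction
   width (52 for a 64 bit value), sim_fpu_fractionto reconstructs
   (sign ? -1 : 1) * 1.<fraction> * 2^normal_exp, and
   sim_fpu_tofraction is its inverse, returning only the explicit
   fraction bits.  */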
764
765
766 /* Rounding */
767
768 STATIC_INLINE_SIM_FPU (int)
769 do_normal_overflow (sim_fpu *f,
770 int is_double,
771 sim_fpu_round round)
772 {
773 switch (round)
774 {
775 case sim_fpu_round_default:
776 return 0;
777 case sim_fpu_round_near:
778 f->class = sim_fpu_class_infinity;
779 break;
780 case sim_fpu_round_up:
781 if (!f->sign)
782 f->class = sim_fpu_class_infinity;
783 break;
784 case sim_fpu_round_down:
785 if (f->sign)
786 f->class = sim_fpu_class_infinity;
787 break;
788 case sim_fpu_round_zero:
789 break;
790 }
791 f->normal_exp = NORMAL_EXPMAX;
792 f->fraction = LSMASK64 (NR_FRAC_GUARD, NR_GUARDS);
793 return (sim_fpu_status_overflow | sim_fpu_status_inexact);
794 }
795
796 STATIC_INLINE_SIM_FPU (int)
797 do_normal_underflow (sim_fpu *f,
798 int is_double,
799 sim_fpu_round round)
800 {
801 switch (round)
802 {
803 case sim_fpu_round_default:
804 return 0;
805 case sim_fpu_round_near:
806 f->class = sim_fpu_class_zero;
807 break;
808 case sim_fpu_round_up:
809 if (f->sign)
810 f->class = sim_fpu_class_zero;
811 break;
812 case sim_fpu_round_down:
813 if (!f->sign)
814 f->class = sim_fpu_class_zero;
815 break;
816 case sim_fpu_round_zero:
817 f->class = sim_fpu_class_zero;
818 break;
819 }
820 f->normal_exp = NORMAL_EXPMIN - NR_FRACBITS;
821 f->fraction = IMPLICIT_1;
822 return (sim_fpu_status_inexact | sim_fpu_status_underflow);
823 }
824
825
826
 827 /* Round a number using NR_GUARDS.
 828    Leaves the rounded number in F, or F->FRACTION == 0 to signal underflow. */
829
830 STATIC_INLINE_SIM_FPU (int)
831 do_normal_round (sim_fpu *f,
832 int nr_guards,
833 sim_fpu_round round)
834 {
835 unsigned64 guardmask = LSMASK64 (nr_guards - 1, 0);
836 unsigned64 guardmsb = LSBIT64 (nr_guards - 1);
837 unsigned64 fraclsb = guardmsb << 1;
838 if ((f->fraction & guardmask))
839 {
840 int status = sim_fpu_status_inexact;
841 switch (round)
842 {
843 case sim_fpu_round_default:
844 return 0;
845 case sim_fpu_round_near:
846 if ((f->fraction & guardmsb))
847 {
848 if ((f->fraction & fraclsb))
849 {
850 status |= sim_fpu_status_rounded;
851 }
852 else if ((f->fraction & (guardmask >> 1)))
853 {
854 status |= sim_fpu_status_rounded;
855 }
856 }
857 break;
858 case sim_fpu_round_up:
859 if (!f->sign)
860 status |= sim_fpu_status_rounded;
861 break;
862 case sim_fpu_round_down:
863 if (f->sign)
864 status |= sim_fpu_status_rounded;
865 break;
866 case sim_fpu_round_zero:
867 break;
868 }
869 f->fraction &= ~guardmask;
870 /* Round if needed, handle resulting overflow. */
871 if ((status & sim_fpu_status_rounded))
872 {
873 f->fraction += fraclsb;
874 if ((f->fraction & IMPLICIT_2))
875 {
876 f->fraction >>= 1;
877 f->normal_exp += 1;
878 }
879 }
880 return status;
881 }
882 else
883 return 0;
884 }
885
886
887 STATIC_INLINE_SIM_FPU (int)
888 do_round (sim_fpu *f,
889 int is_double,
890 sim_fpu_round round,
891 sim_fpu_denorm denorm)
892 {
893 switch (f->class)
894 {
895 case sim_fpu_class_qnan:
896 case sim_fpu_class_zero:
897 case sim_fpu_class_infinity:
898 return 0;
899 break;
900 case sim_fpu_class_snan:
901 /* Quieten a SignalingNaN. */
902 f->class = sim_fpu_class_qnan;
903 return sim_fpu_status_invalid_snan;
904 break;
905 case sim_fpu_class_number:
906 case sim_fpu_class_denorm:
907 {
908 int status;
909 ASSERT (f->fraction < IMPLICIT_2);
910 ASSERT (f->fraction >= IMPLICIT_1);
911 if (f->normal_exp < NORMAL_EXPMIN)
912 {
913 /* This number's exponent is too low to fit into the bits
914 available in the number. Round off any bits that will be
915 discarded as a result of denormalization. Edge case is
916 the implicit bit shifted to GUARD0 and then rounded
917 up. */
918 int shift = NORMAL_EXPMIN - f->normal_exp;
919 if (shift + NR_GUARDS <= NR_FRAC_GUARD + 1
920 && !(denorm & sim_fpu_denorm_zero))
921 {
922 status = do_normal_round (f, shift + NR_GUARDS, round);
923 if (f->fraction == 0) /* Rounding underflowed. */
924 {
925 status |= do_normal_underflow (f, is_double, round);
926 }
927 else if (f->normal_exp < NORMAL_EXPMIN) /* still underflow? */
928 {
929 status |= sim_fpu_status_denorm;
930 /* Any loss of precision when denormalizing is
931 underflow. Some processors check for underflow
932 before rounding, some after! */
933 if (status & sim_fpu_status_inexact)
934 status |= sim_fpu_status_underflow;
935 /* Flag that resultant value has been denormalized. */
936 f->class = sim_fpu_class_denorm;
937 }
938 else if ((denorm & sim_fpu_denorm_underflow_inexact))
939 {
940 if ((status & sim_fpu_status_inexact))
941 status |= sim_fpu_status_underflow;
942 }
943 }
944 else
945 {
946 status = do_normal_underflow (f, is_double, round);
947 }
948 }
949 else if (f->normal_exp > NORMAL_EXPMAX)
950 {
951 /* Infinity */
952 status = do_normal_overflow (f, is_double, round);
953 }
954 else
955 {
956 status = do_normal_round (f, NR_GUARDS, round);
957 if (f->fraction == 0)
958 /* f->class = sim_fpu_class_zero; */
959 status |= do_normal_underflow (f, is_double, round);
960 else if (f->normal_exp > NORMAL_EXPMAX)
961 /* Oops! rounding caused overflow. */
962 status |= do_normal_overflow (f, is_double, round);
963 }
964 ASSERT ((f->class == sim_fpu_class_number
965 || f->class == sim_fpu_class_denorm)
966 <= (f->fraction < IMPLICIT_2 && f->fraction >= IMPLICIT_1));
967 return status;
968 }
969 }
970 return 0;
971 }
972
973 INLINE_SIM_FPU (int)
974 sim_fpu_round_32 (sim_fpu *f,
975 sim_fpu_round round,
976 sim_fpu_denorm denorm)
977 {
978 return do_round (f, 0, round, denorm);
979 }
980
981 INLINE_SIM_FPU (int)
982 sim_fpu_round_64 (sim_fpu *f,
983 sim_fpu_round round,
984 sim_fpu_denorm denorm)
985 {
986 return do_round (f, 1, round, denorm);
987 }
988
989
990
991 /* Arithmetic ops */
992
993 INLINE_SIM_FPU (int)
994 sim_fpu_add (sim_fpu *f,
995 const sim_fpu *l,
996 const sim_fpu *r)
997 {
998 if (sim_fpu_is_snan (l))
999 {
1000 *f = *l;
1001 f->class = sim_fpu_class_qnan;
1002 return sim_fpu_status_invalid_snan;
1003 }
1004 if (sim_fpu_is_snan (r))
1005 {
1006 *f = *r;
1007 f->class = sim_fpu_class_qnan;
1008 return sim_fpu_status_invalid_snan;
1009 }
1010 if (sim_fpu_is_qnan (l))
1011 {
1012 *f = *l;
1013 return 0;
1014 }
1015 if (sim_fpu_is_qnan (r))
1016 {
1017 *f = *r;
1018 return 0;
1019 }
1020 if (sim_fpu_is_infinity (l))
1021 {
1022 if (sim_fpu_is_infinity (r)
1023 && l->sign != r->sign)
1024 {
1025 *f = sim_fpu_qnan;
1026 return sim_fpu_status_invalid_isi;
1027 }
1028 *f = *l;
1029 return 0;
1030 }
1031 if (sim_fpu_is_infinity (r))
1032 {
1033 *f = *r;
1034 return 0;
1035 }
1036 if (sim_fpu_is_zero (l))
1037 {
1038 if (sim_fpu_is_zero (r))
1039 {
1040 *f = sim_fpu_zero;
1041 f->sign = l->sign & r->sign;
1042 }
1043 else
1044 *f = *r;
1045 return 0;
1046 }
1047 if (sim_fpu_is_zero (r))
1048 {
1049 *f = *l;
1050 return 0;
1051 }
1052 {
1053 int status = 0;
1054 int shift = l->normal_exp - r->normal_exp;
1055 unsigned64 lfraction;
1056 unsigned64 rfraction;
1057 /* use exp of larger */
1058 if (shift >= NR_FRAC_GUARD)
1059 {
1060 /* left has much bigger magnitude */
1061 *f = *l;
1062 return sim_fpu_status_inexact;
1063 }
1064 if (shift <= - NR_FRAC_GUARD)
1065 {
1066 /* right has much bigger magnitude */
1067 *f = *r;
1068 return sim_fpu_status_inexact;
1069 }
1070 lfraction = l->fraction;
1071 rfraction = r->fraction;
1072 if (shift > 0)
1073 {
1074 f->normal_exp = l->normal_exp;
1075 if (rfraction & LSMASK64 (shift - 1, 0))
1076 {
1077 status |= sim_fpu_status_inexact;
1078 rfraction |= LSBIT64 (shift); /* Stick LSBit. */
1079 }
1080 rfraction >>= shift;
1081 }
1082 else if (shift < 0)
1083 {
1084 f->normal_exp = r->normal_exp;
1085 if (lfraction & LSMASK64 (- shift - 1, 0))
1086 {
1087 status |= sim_fpu_status_inexact;
1088 lfraction |= LSBIT64 (- shift); /* Stick LSBit. */
1089 }
1090 lfraction >>= -shift;
1091 }
1092 else
1093 {
1094 f->normal_exp = r->normal_exp;
1095 }
1096
1097 /* Perform the addition. */
1098 if (l->sign)
1099 lfraction = - lfraction;
1100 if (r->sign)
1101 rfraction = - rfraction;
1102 f->fraction = lfraction + rfraction;
1103
1104 /* zero? */
1105 if (f->fraction == 0)
1106 {
1107 *f = sim_fpu_zero;
1108 return 0;
1109 }
1110
1111 /* sign? */
1112 f->class = sim_fpu_class_number;
1113 if (((signed64) f->fraction) >= 0)
1114 f->sign = 0;
1115 else
1116 {
1117 f->sign = 1;
1118 f->fraction = - f->fraction;
1119 }
1120
1121 /* Normalize it. */
1122 if ((f->fraction & IMPLICIT_2))
1123 {
1124 f->fraction = (f->fraction >> 1) | (f->fraction & 1);
1125 f->normal_exp ++;
1126 }
1127 else if (f->fraction < IMPLICIT_1)
1128 {
1129 do
1130 {
1131 f->fraction <<= 1;
1132 f->normal_exp --;
1133 }
1134 while (f->fraction < IMPLICIT_1);
1135 }
1136 ASSERT (f->fraction >= IMPLICIT_1 && f->fraction < IMPLICIT_2);
1137 return status;
1138 }
1139 }
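
/* Example usage (an illustrative sketch, not part of the library): a
   target simulator typically unpacks its register images, operates,
   rounds the intermediate result and repacks it.  The helper name and
   register variables below are hypothetical.  */
#if 0
static unsigned32
example_fadd32 (unsigned32 ra, unsigned32 rb)
{
  sim_fpu a, b, sum;
  unsigned32 result;
  sim_fpu_32to (&a, ra);
  sim_fpu_32to (&b, rb);
  sim_fpu_add (&sum, &a, &b);
  /* Round back to single precision; pass 0 for the denorm argument as
     the rest of this file does.  */
  sim_fpu_round_32 (&sum, sim_fpu_round_near, 0);
  sim_fpu_to32 (&result, &sum);
  return result;
}
#endif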
1140
1141
1142 INLINE_SIM_FPU (int)
1143 sim_fpu_sub (sim_fpu *f,
1144 const sim_fpu *l,
1145 const sim_fpu *r)
1146 {
1147 if (sim_fpu_is_snan (l))
1148 {
1149 *f = *l;
1150 f->class = sim_fpu_class_qnan;
1151 return sim_fpu_status_invalid_snan;
1152 }
1153 if (sim_fpu_is_snan (r))
1154 {
1155 *f = *r;
1156 f->class = sim_fpu_class_qnan;
1157 return sim_fpu_status_invalid_snan;
1158 }
1159 if (sim_fpu_is_qnan (l))
1160 {
1161 *f = *l;
1162 return 0;
1163 }
1164 if (sim_fpu_is_qnan (r))
1165 {
1166 *f = *r;
1167 return 0;
1168 }
1169 if (sim_fpu_is_infinity (l))
1170 {
1171 if (sim_fpu_is_infinity (r)
1172 && l->sign == r->sign)
1173 {
1174 *f = sim_fpu_qnan;
1175 return sim_fpu_status_invalid_isi;
1176 }
1177 *f = *l;
1178 return 0;
1179 }
1180 if (sim_fpu_is_infinity (r))
1181 {
1182 *f = *r;
1183 f->sign = !r->sign;
1184 return 0;
1185 }
1186 if (sim_fpu_is_zero (l))
1187 {
1188 if (sim_fpu_is_zero (r))
1189 {
1190 *f = sim_fpu_zero;
1191 f->sign = l->sign & !r->sign;
1192 }
1193 else
1194 {
1195 *f = *r;
1196 f->sign = !r->sign;
1197 }
1198 return 0;
1199 }
1200 if (sim_fpu_is_zero (r))
1201 {
1202 *f = *l;
1203 return 0;
1204 }
1205 {
1206 int status = 0;
1207 int shift = l->normal_exp - r->normal_exp;
1208 unsigned64 lfraction;
1209 unsigned64 rfraction;
1210 /* use exp of larger */
1211 if (shift >= NR_FRAC_GUARD)
1212 {
1213 /* left has much bigger magnitude */
1214 *f = *l;
1215 return sim_fpu_status_inexact;
1216 }
1217 if (shift <= - NR_FRAC_GUARD)
1218 {
1219 /* right has much bigger magnitude */
1220 *f = *r;
1221 f->sign = !r->sign;
1222 return sim_fpu_status_inexact;
1223 }
1224 lfraction = l->fraction;
1225 rfraction = r->fraction;
1226 if (shift > 0)
1227 {
1228 f->normal_exp = l->normal_exp;
1229 if (rfraction & LSMASK64 (shift - 1, 0))
1230 {
1231 status |= sim_fpu_status_inexact;
1232 rfraction |= LSBIT64 (shift); /* Stick LSBit. */
1233 }
1234 rfraction >>= shift;
1235 }
1236 else if (shift < 0)
1237 {
1238 f->normal_exp = r->normal_exp;
1239 if (lfraction & LSMASK64 (- shift - 1, 0))
1240 {
1241 status |= sim_fpu_status_inexact;
1242 lfraction |= LSBIT64 (- shift); /* Stick LSBit. */
1243 }
1244 lfraction >>= -shift;
1245 }
1246 else
1247 {
1248 f->normal_exp = r->normal_exp;
1249 }
1250
1251 /* Perform the subtraction. */
1252 if (l->sign)
1253 lfraction = - lfraction;
1254 if (!r->sign)
1255 rfraction = - rfraction;
1256 f->fraction = lfraction + rfraction;
1257
1258 /* zero? */
1259 if (f->fraction == 0)
1260 {
1261 *f = sim_fpu_zero;
1262 return 0;
1263 }
1264
1265 /* sign? */
1266 f->class = sim_fpu_class_number;
1267 if (((signed64) f->fraction) >= 0)
1268 f->sign = 0;
1269 else
1270 {
1271 f->sign = 1;
1272 f->fraction = - f->fraction;
1273 }
1274
1275 /* Normalize it. */
1276 if ((f->fraction & IMPLICIT_2))
1277 {
1278 f->fraction = (f->fraction >> 1) | (f->fraction & 1);
1279 f->normal_exp ++;
1280 }
1281 else if (f->fraction < IMPLICIT_1)
1282 {
1283 do
1284 {
1285 f->fraction <<= 1;
1286 f->normal_exp --;
1287 }
1288 while (f->fraction < IMPLICIT_1);
1289 }
1290 ASSERT (f->fraction >= IMPLICIT_1 && f->fraction < IMPLICIT_2);
1291 return status;
1292 }
1293 }
1294
1295
1296 INLINE_SIM_FPU (int)
1297 sim_fpu_mul (sim_fpu *f,
1298 const sim_fpu *l,
1299 const sim_fpu *r)
1300 {
1301 if (sim_fpu_is_snan (l))
1302 {
1303 *f = *l;
1304 f->class = sim_fpu_class_qnan;
1305 return sim_fpu_status_invalid_snan;
1306 }
1307 if (sim_fpu_is_snan (r))
1308 {
1309 *f = *r;
1310 f->class = sim_fpu_class_qnan;
1311 return sim_fpu_status_invalid_snan;
1312 }
1313 if (sim_fpu_is_qnan (l))
1314 {
1315 *f = *l;
1316 return 0;
1317 }
1318 if (sim_fpu_is_qnan (r))
1319 {
1320 *f = *r;
1321 return 0;
1322 }
1323 if (sim_fpu_is_infinity (l))
1324 {
1325 if (sim_fpu_is_zero (r))
1326 {
1327 *f = sim_fpu_qnan;
1328 return sim_fpu_status_invalid_imz;
1329 }
1330 *f = *l;
1331 f->sign = l->sign ^ r->sign;
1332 return 0;
1333 }
1334 if (sim_fpu_is_infinity (r))
1335 {
1336 if (sim_fpu_is_zero (l))
1337 {
1338 *f = sim_fpu_qnan;
1339 return sim_fpu_status_invalid_imz;
1340 }
1341 *f = *r;
1342 f->sign = l->sign ^ r->sign;
1343 return 0;
1344 }
1345 if (sim_fpu_is_zero (l) || sim_fpu_is_zero (r))
1346 {
1347 *f = sim_fpu_zero;
1348 f->sign = l->sign ^ r->sign;
1349 return 0;
1350 }
1351 /* Calculate the mantissa by multiplying both 64bit numbers to get a
1352 128 bit number. */
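/* The identity used below (illustrative): writing the fractions as
   n = nh*2^32 + nl and m = mh*2^32 + ml gives
   n*m = nh*mh*2^64 + (nh*ml + nl*mh)*2^32 + nl*ml,
   so four 64 bit multiplies plus carry handling yield the full
   128 bit product in the high and low result words.  */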
1353 {
1354 unsigned64 low;
1355 unsigned64 high;
1356 unsigned64 nl = l->fraction & 0xffffffff;
1357 unsigned64 nh = l->fraction >> 32;
1358 unsigned64 ml = r->fraction & 0xffffffff;
1359 unsigned64 mh = r->fraction >>32;
1360 unsigned64 pp_ll = ml * nl;
1361 unsigned64 pp_hl = mh * nl;
1362 unsigned64 pp_lh = ml * nh;
1363 unsigned64 pp_hh = mh * nh;
1364 unsigned64 res2 = 0;
1365 unsigned64 res0 = 0;
1366 unsigned64 ps_hh__ = pp_hl + pp_lh;
1367 if (ps_hh__ < pp_hl)
1368 res2 += UNSIGNED64 (0x100000000);
1369 pp_hl = (ps_hh__ << 32) & UNSIGNED64 (0xffffffff00000000);
1370 res0 = pp_ll + pp_hl;
1371 if (res0 < pp_ll)
1372 res2++;
1373 res2 += ((ps_hh__ >> 32) & 0xffffffff) + pp_hh;
1374 high = res2;
1375 low = res0;
1376
1377 f->normal_exp = l->normal_exp + r->normal_exp;
1378 f->sign = l->sign ^ r->sign;
1379 f->class = sim_fpu_class_number;
1380
1381 /* Input is bounded by [1,2) ; [2^60,2^61)
1382 Output is bounded by [1,4) ; [2^120,2^122) */
1383
1384 /* Adjust the exponent according to where the decimal point ended
1385 up in the high 64 bit word. In the source the decimal point
1386 was at NR_FRAC_GUARD. */
1387 f->normal_exp += NR_FRAC_GUARD + 64 - (NR_FRAC_GUARD * 2);
1388
1389 /* The high word is bounded according to the above. Consequently
1390 it has never overflowed into IMPLICIT_2. */
1391 ASSERT (high < LSBIT64 (((NR_FRAC_GUARD + 1) * 2) - 64));
1392 ASSERT (high >= LSBIT64 ((NR_FRAC_GUARD * 2) - 64));
1393 ASSERT (LSBIT64 (((NR_FRAC_GUARD + 1) * 2) - 64) < IMPLICIT_1);
1394
1395 /* Normalize. */
1396 do
1397 {
1398 f->normal_exp--;
1399 high <<= 1;
1400 if (low & LSBIT64 (63))
1401 high |= 1;
1402 low <<= 1;
1403 }
1404 while (high < IMPLICIT_1);
1405
1406 ASSERT (high >= IMPLICIT_1 && high < IMPLICIT_2);
1407 if (low != 0)
1408 {
1409 f->fraction = (high | 1); /* sticky */
1410 return sim_fpu_status_inexact;
1411 }
1412 else
1413 {
1414 f->fraction = high;
1415 return 0;
1416 }
1417 return 0;
1418 }
1419 }
1420
1421 INLINE_SIM_FPU (int)
1422 sim_fpu_div (sim_fpu *f,
1423 const sim_fpu *l,
1424 const sim_fpu *r)
1425 {
1426 if (sim_fpu_is_snan (l))
1427 {
1428 *f = *l;
1429 f->class = sim_fpu_class_qnan;
1430 return sim_fpu_status_invalid_snan;
1431 }
1432 if (sim_fpu_is_snan (r))
1433 {
1434 *f = *r;
1435 f->class = sim_fpu_class_qnan;
1436 return sim_fpu_status_invalid_snan;
1437 }
1438 if (sim_fpu_is_qnan (l))
1439 {
1440 *f = *l;
1441 f->class = sim_fpu_class_qnan;
1442 return 0;
1443 }
1444 if (sim_fpu_is_qnan (r))
1445 {
1446 *f = *r;
1447 f->class = sim_fpu_class_qnan;
1448 return 0;
1449 }
1450 if (sim_fpu_is_infinity (l))
1451 {
1452 if (sim_fpu_is_infinity (r))
1453 {
1454 *f = sim_fpu_qnan;
1455 return sim_fpu_status_invalid_idi;
1456 }
1457 else
1458 {
1459 *f = *l;
1460 f->sign = l->sign ^ r->sign;
1461 return 0;
1462 }
1463 }
1464 if (sim_fpu_is_zero (l))
1465 {
1466 if (sim_fpu_is_zero (r))
1467 {
1468 *f = sim_fpu_qnan;
1469 return sim_fpu_status_invalid_zdz;
1470 }
1471 else
1472 {
1473 *f = *l;
1474 f->sign = l->sign ^ r->sign;
1475 return 0;
1476 }
1477 }
1478 if (sim_fpu_is_infinity (r))
1479 {
1480 *f = sim_fpu_zero;
1481 f->sign = l->sign ^ r->sign;
1482 return 0;
1483 }
1484 if (sim_fpu_is_zero (r))
1485 {
1486 f->class = sim_fpu_class_infinity;
1487 f->sign = l->sign ^ r->sign;
1488 return sim_fpu_status_invalid_div0;
1489 }
1490
 1491   /* Calculate the mantissa with a bit-by-bit (restoring) division of
 1492      the two fractions. */
1493 {
 1494     /* quotient = (numerator / denominator)
 1495                   x 2^(numerator exponent - denominator exponent) */
1497 unsigned64 numerator;
1498 unsigned64 denominator;
1499 unsigned64 quotient;
1500 unsigned64 bit;
1501
1502 f->class = sim_fpu_class_number;
1503 f->sign = l->sign ^ r->sign;
1504 f->normal_exp = l->normal_exp - r->normal_exp;
1505
1506 numerator = l->fraction;
1507 denominator = r->fraction;
1508
1509 /* Fraction will be less than 1.0 */
1510 if (numerator < denominator)
1511 {
1512 numerator <<= 1;
1513 f->normal_exp--;
1514 }
1515 ASSERT (numerator >= denominator);
1516
1517 /* Gain extra precision, already used one spare bit. */
1518 numerator <<= NR_SPARE;
1519 denominator <<= NR_SPARE;
1520
1521 /* Does divide one bit at a time. Optimize??? */
1522 quotient = 0;
1523 bit = (IMPLICIT_1 << NR_SPARE);
1524 while (bit)
1525 {
1526 if (numerator >= denominator)
1527 {
1528 quotient |= bit;
1529 numerator -= denominator;
1530 }
1531 bit >>= 1;
1532 numerator <<= 1;
1533 }
1534
1535 /* Discard (but save) the extra bits. */
 1536     if ((quotient & LSMASK64 (NR_SPARE - 1, 0)))
1537 quotient = (quotient >> NR_SPARE) | 1;
1538 else
1539 quotient = (quotient >> NR_SPARE);
1540
1541 f->fraction = quotient;
1542 ASSERT (f->fraction >= IMPLICIT_1 && f->fraction < IMPLICIT_2);
1543 if (numerator != 0)
1544 {
1545 f->fraction |= 1; /* Stick remaining bits. */
1546 return sim_fpu_status_inexact;
1547 }
1548 else
1549 return 0;
1550 }
1551 }
1552
1553
1554 INLINE_SIM_FPU (int)
1555 sim_fpu_rem (sim_fpu *f,
1556 const sim_fpu *l,
1557 const sim_fpu *r)
1558 {
1559 if (sim_fpu_is_snan (l))
1560 {
1561 *f = *l;
1562 f->class = sim_fpu_class_qnan;
1563 return sim_fpu_status_invalid_snan;
1564 }
1565 if (sim_fpu_is_snan (r))
1566 {
1567 *f = *r;
1568 f->class = sim_fpu_class_qnan;
1569 return sim_fpu_status_invalid_snan;
1570 }
1571 if (sim_fpu_is_qnan (l))
1572 {
1573 *f = *l;
1574 f->class = sim_fpu_class_qnan;
1575 return 0;
1576 }
1577 if (sim_fpu_is_qnan (r))
1578 {
1579 *f = *r;
1580 f->class = sim_fpu_class_qnan;
1581 return 0;
1582 }
1583 if (sim_fpu_is_infinity (l))
1584 {
1585 *f = sim_fpu_qnan;
1586 return sim_fpu_status_invalid_irx;
1587 }
1588 if (sim_fpu_is_zero (r))
1589 {
1590 *f = sim_fpu_qnan;
1591 return sim_fpu_status_invalid_div0;
1592 }
1593 if (sim_fpu_is_zero (l))
1594 {
1595 *f = *l;
1596 return 0;
1597 }
1598 if (sim_fpu_is_infinity (r))
1599 {
1600 *f = *l;
1601 return 0;
1602 }
1603 {
1604 sim_fpu n, tmp;
1605
1606 /* Remainder is calculated as l-n*r, where n is l/r rounded to the
1607 nearest integer. The variable n is rounded half even. */
1608
1609 sim_fpu_div (&n, l, r);
1610 sim_fpu_round_64 (&n, 0, 0);
1611
1612 if (n.normal_exp < -1) /* If n looks like zero just return l. */
1613 {
1614 *f = *l;
1615 return 0;
1616 }
1617 else if (n.class == sim_fpu_class_number
1618 && n.normal_exp <= (NR_FRAC_GUARD)) /* If not too large round. */
1619 do_normal_round (&n, (NR_FRAC_GUARD) - n.normal_exp, sim_fpu_round_near);
1620
1621 /* Mark 0's as zero so multiply can detect zero. */
1622 if (n.fraction == 0)
1623 n.class = sim_fpu_class_zero;
1624
1625 /* Calculate n*r. */
1626 sim_fpu_mul (&tmp, &n, r);
1627 sim_fpu_round_64 (&tmp, 0, 0);
1628
1629 /* Finally calculate l-n*r. */
1630 sim_fpu_sub (f, l, &tmp);
1631
1632 return 0;
1633 }
1634 }
1635
1636
1637 INLINE_SIM_FPU (int)
1638 sim_fpu_max (sim_fpu *f,
1639 const sim_fpu *l,
1640 const sim_fpu *r)
1641 {
1642 if (sim_fpu_is_snan (l))
1643 {
1644 *f = *l;
1645 f->class = sim_fpu_class_qnan;
1646 return sim_fpu_status_invalid_snan;
1647 }
1648 if (sim_fpu_is_snan (r))
1649 {
1650 *f = *r;
1651 f->class = sim_fpu_class_qnan;
1652 return sim_fpu_status_invalid_snan;
1653 }
1654 if (sim_fpu_is_qnan (l))
1655 {
1656 *f = *l;
1657 return 0;
1658 }
1659 if (sim_fpu_is_qnan (r))
1660 {
1661 *f = *r;
1662 return 0;
1663 }
1664 if (sim_fpu_is_infinity (l))
1665 {
1666 if (sim_fpu_is_infinity (r)
1667 && l->sign == r->sign)
1668 {
1669 *f = sim_fpu_qnan;
1670 return sim_fpu_status_invalid_isi;
1671 }
1672 if (l->sign)
1673 *f = *r; /* -inf < anything */
1674 else
1675 *f = *l; /* +inf > anything */
1676 return 0;
1677 }
1678 if (sim_fpu_is_infinity (r))
1679 {
1680 if (r->sign)
1681 *f = *l; /* anything > -inf */
1682 else
1683 *f = *r; /* anything < +inf */
1684 return 0;
1685 }
1686 if (l->sign > r->sign)
1687 {
1688 *f = *r; /* -ve < +ve */
1689 return 0;
1690 }
1691 if (l->sign < r->sign)
1692 {
1693 *f = *l; /* +ve > -ve */
1694 return 0;
1695 }
1696 ASSERT (l->sign == r->sign);
1697 if (l->normal_exp > r->normal_exp
1698 || (l->normal_exp == r->normal_exp
1699 && l->fraction > r->fraction))
1700 {
1701 /* |l| > |r| */
1702 if (l->sign)
1703 *f = *r; /* -ve < -ve */
1704 else
1705 *f = *l; /* +ve > +ve */
1706 return 0;
1707 }
1708 else
1709 {
1710 /* |l| <= |r| */
1711 if (l->sign)
1712 *f = *l; /* -ve > -ve */
1713 else
1714 *f = *r; /* +ve < +ve */
1715 return 0;
1716 }
1717 }
1718
1719
1720 INLINE_SIM_FPU (int)
1721 sim_fpu_min (sim_fpu *f,
1722 const sim_fpu *l,
1723 const sim_fpu *r)
1724 {
1725 if (sim_fpu_is_snan (l))
1726 {
1727 *f = *l;
1728 f->class = sim_fpu_class_qnan;
1729 return sim_fpu_status_invalid_snan;
1730 }
1731 if (sim_fpu_is_snan (r))
1732 {
1733 *f = *r;
1734 f->class = sim_fpu_class_qnan;
1735 return sim_fpu_status_invalid_snan;
1736 }
1737 if (sim_fpu_is_qnan (l))
1738 {
1739 *f = *l;
1740 return 0;
1741 }
1742 if (sim_fpu_is_qnan (r))
1743 {
1744 *f = *r;
1745 return 0;
1746 }
1747 if (sim_fpu_is_infinity (l))
1748 {
1749 if (sim_fpu_is_infinity (r)
1750 && l->sign == r->sign)
1751 {
1752 *f = sim_fpu_qnan;
1753 return sim_fpu_status_invalid_isi;
1754 }
1755 if (l->sign)
1756 *f = *l; /* -inf < anything */
1757 else
 1758        *f = *r; /* +inf > anything */
1759 return 0;
1760 }
1761 if (sim_fpu_is_infinity (r))
1762 {
1763 if (r->sign)
1764 *f = *r; /* anything > -inf */
1765 else
1766 *f = *l; /* anything < +inf */
1767 return 0;
1768 }
1769 if (l->sign > r->sign)
1770 {
1771 *f = *l; /* -ve < +ve */
1772 return 0;
1773 }
1774 if (l->sign < r->sign)
1775 {
1776 *f = *r; /* +ve > -ve */
1777 return 0;
1778 }
1779 ASSERT (l->sign == r->sign);
1780 if (l->normal_exp > r->normal_exp
1781 || (l->normal_exp == r->normal_exp
1782 && l->fraction > r->fraction))
1783 {
1784 /* |l| > |r| */
1785 if (l->sign)
1786 *f = *l; /* -ve < -ve */
1787 else
1788 *f = *r; /* +ve > +ve */
1789 return 0;
1790 }
1791 else
1792 {
1793 /* |l| <= |r| */
1794 if (l->sign)
1795 *f = *r; /* -ve > -ve */
1796 else
1797 *f = *l; /* +ve < +ve */
1798 return 0;
1799 }
1800 }
1801
1802
1803 INLINE_SIM_FPU (int)
1804 sim_fpu_neg (sim_fpu *f,
1805 const sim_fpu *r)
1806 {
1807 if (sim_fpu_is_snan (r))
1808 {
1809 *f = *r;
1810 f->class = sim_fpu_class_qnan;
1811 return sim_fpu_status_invalid_snan;
1812 }
1813 if (sim_fpu_is_qnan (r))
1814 {
1815 *f = *r;
1816 return 0;
1817 }
1818 *f = *r;
1819 f->sign = !r->sign;
1820 return 0;
1821 }
1822
1823
1824 INLINE_SIM_FPU (int)
1825 sim_fpu_abs (sim_fpu *f,
1826 const sim_fpu *r)
1827 {
1828 *f = *r;
1829 f->sign = 0;
1830 if (sim_fpu_is_snan (r))
1831 {
1832 f->class = sim_fpu_class_qnan;
1833 return sim_fpu_status_invalid_snan;
1834 }
1835 return 0;
1836 }
1837
1838
1839 INLINE_SIM_FPU (int)
1840 sim_fpu_inv (sim_fpu *f,
1841 const sim_fpu *r)
1842 {
1843 return sim_fpu_div (f, &sim_fpu_one, r);
1844 }
1845
1846
1847 INLINE_SIM_FPU (int)
1848 sim_fpu_sqrt (sim_fpu *f,
1849 const sim_fpu *r)
1850 {
1851 if (sim_fpu_is_snan (r))
1852 {
1853 *f = sim_fpu_qnan;
1854 return sim_fpu_status_invalid_snan;
1855 }
1856 if (sim_fpu_is_qnan (r))
1857 {
1858 *f = sim_fpu_qnan;
1859 return 0;
1860 }
1861 if (sim_fpu_is_zero (r))
1862 {
1863 f->class = sim_fpu_class_zero;
1864 f->sign = r->sign;
1865 f->normal_exp = 0;
1866 return 0;
1867 }
1868 if (sim_fpu_is_infinity (r))
1869 {
1870 if (r->sign)
1871 {
1872 *f = sim_fpu_qnan;
1873 return sim_fpu_status_invalid_sqrt;
1874 }
1875 else
1876 {
1877 f->class = sim_fpu_class_infinity;
 1878          f->sign = 0;
1880 return 0;
1881 }
1882 }
1883 if (r->sign)
1884 {
1885 *f = sim_fpu_qnan;
1886 return sim_fpu_status_invalid_sqrt;
1887 }
1888
1889 /* @(#)e_sqrt.c 5.1 93/09/24 */
1890 /*
1891 * ====================================================
1892 * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
1893 *
1894 * Developed at SunPro, a Sun Microsystems, Inc. business.
1895 * Permission to use, copy, modify, and distribute this
1896 * software is freely granted, provided that this notice
1897 * is preserved.
1898 * ====================================================
1899 */
1900
1901 /* __ieee754_sqrt(x)
1902 * Return correctly rounded sqrt.
1903 * ------------------------------------------
1904 * | Use the hardware sqrt if you have one |
1905 * ------------------------------------------
1906 * Method:
1907 * Bit by bit method using integer arithmetic. (Slow, but portable)
1908 * 1. Normalization
1909 * Scale x to y in [1,4) with even powers of 2:
1910 * find an integer k such that 1 <= (y=x*2^(2k)) < 4, then
1911 * sqrt(x) = 2^k * sqrt(y)
1912 -
1913 - Since:
1914 - sqrt ( x*2^(2m) ) = sqrt(x).2^m ; m even
1915 - sqrt ( x*2^(2m + 1) ) = sqrt(2.x).2^m ; m odd
1916 - Define:
1917 - y = ((m even) ? x : 2.x)
1918 - Then:
1919 - y in [1, 4) ; [IMPLICIT_1,IMPLICIT_4)
1920 - And:
1921 - sqrt (y) in [1, 2) ; [IMPLICIT_1,IMPLICIT_2)
1922 -
1923 * 2. Bit by bit computation
1924 * Let q = sqrt(y) truncated to i bit after binary point (q = 1),
1925 * i 0
1926 * i+1 2
1927 * s = 2*q , and y = 2 * ( y - q ). (1)
1928 * i i i i
1929 *
1930 * To compute q from q , one checks whether
1931 * i+1 i
1932 *
1933 * -(i+1) 2
1934 * (q + 2 ) <= y. (2)
1935 * i
1936 * -(i+1)
1937 * If (2) is false, then q = q ; otherwise q = q + 2 .
1938 * i+1 i i+1 i
1939 *
1940 * With some algebraic manipulation, it is not difficult to see
1941 * that (2) is equivalent to
1942 * -(i+1)
1943 * s + 2 <= y (3)
1944 * i i
1945 *
1946 * The advantage of (3) is that s and y can be computed by
1947 * i i
1948 * the following recurrence formula:
1949 * if (3) is false
1950 *
1951 * s = s , y = y ; (4)
1952 * i+1 i i+1 i
1953 *
1954 -
1955 - NOTE: y = 2*y
1956 - i+1 i
1957 -
1958 * otherwise,
1959 * -i -(i+1)
1960 * s = s + 2 , y = y - s - 2 (5)
1961 * i+1 i i+1 i i
1962 *
1963 -
1964 - -(i+1)
1965 - NOTE: y = 2 (y - s - 2 )
1966 - i+1 i i
1967 -
1968 * One may easily use induction to prove (4) and (5).
1969 * Note. Since the left hand side of (3) contain only i+2 bits,
1970 * it does not necessary to do a full (53-bit) comparison
1971 * in (3).
1972 * 3. Final rounding
1973 * After generating the 53 bits result, we compute one more bit.
1974 * Together with the remainder, we can decide whether the
1975 * result is exact, bigger than 1/2ulp, or less than 1/2ulp
1976 * (it will never equal to 1/2ulp).
1977 * The rounding mode can be detected by checking whether
1978 * huge + tiny is equal to huge, and whether huge - tiny is
1979 * equal to huge for some floating point number "huge" and "tiny".
1980 *
1981 * Special cases:
1982 * sqrt(+-0) = +-0 ... exact
1983 * sqrt(inf) = inf
1984 * sqrt(-ve) = NaN ... with invalid signal
1985 * sqrt(NaN) = NaN ... with invalid signal for signalling NaN
1986 *
1987 * Other methods : see the appended file at the end of the program below.
1988 *---------------
1989 */
1990
1991 {
1992 /* Generate sqrt(x) bit by bit. */
1993 unsigned64 y;
1994 unsigned64 q;
1995 unsigned64 s;
1996 unsigned64 b;
1997
1998 f->class = sim_fpu_class_number;
1999 f->sign = 0;
2000 y = r->fraction;
2001 f->normal_exp = (r->normal_exp >> 1); /* exp = [exp/2] */
2002
2003 /* Odd exp, double x to make it even. */
2004 ASSERT (y >= IMPLICIT_1 && y < IMPLICIT_4);
2005 if ((r->normal_exp & 1))
2006 {
2007 y += y;
2008 }
2009 ASSERT (y >= IMPLICIT_1 && y < (IMPLICIT_2 << 1));
2010
2011 /* Let loop determine first value of s (either 1 or 2) */
2012 b = IMPLICIT_1;
2013 q = 0;
2014 s = 0;
2015
2016 while (b)
2017 {
2018 unsigned64 t = s + b;
2019 if (t <= y)
2020 {
2021 s |= (b << 1);
2022 y -= t;
2023 q |= b;
2024 }
2025 y <<= 1;
2026 b >>= 1;
2027 }
2028
2029 ASSERT (q >= IMPLICIT_1 && q < IMPLICIT_2);
2030 f->fraction = q;
2031 if (y != 0)
2032 {
2033 f->fraction |= 1; /* Stick remaining bits. */
2034 return sim_fpu_status_inexact;
2035 }
2036 else
2037 return 0;
2038 }
2039 }
2040
2041
2042 /* int/long <-> sim_fpu */
2043
2044 INLINE_SIM_FPU (int)
2045 sim_fpu_i32to (sim_fpu *f,
2046 signed32 i,
2047 sim_fpu_round round)
2048 {
2049 i2fpu (f, i, 0);
2050 return 0;
2051 }
2052
2053 INLINE_SIM_FPU (int)
2054 sim_fpu_u32to (sim_fpu *f,
2055 unsigned32 u,
2056 sim_fpu_round round)
2057 {
2058 u2fpu (f, u, 0);
2059 return 0;
2060 }
2061
2062 INLINE_SIM_FPU (int)
2063 sim_fpu_i64to (sim_fpu *f,
2064 signed64 i,
2065 sim_fpu_round round)
2066 {
2067 i2fpu (f, i, 1);
2068 return 0;
2069 }
2070
2071 INLINE_SIM_FPU (int)
2072 sim_fpu_u64to (sim_fpu *f,
2073 unsigned64 u,
2074 sim_fpu_round round)
2075 {
2076 u2fpu (f, u, 1);
2077 return 0;
2078 }
2079
2080
2081 INLINE_SIM_FPU (int)
2082 sim_fpu_to32i (signed32 *i,
2083 const sim_fpu *f,
2084 sim_fpu_round round)
2085 {
2086 signed64 i64;
2087 int status = fpu2i (&i64, f, 0, round);
2088 *i = i64;
2089 return status;
2090 }
2091
2092 INLINE_SIM_FPU (int)
2093 sim_fpu_to32u (unsigned32 *u,
2094 const sim_fpu *f,
2095 sim_fpu_round round)
2096 {
2097 unsigned64 u64;
2098 int status = fpu2u (&u64, f, 0);
2099 *u = u64;
2100 return status;
2101 }
2102
2103 INLINE_SIM_FPU (int)
2104 sim_fpu_to64i (signed64 *i,
2105 const sim_fpu *f,
2106 sim_fpu_round round)
2107 {
2108 return fpu2i (i, f, 1, round);
2109 }
2110
2111
2112 INLINE_SIM_FPU (int)
2113 sim_fpu_to64u (unsigned64 *u,
2114 const sim_fpu *f,
2115 sim_fpu_round round)
2116 {
2117 return fpu2u (u, f, 1);
2118 }
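
/* Example (an illustrative sketch; the helper name is hypothetical):
   with this implementation the conversion below truncates the
   fraction, yielding 2 plus the inexact status bit for the input
   2.5.  */
#if 0
static void
example_to32i (void)
{
  sim_fpu t;
  signed32 i;
  int status;
  sim_fpu_d2 (&t, 2.5);
  status = sim_fpu_to32i (&i, &t, sim_fpu_round_near);
  /* Here i == 2 and status includes sim_fpu_status_inexact.  */
}
#endif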
2119
2120
2121
2122 /* sim_fpu -> host format */
2123
2124 #if 0
2125 INLINE_SIM_FPU (float)
2126 sim_fpu_2f (const sim_fpu *f)
2127 {
2128 return fval.d;
2129 }
2130 #endif
2131
2132
2133 INLINE_SIM_FPU (double)
2134 sim_fpu_2d (const sim_fpu *s)
2135 {
2136 sim_fpu_map val;
2137 if (sim_fpu_is_snan (s))
2138 {
2139 /* gag SNaN's */
2140 sim_fpu n = *s;
2141 n.class = sim_fpu_class_qnan;
2142 val.i = pack_fpu (&n, 1);
2143 }
2144 else
2145 {
2146 val.i = pack_fpu (s, 1);
2147 }
2148 return val.d;
2149 }
2150
2151
2152 #if 0
2153 INLINE_SIM_FPU (void)
2154 sim_fpu_f2 (sim_fpu *f,
2155 float s)
2156 {
2157 sim_fpu_map val;
2158 val.d = s;
2159 unpack_fpu (f, val.i, 1);
2160 }
2161 #endif
2162
2163
2164 INLINE_SIM_FPU (void)
2165 sim_fpu_d2 (sim_fpu *f,
2166 double d)
2167 {
2168 sim_fpu_map val;
2169 val.d = d;
2170 unpack_fpu (f, val.i, 1);
2171 }
2172
2173
2174 /* General */
2175
2176 INLINE_SIM_FPU (int)
2177 sim_fpu_is_nan (const sim_fpu *d)
2178 {
2179 switch (d->class)
2180 {
2181 case sim_fpu_class_qnan:
2182 case sim_fpu_class_snan:
2183 return 1;
2184 default:
2185 return 0;
2186 }
2187 }
2188
2189 INLINE_SIM_FPU (int)
2190 sim_fpu_is_qnan (const sim_fpu *d)
2191 {
2192 switch (d->class)
2193 {
2194 case sim_fpu_class_qnan:
2195 return 1;
2196 default:
2197 return 0;
2198 }
2199 }
2200
2201 INLINE_SIM_FPU (int)
2202 sim_fpu_is_snan (const sim_fpu *d)
2203 {
2204 switch (d->class)
2205 {
2206 case sim_fpu_class_snan:
2207 return 1;
2208 default:
2209 return 0;
2210 }
2211 }
2212
2213 INLINE_SIM_FPU (int)
2214 sim_fpu_is_zero (const sim_fpu *d)
2215 {
2216 switch (d->class)
2217 {
2218 case sim_fpu_class_zero:
2219 return 1;
2220 default:
2221 return 0;
2222 }
2223 }
2224
2225 INLINE_SIM_FPU (int)
2226 sim_fpu_is_infinity (const sim_fpu *d)
2227 {
2228 switch (d->class)
2229 {
2230 case sim_fpu_class_infinity:
2231 return 1;
2232 default:
2233 return 0;
2234 }
2235 }
2236
2237 INLINE_SIM_FPU (int)
2238 sim_fpu_is_number (const sim_fpu *d)
2239 {
2240 switch (d->class)
2241 {
2242 case sim_fpu_class_denorm:
2243 case sim_fpu_class_number:
2244 return 1;
2245 default:
2246 return 0;
2247 }
2248 }
2249
2250 INLINE_SIM_FPU (int)
2251 sim_fpu_is_denorm (const sim_fpu *d)
2252 {
2253 switch (d->class)
2254 {
2255 case sim_fpu_class_denorm:
2256 return 1;
2257 default:
2258 return 0;
2259 }
2260 }
2261
2262
2263 INLINE_SIM_FPU (int)
2264 sim_fpu_sign (const sim_fpu *d)
2265 {
2266 return d->sign;
2267 }
2268
2269
2270 INLINE_SIM_FPU (int)
2271 sim_fpu_exp (const sim_fpu *d)
2272 {
2273 return d->normal_exp;
2274 }
2275
2276
2277 INLINE_SIM_FPU (unsigned64)
2278 sim_fpu_fraction (const sim_fpu *d)
2279 {
2280 return d->fraction;
2281 }
2282
2283
2284 INLINE_SIM_FPU (unsigned64)
2285 sim_fpu_guard (const sim_fpu *d, int is_double)
2286 {
2287 unsigned64 rv;
2288 unsigned64 guardmask = LSMASK64 (NR_GUARDS - 1, 0);
2289 rv = (d->fraction & guardmask) >> NR_PAD;
2290 return rv;
2291 }
2292
2293
2294 INLINE_SIM_FPU (int)
2295 sim_fpu_is (const sim_fpu *d)
2296 {
2297 switch (d->class)
2298 {
2299 case sim_fpu_class_qnan:
2300 return SIM_FPU_IS_QNAN;
2301 case sim_fpu_class_snan:
2302 return SIM_FPU_IS_SNAN;
2303 case sim_fpu_class_infinity:
2304 if (d->sign)
2305 return SIM_FPU_IS_NINF;
2306 else
2307 return SIM_FPU_IS_PINF;
2308 case sim_fpu_class_number:
2309 if (d->sign)
2310 return SIM_FPU_IS_NNUMBER;
2311 else
2312 return SIM_FPU_IS_PNUMBER;
2313 case sim_fpu_class_denorm:
2314 if (d->sign)
2315 return SIM_FPU_IS_NDENORM;
2316 else
2317 return SIM_FPU_IS_PDENORM;
2318 case sim_fpu_class_zero:
2319 if (d->sign)
2320 return SIM_FPU_IS_NZERO;
2321 else
2322 return SIM_FPU_IS_PZERO;
2323 default:
2324 return -1;
2325 abort ();
2326 }
2327 }
2328
2329 INLINE_SIM_FPU (int)
2330 sim_fpu_cmp (const sim_fpu *l, const sim_fpu *r)
2331 {
2332 sim_fpu res;
2333 sim_fpu_sub (&res, l, r);
2334 return sim_fpu_is (&res);
2335 }
2336
2337 INLINE_SIM_FPU (int)
2338 sim_fpu_is_lt (const sim_fpu *l, const sim_fpu *r)
2339 {
2340 int status;
2341 sim_fpu_lt (&status, l, r);
2342 return status;
2343 }
2344
2345 INLINE_SIM_FPU (int)
2346 sim_fpu_is_le (const sim_fpu *l, const sim_fpu *r)
2347 {
2348 int is;
2349 sim_fpu_le (&is, l, r);
2350 return is;
2351 }
2352
2353 INLINE_SIM_FPU (int)
2354 sim_fpu_is_eq (const sim_fpu *l, const sim_fpu *r)
2355 {
2356 int is;
2357 sim_fpu_eq (&is, l, r);
2358 return is;
2359 }
2360
2361 INLINE_SIM_FPU (int)
2362 sim_fpu_is_ne (const sim_fpu *l, const sim_fpu *r)
2363 {
2364 int is;
2365 sim_fpu_ne (&is, l, r);
2366 return is;
2367 }
2368
2369 INLINE_SIM_FPU (int)
2370 sim_fpu_is_ge (const sim_fpu *l, const sim_fpu *r)
2371 {
2372 int is;
2373 sim_fpu_ge (&is, l, r);
2374 return is;
2375 }
2376
2377 INLINE_SIM_FPU (int)
2378 sim_fpu_is_gt (const sim_fpu *l, const sim_fpu *r)
2379 {
2380 int is;
2381 sim_fpu_gt (&is, l, r);
2382 return is;
2383 }
2384
2385
2386 /* Compare operators */
2387
2388 INLINE_SIM_FPU (int)
2389 sim_fpu_lt (int *is,
2390 const sim_fpu *l,
2391 const sim_fpu *r)
2392 {
2393 if (!sim_fpu_is_nan (l) && !sim_fpu_is_nan (r))
2394 {
2395 sim_fpu_map lval;
2396 sim_fpu_map rval;
2397 lval.i = pack_fpu (l, 1);
2398 rval.i = pack_fpu (r, 1);
2399 (*is) = (lval.d < rval.d);
2400 return 0;
2401 }
2402 else if (sim_fpu_is_snan (l) || sim_fpu_is_snan (r))
2403 {
2404 *is = 0;
2405 return sim_fpu_status_invalid_snan;
2406 }
2407 else
2408 {
2409 *is = 0;
2410 return sim_fpu_status_invalid_qnan;
2411 }
2412 }
2413
2414 INLINE_SIM_FPU (int)
2415 sim_fpu_le (int *is,
2416 const sim_fpu *l,
2417 const sim_fpu *r)
2418 {
2419 if (!sim_fpu_is_nan (l) && !sim_fpu_is_nan (r))
2420 {
2421 sim_fpu_map lval;
2422 sim_fpu_map rval;
2423 lval.i = pack_fpu (l, 1);
2424 rval.i = pack_fpu (r, 1);
2425 *is = (lval.d <= rval.d);
2426 return 0;
2427 }
2428 else if (sim_fpu_is_snan (l) || sim_fpu_is_snan (r))
2429 {
2430 *is = 0;
2431 return sim_fpu_status_invalid_snan;
2432 }
2433 else
2434 {
2435 *is = 0;
2436 return sim_fpu_status_invalid_qnan;
2437 }
2438 }
2439
2440 INLINE_SIM_FPU (int)
2441 sim_fpu_eq (int *is,
2442 const sim_fpu *l,
2443 const sim_fpu *r)
2444 {
2445 if (!sim_fpu_is_nan (l) && !sim_fpu_is_nan (r))
2446 {
2447 sim_fpu_map lval;
2448 sim_fpu_map rval;
2449 lval.i = pack_fpu (l, 1);
2450 rval.i = pack_fpu (r, 1);
2451 (*is) = (lval.d == rval.d);
2452 return 0;
2453 }
2454 else if (sim_fpu_is_snan (l) || sim_fpu_is_snan (r))
2455 {
2456 *is = 0;
2457 return sim_fpu_status_invalid_snan;
2458 }
2459 else
2460 {
2461 *is = 0;
2462 return sim_fpu_status_invalid_qnan;
2463 }
2464 }
2465
2466 INLINE_SIM_FPU (int)
2467 sim_fpu_ne (int *is,
2468 const sim_fpu *l,
2469 const sim_fpu *r)
2470 {
2471 if (!sim_fpu_is_nan (l) && !sim_fpu_is_nan (r))
2472 {
2473 sim_fpu_map lval;
2474 sim_fpu_map rval;
2475 lval.i = pack_fpu (l, 1);
2476 rval.i = pack_fpu (r, 1);
2477 (*is) = (lval.d != rval.d);
2478 return 0;
2479 }
2480 else if (sim_fpu_is_snan (l) || sim_fpu_is_snan (r))
2481 {
2482 *is = 0;
2483 return sim_fpu_status_invalid_snan;
2484 }
2485 else
2486 {
2487 *is = 0;
2488 return sim_fpu_status_invalid_qnan;
2489 }
2490 }
2491
2492 INLINE_SIM_FPU (int)
2493 sim_fpu_ge (int *is,
2494 const sim_fpu *l,
2495 const sim_fpu *r)
2496 {
2497 return sim_fpu_le (is, r, l);
2498 }
2499
2500 INLINE_SIM_FPU (int)
2501 sim_fpu_gt (int *is,
2502 const sim_fpu *l,
2503 const sim_fpu *r)
2504 {
2505 return sim_fpu_lt (is, r, l);
2506 }
2507
2508
2509 /* A number of useful constants */
2510
2511 #if EXTERN_SIM_FPU_P
2512 const sim_fpu sim_fpu_zero = {
2513 sim_fpu_class_zero, 0, 0, 0
2514 };
2515 const sim_fpu sim_fpu_qnan = {
2516 sim_fpu_class_qnan, 0, 0, 0
2517 };
2518 const sim_fpu sim_fpu_one = {
2519 sim_fpu_class_number, 0, IMPLICIT_1, 0
2520 };
2521 const sim_fpu sim_fpu_two = {
2522 sim_fpu_class_number, 0, IMPLICIT_1, 1
2523 };
2524 const sim_fpu sim_fpu_max32 = {
2525 sim_fpu_class_number, 0, LSMASK64 (NR_FRAC_GUARD, NR_GUARDS32), NORMAL_EXPMAX32
2526 };
2527 const sim_fpu sim_fpu_max64 = {
2528 sim_fpu_class_number, 0, LSMASK64 (NR_FRAC_GUARD, NR_GUARDS64), NORMAL_EXPMAX64
2529 };
2530 #endif
2531
2532
2533 /* For debugging */
2534
2535 INLINE_SIM_FPU (void)
2536 sim_fpu_print_fpu (const sim_fpu *f,
2537 sim_fpu_print_func *print,
2538 void *arg)
2539 {
2540 sim_fpu_printn_fpu (f, print, -1, arg);
2541 }
2542
2543 INLINE_SIM_FPU (void)
2544 sim_fpu_printn_fpu (const sim_fpu *f,
2545 sim_fpu_print_func *print,
2546 int digits,
2547 void *arg)
2548 {
2549 print (arg, "%s", f->sign ? "-" : "+");
2550 switch (f->class)
2551 {
2552 case sim_fpu_class_qnan:
2553 print (arg, "0.");
2554 print_bits (f->fraction, NR_FRAC_GUARD - 1, digits, print, arg);
2555 print (arg, "*QuietNaN");
2556 break;
2557 case sim_fpu_class_snan:
2558 print (arg, "0.");
2559 print_bits (f->fraction, NR_FRAC_GUARD - 1, digits, print, arg);
2560 print (arg, "*SignalNaN");
2561 break;
2562 case sim_fpu_class_zero:
2563 print (arg, "0.0");
2564 break;
2565 case sim_fpu_class_infinity:
2566 print (arg, "INF");
2567 break;
2568 case sim_fpu_class_number:
2569 case sim_fpu_class_denorm:
2570 print (arg, "1.");
2571 print_bits (f->fraction, NR_FRAC_GUARD - 1, digits, print, arg);
2572 print (arg, "*2^%+d", f->normal_exp);
2573 ASSERT (f->fraction >= IMPLICIT_1);
2574 ASSERT (f->fraction < IMPLICIT_2);
2575 }
2576 }
2577
2578
2579 INLINE_SIM_FPU (void)
2580 sim_fpu_print_status (int status,
2581 sim_fpu_print_func *print,
2582 void *arg)
2583 {
2584 int i = 1;
2585 const char *prefix = "";
2586 while (status >= i)
2587 {
2588 switch ((sim_fpu_status) (status & i))
2589 {
2590 case sim_fpu_status_denorm:
2591 print (arg, "%sD", prefix);
2592 break;
2593 case sim_fpu_status_invalid_snan:
2594 print (arg, "%sSNaN", prefix);
2595 break;
2596 case sim_fpu_status_invalid_qnan:
2597 print (arg, "%sQNaN", prefix);
2598 break;
2599 case sim_fpu_status_invalid_isi:
2600 print (arg, "%sISI", prefix);
2601 break;
2602 case sim_fpu_status_invalid_idi:
2603 print (arg, "%sIDI", prefix);
2604 break;
2605 case sim_fpu_status_invalid_zdz:
2606 print (arg, "%sZDZ", prefix);
2607 break;
2608 case sim_fpu_status_invalid_imz:
2609 print (arg, "%sIMZ", prefix);
2610 break;
2611 case sim_fpu_status_invalid_cvi:
2612 print (arg, "%sCVI", prefix);
2613 break;
2614 case sim_fpu_status_invalid_cmp:
2615 print (arg, "%sCMP", prefix);
2616 break;
2617 case sim_fpu_status_invalid_sqrt:
2618 print (arg, "%sSQRT", prefix);
2619 break;
2620 case sim_fpu_status_invalid_irx:
2621 print (arg, "%sIRX", prefix);
2622 break;
2623 case sim_fpu_status_inexact:
2624 print (arg, "%sX", prefix);
2625 break;
2626 case sim_fpu_status_overflow:
2627 print (arg, "%sO", prefix);
2628 break;
2629 case sim_fpu_status_underflow:
2630 print (arg, "%sU", prefix);
2631 break;
2632 case sim_fpu_status_invalid_div0:
2633 print (arg, "%s/", prefix);
2634 break;
2635 case sim_fpu_status_rounded:
2636 print (arg, "%sR", prefix);
2637 break;
2638 }
2639 i <<= 1;
2640 prefix = ",";
2641 }
2642 }
2643
2644 #endif