1 /* This is a software floating point library which can be used instead
2 of the floating point routines in libgcc1.c for targets without
3 hardware floating point. */
4
5 /* Copyright 1994, 1997, 1998, 2003, 2007 Free Software Foundation, Inc.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 /* As a special exception, if you link this library with other files,
21 some of which are compiled with GCC, to produce an executable,
22 this library does not by itself cause the resulting executable
23 to be covered by the GNU General Public License.
24 This exception does not however invalidate any other reasons why
25 the executable file might be covered by the GNU General Public License. */
26
27 /* This implements IEEE 754 format arithmetic, but does not provide a
28 mechanism for setting the rounding mode, or for generating or handling
29 exceptions.
30
31 The original code by Steve Chamberlain, hacked by Mark Eichin and Jim
32 Wilson, all of Cygnus Support. */
33
34
35 #ifndef SIM_FPU_C
36 #define SIM_FPU_C
37
38 #include "sim-basics.h"
39 #include "sim-fpu.h"
40
41 #include "sim-io.h"
42 #include "sim-assert.h"
43
44
45 /* Debugging support.
46 If digits is -1, then print all digits. */
47
48 static void
49 print_bits (unsigned64 x,
50 int msbit,
51 int digits,
52 sim_fpu_print_func print,
53 void *arg)
54 {
55 unsigned64 bit = LSBIT64 (msbit);
56 int i = 4;
57 while (bit && digits)
58 {
59 if (i == 0)
60 print (arg, ",");
61
62 if ((x & bit))
63 print (arg, "1");
64 else
65 print (arg, "0");
66 bit >>= 1;
67
68 if (digits > 0) digits--;
69 i = (i + 1) % 4;
70 }
71 }
72
73
74
75 /* Quick and dirty conversion between a host double and host 64bit int */
76
77 typedef union {
78 double d;
79 unsigned64 i;
80 } sim_fpu_map;
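
/* (Editorial note: this union assumes the host "double" is an IEEE 754
   binary64 value sharing storage with a 64 bit integer; sim_fpu_2d,
   sim_fpu_d2 and the comparison operators below rely on that.)  */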
81
82
83 /* A packed IEEE floating point number.
84
85 Form is <SIGN:1><BIASEDEXP:NR_EXPBITS><FRAC:NR_FRACBITS> for both
86 32 and 64 bit numbers. This number is interpreted as:
87
88 Normalized (0 < BIASEDEXP && BIASEDEXP < EXPMAX):
89 (sign ? '-' : '+') 1.<FRAC> x 2 ^ (BIASEDEXP - EXPBIAS)
90
91 Denormalized (0 == BIASEDEXP && FRAC != 0):
92 (sign ? "-" : "+") 0.<FRAC> x 2 ^ (1 - EXPBIAS)
93
94 Zero (0 == BIASEDEXP && FRAC == 0):
95 (sign ? "-" : "+") 0.0
96
97 Infinity (BIASEDEXP == EXPMAX && FRAC == 0):
98 (sign ? "-" : "+") "infinity"
99
100 SignalingNaN (BIASEDEXP == EXPMAX && FRAC > 0 && FRAC < QUIET_NAN):
101 SNaN.FRAC
102
103 QuietNaN (BIASEDEXP == EXPMAX && FRAC >= QUIET_NAN):
104 QNaN.FRAC
105
106 */
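
/* Worked example (editorial, single precision): 1.0f has SIGN 0,
   BIASEDEXP 127 (0x7f) and FRAC 0, packing to 0x3f800000; -2.5f is
   -1.25 x 2^1, so SIGN 1, BIASEDEXP 128 (0x80) and FRAC 0x200000,
   packing to 0xc0200000.  */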
107
108 #define NR_EXPBITS (is_double ? 11 : 8)
109 #define NR_FRACBITS (is_double ? 52 : 23)
110 #define SIGNBIT (is_double ? MSBIT64 (0) : MSBIT64 (32))
111
112 #define EXPMAX32 (255)
113 #define EXPMAX64 (2047)
114 #define EXPMAX ((unsigned) (is_double ? EXPMAX64 : EXPMAX32))
115
116 #define EXPBIAS32 (127)
117 #define EXPBIAS64 (1023)
118 #define EXPBIAS (is_double ? EXPBIAS64 : EXPBIAS32)
119
120 #define QUIET_NAN LSBIT64 (NR_FRACBITS - 1)
121
122
123
124 /* An unpacked floating point number.
125
126 When unpacked, the fraction of both a 32 and 64 bit floating point
127 number is stored using the same format:
128
129 64 bit - <IMPLICIT_1:1><FRACBITS:52><GUARDS:8><PAD:00>
130 32 bit - <IMPLICIT_1:1><FRACBITS:23><GUARDS:7><PAD:30> */
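
/* (Editorial note: in both layouts the implicit one sits at bit
   NR_FRAC_GUARD (60), so an unpacked 1.0 has fraction == IMPLICIT_1 and
   normal_exp == 0.)  */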
131
132 #define NR_PAD32 (30)
133 #define NR_PAD64 (0)
134 #define NR_PAD (is_double ? NR_PAD64 : NR_PAD32)
135 #define PADMASK (is_double ? 0 : LSMASK64 (NR_PAD32 - 1, 0))
136
137 #define NR_GUARDS32 (7 + NR_PAD32)
138 #define NR_GUARDS64 (8 + NR_PAD64)
139 #define NR_GUARDS (is_double ? NR_GUARDS64 : NR_GUARDS32)
140 #define GUARDMASK LSMASK64 (NR_GUARDS - 1, 0)
141
142 #define GUARDMSB LSBIT64 (NR_GUARDS - 1)
143 #define GUARDLSB LSBIT64 (NR_PAD)
144 #define GUARDROUND LSMASK64 (NR_GUARDS - 2, 0)
145
146 #define NR_FRAC_GUARD (60)
147 #define IMPLICIT_1 LSBIT64 (NR_FRAC_GUARD)
148 #define IMPLICIT_2 LSBIT64 (NR_FRAC_GUARD + 1)
149 #define IMPLICIT_4 LSBIT64 (NR_FRAC_GUARD + 2)
150 #define NR_SPARE 2
151
152 #define FRAC32MASK LSMASK64 (63, NR_FRAC_GUARD - 32 + 1)
153
154 #define NORMAL_EXPMIN (-(EXPBIAS)+1)
155
156 #define NORMAL_EXPMAX32 (EXPBIAS32)
157 #define NORMAL_EXPMAX64 (EXPBIAS64)
158 #define NORMAL_EXPMAX (EXPBIAS)
159
160
161 /* Integer constants */
162
163 #define MAX_INT32 ((signed64) LSMASK64 (30, 0))
164 #define MAX_UINT32 LSMASK64 (31, 0)
165 #define MIN_INT32 ((signed64) LSMASK64 (63, 31))
166
167 #define MAX_INT64 ((signed64) LSMASK64 (62, 0))
168 #define MAX_UINT64 LSMASK64 (63, 0)
169 #define MIN_INT64 ((signed64) LSMASK64 (63, 63))
170
171 #define MAX_INT (is_64bit ? MAX_INT64 : MAX_INT32)
172 #define MIN_INT (is_64bit ? MIN_INT64 : MIN_INT32)
173 #define MAX_UINT (is_64bit ? MAX_UINT64 : MAX_UINT32)
174 #define NR_INTBITS (is_64bit ? 64 : 32)
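
/* (Editorial note: e.g. MAX_INT32 is 0x7fffffff, while MIN_INT32 is the
   sign extended value 0xffffffff80000000, i.e. -2^31 as a signed64.)  */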
175
176 /* Squeeze an unpacked sim_fpu struct into a 32/64 bit integer */
177 STATIC_INLINE_SIM_FPU (unsigned64)
178 pack_fpu (const sim_fpu *src,
179 int is_double)
180 {
181 int sign;
182 unsigned64 exp;
183 unsigned64 fraction;
184 unsigned64 packed;
185
186 switch (src->class)
187 {
188 /* create a NaN */
189 case sim_fpu_class_qnan:
190 sign = src->sign;
191 exp = EXPMAX;
192 /* force fraction to correct class */
193 fraction = src->fraction;
194 fraction >>= NR_GUARDS;
195 #ifdef SIM_QUIET_NAN_NEGATED
196 fraction |= QUIET_NAN - 1;
197 #else
198 fraction |= QUIET_NAN;
199 #endif
200 break;
201 case sim_fpu_class_snan:
202 sign = src->sign;
203 exp = EXPMAX;
204 /* force fraction to correct class */
205 fraction = src->fraction;
206 fraction >>= NR_GUARDS;
207 #ifdef SIM_QUIET_NAN_NEGATED
208 fraction |= QUIET_NAN;
209 #else
210 fraction &= ~QUIET_NAN;
211 #endif
212 break;
213 case sim_fpu_class_infinity:
214 sign = src->sign;
215 exp = EXPMAX;
216 fraction = 0;
217 break;
218 case sim_fpu_class_zero:
219 sign = src->sign;
220 exp = 0;
221 fraction = 0;
222 break;
223 case sim_fpu_class_number:
224 case sim_fpu_class_denorm:
225 ASSERT (src->fraction >= IMPLICIT_1);
226 ASSERT (src->fraction < IMPLICIT_2);
227 if (src->normal_exp < NORMAL_EXPMIN)
228 {
229 /* This number's exponent is too low to fit into the bits
230 available in the number. We'll denormalize the number by
231 storing zero in the exponent and shifting the fraction to
232 the right to make up for it. */
233 int nr_shift = NORMAL_EXPMIN - src->normal_exp;
234 if (nr_shift > NR_FRACBITS)
235 {
236 /* underflow, just make the number zero */
237 sign = src->sign;
238 exp = 0;
239 fraction = 0;
240 }
241 else
242 {
243 sign = src->sign;
244 exp = 0;
245 /* Shift by the value */
246 fraction = src->fraction;
247 fraction >>= NR_GUARDS;
248 fraction >>= nr_shift;
249 }
250 }
251 else if (src->normal_exp > NORMAL_EXPMAX)
252 {
253 /* Infinity */
254 sign = src->sign;
255 exp = EXPMAX;
256 fraction = 0;
257 }
258 else
259 {
260 exp = (src->normal_exp + EXPBIAS);
261 sign = src->sign;
262 fraction = src->fraction;
263 /* FIXME: Need to round according to WITH_SIM_FPU_ROUNDING
264 or some such */
265 /* Round to nearest: If all guard bits except the most significant
266 one are zero, we're exactly half way between two numbers;
267 choose the one which makes the lsb of the answer 0. */
268 if ((fraction & GUARDMASK) == GUARDMSB)
269 {
270 if ((fraction & (GUARDMSB << 1)))
271 fraction += (GUARDMSB << 1);
272 }
273 else
274 {
275 /* Add a one to the guards to force round to nearest */
276 fraction += GUARDROUND;
277 }
278 if ((fraction & IMPLICIT_2)) /* rounding resulted in carry */
279 {
280 exp += 1;
281 fraction >>= 1;
282 }
283 fraction >>= NR_GUARDS;
284 /* When exp == EXPMAX (overflow from carry) fraction must
285 have been made zero */
286 ASSERT ((exp == EXPMAX) <= ((fraction & ~IMPLICIT_1) == 0));
287 }
288 break;
289 default:
290 abort ();
291 }
292
293 packed = ((sign ? SIGNBIT : 0)
294 | (exp << NR_FRACBITS)
295 | LSMASKED64 (fraction, NR_FRACBITS - 1, 0));
296
297 /* trace operation */
298 #if 0
299 if (is_double)
300 {
301 }
302 else
303 {
304 printf ("pack_fpu: ");
305 printf ("-> %c%0lX.%06lX\n",
306 LSMASKED32 (packed, 31, 31) ? '8' : '0',
307 (long) LSEXTRACTED32 (packed, 30, 23),
308 (long) LSEXTRACTED32 (packed, 23 - 1, 0));
309 }
310 #endif
311
312 return packed;
313 }
314
315
316 /* Unpack a 32/64 bit integer into a sim_fpu structure */
317 STATIC_INLINE_SIM_FPU (void)
318 unpack_fpu (sim_fpu *dst, unsigned64 packed, int is_double)
319 {
320 unsigned64 fraction = LSMASKED64 (packed, NR_FRACBITS - 1, 0);
321 unsigned exp = LSEXTRACTED64 (packed, NR_EXPBITS + NR_FRACBITS - 1, NR_FRACBITS);
322 int sign = (packed & SIGNBIT) != 0;
323
324 if (exp == 0)
325 {
326 /* Hmm. Looks like 0 */
327 if (fraction == 0)
328 {
329 /* tastes like zero */
330 dst->class = sim_fpu_class_zero;
331 dst->sign = sign;
332 dst->normal_exp = 0;
333 }
334 else
335 {
336 /* Zero exponent with non zero fraction - it's denormalized,
337 so there isn't a leading implicit one - we'll shift it so
338 it gets one. */
339 dst->normal_exp = exp - EXPBIAS + 1;
340 dst->class = sim_fpu_class_denorm;
341 dst->sign = sign;
342 fraction <<= NR_GUARDS;
343 while (fraction < IMPLICIT_1)
344 {
345 fraction <<= 1;
346 dst->normal_exp--;
347 }
348 dst->fraction = fraction;
349 }
350 }
351 else if (exp == EXPMAX)
352 {
353 /* Huge exponent */
354 if (fraction == 0)
355 {
356 /* Attached to a zero fraction - means infinity */
357 dst->class = sim_fpu_class_infinity;
358 dst->sign = sign;
359 /* dst->normal_exp = EXPBIAS; */
360 /* dst->fraction = 0; */
361 }
362 else
363 {
364 int qnan;
365
366 /* Non zero fraction, means NaN */
367 dst->sign = sign;
368 dst->fraction = (fraction << NR_GUARDS);
369 #ifdef SIM_QUIET_NAN_NEGATED
370 qnan = (fraction & QUIET_NAN) == 0;
371 #else
372 qnan = fraction >= QUIET_NAN;
373 #endif
374 if (qnan)
375 dst->class = sim_fpu_class_qnan;
376 else
377 dst->class = sim_fpu_class_snan;
378 }
379 }
380 else
381 {
382 /* Nothing strange about this number */
383 dst->class = sim_fpu_class_number;
384 dst->sign = sign;
385 dst->fraction = ((fraction << NR_GUARDS) | IMPLICIT_1);
386 dst->normal_exp = exp - EXPBIAS;
387 }
388
389 /* trace operation */
390 #if 0
391 if (is_double)
392 {
393 }
394 else
395 {
396 printf ("unpack_fpu: %c%02lX.%06lX ->\n",
397 LSMASKED32 (packed, 31, 31) ? '8' : '0',
398 (long) LSEXTRACTED32 (packed, 30, 23),
399 (long) LSEXTRACTED32 (packed, 23 - 1, 0));
400 }
401 #endif
402
403 /* sanity checks */
404 {
405 sim_fpu_map val;
406 val.i = pack_fpu (dst, 1);
407 if (is_double)
408 {
409 ASSERT (val.i == packed);
410 }
411 else
412 {
413 unsigned32 val = pack_fpu (dst, 0);
414 unsigned32 org = packed;
415 ASSERT (val == org);
416 }
417 }
418 }
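
/* Example (editorial): unpack_fpu of 0x40200000 (2.5f) yields class
   sim_fpu_class_number, sign 0, normal_exp 1 and fraction 0x5 << 58,
   i.e. binary 1.01 aligned so the leading one lands on IMPLICIT_1.  */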
419
420
421 /* Convert a floating point into an integer */
422 STATIC_INLINE_SIM_FPU (int)
423 fpu2i (signed64 *i,
424 const sim_fpu *s,
425 int is_64bit,
426 sim_fpu_round round)
427 {
428 unsigned64 tmp;
429 int shift;
430 int status = 0;
431 if (sim_fpu_is_zero (s))
432 {
433 *i = 0;
434 return 0;
435 }
436 if (sim_fpu_is_snan (s))
437 {
438 *i = MIN_INT; /* FIXME */
439 return sim_fpu_status_invalid_cvi;
440 }
441 if (sim_fpu_is_qnan (s))
442 {
443 *i = MIN_INT; /* FIXME */
444 return sim_fpu_status_invalid_cvi;
445 }
446 /* map infinity onto MAX_INT... */
447 if (sim_fpu_is_infinity (s))
448 {
449 *i = s->sign ? MIN_INT : MAX_INT;
450 return sim_fpu_status_invalid_cvi;
451 }
452 /* it is a number, but a small one */
453 if (s->normal_exp < 0)
454 {
455 *i = 0;
456 return sim_fpu_status_inexact;
457 }
458 /* Is the floating point MIN_INT or just close? */
459 if (s->sign && s->normal_exp == (NR_INTBITS - 1))
460 {
461 *i = MIN_INT;
462 ASSERT (s->fraction >= IMPLICIT_1);
463 if (s->fraction == IMPLICIT_1)
464 return 0; /* exact */
465 if (is_64bit) /* can't round */
466 return sim_fpu_status_invalid_cvi; /* must be overflow */
467 /* For a 32 bit value near MIN_INT, rounding is still possible */
468 switch (round)
469 {
470 case sim_fpu_round_default:
471 abort ();
472 case sim_fpu_round_zero:
473 if ((s->fraction & FRAC32MASK) != IMPLICIT_1)
474 return sim_fpu_status_invalid_cvi;
475 else
476 return sim_fpu_status_inexact;
477 break;
478 case sim_fpu_round_near:
479 {
480 if ((s->fraction & FRAC32MASK) != IMPLICIT_1)
481 return sim_fpu_status_invalid_cvi;
482 else if ((s->fraction & ~FRAC32MASK) >= (~FRAC32MASK >> 1))
483 return sim_fpu_status_invalid_cvi;
484 else
485 return sim_fpu_status_inexact;
486 }
487 case sim_fpu_round_up:
488 if ((s->fraction & FRAC32MASK) == IMPLICIT_1)
489 return sim_fpu_status_inexact;
490 else
491 return sim_fpu_status_invalid_cvi;
492 case sim_fpu_round_down:
493 return sim_fpu_status_invalid_cvi;
494 }
495 }
496 /* Would right shifting result in the FRAC being shifted into
497 (through) the integer's sign bit? */
498 if (s->normal_exp > (NR_INTBITS - 2))
499 {
500 *i = s->sign ? MIN_INT : MAX_INT;
501 return sim_fpu_status_invalid_cvi;
502 }
503 /* normal number, shift it into place */
504 tmp = s->fraction;
505 shift = (s->normal_exp - (NR_FRAC_GUARD));
506 if (shift > 0)
507 {
508 tmp <<= shift;
509 }
510 else
511 {
512 shift = -shift;
513 if (tmp & ((SIGNED64 (1) << shift) - 1))
514 status |= sim_fpu_status_inexact;
515 tmp >>= shift;
516 }
517 *i = s->sign ? (-tmp) : (tmp);
518 return status;
519 }
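
/* Example (editorial): in the general path above fpu2i truncates
   towards zero, so 2.5 converts to 2 with sim_fpu_status_inexact; the
   ROUND argument only affects the MIN_INT boundary case.  */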
520
521 /* convert an integer into a floating point */
522 STATIC_INLINE_SIM_FPU (int)
523 i2fpu (sim_fpu *f, signed64 i, int is_64bit)
524 {
525 int status = 0;
526 if (i == 0)
527 {
528 f->class = sim_fpu_class_zero;
529 f->sign = 0;
530 f->normal_exp = 0;
531 }
532 else
533 {
534 f->class = sim_fpu_class_number;
535 f->sign = (i < 0);
536 f->normal_exp = NR_FRAC_GUARD;
537
538 if (f->sign)
539 {
540 /* Special case for minint, since there is no corresponding
541 +ve integer representation for it */
542 if (i == MIN_INT)
543 {
544 f->fraction = IMPLICIT_1;
545 f->normal_exp = NR_INTBITS - 1;
546 }
547 else
548 f->fraction = (-i);
549 }
550 else
551 f->fraction = i;
552
553 if (f->fraction >= IMPLICIT_2)
554 {
555 do
556 {
557 f->fraction = (f->fraction >> 1) | (f->fraction & 1);
558 f->normal_exp += 1;
559 }
560 while (f->fraction >= IMPLICIT_2);
561 }
562 else if (f->fraction < IMPLICIT_1)
563 {
564 do
565 {
566 f->fraction <<= 1;
567 f->normal_exp -= 1;
568 }
569 while (f->fraction < IMPLICIT_1);
570 }
571 }
572
573 /* trace operation */
574 #if 0
575 {
576 printf ("i2fpu: 0x%08lX ->\n", (long) i);
577 }
578 #endif
579
580 /* sanity check */
581 {
582 signed64 val;
583 fpu2i (&val, f, is_64bit, sim_fpu_round_zero);
584 if (i >= MIN_INT32 && i <= MAX_INT32)
585 {
586 ASSERT (val == i);
587 }
588 }
589
590 return status;
591 }
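
/* Example (editorial): i2fpu of 5 produces sign 0, normal_exp 2 and
   fraction 0x5 << 58, since 5 == 1.25 x 2^2; fpu2i of that value
   recovers 5 exactly, which is what the sanity check above verifies.  */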
592
593
594 /* Convert a floating point into an integer */
595 STATIC_INLINE_SIM_FPU (int)
596 fpu2u (unsigned64 *u, const sim_fpu *s, int is_64bit)
597 {
598 const int is_double = 1;
599 unsigned64 tmp;
600 int shift;
601 if (sim_fpu_is_zero (s))
602 {
603 *u = 0;
604 return 0;
605 }
606 if (sim_fpu_is_nan (s))
607 {
608 *u = 0;
609 return 0;
610 }
611 /* it is a negative number */
612 if (s->sign)
613 {
614 *u = 0;
615 return 0;
616 }
617 /* map infinity onto MAX_UINT... */
618 if (sim_fpu_is_infinity (s))
619 {
620 *u = MAX_UINT;
621 return 0;
622 }
623 /* it is a number, but a small one */
624 if (s->normal_exp < 0)
625 {
626 *u = 0;
627 return 0;
628 }
629 /* overflow */
630 if (s->normal_exp > (NR_INTBITS - 1))
631 {
632 *u = MAX_UINT;
633 return 0;
634 }
635 /* normal number */
636 tmp = (s->fraction & ~PADMASK);
637 shift = (s->normal_exp - (NR_FRACBITS + NR_GUARDS));
638 if (shift > 0)
639 {
640 tmp <<= shift;
641 }
642 else
643 {
644 shift = -shift;
645 tmp >>= shift;
646 }
647 *u = tmp;
648 return 0;
649 }
650
651 /* Convert an unsigned integer into a floating point */
652 STATIC_INLINE_SIM_FPU (int)
653 u2fpu (sim_fpu *f, unsigned64 u, int is_64bit)
654 {
655 if (u == 0)
656 {
657 f->class = sim_fpu_class_zero;
658 f->sign = 0;
659 f->normal_exp = 0;
660 }
661 else
662 {
663 f->class = sim_fpu_class_number;
664 f->sign = 0;
665 f->normal_exp = NR_FRAC_GUARD;
666 f->fraction = u;
667
668 while (f->fraction < IMPLICIT_1)
669 {
670 f->fraction <<= 1;
671 f->normal_exp -= 1;
672 }
673 }
674 return 0;
675 }
676
677
678 /* register <-> sim_fpu */
679
680 INLINE_SIM_FPU (void)
681 sim_fpu_32to (sim_fpu *f, unsigned32 s)
682 {
683 unpack_fpu (f, s, 0);
684 }
685
686
687 INLINE_SIM_FPU (void)
688 sim_fpu_232to (sim_fpu *f, unsigned32 h, unsigned32 l)
689 {
690 unsigned64 s = h;
691 s = (s << 32) | l;
692 unpack_fpu (f, s, 1);
693 }
694
695
696 INLINE_SIM_FPU (void)
697 sim_fpu_64to (sim_fpu *f, unsigned64 s)
698 {
699 unpack_fpu (f, s, 1);
700 }
701
702
703 INLINE_SIM_FPU (void)
704 sim_fpu_to32 (unsigned32 *s,
705 const sim_fpu *f)
706 {
707 *s = pack_fpu (f, 0);
708 }
709
710
711 INLINE_SIM_FPU (void)
712 sim_fpu_to232 (unsigned32 *h, unsigned32 *l,
713 const sim_fpu *f)
714 {
715 unsigned64 s = pack_fpu (f, 1);
716 *l = s;
717 *h = (s >> 32);
718 }
719
720
721 INLINE_SIM_FPU (void)
722 sim_fpu_to64 (unsigned64 *u,
723 const sim_fpu *f)
724 {
725 *u = pack_fpu (f, 1);
726 }
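
/* Usage sketch (editorial, not compiled): how client code might move a
   raw 32 bit register value through sim_fpu and back.  The names
   "usage_sketch", "reg32" and "result" are illustrative only.  */
#if 0
static void
usage_sketch (void)
{
  unsigned32 reg32 = 0x3f800000;	/* 1.0f */
  unsigned32 result;
  sim_fpu val;
  sim_fpu_32to (&val, reg32);		/* 32 bit register -> unpacked */
  /* ... operate on VAL with the arithmetic ops below ... */
  sim_fpu_to32 (&result, &val);		/* unpacked -> 32 bit register */
  ASSERT (result == reg32);
}
#endif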
727
728
729 INLINE_SIM_FPU (void)
730 sim_fpu_fractionto (sim_fpu *f,
731 int sign,
732 int normal_exp,
733 unsigned64 fraction,
734 int precision)
735 {
736 int shift = (NR_FRAC_GUARD - precision);
737 f->class = sim_fpu_class_number;
738 f->sign = sign;
739 f->normal_exp = normal_exp;
740 /* shift the fraction to where sim-fpu expects it */
741 if (shift >= 0)
742 f->fraction = (fraction << shift);
743 else
744 f->fraction = (fraction >> -shift);
745 f->fraction |= IMPLICIT_1;
746 }
747
748
749 INLINE_SIM_FPU (unsigned64)
750 sim_fpu_tofraction (const sim_fpu *d,
751 int precision)
752 {
753 /* we have NR_FRAC_GUARD bits, we want only PRECISION bits */
754 int shift = (NR_FRAC_GUARD - precision);
755 unsigned64 fraction = (d->fraction & ~IMPLICIT_1);
756 if (shift >= 0)
757 return fraction >> shift;
758 else
759 return fraction << -shift;
760 }
761
762
763 /* Rounding */
764
765 STATIC_INLINE_SIM_FPU (int)
766 do_normal_overflow (sim_fpu *f,
767 int is_double,
768 sim_fpu_round round)
769 {
770 switch (round)
771 {
772 case sim_fpu_round_default:
773 return 0;
774 case sim_fpu_round_near:
775 f->class = sim_fpu_class_infinity;
776 break;
777 case sim_fpu_round_up:
778 if (!f->sign)
779 f->class = sim_fpu_class_infinity;
780 break;
781 case sim_fpu_round_down:
782 if (f->sign)
783 f->class = sim_fpu_class_infinity;
784 break;
785 case sim_fpu_round_zero:
786 break;
787 }
788 f->normal_exp = NORMAL_EXPMAX;
789 f->fraction = LSMASK64 (NR_FRAC_GUARD, NR_GUARDS);
790 return (sim_fpu_status_overflow | sim_fpu_status_inexact);
791 }
792
793 STATIC_INLINE_SIM_FPU (int)
794 do_normal_underflow (sim_fpu *f,
795 int is_double,
796 sim_fpu_round round)
797 {
798 switch (round)
799 {
800 case sim_fpu_round_default:
801 return 0;
802 case sim_fpu_round_near:
803 f->class = sim_fpu_class_zero;
804 break;
805 case sim_fpu_round_up:
806 if (f->sign)
807 f->class = sim_fpu_class_zero;
808 break;
809 case sim_fpu_round_down:
810 if (!f->sign)
811 f->class = sim_fpu_class_zero;
812 break;
813 case sim_fpu_round_zero:
814 f->class = sim_fpu_class_zero;
815 break;
816 }
817 f->normal_exp = NORMAL_EXPMIN - NR_FRACBITS;
818 f->fraction = IMPLICIT_1;
819 return (sim_fpu_status_inexact | sim_fpu_status_underflow);
820 }
821
822
823
824 /* Round a number using NR_GUARDS.
825 Returns the rounded number; F->FRACTION == 0 indicates underflow. */
826
827 STATIC_INLINE_SIM_FPU (int)
828 do_normal_round (sim_fpu *f,
829 int nr_guards,
830 sim_fpu_round round)
831 {
832 unsigned64 guardmask = LSMASK64 (nr_guards - 1, 0);
833 unsigned64 guardmsb = LSBIT64 (nr_guards - 1);
834 unsigned64 fraclsb = guardmsb << 1;
835 if ((f->fraction & guardmask))
836 {
837 int status = sim_fpu_status_inexact;
838 switch (round)
839 {
840 case sim_fpu_round_default:
841 return 0;
842 case sim_fpu_round_near:
843 if ((f->fraction & guardmsb))
844 {
845 if ((f->fraction & fraclsb))
846 {
847 status |= sim_fpu_status_rounded;
848 }
849 else if ((f->fraction & (guardmask >> 1)))
850 {
851 status |= sim_fpu_status_rounded;
852 }
853 }
854 break;
855 case sim_fpu_round_up:
856 if (!f->sign)
857 status |= sim_fpu_status_rounded;
858 break;
859 case sim_fpu_round_down:
860 if (f->sign)
861 status |= sim_fpu_status_rounded;
862 break;
863 case sim_fpu_round_zero:
864 break;
865 }
866 f->fraction &= ~guardmask;
867 /* round if needed, handle resulting overflow */
868 if ((status & sim_fpu_status_rounded))
869 {
870 f->fraction += fraclsb;
871 if ((f->fraction & IMPLICIT_2))
872 {
873 f->fraction >>= 1;
874 f->normal_exp += 1;
875 }
876 }
877 return status;
878 }
879 else
880 return 0;
881 }
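
/* Example (editorial): with nr_guards == 3 and sim_fpu_round_near, a
   fraction ending ...1|100 (kept lsb 1, guard bits 100) is exactly half
   way and rounds up so the kept lsb becomes even, while ...0|100 is
   left truncated; ...x|101 always rounds up because a lower (sticky)
   guard bit is set.  */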
882
883
884 STATIC_INLINE_SIM_FPU (int)
885 do_round (sim_fpu *f,
886 int is_double,
887 sim_fpu_round round,
888 sim_fpu_denorm denorm)
889 {
890 switch (f->class)
891 {
892 case sim_fpu_class_qnan:
893 case sim_fpu_class_zero:
894 case sim_fpu_class_infinity:
895 return 0;
896 break;
897 case sim_fpu_class_snan:
898 /* Quieten a SignalingNaN */
899 f->class = sim_fpu_class_qnan;
900 return sim_fpu_status_invalid_snan;
901 break;
902 case sim_fpu_class_number:
903 case sim_fpu_class_denorm:
904 {
905 int status;
906 ASSERT (f->fraction < IMPLICIT_2);
907 ASSERT (f->fraction >= IMPLICIT_1);
908 if (f->normal_exp < NORMAL_EXPMIN)
909 {
910 /* This number's exponent is too low to fit into the bits
911 available in the number. Round off any bits that will be
912 discarded as a result of denormalization. Edge case is
913 the implicit bit shifted to GUARD0 and then rounded
914 up. */
915 int shift = NORMAL_EXPMIN - f->normal_exp;
916 if (shift + NR_GUARDS <= NR_FRAC_GUARD + 1
917 && !(denorm & sim_fpu_denorm_zero))
918 {
919 status = do_normal_round (f, shift + NR_GUARDS, round);
920 if (f->fraction == 0) /* rounding underflowed */
921 {
922 status |= do_normal_underflow (f, is_double, round);
923 }
924 else if (f->normal_exp < NORMAL_EXPMIN) /* still underflow? */
925 {
926 status |= sim_fpu_status_denorm;
927 /* Any loss of precision when denormalizing is
928 underflow. Some processors check for underflow
929 before rounding, some after! */
930 if (status & sim_fpu_status_inexact)
931 status |= sim_fpu_status_underflow;
932 /* Flag that resultant value has been denormalized */
933 f->class = sim_fpu_class_denorm;
934 }
935 else if ((denorm & sim_fpu_denorm_underflow_inexact))
936 {
937 if ((status & sim_fpu_status_inexact))
938 status |= sim_fpu_status_underflow;
939 }
940 }
941 else
942 {
943 status = do_normal_underflow (f, is_double, round);
944 }
945 }
946 else if (f->normal_exp > NORMAL_EXPMAX)
947 {
948 /* Infinity */
949 status = do_normal_overflow (f, is_double, round);
950 }
951 else
952 {
953 status = do_normal_round (f, NR_GUARDS, round);
954 if (f->fraction == 0)
955 /* f->class = sim_fpu_class_zero; */
956 status |= do_normal_underflow (f, is_double, round);
957 else if (f->normal_exp > NORMAL_EXPMAX)
958 /* oops! rounding caused overflow */
959 status |= do_normal_overflow (f, is_double, round);
960 }
961 ASSERT ((f->class == sim_fpu_class_number
962 || f->class == sim_fpu_class_denorm)
963 <= (f->fraction < IMPLICIT_2 && f->fraction >= IMPLICIT_1));
964 return status;
965 }
966 }
967 return 0;
968 }
969
970 INLINE_SIM_FPU (int)
971 sim_fpu_round_32 (sim_fpu *f,
972 sim_fpu_round round,
973 sim_fpu_denorm denorm)
974 {
975 return do_round (f, 0, round, denorm);
976 }
977
978 INLINE_SIM_FPU (int)
979 sim_fpu_round_64 (sim_fpu *f,
980 sim_fpu_round round,
981 sim_fpu_denorm denorm)
982 {
983 return do_round (f, 1, round, denorm);
984 }
985
986
987
988 /* Arithmetic ops */
989
990 INLINE_SIM_FPU (int)
991 sim_fpu_add (sim_fpu *f,
992 const sim_fpu *l,
993 const sim_fpu *r)
994 {
995 if (sim_fpu_is_snan (l))
996 {
997 *f = *l;
998 f->class = sim_fpu_class_qnan;
999 return sim_fpu_status_invalid_snan;
1000 }
1001 if (sim_fpu_is_snan (r))
1002 {
1003 *f = *r;
1004 f->class = sim_fpu_class_qnan;
1005 return sim_fpu_status_invalid_snan;
1006 }
1007 if (sim_fpu_is_qnan (l))
1008 {
1009 *f = *l;
1010 return 0;
1011 }
1012 if (sim_fpu_is_qnan (r))
1013 {
1014 *f = *r;
1015 return 0;
1016 }
1017 if (sim_fpu_is_infinity (l))
1018 {
1019 if (sim_fpu_is_infinity (r)
1020 && l->sign != r->sign)
1021 {
1022 *f = sim_fpu_qnan;
1023 return sim_fpu_status_invalid_isi;
1024 }
1025 *f = *l;
1026 return 0;
1027 }
1028 if (sim_fpu_is_infinity (r))
1029 {
1030 *f = *r;
1031 return 0;
1032 }
1033 if (sim_fpu_is_zero (l))
1034 {
1035 if (sim_fpu_is_zero (r))
1036 {
1037 *f = sim_fpu_zero;
1038 f->sign = l->sign & r->sign;
1039 }
1040 else
1041 *f = *r;
1042 return 0;
1043 }
1044 if (sim_fpu_is_zero (r))
1045 {
1046 *f = *l;
1047 return 0;
1048 }
1049 {
1050 int status = 0;
1051 int shift = l->normal_exp - r->normal_exp;
1052 unsigned64 lfraction;
1053 unsigned64 rfraction;
1054 /* use exp of larger */
1055 if (shift >= NR_FRAC_GUARD)
1056 {
1057 /* left has much bigger magnitude */
1058 *f = *l;
1059 return sim_fpu_status_inexact;
1060 }
1061 if (shift <= - NR_FRAC_GUARD)
1062 {
1063 /* right has much bigger magnitude */
1064 *f = *r;
1065 return sim_fpu_status_inexact;
1066 }
1067 lfraction = l->fraction;
1068 rfraction = r->fraction;
1069 if (shift > 0)
1070 {
1071 f->normal_exp = l->normal_exp;
1072 if (rfraction & LSMASK64 (shift - 1, 0))
1073 {
1074 status |= sim_fpu_status_inexact;
1075 rfraction |= LSBIT64 (shift); /* stick LSBit */
1076 }
1077 rfraction >>= shift;
1078 }
1079 else if (shift < 0)
1080 {
1081 f->normal_exp = r->normal_exp;
1082 if (lfraction & LSMASK64 (- shift - 1, 0))
1083 {
1084 status |= sim_fpu_status_inexact;
1085 lfraction |= LSBIT64 (- shift); /* stick LSBit */
1086 }
1087 lfraction >>= -shift;
1088 }
1089 else
1090 {
1091 f->normal_exp = r->normal_exp;
1092 }
1093
1094 /* perform the addition */
1095 if (l->sign)
1096 lfraction = - lfraction;
1097 if (r->sign)
1098 rfraction = - rfraction;
1099 f->fraction = lfraction + rfraction;
1100
1101 /* zero? */
1102 if (f->fraction == 0)
1103 {
1104 *f = sim_fpu_zero;
1105 return 0;
1106 }
1107
1108 /* sign? */
1109 f->class = sim_fpu_class_number;
1110 if ((signed64) f->fraction >= 0)
1111 f->sign = 0;
1112 else
1113 {
1114 f->sign = 1;
1115 f->fraction = - f->fraction;
1116 }
1117
1118 /* normalize it */
1119 if ((f->fraction & IMPLICIT_2))
1120 {
1121 f->fraction = (f->fraction >> 1) | (f->fraction & 1);
1122 f->normal_exp ++;
1123 }
1124 else if (f->fraction < IMPLICIT_1)
1125 {
1126 do
1127 {
1128 f->fraction <<= 1;
1129 f->normal_exp --;
1130 }
1131 while (f->fraction < IMPLICIT_1);
1132 }
1133 ASSERT (f->fraction >= IMPLICIT_1 && f->fraction < IMPLICIT_2);
1134 return status;
1135 }
1136 }
1137
1138
1139 INLINE_SIM_FPU (int)
1140 sim_fpu_sub (sim_fpu *f,
1141 const sim_fpu *l,
1142 const sim_fpu *r)
1143 {
1144 if (sim_fpu_is_snan (l))
1145 {
1146 *f = *l;
1147 f->class = sim_fpu_class_qnan;
1148 return sim_fpu_status_invalid_snan;
1149 }
1150 if (sim_fpu_is_snan (r))
1151 {
1152 *f = *r;
1153 f->class = sim_fpu_class_qnan;
1154 return sim_fpu_status_invalid_snan;
1155 }
1156 if (sim_fpu_is_qnan (l))
1157 {
1158 *f = *l;
1159 return 0;
1160 }
1161 if (sim_fpu_is_qnan (r))
1162 {
1163 *f = *r;
1164 return 0;
1165 }
1166 if (sim_fpu_is_infinity (l))
1167 {
1168 if (sim_fpu_is_infinity (r)
1169 && l->sign == r->sign)
1170 {
1171 *f = sim_fpu_qnan;
1172 return sim_fpu_status_invalid_isi;
1173 }
1174 *f = *l;
1175 return 0;
1176 }
1177 if (sim_fpu_is_infinity (r))
1178 {
1179 *f = *r;
1180 f->sign = !r->sign;
1181 return 0;
1182 }
1183 if (sim_fpu_is_zero (l))
1184 {
1185 if (sim_fpu_is_zero (r))
1186 {
1187 *f = sim_fpu_zero;
1188 f->sign = l->sign & !r->sign;
1189 }
1190 else
1191 {
1192 *f = *r;
1193 f->sign = !r->sign;
1194 }
1195 return 0;
1196 }
1197 if (sim_fpu_is_zero (r))
1198 {
1199 *f = *l;
1200 return 0;
1201 }
1202 {
1203 int status = 0;
1204 int shift = l->normal_exp - r->normal_exp;
1205 unsigned64 lfraction;
1206 unsigned64 rfraction;
1207 /* use exp of larger */
1208 if (shift >= NR_FRAC_GUARD)
1209 {
1210 /* left has much bigger magnitude */
1211 *f = *l;
1212 return sim_fpu_status_inexact;
1213 }
1214 if (shift <= - NR_FRAC_GUARD)
1215 {
1216 /* right has much bigger magnitude */
1217 *f = *r;
1218 f->sign = !r->sign;
1219 return sim_fpu_status_inexact;
1220 }
1221 lfraction = l->fraction;
1222 rfraction = r->fraction;
1223 if (shift > 0)
1224 {
1225 f->normal_exp = l->normal_exp;
1226 if (rfraction & LSMASK64 (shift - 1, 0))
1227 {
1228 status |= sim_fpu_status_inexact;
1229 rfraction |= LSBIT64 (shift); /* stick LSBit */
1230 }
1231 rfraction >>= shift;
1232 }
1233 else if (shift < 0)
1234 {
1235 f->normal_exp = r->normal_exp;
1236 if (lfraction & LSMASK64 (- shift - 1, 0))
1237 {
1238 status |= sim_fpu_status_inexact;
1239 lfraction |= LSBIT64 (- shift); /* stick LSBit */
1240 }
1241 lfraction >>= -shift;
1242 }
1243 else
1244 {
1245 f->normal_exp = r->normal_exp;
1246 }
1247
1248 /* perform the subtraction */
1249 if (l->sign)
1250 lfraction = - lfraction;
1251 if (!r->sign)
1252 rfraction = - rfraction;
1253 f->fraction = lfraction + rfraction;
1254
1255 /* zero? */
1256 if (f->fraction == 0)
1257 {
1258 *f = sim_fpu_zero;
1259 return 0;
1260 }
1261
1262 /* sign? */
1263 f->class = sim_fpu_class_number;
1264 if ((signed64) f->fraction >= 0)
1265 f->sign = 0;
1266 else
1267 {
1268 f->sign = 1;
1269 f->fraction = - f->fraction;
1270 }
1271
1272 /* normalize it */
1273 if ((f->fraction & IMPLICIT_2))
1274 {
1275 f->fraction = (f->fraction >> 1) | (f->fraction & 1);
1276 f->normal_exp ++;
1277 }
1278 else if (f->fraction < IMPLICIT_1)
1279 {
1280 do
1281 {
1282 f->fraction <<= 1;
1283 f->normal_exp --;
1284 }
1285 while (f->fraction < IMPLICIT_1);
1286 }
1287 ASSERT (f->fraction >= IMPLICIT_1 && f->fraction < IMPLICIT_2);
1288 return status;
1289 }
1290 }
1291
1292
1293 INLINE_SIM_FPU (int)
1294 sim_fpu_mul (sim_fpu *f,
1295 const sim_fpu *l,
1296 const sim_fpu *r)
1297 {
1298 if (sim_fpu_is_snan (l))
1299 {
1300 *f = *l;
1301 f->class = sim_fpu_class_qnan;
1302 return sim_fpu_status_invalid_snan;
1303 }
1304 if (sim_fpu_is_snan (r))
1305 {
1306 *f = *r;
1307 f->class = sim_fpu_class_qnan;
1308 return sim_fpu_status_invalid_snan;
1309 }
1310 if (sim_fpu_is_qnan (l))
1311 {
1312 *f = *l;
1313 return 0;
1314 }
1315 if (sim_fpu_is_qnan (r))
1316 {
1317 *f = *r;
1318 return 0;
1319 }
1320 if (sim_fpu_is_infinity (l))
1321 {
1322 if (sim_fpu_is_zero (r))
1323 {
1324 *f = sim_fpu_qnan;
1325 return sim_fpu_status_invalid_imz;
1326 }
1327 *f = *l;
1328 f->sign = l->sign ^ r->sign;
1329 return 0;
1330 }
1331 if (sim_fpu_is_infinity (r))
1332 {
1333 if (sim_fpu_is_zero (l))
1334 {
1335 *f = sim_fpu_qnan;
1336 return sim_fpu_status_invalid_imz;
1337 }
1338 *f = *r;
1339 f->sign = l->sign ^ r->sign;
1340 return 0;
1341 }
1342 if (sim_fpu_is_zero (l) || sim_fpu_is_zero (r))
1343 {
1344 *f = sim_fpu_zero;
1345 f->sign = l->sign ^ r->sign;
1346 return 0;
1347 }
1348 /* Calculate the mantissa by multiplying both 64bit numbers to get a
1349 128 bit number */
1350 {
1351 unsigned64 low;
1352 unsigned64 high;
1353 unsigned64 nl = l->fraction & 0xffffffff;
1354 unsigned64 nh = l->fraction >> 32;
1355 unsigned64 ml = r->fraction & 0xffffffff;
1356 unsigned64 mh = r->fraction >>32;
1357 unsigned64 pp_ll = ml * nl;
1358 unsigned64 pp_hl = mh * nl;
1359 unsigned64 pp_lh = ml * nh;
1360 unsigned64 pp_hh = mh * nh;
1361 unsigned64 res2 = 0;
1362 unsigned64 res0 = 0;
1363 unsigned64 ps_hh__ = pp_hl + pp_lh;
1364 if (ps_hh__ < pp_hl)
1365 res2 += UNSIGNED64 (0x100000000);
1366 pp_hl = (ps_hh__ << 32) & UNSIGNED64 (0xffffffff00000000);
1367 res0 = pp_ll + pp_hl;
1368 if (res0 < pp_ll)
1369 res2++;
1370 res2 += ((ps_hh__ >> 32) & 0xffffffff) + pp_hh;
1371 high = res2;
1372 low = res0;
1373
1374 f->normal_exp = l->normal_exp + r->normal_exp;
1375 f->sign = l->sign ^ r->sign;
1376 f->class = sim_fpu_class_number;
1377
1378 /* Input is bounded by [1,2) ; [2^60,2^61)
1379 Output is bounded by [1,4) ; [2^120,2^122) */
1380
1381 /* Adjust the exponent according to where the decimal point ended
1382 up in the high 64 bit word. In the source the decimal point
1383 was at NR_FRAC_GUARD. */
1384 f->normal_exp += NR_FRAC_GUARD + 64 - (NR_FRAC_GUARD * 2);
1385
1386 /* The high word is bounded according to the above. Consequently
1387 it has never overflowed into IMPLICIT_2. */
1388 ASSERT (high < LSBIT64 (((NR_FRAC_GUARD + 1) * 2) - 64));
1389 ASSERT (high >= LSBIT64 ((NR_FRAC_GUARD * 2) - 64));
1390 ASSERT (LSBIT64 (((NR_FRAC_GUARD + 1) * 2) - 64) < IMPLICIT_1);
1391
1392 /* normalize */
1393 do
1394 {
1395 f->normal_exp--;
1396 high <<= 1;
1397 if (low & LSBIT64 (63))
1398 high |= 1;
1399 low <<= 1;
1400 }
1401 while (high < IMPLICIT_1);
1402
1403 ASSERT (high >= IMPLICIT_1 && high < IMPLICIT_2);
1404 if (low != 0)
1405 {
1406 f->fraction = (high | 1); /* sticky */
1407 return sim_fpu_status_inexact;
1408 }
1409 else
1410 {
1411 f->fraction = high;
1412 return 0;
1413 }
1414 return 0;
1415 }
1416 }
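
/* Editorial sketch (not compiled): the 32 x 32 bit partial-product
   scheme used in sim_fpu_mul above, written as a stand-alone
   64 x 64 -> 128 bit multiply.  The name "mul64to128" is illustrative
   only.  */
#if 0
static void
mul64to128 (unsigned64 *high, unsigned64 *low, unsigned64 n, unsigned64 m)
{
  unsigned64 nl = n & 0xffffffff;
  unsigned64 nh = n >> 32;
  unsigned64 ml = m & 0xffffffff;
  unsigned64 mh = m >> 32;
  unsigned64 pp_ll = ml * nl;		/* low x low */
  unsigned64 pp_hl = mh * nl;		/* cross terms */
  unsigned64 pp_lh = ml * nh;
  unsigned64 pp_hh = mh * nh;		/* high x high */
  unsigned64 res2 = 0;
  unsigned64 res0;
  unsigned64 mid = pp_hl + pp_lh;
  if (mid < pp_hl)			/* carry out of the cross sum */
    res2 += UNSIGNED64 (0x100000000);
  res0 = pp_ll + (mid << 32);
  if (res0 < pp_ll)			/* carry into the high word */
    res2++;
  res2 += (mid >> 32) + pp_hh;
  *high = res2;
  *low = res0;
}
#endif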
1417
1418 INLINE_SIM_FPU (int)
1419 sim_fpu_div (sim_fpu *f,
1420 const sim_fpu *l,
1421 const sim_fpu *r)
1422 {
1423 if (sim_fpu_is_snan (l))
1424 {
1425 *f = *l;
1426 f->class = sim_fpu_class_qnan;
1427 return sim_fpu_status_invalid_snan;
1428 }
1429 if (sim_fpu_is_snan (r))
1430 {
1431 *f = *r;
1432 f->class = sim_fpu_class_qnan;
1433 return sim_fpu_status_invalid_snan;
1434 }
1435 if (sim_fpu_is_qnan (l))
1436 {
1437 *f = *l;
1438 f->class = sim_fpu_class_qnan;
1439 return 0;
1440 }
1441 if (sim_fpu_is_qnan (r))
1442 {
1443 *f = *r;
1444 f->class = sim_fpu_class_qnan;
1445 return 0;
1446 }
1447 if (sim_fpu_is_infinity (l))
1448 {
1449 if (sim_fpu_is_infinity (r))
1450 {
1451 *f = sim_fpu_qnan;
1452 return sim_fpu_status_invalid_idi;
1453 }
1454 else
1455 {
1456 *f = *l;
1457 f->sign = l->sign ^ r->sign;
1458 return 0;
1459 }
1460 }
1461 if (sim_fpu_is_zero (l))
1462 {
1463 if (sim_fpu_is_zero (r))
1464 {
1465 *f = sim_fpu_qnan;
1466 return sim_fpu_status_invalid_zdz;
1467 }
1468 else
1469 {
1470 *f = *l;
1471 f->sign = l->sign ^ r->sign;
1472 return 0;
1473 }
1474 }
1475 if (sim_fpu_is_infinity (r))
1476 {
1477 *f = sim_fpu_zero;
1478 f->sign = l->sign ^ r->sign;
1479 return 0;
1480 }
1481 if (sim_fpu_is_zero (r))
1482 {
1483 f->class = sim_fpu_class_infinity;
1484 f->sign = l->sign ^ r->sign;
1485 return sim_fpu_status_invalid_div0;
1486 }
1487
1488 /* Calculate the quotient mantissa with a bit-by-bit long division
1489 of the two 64 bit fractions */
1490 {
1491 /* quotient = ( ( numerator / denominator)
1492 x 2^(numerator exponent - denominator exponent)
1493 */
1494 unsigned64 numerator;
1495 unsigned64 denominator;
1496 unsigned64 quotient;
1497 unsigned64 bit;
1498
1499 f->class = sim_fpu_class_number;
1500 f->sign = l->sign ^ r->sign;
1501 f->normal_exp = l->normal_exp - r->normal_exp;
1502
1503 numerator = l->fraction;
1504 denominator = r->fraction;
1505
1506 /* Fraction will be less than 1.0 */
1507 if (numerator < denominator)
1508 {
1509 numerator <<= 1;
1510 f->normal_exp--;
1511 }
1512 ASSERT (numerator >= denominator);
1513
1514 /* Gain extra precision, already used one spare bit */
1515 numerator <<= NR_SPARE;
1516 denominator <<= NR_SPARE;
1517
1518 /* Does divide one bit at a time. Optimize??? */
1519 quotient = 0;
1520 bit = (IMPLICIT_1 << NR_SPARE);
1521 while (bit)
1522 {
1523 if (numerator >= denominator)
1524 {
1525 quotient |= bit;
1526 numerator -= denominator;
1527 }
1528 bit >>= 1;
1529 numerator <<= 1;
1530 }
1531
1532 /* discard (but save) the extra bits */
1533 if ((quotient & LSMASK64 (NR_SPARE -1, 0)))
1534 quotient = (quotient >> NR_SPARE) | 1;
1535 else
1536 quotient = (quotient >> NR_SPARE);
1537
1538 f->fraction = quotient;
1539 ASSERT (f->fraction >= IMPLICIT_1 && f->fraction < IMPLICIT_2);
1540 if (numerator != 0)
1541 {
1542 f->fraction |= 1; /* stick remaining bits */
1543 return sim_fpu_status_inexact;
1544 }
1545 else
1546 return 0;
1547 }
1548 }
1549
1550
1551 INLINE_SIM_FPU (int)
1552 sim_fpu_max (sim_fpu *f,
1553 const sim_fpu *l,
1554 const sim_fpu *r)
1555 {
1556 if (sim_fpu_is_snan (l))
1557 {
1558 *f = *l;
1559 f->class = sim_fpu_class_qnan;
1560 return sim_fpu_status_invalid_snan;
1561 }
1562 if (sim_fpu_is_snan (r))
1563 {
1564 *f = *r;
1565 f->class = sim_fpu_class_qnan;
1566 return sim_fpu_status_invalid_snan;
1567 }
1568 if (sim_fpu_is_qnan (l))
1569 {
1570 *f = *l;
1571 return 0;
1572 }
1573 if (sim_fpu_is_qnan (r))
1574 {
1575 *f = *r;
1576 return 0;
1577 }
1578 if (sim_fpu_is_infinity (l))
1579 {
1580 if (sim_fpu_is_infinity (r)
1581 && l->sign == r->sign)
1582 {
1583 *f = sim_fpu_qnan;
1584 return sim_fpu_status_invalid_isi;
1585 }
1586 if (l->sign)
1587 *f = *r; /* -inf < anything */
1588 else
1589 *f = *l; /* +inf > anything */
1590 return 0;
1591 }
1592 if (sim_fpu_is_infinity (r))
1593 {
1594 if (r->sign)
1595 *f = *l; /* anything > -inf */
1596 else
1597 *f = *r; /* anything < +inf */
1598 return 0;
1599 }
1600 if (l->sign > r->sign)
1601 {
1602 *f = *r; /* -ve < +ve */
1603 return 0;
1604 }
1605 if (l->sign < r->sign)
1606 {
1607 *f = *l; /* +ve > -ve */
1608 return 0;
1609 }
1610 ASSERT (l->sign == r->sign);
1611 if (l->normal_exp > r->normal_exp
1612 || (l->normal_exp == r->normal_exp &&
1613 l->fraction > r->fraction))
1614 {
1615 /* |l| > |r| */
1616 if (l->sign)
1617 *f = *r; /* -ve < -ve */
1618 else
1619 *f = *l; /* +ve > +ve */
1620 return 0;
1621 }
1622 else
1623 {
1624 /* |l| <= |r| */
1625 if (l->sign)
1626 *f = *l; /* -ve > -ve */
1627 else
1628 *f = *r; /* +ve < +ve */
1629 return 0;
1630 }
1631 }
1632
1633
1634 INLINE_SIM_FPU (int)
1635 sim_fpu_min (sim_fpu *f,
1636 const sim_fpu *l,
1637 const sim_fpu *r)
1638 {
1639 if (sim_fpu_is_snan (l))
1640 {
1641 *f = *l;
1642 f->class = sim_fpu_class_qnan;
1643 return sim_fpu_status_invalid_snan;
1644 }
1645 if (sim_fpu_is_snan (r))
1646 {
1647 *f = *r;
1648 f->class = sim_fpu_class_qnan;
1649 return sim_fpu_status_invalid_snan;
1650 }
1651 if (sim_fpu_is_qnan (l))
1652 {
1653 *f = *l;
1654 return 0;
1655 }
1656 if (sim_fpu_is_qnan (r))
1657 {
1658 *f = *r;
1659 return 0;
1660 }
1661 if (sim_fpu_is_infinity (l))
1662 {
1663 if (sim_fpu_is_infinity (r)
1664 && l->sign == r->sign)
1665 {
1666 *f = sim_fpu_qnan;
1667 return sim_fpu_status_invalid_isi;
1668 }
1669 if (l->sign)
1670 *f = *l; /* -inf < anything */
1671 else
1672 *f = *r; /* +inf > anything */
1673 return 0;
1674 }
1675 if (sim_fpu_is_infinity (r))
1676 {
1677 if (r->sign)
1678 *f = *r; /* anything > -inf */
1679 else
1680 *f = *l; /* anything < +inf */
1681 return 0;
1682 }
1683 if (l->sign > r->sign)
1684 {
1685 *f = *l; /* -ve < +ve */
1686 return 0;
1687 }
1688 if (l->sign < r->sign)
1689 {
1690 *f = *r; /* +ve > -ve */
1691 return 0;
1692 }
1693 ASSERT (l->sign == r->sign);
1694 if (l->normal_exp > r->normal_exp
1695 || (l->normal_exp == r->normal_exp &&
1696 l->fraction > r->fraction))
1697 {
1698 /* |l| > |r| */
1699 if (l->sign)
1700 *f = *l; /* -ve < -ve */
1701 else
1702 *f = *r; /* +ve > +ve */
1703 return 0;
1704 }
1705 else
1706 {
1707 /* |l| <= |r| */
1708 if (l->sign)
1709 *f = *r; /* -ve > -ve */
1710 else
1711 *f = *l; /* +ve < +ve */
1712 return 0;
1713 }
1714 }
1715
1716
1717 INLINE_SIM_FPU (int)
1718 sim_fpu_neg (sim_fpu *f,
1719 const sim_fpu *r)
1720 {
1721 if (sim_fpu_is_snan (r))
1722 {
1723 *f = *r;
1724 f->class = sim_fpu_class_qnan;
1725 return sim_fpu_status_invalid_snan;
1726 }
1727 if (sim_fpu_is_qnan (r))
1728 {
1729 *f = *r;
1730 return 0;
1731 }
1732 *f = *r;
1733 f->sign = !r->sign;
1734 return 0;
1735 }
1736
1737
1738 INLINE_SIM_FPU (int)
1739 sim_fpu_abs (sim_fpu *f,
1740 const sim_fpu *r)
1741 {
1742 *f = *r;
1743 f->sign = 0;
1744 if (sim_fpu_is_snan (r))
1745 {
1746 f->class = sim_fpu_class_qnan;
1747 return sim_fpu_status_invalid_snan;
1748 }
1749 return 0;
1750 }
1751
1752
1753 INLINE_SIM_FPU (int)
1754 sim_fpu_inv (sim_fpu *f,
1755 const sim_fpu *r)
1756 {
1757 return sim_fpu_div (f, &sim_fpu_one, r);
1758 }
1759
1760
1761 INLINE_SIM_FPU (int)
1762 sim_fpu_sqrt (sim_fpu *f,
1763 const sim_fpu *r)
1764 {
1765 if (sim_fpu_is_snan (r))
1766 {
1767 *f = sim_fpu_qnan;
1768 return sim_fpu_status_invalid_snan;
1769 }
1770 if (sim_fpu_is_qnan (r))
1771 {
1772 *f = sim_fpu_qnan;
1773 return 0;
1774 }
1775 if (sim_fpu_is_zero (r))
1776 {
1777 f->class = sim_fpu_class_zero;
1778 f->sign = r->sign;
1779 f->normal_exp = 0;
1780 return 0;
1781 }
1782 if (sim_fpu_is_infinity (r))
1783 {
1784 if (r->sign)
1785 {
1786 *f = sim_fpu_qnan;
1787 return sim_fpu_status_invalid_sqrt;
1788 }
1789 else
1790 {
1791 f->class = sim_fpu_class_infinity;
1792 f->sign = 0;
1794 return 0;
1795 }
1796 }
1797 if (r->sign)
1798 {
1799 *f = sim_fpu_qnan;
1800 return sim_fpu_status_invalid_sqrt;
1801 }
1802
1803 /* @(#)e_sqrt.c 5.1 93/09/24 */
1804 /*
1805 * ====================================================
1806 * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
1807 *
1808 * Developed at SunPro, a Sun Microsystems, Inc. business.
1809 * Permission to use, copy, modify, and distribute this
1810 * software is freely granted, provided that this notice
1811 * is preserved.
1812 * ====================================================
1813 */
1814
1815 /* __ieee754_sqrt(x)
1816 * Return correctly rounded sqrt.
1817 * ------------------------------------------
1818 * | Use the hardware sqrt if you have one |
1819 * ------------------------------------------
1820 * Method:
1821 * Bit by bit method using integer arithmetic. (Slow, but portable)
1822 * 1. Normalization
1823 * Scale x to y in [1,4) with even powers of 2:
1824 * find an integer k such that 1 <= (y=x*2^(2k)) < 4, then
1825 * sqrt(x) = 2^k * sqrt(y)
1826 -
1827 - Since:
1828 - sqrt ( x*2^(2m) ) = sqrt(x).2^m ; m even
1829 - sqrt ( x*2^(2m + 1) ) = sqrt(2.x).2^m ; m odd
1830 - Define:
1831 - y = ((m even) ? x : 2.x)
1832 - Then:
1833 - y in [1, 4) ; [IMPLICIT_1,IMPLICIT_4)
1834 - And:
1835 - sqrt (y) in [1, 2) ; [IMPLICIT_1,IMPLICIT_2)
1836 -
1837 * 2. Bit by bit computation
1838 * Let q = sqrt(y) truncated to i bit after binary point (q = 1),
1839 * i 0
1840 * i+1 2
1841 * s = 2*q , and y = 2 * ( y - q ). (1)
1842 * i i i i
1843 *
1844 * To compute q from q , one checks whether
1845 * i+1 i
1846 *
1847 * -(i+1) 2
1848 * (q + 2 ) <= y. (2)
1849 * i
1850 * -(i+1)
1851 * If (2) is false, then q = q ; otherwise q = q + 2 .
1852 * i+1 i i+1 i
1853 *
1854 * With some algebraic manipulation, it is not difficult to see
1855 * that (2) is equivalent to
1856 * -(i+1)
1857 * s + 2 <= y (3)
1858 * i i
1859 *
1860 * The advantage of (3) is that s and y can be computed by
1861 * i i
1862 * the following recurrence formula:
1863 * if (3) is false
1864 *
1865 * s = s , y = y ; (4)
1866 * i+1 i i+1 i
1867 *
1868 -
1869 - NOTE: y = 2*y
1870 - i+1 i
1871 -
1872 * otherwise,
1873 * -i -(i+1)
1874 * s = s + 2 , y = y - s - 2 (5)
1875 * i+1 i i+1 i i
1876 *
1877 -
1878 - -(i+1)
1879 - NOTE: y = 2 (y - s - 2 )
1880 - i+1 i i
1881 -
1882 * One may easily use induction to prove (4) and (5).
1883 * Note. Since the left hand side of (3) contains only i+2 bits,
1884 * it is not necessary to do a full (53-bit) comparison
1885 * in (3).
1886 * 3. Final rounding
1887 * After generating the 53 bits result, we compute one more bit.
1888 * Together with the remainder, we can decide whether the
1889 * result is exact, bigger than 1/2ulp, or less than 1/2ulp
1890 * (it will never equal to 1/2ulp).
1891 * The rounding mode can be detected by checking whether
1892 * huge + tiny is equal to huge, and whether huge - tiny is
1893 * equal to huge for some floating point number "huge" and "tiny".
1894 *
1895 * Special cases:
1896 * sqrt(+-0) = +-0 ... exact
1897 * sqrt(inf) = inf
1898 * sqrt(-ve) = NaN ... with invalid signal
1899 * sqrt(NaN) = NaN ... with invalid signal for signaling NaN
1900 *
1901 * Other methods : see the appended file at the end of the program below.
1902 *---------------
1903 */
1904
1905 {
1906 /* generate sqrt(x) bit by bit */
1907 unsigned64 y;
1908 unsigned64 q;
1909 unsigned64 s;
1910 unsigned64 b;
1911
1912 f->class = sim_fpu_class_number;
1913 f->sign = 0;
1914 y = r->fraction;
1915 f->normal_exp = (r->normal_exp >> 1); /* exp = [exp/2] */
1916
1917 /* odd exp, double x to make it even */
1918 ASSERT (y >= IMPLICIT_1 && y < IMPLICIT_4);
1919 if ((r->normal_exp & 1))
1920 {
1921 y += y;
1922 }
1923 ASSERT (y >= IMPLICIT_1 && y < (IMPLICIT_2 << 1));
1924
1925 /* Let loop determine first value of s (either 1 or 2) */
1926 b = IMPLICIT_1;
1927 q = 0;
1928 s = 0;
1929
1930 while (b)
1931 {
1932 unsigned64 t = s + b;
1933 if (t <= y)
1934 {
1935 s |= (b << 1);
1936 y -= t;
1937 q |= b;
1938 }
1939 y <<= 1;
1940 b >>= 1;
1941 }
1942
1943 ASSERT (q >= IMPLICIT_1 && q < IMPLICIT_2);
1944 f->fraction = q;
1945 if (y != 0)
1946 {
1947 f->fraction |= 1; /* stick remaining bits */
1948 return sim_fpu_status_inexact;
1949 }
1950 else
1951 return 0;
1952 }
1953 }
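
/* Editorial sketch (not compiled): the same bit-by-bit idea applied to
   a plain integer, returning floor (sqrt (x)).  The name "isqrt64" is
   illustrative only.  */
#if 0
static unsigned64
isqrt64 (unsigned64 x)
{
  unsigned64 q = 0;			/* root accumulated so far */
  unsigned64 bit = LSBIT64 (62);	/* highest power of four in range */
  while (bit > x)
    bit >>= 2;
  while (bit)
    {
      if (x >= q + bit)
	{
	  x -= q + bit;
	  q = (q >> 1) + bit;
	}
      else
	q >>= 1;
      bit >>= 2;
    }
  return q;
}
#endif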
1954
1955
1956 /* int/long <-> sim_fpu */
1957
1958 INLINE_SIM_FPU (int)
1959 sim_fpu_i32to (sim_fpu *f,
1960 signed32 i,
1961 sim_fpu_round round)
1962 {
1963 i2fpu (f, i, 0);
1964 return 0;
1965 }
1966
1967 INLINE_SIM_FPU (int)
1968 sim_fpu_u32to (sim_fpu *f,
1969 unsigned32 u,
1970 sim_fpu_round round)
1971 {
1972 u2fpu (f, u, 0);
1973 return 0;
1974 }
1975
1976 INLINE_SIM_FPU (int)
1977 sim_fpu_i64to (sim_fpu *f,
1978 signed64 i,
1979 sim_fpu_round round)
1980 {
1981 i2fpu (f, i, 1);
1982 return 0;
1983 }
1984
1985 INLINE_SIM_FPU (int)
1986 sim_fpu_u64to (sim_fpu *f,
1987 unsigned64 u,
1988 sim_fpu_round round)
1989 {
1990 u2fpu (f, u, 1);
1991 return 0;
1992 }
1993
1994
1995 INLINE_SIM_FPU (int)
1996 sim_fpu_to32i (signed32 *i,
1997 const sim_fpu *f,
1998 sim_fpu_round round)
1999 {
2000 signed64 i64;
2001 int status = fpu2i (&i64, f, 0, round);
2002 *i = i64;
2003 return status;
2004 }
2005
2006 INLINE_SIM_FPU (int)
2007 sim_fpu_to32u (unsigned32 *u,
2008 const sim_fpu *f,
2009 sim_fpu_round round)
2010 {
2011 unsigned64 u64;
2012 int status = fpu2u (&u64, f, 0);
2013 *u = u64;
2014 return status;
2015 }
2016
2017 INLINE_SIM_FPU (int)
2018 sim_fpu_to64i (signed64 *i,
2019 const sim_fpu *f,
2020 sim_fpu_round round)
2021 {
2022 return fpu2i (i, f, 1, round);
2023 }
2024
2025
2026 INLINE_SIM_FPU (int)
2027 sim_fpu_to64u (unsigned64 *u,
2028 const sim_fpu *f,
2029 sim_fpu_round round)
2030 {
2031 return fpu2u (u, f, 1);
2032 }
2033
2034
2035
2036 /* sim_fpu -> host format */
2037
2038 #if 0
2039 INLINE_SIM_FPU (float)
2040 sim_fpu_2f (const sim_fpu *f)
2041 {
2042 return fval.d;
2043 }
2044 #endif
2045
2046
2047 INLINE_SIM_FPU (double)
2048 sim_fpu_2d (const sim_fpu *s)
2049 {
2050 sim_fpu_map val;
2051 if (sim_fpu_is_snan (s))
2052 {
2053 /* gag SNaN's */
2054 sim_fpu n = *s;
2055 n.class = sim_fpu_class_qnan;
2056 val.i = pack_fpu (&n, 1);
2057 }
2058 else
2059 {
2060 val.i = pack_fpu (s, 1);
2061 }
2062 return val.d;
2063 }
2064
2065
2066 #if 0
2067 INLINE_SIM_FPU (void)
2068 sim_fpu_f2 (sim_fpu *f,
2069 float s)
2070 {
2071 sim_fpu_map val;
2072 val.d = s;
2073 unpack_fpu (f, val.i, 1);
2074 }
2075 #endif
2076
2077
2078 INLINE_SIM_FPU (void)
2079 sim_fpu_d2 (sim_fpu *f,
2080 double d)
2081 {
2082 sim_fpu_map val;
2083 val.d = d;
2084 unpack_fpu (f, val.i, 1);
2085 }
2086
2087
2088 /* General */
2089
2090 INLINE_SIM_FPU (int)
2091 sim_fpu_is_nan (const sim_fpu *d)
2092 {
2093 switch (d->class)
2094 {
2095 case sim_fpu_class_qnan:
2096 case sim_fpu_class_snan:
2097 return 1;
2098 default:
2099 return 0;
2100 }
2101 }
2102
2103 INLINE_SIM_FPU (int)
2104 sim_fpu_is_qnan (const sim_fpu *d)
2105 {
2106 switch (d->class)
2107 {
2108 case sim_fpu_class_qnan:
2109 return 1;
2110 default:
2111 return 0;
2112 }
2113 }
2114
2115 INLINE_SIM_FPU (int)
2116 sim_fpu_is_snan (const sim_fpu *d)
2117 {
2118 switch (d->class)
2119 {
2120 case sim_fpu_class_snan:
2121 return 1;
2122 default:
2123 return 0;
2124 }
2125 }
2126
2127 INLINE_SIM_FPU (int)
2128 sim_fpu_is_zero (const sim_fpu *d)
2129 {
2130 switch (d->class)
2131 {
2132 case sim_fpu_class_zero:
2133 return 1;
2134 default:
2135 return 0;
2136 }
2137 }
2138
2139 INLINE_SIM_FPU (int)
2140 sim_fpu_is_infinity (const sim_fpu *d)
2141 {
2142 switch (d->class)
2143 {
2144 case sim_fpu_class_infinity:
2145 return 1;
2146 default:
2147 return 0;
2148 }
2149 }
2150
2151 INLINE_SIM_FPU (int)
2152 sim_fpu_is_number (const sim_fpu *d)
2153 {
2154 switch (d->class)
2155 {
2156 case sim_fpu_class_denorm:
2157 case sim_fpu_class_number:
2158 return 1;
2159 default:
2160 return 0;
2161 }
2162 }
2163
2164 INLINE_SIM_FPU (int)
2165 sim_fpu_is_denorm (const sim_fpu *d)
2166 {
2167 switch (d->class)
2168 {
2169 case sim_fpu_class_denorm:
2170 return 1;
2171 default:
2172 return 0;
2173 }
2174 }
2175
2176
2177 INLINE_SIM_FPU (int)
2178 sim_fpu_sign (const sim_fpu *d)
2179 {
2180 return d->sign;
2181 }
2182
2183
2184 INLINE_SIM_FPU (int)
2185 sim_fpu_exp (const sim_fpu *d)
2186 {
2187 return d->normal_exp;
2188 }
2189
2190
2191 INLINE_SIM_FPU (unsigned64)
2192 sim_fpu_fraction (const sim_fpu *d)
2193 {
2194 return d->fraction;
2195 }
2196
2197
2198 INLINE_SIM_FPU (unsigned64)
2199 sim_fpu_guard (const sim_fpu *d, int is_double)
2200 {
2201 unsigned64 rv;
2202 unsigned64 guardmask = LSMASK64 (NR_GUARDS - 1, 0);
2203 rv = (d->fraction & guardmask) >> NR_PAD;
2204 return rv;
2205 }
2206
2207
2208 INLINE_SIM_FPU (int)
2209 sim_fpu_is (const sim_fpu *d)
2210 {
2211 switch (d->class)
2212 {
2213 case sim_fpu_class_qnan:
2214 return SIM_FPU_IS_QNAN;
2215 case sim_fpu_class_snan:
2216 return SIM_FPU_IS_SNAN;
2217 case sim_fpu_class_infinity:
2218 if (d->sign)
2219 return SIM_FPU_IS_NINF;
2220 else
2221 return SIM_FPU_IS_PINF;
2222 case sim_fpu_class_number:
2223 if (d->sign)
2224 return SIM_FPU_IS_NNUMBER;
2225 else
2226 return SIM_FPU_IS_PNUMBER;
2227 case sim_fpu_class_denorm:
2228 if (d->sign)
2229 return SIM_FPU_IS_NDENORM;
2230 else
2231 return SIM_FPU_IS_PDENORM;
2232 case sim_fpu_class_zero:
2233 if (d->sign)
2234 return SIM_FPU_IS_NZERO;
2235 else
2236 return SIM_FPU_IS_PZERO;
2237 default:
2238 return -1;
2239 abort ();
2240 }
2241 }
2242
2243 INLINE_SIM_FPU (int)
2244 sim_fpu_cmp (const sim_fpu *l, const sim_fpu *r)
2245 {
2246 sim_fpu res;
2247 sim_fpu_sub (&res, l, r);
2248 return sim_fpu_is (&res);
2249 }
2250
2251 INLINE_SIM_FPU (int)
2252 sim_fpu_is_lt (const sim_fpu *l, const sim_fpu *r)
2253 {
2254 int status;
2255 sim_fpu_lt (&status, l, r);
2256 return status;
2257 }
2258
2259 INLINE_SIM_FPU (int)
2260 sim_fpu_is_le (const sim_fpu *l, const sim_fpu *r)
2261 {
2262 int is;
2263 sim_fpu_le (&is, l, r);
2264 return is;
2265 }
2266
2267 INLINE_SIM_FPU (int)
2268 sim_fpu_is_eq (const sim_fpu *l, const sim_fpu *r)
2269 {
2270 int is;
2271 sim_fpu_eq (&is, l, r);
2272 return is;
2273 }
2274
2275 INLINE_SIM_FPU (int)
2276 sim_fpu_is_ne (const sim_fpu *l, const sim_fpu *r)
2277 {
2278 int is;
2279 sim_fpu_ne (&is, l, r);
2280 return is;
2281 }
2282
2283 INLINE_SIM_FPU (int)
2284 sim_fpu_is_ge (const sim_fpu *l, const sim_fpu *r)
2285 {
2286 int is;
2287 sim_fpu_ge (&is, l, r);
2288 return is;
2289 }
2290
2291 INLINE_SIM_FPU (int)
2292 sim_fpu_is_gt (const sim_fpu *l, const sim_fpu *r)
2293 {
2294 int is;
2295 sim_fpu_gt (&is, l, r);
2296 return is;
2297 }
2298
2299
2300 /* Compare operators */
2301
2302 INLINE_SIM_FPU (int)
2303 sim_fpu_lt (int *is,
2304 const sim_fpu *l,
2305 const sim_fpu *r)
2306 {
2307 if (!sim_fpu_is_nan (l) && !sim_fpu_is_nan (r))
2308 {
2309 sim_fpu_map lval;
2310 sim_fpu_map rval;
2311 lval.i = pack_fpu (l, 1);
2312 rval.i = pack_fpu (r, 1);
2313 (*is) = (lval.d < rval.d);
2314 return 0;
2315 }
2316 else if (sim_fpu_is_snan (l) || sim_fpu_is_snan (r))
2317 {
2318 *is = 0;
2319 return sim_fpu_status_invalid_snan;
2320 }
2321 else
2322 {
2323 *is = 0;
2324 return sim_fpu_status_invalid_qnan;
2325 }
2326 }
2327
2328 INLINE_SIM_FPU (int)
2329 sim_fpu_le (int *is,
2330 const sim_fpu *l,
2331 const sim_fpu *r)
2332 {
2333 if (!sim_fpu_is_nan (l) && !sim_fpu_is_nan (r))
2334 {
2335 sim_fpu_map lval;
2336 sim_fpu_map rval;
2337 lval.i = pack_fpu (l, 1);
2338 rval.i = pack_fpu (r, 1);
2339 *is = (lval.d <= rval.d);
2340 return 0;
2341 }
2342 else if (sim_fpu_is_snan (l) || sim_fpu_is_snan (r))
2343 {
2344 *is = 0;
2345 return sim_fpu_status_invalid_snan;
2346 }
2347 else
2348 {
2349 *is = 0;
2350 return sim_fpu_status_invalid_qnan;
2351 }
2352 }
2353
2354 INLINE_SIM_FPU (int)
2355 sim_fpu_eq (int *is,
2356 const sim_fpu *l,
2357 const sim_fpu *r)
2358 {
2359 if (!sim_fpu_is_nan (l) && !sim_fpu_is_nan (r))
2360 {
2361 sim_fpu_map lval;
2362 sim_fpu_map rval;
2363 lval.i = pack_fpu (l, 1);
2364 rval.i = pack_fpu (r, 1);
2365 (*is) = (lval.d == rval.d);
2366 return 0;
2367 }
2368 else if (sim_fpu_is_snan (l) || sim_fpu_is_snan (r))
2369 {
2370 *is = 0;
2371 return sim_fpu_status_invalid_snan;
2372 }
2373 else
2374 {
2375 *is = 0;
2376 return sim_fpu_status_invalid_qnan;
2377 }
2378 }
2379
2380 INLINE_SIM_FPU (int)
2381 sim_fpu_ne (int *is,
2382 const sim_fpu *l,
2383 const sim_fpu *r)
2384 {
2385 if (!sim_fpu_is_nan (l) && !sim_fpu_is_nan (r))
2386 {
2387 sim_fpu_map lval;
2388 sim_fpu_map rval;
2389 lval.i = pack_fpu (l, 1);
2390 rval.i = pack_fpu (r, 1);
2391 (*is) = (lval.d != rval.d);
2392 return 0;
2393 }
2394 else if (sim_fpu_is_snan (l) || sim_fpu_is_snan (r))
2395 {
2396 *is = 0;
2397 return sim_fpu_status_invalid_snan;
2398 }
2399 else
2400 {
2401 *is = 0;
2402 return sim_fpu_status_invalid_qnan;
2403 }
2404 }
2405
2406 INLINE_SIM_FPU (int)
2407 sim_fpu_ge (int *is,
2408 const sim_fpu *l,
2409 const sim_fpu *r)
2410 {
2411 return sim_fpu_le (is, r, l);
2412 }
2413
2414 INLINE_SIM_FPU (int)
2415 sim_fpu_gt (int *is,
2416 const sim_fpu *l,
2417 const sim_fpu *r)
2418 {
2419 return sim_fpu_lt (is, r, l);
2420 }
2421
2422
2423 /* A number of useful constants */
2424
2425 #if EXTERN_SIM_FPU_P
2426 const sim_fpu sim_fpu_zero = {
2427 sim_fpu_class_zero,
2428 };
2429 const sim_fpu sim_fpu_qnan = {
2430 sim_fpu_class_qnan,
2431 };
2432 const sim_fpu sim_fpu_one = {
2433 sim_fpu_class_number, 0, IMPLICIT_1, 0
2434 };
2435 const sim_fpu sim_fpu_two = {
2436 sim_fpu_class_number, 0, IMPLICIT_1, 1
2437 };
2438 const sim_fpu sim_fpu_max32 = {
2439 sim_fpu_class_number, 0, LSMASK64 (NR_FRAC_GUARD, NR_GUARDS32), NORMAL_EXPMAX32
2440 };
2441 const sim_fpu sim_fpu_max64 = {
2442 sim_fpu_class_number, 0, LSMASK64 (NR_FRAC_GUARD, NR_GUARDS64), NORMAL_EXPMAX64
2443 };
2444 #endif
2445
2446
2447 /* For debugging */
2448
2449 INLINE_SIM_FPU (void)
2450 sim_fpu_print_fpu (const sim_fpu *f,
2451 sim_fpu_print_func *print,
2452 void *arg)
2453 {
2454 sim_fpu_printn_fpu (f, print, -1, arg);
2455 }
2456
2457 INLINE_SIM_FPU (void)
2458 sim_fpu_printn_fpu (const sim_fpu *f,
2459 sim_fpu_print_func *print,
2460 int digits,
2461 void *arg)
2462 {
2463 print (arg, "%s", f->sign ? "-" : "+");
2464 switch (f->class)
2465 {
2466 case sim_fpu_class_qnan:
2467 print (arg, "0.");
2468 print_bits (f->fraction, NR_FRAC_GUARD - 1, digits, print, arg);
2469 print (arg, "*QuietNaN");
2470 break;
2471 case sim_fpu_class_snan:
2472 print (arg, "0.");
2473 print_bits (f->fraction, NR_FRAC_GUARD - 1, digits, print, arg);
2474 print (arg, "*SignalNaN");
2475 break;
2476 case sim_fpu_class_zero:
2477 print (arg, "0.0");
2478 break;
2479 case sim_fpu_class_infinity:
2480 print (arg, "INF");
2481 break;
2482 case sim_fpu_class_number:
2483 case sim_fpu_class_denorm:
2484 print (arg, "1.");
2485 print_bits (f->fraction, NR_FRAC_GUARD - 1, digits, print, arg);
2486 print (arg, "*2^%+d", f->normal_exp);
2487 ASSERT (f->fraction >= IMPLICIT_1);
2488 ASSERT (f->fraction < IMPLICIT_2);
2489 }
2490 }
2491
2492
2493 INLINE_SIM_FPU (void)
2494 sim_fpu_print_status (int status,
2495 sim_fpu_print_func *print,
2496 void *arg)
2497 {
2498 int i = 1;
2499 char *prefix = "";
2500 while (status >= i)
2501 {
2502 switch ((sim_fpu_status) (status & i))
2503 {
2504 case sim_fpu_status_denorm:
2505 print (arg, "%sD", prefix);
2506 break;
2507 case sim_fpu_status_invalid_snan:
2508 print (arg, "%sSNaN", prefix);
2509 break;
2510 case sim_fpu_status_invalid_qnan:
2511 print (arg, "%sQNaN", prefix);
2512 break;
2513 case sim_fpu_status_invalid_isi:
2514 print (arg, "%sISI", prefix);
2515 break;
2516 case sim_fpu_status_invalid_idi:
2517 print (arg, "%sIDI", prefix);
2518 break;
2519 case sim_fpu_status_invalid_zdz:
2520 print (arg, "%sZDZ", prefix);
2521 break;
2522 case sim_fpu_status_invalid_imz:
2523 print (arg, "%sIMZ", prefix);
2524 break;
2525 case sim_fpu_status_invalid_cvi:
2526 print (arg, "%sCVI", prefix);
2527 break;
2528 case sim_fpu_status_invalid_cmp:
2529 print (arg, "%sCMP", prefix);
2530 break;
2531 case sim_fpu_status_invalid_sqrt:
2532 print (arg, "%sSQRT", prefix);
2533 break;
2534
2535 case sim_fpu_status_inexact:
2536 print (arg, "%sX", prefix);
2537 break;
2538
2539 case sim_fpu_status_overflow:
2540 print (arg, "%sO", prefix);
2541 break;
2542
2543 case sim_fpu_status_underflow:
2544 print (arg, "%sU", prefix);
2545 break;
2546
2547 case sim_fpu_status_invalid_div0:
2548 print (arg, "%s/", prefix);
2549 break;
2550
2551 case sim_fpu_status_rounded:
2552 print (arg, "%sR", prefix);
2553 break;
2554
2555 }
2556 i <<= 1;
2557 prefix = ",";
2558 }
2559 }
2560
2561 #endif