1 /* This is a software floating point library which can be used instead
2 of the floating point routines in libgcc1.c for targets without
3 hardware floating point. */
4
5 /* Copyright 1994, 1997, 1998, 2003, 2007, 2008, 2009, 2010
6 Free Software Foundation, Inc.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
21 /* As a special exception, if you link this library with other files,
22 some of which are compiled with GCC, to produce an executable,
23 this library does not by itself cause the resulting executable
24 to be covered by the GNU General Public License.
25 This exception does not however invalidate any other reasons why
26 the executable file might be covered by the GNU General Public License. */
27
28 /* This implements IEEE 754 format arithmetic, but does not provide a
29 mechanism for setting the rounding mode, or for generating or handling
30 exceptions.
31
32 The original code by Steve Chamberlain, hacked by Mark Eichin and Jim
33 Wilson, all of Cygnus Support. */
34
35
36 #ifndef SIM_FPU_C
37 #define SIM_FPU_C
38
39 #include "sim-basics.h"
40 #include "sim-fpu.h"
41
42 #include "sim-io.h"
43 #include "sim-assert.h"
44
45
46 /* Debugging support.
47 If digits is -1, then print all digits. */
48
49 static void
50 print_bits (unsigned64 x,
51 int msbit,
52 int digits,
53 sim_fpu_print_func print,
54 void *arg)
55 {
56 unsigned64 bit = LSBIT64 (msbit);
57 int i = 4;
58 while (bit && digits)
59 {
60 if (i == 0)
61 print (arg, ",");
62
63 if ((x & bit))
64 print (arg, "1");
65 else
66 print (arg, "0");
67 bit >>= 1;
68
69 if (digits > 0) digits--;
70 i = (i + 1) % 4;
71 }
72 }
73
74
75
76 /* Quick and dirty conversion between a host double and host 64bit int */
77
78 typedef union {
79 double d;
80 unsigned64 i;
81 } sim_fpu_map;
82
83
84 /* A packed IEEE floating point number.
85
86 Form is <SIGN:1><BIASEDEXP:NR_EXPBITS><FRAC:NR_FRACBITS> for both
87 32 and 64 bit numbers. This number is interpreted as:
88
89 Normalized (0 < BIASEDEXP && BIASEDEXP < EXPMAX):
90 (sign ? '-' : '+') 1.<FRAC> x 2 ^ (BIASEDEXP - EXPBIAS)
91
92 Denormalized (0 == BIASEDEXP && FRAC != 0):
93 (sign ? "-" : "+") 0.<FRAC> x 2 ^ (1 - EXPBIAS)
94
95 Zero (0 == BIASEDEXP && FRAC == 0):
96 (sign ? "-" : "+") 0.0
97
98 Infinity (BIASEDEXP == EXPMAX && FRAC == 0):
99 (sign ? "-" : "+") "infinity"
100
101 SignalingNaN (BIASEDEXP == EXPMAX && FRAC > 0 && FRAC < QUIET_NAN):
102 SNaN.FRAC
103
104 QuietNaN (BIASEDEXP == EXPMAX && FRAC >= QUIET_NAN):
105 QNaN.FRAC
106
107 */
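
/* Worked example (illustrative): in single precision the value -2.5 is
   -1.01 (binary) x 2 ^ 1, so it packs as SIGN=1, BIASEDEXP=1+127=128 and
   FRAC=0x200000, giving the 32 bit pattern 0xC0200000.  */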
108
109 #define NR_EXPBITS (is_double ? 11 : 8)
110 #define NR_FRACBITS (is_double ? 52 : 23)
111 #define SIGNBIT (is_double ? MSBIT64 (0) : MSBIT64 (32))
112
113 #define EXPMAX32 (255)
114 #define EXPMAX64 (2047)
115 #define EXPMAX ((unsigned) (is_double ? EXPMAX64 : EXPMAX32))
116
117 #define EXPBIAS32 (127)
118 #define EXPBIAS64 (1023)
119 #define EXPBIAS (is_double ? EXPBIAS64 : EXPBIAS32)
120
121 #define QUIET_NAN LSBIT64 (NR_FRACBITS - 1)
122
123
124
125 /* An unpacked floating point number.
126
127 When unpacked, the fraction of both a 32 and 64 bit floating point
128 number is stored using the same format:
129
130 64 bit - <IMPLICIT_1:1><FRACBITS:52><GUARDS:8><PAD:00>
131 32 bit - <IMPLICIT_1:1><FRACBITS:23><GUARDS:7><PAD:30> */
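
/* Illustrative note: in this layout the implicit leading one of a normal
   number always sits at bit 60 (NR_FRAC_GUARD below), whatever the
   precision.  For example 1.0 unpacks to FRACTION == IMPLICIT_1 with
   NORMAL_EXP == 0, while -2.5 unpacks to sign 1, NORMAL_EXP == 1 and
   FRACTION == IMPLICIT_1 + (IMPLICIT_1 >> 2).  */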
132
133 #define NR_PAD32 (30)
134 #define NR_PAD64 (0)
135 #define NR_PAD (is_double ? NR_PAD64 : NR_PAD32)
136 #define PADMASK (is_double ? 0 : LSMASK64 (NR_PAD32 - 1, 0))
137
138 #define NR_GUARDS32 (7 + NR_PAD32)
139 #define NR_GUARDS64 (8 + NR_PAD64)
140 #define NR_GUARDS (is_double ? NR_GUARDS64 : NR_GUARDS32)
141 #define GUARDMASK LSMASK64 (NR_GUARDS - 1, 0)
142
143 #define GUARDMSB LSBIT64 (NR_GUARDS - 1)
144 #define GUARDLSB LSBIT64 (NR_PAD)
145 #define GUARDROUND LSMASK64 (NR_GUARDS - 2, 0)
146
147 #define NR_FRAC_GUARD (60)
148 #define IMPLICIT_1 LSBIT64 (NR_FRAC_GUARD)
149 #define IMPLICIT_2 LSBIT64 (NR_FRAC_GUARD + 1)
150 #define IMPLICIT_4 LSBIT64 (NR_FRAC_GUARD + 2)
151 #define NR_SPARE 2
152
153 #define FRAC32MASK LSMASK64 (63, NR_FRAC_GUARD - 32 + 1)
154
155 #define NORMAL_EXPMIN (-(EXPBIAS)+1)
156
157 #define NORMAL_EXPMAX32 (EXPBIAS32)
158 #define NORMAL_EXPMAX64 (EXPBIAS64)
159 #define NORMAL_EXPMAX (EXPBIAS)
160
161
162 /* Integer constants */
163
164 #define MAX_INT32 ((signed64) LSMASK64 (30, 0))
165 #define MAX_UINT32 LSMASK64 (31, 0)
166 #define MIN_INT32 ((signed64) LSMASK64 (63, 31))
167
168 #define MAX_INT64 ((signed64) LSMASK64 (62, 0))
169 #define MAX_UINT64 LSMASK64 (63, 0)
170 #define MIN_INT64 ((signed64) LSMASK64 (63, 63))
171
172 #define MAX_INT (is_64bit ? MAX_INT64 : MAX_INT32)
173 #define MIN_INT (is_64bit ? MIN_INT64 : MIN_INT32)
174 #define MAX_UINT (is_64bit ? MAX_UINT64 : MAX_UINT32)
175 #define NR_INTBITS (is_64bit ? 64 : 32)
176
177 /* Squeeze an unpacked sim_fpu struct into a 32/64 bit integer.  */
178 STATIC_INLINE_SIM_FPU (unsigned64)
179 pack_fpu (const sim_fpu *src,
180 int is_double)
181 {
182 int sign;
183 unsigned64 exp;
184 unsigned64 fraction;
185 unsigned64 packed;
186
187 switch (src->class)
188 {
189 /* create a NaN */
190 case sim_fpu_class_qnan:
191 sign = src->sign;
192 exp = EXPMAX;
193 /* force fraction to correct class */
194 fraction = src->fraction;
195 fraction >>= NR_GUARDS;
196 #ifdef SIM_QUIET_NAN_NEGATED
197 fraction |= QUIET_NAN - 1;
198 #else
199 fraction |= QUIET_NAN;
200 #endif
201 break;
202 case sim_fpu_class_snan:
203 sign = src->sign;
204 exp = EXPMAX;
205 /* force fraction to correct class */
206 fraction = src->fraction;
207 fraction >>= NR_GUARDS;
208 #ifdef SIM_QUIET_NAN_NEGATED
209 fraction |= QUIET_NAN;
210 #else
211 fraction &= ~QUIET_NAN;
212 #endif
213 break;
214 case sim_fpu_class_infinity:
215 sign = src->sign;
216 exp = EXPMAX;
217 fraction = 0;
218 break;
219 case sim_fpu_class_zero:
220 sign = src->sign;
221 exp = 0;
222 fraction = 0;
223 break;
224 case sim_fpu_class_number:
225 case sim_fpu_class_denorm:
226 ASSERT (src->fraction >= IMPLICIT_1);
227 ASSERT (src->fraction < IMPLICIT_2);
228 if (src->normal_exp < NORMAL_EXPMIN)
229 {
230 /* This number's exponent is too low to fit into the bits
231 available in the number.  We'll denormalize it by
232 storing zero in the exponent and shifting the fraction to
233 the right to make up for it. */
234 int nr_shift = NORMAL_EXPMIN - src->normal_exp;
235 if (nr_shift > NR_FRACBITS)
236 {
237 /* underflow, just make the number zero */
238 sign = src->sign;
239 exp = 0;
240 fraction = 0;
241 }
242 else
243 {
244 sign = src->sign;
245 exp = 0;
246 /* Shift by the value */
247 fraction = src->fraction;
248 fraction >>= NR_GUARDS;
249 fraction >>= nr_shift;
250 }
251 }
252 else if (src->normal_exp > NORMAL_EXPMAX)
253 {
254 /* Infinity */
255 sign = src->sign;
256 exp = EXPMAX;
257 fraction = 0;
258 }
259 else
260 {
261 exp = (src->normal_exp + EXPBIAS);
262 sign = src->sign;
263 fraction = src->fraction;
264 /* FIXME: Need to round according to WITH_SIM_FPU_ROUNDING
265 or some such */
266 /* Round to nearest: if the guard bits are all zero except the
267 first (most significant), then we're exactly half way between
268 two numbers; choose the one which makes the LSB of the answer 0. */
269 if ((fraction & GUARDMASK) == GUARDMSB)
270 {
271 if ((fraction & (GUARDMSB << 1)))
272 fraction += (GUARDMSB << 1);
273 }
274 else
275 {
276 /* Add a one to the guards to force round to nearest */
277 fraction += GUARDROUND;
278 }
279 if ((fraction & IMPLICIT_2)) /* rounding resulted in carry */
280 {
281 exp += 1;
282 fraction >>= 1;
283 }
284 fraction >>= NR_GUARDS;
285 /* When exp == EXPMAX (overflow from carry) fraction must
286 have been made zero */
287 ASSERT ((exp == EXPMAX) <= ((fraction & ~IMPLICIT_1) == 0));
288 }
289 break;
290 default:
291 abort ();
292 }
293
294 packed = ((sign ? SIGNBIT : 0)
295 | (exp << NR_FRACBITS)
296 | LSMASKED64 (fraction, NR_FRACBITS - 1, 0));
297
298 /* trace operation */
299 #if 0
300 if (is_double)
301 {
302 }
303 else
304 {
305 printf ("pack_fpu: ");
306 printf ("-> %c%0lX.%06lX\n",
307 LSMASKED32 (packed, 31, 31) ? '8' : '0',
308 (long) LSEXTRACTED32 (packed, 30, 23),
309 (long) LSEXTRACTED32 (packed, 23 - 1, 0));
310 }
311 #endif
312
313 return packed;
314 }
315
316
317 /* Unpack a 32/64 bit integer into a sim_fpu structure */
318 STATIC_INLINE_SIM_FPU (void)
319 unpack_fpu (sim_fpu *dst, unsigned64 packed, int is_double)
320 {
321 unsigned64 fraction = LSMASKED64 (packed, NR_FRACBITS - 1, 0);
322 unsigned exp = LSEXTRACTED64 (packed, NR_EXPBITS + NR_FRACBITS - 1, NR_FRACBITS);
323 int sign = (packed & SIGNBIT) != 0;
324
325 if (exp == 0)
326 {
327 /* Hmm. Looks like 0 */
328 if (fraction == 0)
329 {
330 /* tastes like zero */
331 dst->class = sim_fpu_class_zero;
332 dst->sign = sign;
333 dst->normal_exp = 0;
334 }
335 else
336 {
337 /* Zero exponent with non zero fraction - it's denormalized,
338 so there isn't a leading implicit one - we'll shift it so
339 it gets one. */
340 dst->normal_exp = exp - EXPBIAS + 1;
341 dst->class = sim_fpu_class_denorm;
342 dst->sign = sign;
343 fraction <<= NR_GUARDS;
344 while (fraction < IMPLICIT_1)
345 {
346 fraction <<= 1;
347 dst->normal_exp--;
348 }
349 dst->fraction = fraction;
350 }
351 }
352 else if (exp == EXPMAX)
353 {
354 /* Huge exponent*/
355 if (fraction == 0)
356 {
357 /* Attached to a zero fraction - means infinity */
358 dst->class = sim_fpu_class_infinity;
359 dst->sign = sign;
360 /* dst->normal_exp = EXPBIAS; */
361 /* dst->fraction = 0; */
362 }
363 else
364 {
365 int qnan;
366
367 /* Non zero fraction, means NaN */
368 dst->sign = sign;
369 dst->fraction = (fraction << NR_GUARDS);
370 #ifdef SIM_QUIET_NAN_NEGATED
371 qnan = (fraction & QUIET_NAN) == 0;
372 #else
373 qnan = fraction >= QUIET_NAN;
374 #endif
375 if (qnan)
376 dst->class = sim_fpu_class_qnan;
377 else
378 dst->class = sim_fpu_class_snan;
379 }
380 }
381 else
382 {
383 /* Nothing strange about this number */
384 dst->class = sim_fpu_class_number;
385 dst->sign = sign;
386 dst->fraction = ((fraction << NR_GUARDS) | IMPLICIT_1);
387 dst->normal_exp = exp - EXPBIAS;
388 }
389
390 /* trace operation */
391 #if 0
392 if (is_double)
393 {
394 }
395 else
396 {
397 printf ("unpack_fpu: %c%02lX.%06lX ->\n",
398 LSMASKED32 (packed, 31, 31) ? '8' : '0',
399 (long) LSEXTRACTED32 (packed, 30, 23),
400 (long) LSEXTRACTED32 (packed, 23 - 1, 0));
401 }
402 #endif
403
404 /* sanity checks */
405 {
406 sim_fpu_map val;
407 val.i = pack_fpu (dst, 1);
408 if (is_double)
409 {
410 ASSERT (val.i == packed);
411 }
412 else
413 {
414 unsigned32 val = pack_fpu (dst, 0);
415 unsigned32 org = packed;
416 ASSERT (val == org);
417 }
418 }
419 }
420
421
422 /* Convert a floating point into an integer */
423 STATIC_INLINE_SIM_FPU (int)
424 fpu2i (signed64 *i,
425 const sim_fpu *s,
426 int is_64bit,
427 sim_fpu_round round)
428 {
429 unsigned64 tmp;
430 int shift;
431 int status = 0;
432 if (sim_fpu_is_zero (s))
433 {
434 *i = 0;
435 return 0;
436 }
437 if (sim_fpu_is_snan (s))
438 {
439 *i = MIN_INT; /* FIXME */
440 return sim_fpu_status_invalid_cvi;
441 }
442 if (sim_fpu_is_qnan (s))
443 {
444 *i = MIN_INT; /* FIXME */
445 return sim_fpu_status_invalid_cvi;
446 }
447 /* map infinity onto MAX_INT... */
448 if (sim_fpu_is_infinity (s))
449 {
450 *i = s->sign ? MIN_INT : MAX_INT;
451 return sim_fpu_status_invalid_cvi;
452 }
453 /* it is a number, but a small one */
454 if (s->normal_exp < 0)
455 {
456 *i = 0;
457 return sim_fpu_status_inexact;
458 }
459 /* Is the floating point MIN_INT or just close? */
460 if (s->sign && s->normal_exp == (NR_INTBITS - 1))
461 {
462 *i = MIN_INT;
463 ASSERT (s->fraction >= IMPLICIT_1);
464 if (s->fraction == IMPLICIT_1)
465 return 0; /* exact */
466 if (is_64bit) /* can't round */
467 return sim_fpu_status_invalid_cvi; /* must be overflow */
468 /* For a 32 bit value this close to MIN_INT, rounding is still possible */
469 switch (round)
470 {
471 case sim_fpu_round_default:
472 abort ();
473 case sim_fpu_round_zero:
474 if ((s->fraction & FRAC32MASK) != IMPLICIT_1)
475 return sim_fpu_status_invalid_cvi;
476 else
477 return sim_fpu_status_inexact;
478 break;
479 case sim_fpu_round_near:
480 {
481 if ((s->fraction & FRAC32MASK) != IMPLICIT_1)
482 return sim_fpu_status_invalid_cvi;
483 else if ((s->fraction & ~FRAC32MASK) >= (~FRAC32MASK >> 1))
484 return sim_fpu_status_invalid_cvi;
485 else
486 return sim_fpu_status_inexact;
487 }
488 case sim_fpu_round_up:
489 if ((s->fraction & FRAC32MASK) == IMPLICIT_1)
490 return sim_fpu_status_inexact;
491 else
492 return sim_fpu_status_invalid_cvi;
493 case sim_fpu_round_down:
494 return sim_fpu_status_invalid_cvi;
495 }
496 }
497 /* Would right shifting result in the FRAC being shifted into
498 (through) the integer's sign bit? */
499 if (s->normal_exp > (NR_INTBITS - 2))
500 {
501 *i = s->sign ? MIN_INT : MAX_INT;
502 return sim_fpu_status_invalid_cvi;
503 }
504 /* normal number, shift it into place */
505 tmp = s->fraction;
506 shift = (s->normal_exp - (NR_FRAC_GUARD));
507 if (shift > 0)
508 {
509 tmp <<= shift;
510 }
511 else
512 {
513 shift = -shift;
514 if (tmp & ((SIGNED64 (1) << shift) - 1))
515 status |= sim_fpu_status_inexact;
516 tmp >>= shift;
517 }
518 *i = s->sign ? (-tmp) : (tmp);
519 return status;
520 }
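
/* Worked example (illustrative): converting 3.0, the unpacked value has
   NORMAL_EXP == 1 and FRACTION == 3 * 2^59, so SHIFT == 1 - 60 == -59;
   shifting right by 59 leaves TMP == 3 with no discarded bits, hence an
   exact result of 3.  */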
521
522 /* convert an integer into a floating point */
523 STATIC_INLINE_SIM_FPU (int)
524 i2fpu (sim_fpu *f, signed64 i, int is_64bit)
525 {
526 int status = 0;
527 if (i == 0)
528 {
529 f->class = sim_fpu_class_zero;
530 f->sign = 0;
531 f->normal_exp = 0;
532 }
533 else
534 {
535 f->class = sim_fpu_class_number;
536 f->sign = (i < 0);
537 f->normal_exp = NR_FRAC_GUARD;
538
539 if (f->sign)
540 {
541 /* Special case for minint, since there is no corresponding
542 +ve integer representation for it */
543 if (i == MIN_INT)
544 {
545 f->fraction = IMPLICIT_1;
546 f->normal_exp = NR_INTBITS - 1;
547 }
548 else
549 f->fraction = (-i);
550 }
551 else
552 f->fraction = i;
553
554 if (f->fraction >= IMPLICIT_2)
555 {
556 do
557 {
558 f->fraction = (f->fraction >> 1) | (f->fraction & 1);
559 f->normal_exp += 1;
560 }
561 while (f->fraction >= IMPLICIT_2);
562 }
563 else if (f->fraction < IMPLICIT_1)
564 {
565 do
566 {
567 f->fraction <<= 1;
568 f->normal_exp -= 1;
569 }
570 while (f->fraction < IMPLICIT_1);
571 }
572 }
573
574 /* trace operation */
575 #if 0
576 {
577 printf ("i2fpu: 0x%08lX ->\n", (long) i);
578 }
579 #endif
580
581 /* sanity check */
582 {
583 signed64 val;
584 fpu2i (&val, f, is_64bit, sim_fpu_round_zero);
585 if (i >= MIN_INT32 && i <= MAX_INT32)
586 {
587 ASSERT (val == i);
588 }
589 }
590
591 return status;
592 }
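
/* Worked example (illustrative): i2fpu of 5 starts with FRACTION == 5
   and NORMAL_EXP == 60; normalization shifts the fraction left 58 places
   to 5 * 2^58 (1.25 * IMPLICIT_1) and lowers NORMAL_EXP to 2, i.e.
   1.25 x 2^2 == 5.  */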
593
594
595 /* Convert a floating point into an unsigned integer */
596 STATIC_INLINE_SIM_FPU (int)
597 fpu2u (unsigned64 *u, const sim_fpu *s, int is_64bit)
598 {
599 const int is_double = 1;
600 unsigned64 tmp;
601 int shift;
602 if (sim_fpu_is_zero (s))
603 {
604 *u = 0;
605 return 0;
606 }
607 if (sim_fpu_is_nan (s))
608 {
609 *u = 0;
610 return 0;
611 }
612 /* it is a negative number */
613 if (s->sign)
614 {
615 *u = 0;
616 return 0;
617 }
618 /* get reasonable MAX_USI_INT... */
619 if (sim_fpu_is_infinity (s))
620 {
621 *u = MAX_UINT;
622 return 0;
623 }
624 /* it is a number, but a small one */
625 if (s->normal_exp < 0)
626 {
627 *u = 0;
628 return 0;
629 }
630 /* overflow */
631 if (s->normal_exp > (NR_INTBITS - 1))
632 {
633 *u = MAX_UINT;
634 return 0;
635 }
636 /* normal number */
637 tmp = (s->fraction & ~PADMASK);
638 shift = (s->normal_exp - (NR_FRACBITS + NR_GUARDS));
639 if (shift > 0)
640 {
641 tmp <<= shift;
642 }
643 else
644 {
645 shift = -shift;
646 tmp >>= shift;
647 }
648 *u = tmp;
649 return 0;
650 }
651
652 /* Convert an unsigned integer into a floating point */
653 STATIC_INLINE_SIM_FPU (int)
654 u2fpu (sim_fpu *f, unsigned64 u, int is_64bit)
655 {
656 if (u == 0)
657 {
658 f->class = sim_fpu_class_zero;
659 f->sign = 0;
660 f->normal_exp = 0;
661 }
662 else
663 {
664 f->class = sim_fpu_class_number;
665 f->sign = 0;
666 f->normal_exp = NR_FRAC_GUARD;
667 f->fraction = u;
668
669 while (f->fraction < IMPLICIT_1)
670 {
671 f->fraction <<= 1;
672 f->normal_exp -= 1;
673 }
674 }
675 return 0;
676 }
677
678
679 /* register <-> sim_fpu */
680
681 INLINE_SIM_FPU (void)
682 sim_fpu_32to (sim_fpu *f, unsigned32 s)
683 {
684 unpack_fpu (f, s, 0);
685 }
686
687
688 INLINE_SIM_FPU (void)
689 sim_fpu_232to (sim_fpu *f, unsigned32 h, unsigned32 l)
690 {
691 unsigned64 s = h;
692 s = (s << 32) | l;
693 unpack_fpu (f, s, 1);
694 }
695
696
697 INLINE_SIM_FPU (void)
698 sim_fpu_64to (sim_fpu *f, unsigned64 s)
699 {
700 unpack_fpu (f, s, 1);
701 }
702
703
704 INLINE_SIM_FPU (void)
705 sim_fpu_to32 (unsigned32 *s,
706 const sim_fpu *f)
707 {
708 *s = pack_fpu (f, 0);
709 }
710
711
712 INLINE_SIM_FPU (void)
713 sim_fpu_to232 (unsigned32 *h, unsigned32 *l,
714 const sim_fpu *f)
715 {
716 unsigned64 s = pack_fpu (f, 1);
717 *l = s;
718 *h = (s >> 32);
719 }
720
721
722 INLINE_SIM_FPU (void)
723 sim_fpu_to64 (unsigned64 *u,
724 const sim_fpu *f)
725 {
726 *u = pack_fpu (f, 1);
727 }
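
/* Usage sketch (illustrative only; kept under #if 0 like the trace code
   above, so it is never compiled): a packed single precision pattern
   survives a round trip through the unpacked representation.  */
#if 0
static unsigned32
example_roundtrip_32 (unsigned32 packed)
{
  sim_fpu tmp;
  unsigned32 repacked;
  sim_fpu_32to (&tmp, packed);    /* unpack into class/sign/exp/fraction */
  sim_fpu_to32 (&repacked, &tmp); /* pack back into IEEE single format */
  return repacked;                /* equal to PACKED; unpack_fpu asserts this */
}
#endif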
728
729
730 INLINE_SIM_FPU (void)
731 sim_fpu_fractionto (sim_fpu *f,
732 int sign,
733 int normal_exp,
734 unsigned64 fraction,
735 int precision)
736 {
737 int shift = (NR_FRAC_GUARD - precision);
738 f->class = sim_fpu_class_number;
739 f->sign = sign;
740 f->normal_exp = normal_exp;
741 /* shift the fraction to where sim-fpu expects it */
742 if (shift >= 0)
743 f->fraction = (fraction << shift);
744 else
745 f->fraction = (fraction >> -shift);
746 f->fraction |= IMPLICIT_1;
747 }
748
749
750 INLINE_SIM_FPU (unsigned64)
751 sim_fpu_tofraction (const sim_fpu *d,
752 int precision)
753 {
754 /* we have NR_FRAC_GUARD bits, we want only PRECISION bits */
755 int shift = (NR_FRAC_GUARD - precision);
756 unsigned64 fraction = (d->fraction & ~IMPLICIT_1);
757 if (shift >= 0)
758 return fraction >> shift;
759 else
760 return fraction << -shift;
761 }
762
763
764 /* Rounding */
765
766 STATIC_INLINE_SIM_FPU (int)
767 do_normal_overflow (sim_fpu *f,
768 int is_double,
769 sim_fpu_round round)
770 {
771 switch (round)
772 {
773 case sim_fpu_round_default:
774 return 0;
775 case sim_fpu_round_near:
776 f->class = sim_fpu_class_infinity;
777 break;
778 case sim_fpu_round_up:
779 if (!f->sign)
780 f->class = sim_fpu_class_infinity;
781 break;
782 case sim_fpu_round_down:
783 if (f->sign)
784 f->class = sim_fpu_class_infinity;
785 break;
786 case sim_fpu_round_zero:
787 break;
788 }
789 f->normal_exp = NORMAL_EXPMAX;
790 f->fraction = LSMASK64 (NR_FRAC_GUARD, NR_GUARDS);
791 return (sim_fpu_status_overflow | sim_fpu_status_inexact);
792 }
793
794 STATIC_INLINE_SIM_FPU (int)
795 do_normal_underflow (sim_fpu *f,
796 int is_double,
797 sim_fpu_round round)
798 {
799 switch (round)
800 {
801 case sim_fpu_round_default:
802 return 0;
803 case sim_fpu_round_near:
804 f->class = sim_fpu_class_zero;
805 break;
806 case sim_fpu_round_up:
807 if (f->sign)
808 f->class = sim_fpu_class_zero;
809 break;
810 case sim_fpu_round_down:
811 if (!f->sign)
812 f->class = sim_fpu_class_zero;
813 break;
814 case sim_fpu_round_zero:
815 f->class = sim_fpu_class_zero;
816 break;
817 }
818 f->normal_exp = NORMAL_EXPMIN - NR_FRACBITS;
819 f->fraction = IMPLICIT_1;
820 return (sim_fpu_status_inexact | sim_fpu_status_underflow);
821 }
822
823
824
825 /* Round a number using NR_GUARDS.
826 On return F holds the rounded number; F->FRACTION == 0 indicates underflow */
827
828 STATIC_INLINE_SIM_FPU (int)
829 do_normal_round (sim_fpu *f,
830 int nr_guards,
831 sim_fpu_round round)
832 {
833 unsigned64 guardmask = LSMASK64 (nr_guards - 1, 0);
834 unsigned64 guardmsb = LSBIT64 (nr_guards - 1);
835 unsigned64 fraclsb = guardmsb << 1;
836 if ((f->fraction & guardmask))
837 {
838 int status = sim_fpu_status_inexact;
839 switch (round)
840 {
841 case sim_fpu_round_default:
842 return 0;
843 case sim_fpu_round_near:
844 if ((f->fraction & guardmsb))
845 {
846 if ((f->fraction & fraclsb))
847 {
848 status |= sim_fpu_status_rounded;
849 }
850 else if ((f->fraction & (guardmask >> 1)))
851 {
852 status |= sim_fpu_status_rounded;
853 }
854 }
855 break;
856 case sim_fpu_round_up:
857 if (!f->sign)
858 status |= sim_fpu_status_rounded;
859 break;
860 case sim_fpu_round_down:
861 if (f->sign)
862 status |= sim_fpu_status_rounded;
863 break;
864 case sim_fpu_round_zero:
865 break;
866 }
867 f->fraction &= ~guardmask;
868 /* round if needed, handle resulting overflow */
869 if ((status & sim_fpu_status_rounded))
870 {
871 f->fraction += fraclsb;
872 if ((f->fraction & IMPLICIT_2))
873 {
874 f->fraction >>= 1;
875 f->normal_exp += 1;
876 }
877 }
878 return status;
879 }
880 else
881 return 0;
882 }
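
/* Worked example (illustrative, '|' marks the guard boundary): with
   nr_guards == 3 a guard field of 0b100 is an exact tie, so a fraction
   ending ...0101|100 rounds up to ...0110|000 (its retained LSB was 1)
   while ...0100|100 truncates to ...0100|000 -- round to nearest even.  */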
883
884
885 STATIC_INLINE_SIM_FPU (int)
886 do_round (sim_fpu *f,
887 int is_double,
888 sim_fpu_round round,
889 sim_fpu_denorm denorm)
890 {
891 switch (f->class)
892 {
893 case sim_fpu_class_qnan:
894 case sim_fpu_class_zero:
895 case sim_fpu_class_infinity:
896 return 0;
897 break;
898 case sim_fpu_class_snan:
899 /* Quieten a SignalingNaN */
900 f->class = sim_fpu_class_qnan;
901 return sim_fpu_status_invalid_snan;
902 break;
903 case sim_fpu_class_number:
904 case sim_fpu_class_denorm:
905 {
906 int status;
907 ASSERT (f->fraction < IMPLICIT_2);
908 ASSERT (f->fraction >= IMPLICIT_1);
909 if (f->normal_exp < NORMAL_EXPMIN)
910 {
911 /* This number's exponent is too low to fit into the bits
912 available in the number. Round off any bits that will be
913 discarded as a result of denormalization. Edge case is
914 the implicit bit shifted to GUARD0 and then rounded
915 up. */
916 int shift = NORMAL_EXPMIN - f->normal_exp;
917 if (shift + NR_GUARDS <= NR_FRAC_GUARD + 1
918 && !(denorm & sim_fpu_denorm_zero))
919 {
920 status = do_normal_round (f, shift + NR_GUARDS, round);
921 if (f->fraction == 0) /* rounding underflowed */
922 {
923 status |= do_normal_underflow (f, is_double, round);
924 }
925 else if (f->normal_exp < NORMAL_EXPMIN) /* still underflow? */
926 {
927 status |= sim_fpu_status_denorm;
928 /* Any loss of precision when denormalizing is
929 underflow. Some processors check for underflow
930 before rounding, some after! */
931 if (status & sim_fpu_status_inexact)
932 status |= sim_fpu_status_underflow;
933 /* Flag that resultant value has been denormalized */
934 f->class = sim_fpu_class_denorm;
935 }
936 else if ((denorm & sim_fpu_denorm_underflow_inexact))
937 {
938 if ((status & sim_fpu_status_inexact))
939 status |= sim_fpu_status_underflow;
940 }
941 }
942 else
943 {
944 status = do_normal_underflow (f, is_double, round);
945 }
946 }
947 else if (f->normal_exp > NORMAL_EXPMAX)
948 {
949 /* Infinity */
950 status = do_normal_overflow (f, is_double, round);
951 }
952 else
953 {
954 status = do_normal_round (f, NR_GUARDS, round);
955 if (f->fraction == 0)
956 /* f->class = sim_fpu_class_zero; */
957 status |= do_normal_underflow (f, is_double, round);
958 else if (f->normal_exp > NORMAL_EXPMAX)
959 /* oops! rounding caused overflow */
960 status |= do_normal_overflow (f, is_double, round);
961 }
962 ASSERT ((f->class == sim_fpu_class_number
963 || f->class == sim_fpu_class_denorm)
964 <= (f->fraction < IMPLICIT_2 && f->fraction >= IMPLICIT_1));
965 return status;
966 }
967 }
968 return 0;
969 }
970
971 INLINE_SIM_FPU (int)
972 sim_fpu_round_32 (sim_fpu *f,
973 sim_fpu_round round,
974 sim_fpu_denorm denorm)
975 {
976 return do_round (f, 0, round, denorm);
977 }
978
979 INLINE_SIM_FPU (int)
980 sim_fpu_round_64 (sim_fpu *f,
981 sim_fpu_round round,
982 sim_fpu_denorm denorm)
983 {
984 return do_round (f, 1, round, denorm);
985 }
986
987
988
989 /* Arithmetic ops */
990
991 INLINE_SIM_FPU (int)
992 sim_fpu_add (sim_fpu *f,
993 const sim_fpu *l,
994 const sim_fpu *r)
995 {
996 if (sim_fpu_is_snan (l))
997 {
998 *f = *l;
999 f->class = sim_fpu_class_qnan;
1000 return sim_fpu_status_invalid_snan;
1001 }
1002 if (sim_fpu_is_snan (r))
1003 {
1004 *f = *r;
1005 f->class = sim_fpu_class_qnan;
1006 return sim_fpu_status_invalid_snan;
1007 }
1008 if (sim_fpu_is_qnan (l))
1009 {
1010 *f = *l;
1011 return 0;
1012 }
1013 if (sim_fpu_is_qnan (r))
1014 {
1015 *f = *r;
1016 return 0;
1017 }
1018 if (sim_fpu_is_infinity (l))
1019 {
1020 if (sim_fpu_is_infinity (r)
1021 && l->sign != r->sign)
1022 {
1023 *f = sim_fpu_qnan;
1024 return sim_fpu_status_invalid_isi;
1025 }
1026 *f = *l;
1027 return 0;
1028 }
1029 if (sim_fpu_is_infinity (r))
1030 {
1031 *f = *r;
1032 return 0;
1033 }
1034 if (sim_fpu_is_zero (l))
1035 {
1036 if (sim_fpu_is_zero (r))
1037 {
1038 *f = sim_fpu_zero;
1039 f->sign = l->sign & r->sign;
1040 }
1041 else
1042 *f = *r;
1043 return 0;
1044 }
1045 if (sim_fpu_is_zero (r))
1046 {
1047 *f = *l;
1048 return 0;
1049 }
1050 {
1051 int status = 0;
1052 int shift = l->normal_exp - r->normal_exp;
1053 unsigned64 lfraction;
1054 unsigned64 rfraction;
1055 /* use exp of larger */
1056 if (shift >= NR_FRAC_GUARD)
1057 {
1058 /* left has much bigger magnitude */
1059 *f = *l;
1060 return sim_fpu_status_inexact;
1061 }
1062 if (shift <= - NR_FRAC_GUARD)
1063 {
1064 /* right has much bigger magnitude */
1065 *f = *r;
1066 return sim_fpu_status_inexact;
1067 }
1068 lfraction = l->fraction;
1069 rfraction = r->fraction;
1070 if (shift > 0)
1071 {
1072 f->normal_exp = l->normal_exp;
1073 if (rfraction & LSMASK64 (shift - 1, 0))
1074 {
1075 status |= sim_fpu_status_inexact;
1076 rfraction |= LSBIT64 (shift); /* stick LSBit */
1077 }
1078 rfraction >>= shift;
1079 }
1080 else if (shift < 0)
1081 {
1082 f->normal_exp = r->normal_exp;
1083 if (lfraction & LSMASK64 (- shift - 1, 0))
1084 {
1085 status |= sim_fpu_status_inexact;
1086 lfraction |= LSBIT64 (- shift); /* stick LSBit */
1087 }
1088 lfraction >>= -shift;
1089 }
1090 else
1091 {
1092 f->normal_exp = r->normal_exp;
1093 }
1094
1095 /* perform the addition */
1096 if (l->sign)
1097 lfraction = - lfraction;
1098 if (r->sign)
1099 rfraction = - rfraction;
1100 f->fraction = lfraction + rfraction;
1101
1102 /* zero? */
1103 if (f->fraction == 0)
1104 {
1105 *f = sim_fpu_zero;
1106 return 0;
1107 }
1108
1109 /* sign? */
1110 f->class = sim_fpu_class_number;
1111 if ((signed64) f->fraction >= 0)
1112 f->sign = 0;
1113 else
1114 {
1115 f->sign = 1;
1116 f->fraction = - f->fraction;
1117 }
1118
1119 /* normalize it */
1120 if ((f->fraction & IMPLICIT_2))
1121 {
1122 f->fraction = (f->fraction >> 1) | (f->fraction & 1);
1123 f->normal_exp ++;
1124 }
1125 else if (f->fraction < IMPLICIT_1)
1126 {
1127 do
1128 {
1129 f->fraction <<= 1;
1130 f->normal_exp --;
1131 }
1132 while (f->fraction < IMPLICIT_1);
1133 }
1134 ASSERT (f->fraction >= IMPLICIT_1 && f->fraction < IMPLICIT_2);
1135 return status;
1136 }
1137 }
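
/* Usage sketch (illustrative only; kept under #if 0 so it is never
   compiled): the typical calling pattern is unpack, operate, round
   explicitly, then repack, OR-ing the status bits from each step.  */
#if 0
static int
example_add_32 (unsigned32 *result, unsigned32 a, unsigned32 b)
{
  sim_fpu fa, fb, fr;
  int status = 0;
  sim_fpu_32to (&fa, a);
  sim_fpu_32to (&fb, b);
  status |= sim_fpu_add (&fr, &fa, &fb);
  /* Round the wide working fraction back down to single precision.
     sim_fpu_denorm_zero sends tiny results down the underflow path
     instead of denormalizing them (one of the supported policies).  */
  status |= sim_fpu_round_32 (&fr, sim_fpu_round_near, sim_fpu_denorm_zero);
  sim_fpu_to32 (result, &fr);
  return status;
}
#endif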
1138
1139
1140 INLINE_SIM_FPU (int)
1141 sim_fpu_sub (sim_fpu *f,
1142 const sim_fpu *l,
1143 const sim_fpu *r)
1144 {
1145 if (sim_fpu_is_snan (l))
1146 {
1147 *f = *l;
1148 f->class = sim_fpu_class_qnan;
1149 return sim_fpu_status_invalid_snan;
1150 }
1151 if (sim_fpu_is_snan (r))
1152 {
1153 *f = *r;
1154 f->class = sim_fpu_class_qnan;
1155 return sim_fpu_status_invalid_snan;
1156 }
1157 if (sim_fpu_is_qnan (l))
1158 {
1159 *f = *l;
1160 return 0;
1161 }
1162 if (sim_fpu_is_qnan (r))
1163 {
1164 *f = *r;
1165 return 0;
1166 }
1167 if (sim_fpu_is_infinity (l))
1168 {
1169 if (sim_fpu_is_infinity (r)
1170 && l->sign == r->sign)
1171 {
1172 *f = sim_fpu_qnan;
1173 return sim_fpu_status_invalid_isi;
1174 }
1175 *f = *l;
1176 return 0;
1177 }
1178 if (sim_fpu_is_infinity (r))
1179 {
1180 *f = *r;
1181 f->sign = !r->sign;
1182 return 0;
1183 }
1184 if (sim_fpu_is_zero (l))
1185 {
1186 if (sim_fpu_is_zero (r))
1187 {
1188 *f = sim_fpu_zero;
1189 f->sign = l->sign & !r->sign;
1190 }
1191 else
1192 {
1193 *f = *r;
1194 f->sign = !r->sign;
1195 }
1196 return 0;
1197 }
1198 if (sim_fpu_is_zero (r))
1199 {
1200 *f = *l;
1201 return 0;
1202 }
1203 {
1204 int status = 0;
1205 int shift = l->normal_exp - r->normal_exp;
1206 unsigned64 lfraction;
1207 unsigned64 rfraction;
1208 /* use exp of larger */
1209 if (shift >= NR_FRAC_GUARD)
1210 {
1211 /* left has much bigger magnitude */
1212 *f = *l;
1213 return sim_fpu_status_inexact;
1214 }
1215 if (shift <= - NR_FRAC_GUARD)
1216 {
1217 /* right has much bigger magnitude */
1218 *f = *r;
1219 f->sign = !r->sign;
1220 return sim_fpu_status_inexact;
1221 }
1222 lfraction = l->fraction;
1223 rfraction = r->fraction;
1224 if (shift > 0)
1225 {
1226 f->normal_exp = l->normal_exp;
1227 if (rfraction & LSMASK64 (shift - 1, 0))
1228 {
1229 status |= sim_fpu_status_inexact;
1230 rfraction |= LSBIT64 (shift); /* stick LSBit */
1231 }
1232 rfraction >>= shift;
1233 }
1234 else if (shift < 0)
1235 {
1236 f->normal_exp = r->normal_exp;
1237 if (lfraction & LSMASK64 (- shift - 1, 0))
1238 {
1239 status |= sim_fpu_status_inexact;
1240 lfraction |= LSBIT64 (- shift); /* stick LSBit */
1241 }
1242 lfraction >>= -shift;
1243 }
1244 else
1245 {
1246 f->normal_exp = r->normal_exp;
1247 }
1248
1249 /* perform the subtraction */
1250 if (l->sign)
1251 lfraction = - lfraction;
1252 if (!r->sign)
1253 rfraction = - rfraction;
1254 f->fraction = lfraction + rfraction;
1255
1256 /* zero? */
1257 if (f->fraction == 0)
1258 {
1259 *f = sim_fpu_zero;
1260 return 0;
1261 }
1262
1263 /* sign? */
1264 f->class = sim_fpu_class_number;
1265 if ((signed64) f->fraction >= 0)
1266 f->sign = 0;
1267 else
1268 {
1269 f->sign = 1;
1270 f->fraction = - f->fraction;
1271 }
1272
1273 /* normalize it */
1274 if ((f->fraction & IMPLICIT_2))
1275 {
1276 f->fraction = (f->fraction >> 1) | (f->fraction & 1);
1277 f->normal_exp ++;
1278 }
1279 else if (f->fraction < IMPLICIT_1)
1280 {
1281 do
1282 {
1283 f->fraction <<= 1;
1284 f->normal_exp --;
1285 }
1286 while (f->fraction < IMPLICIT_1);
1287 }
1288 ASSERT (f->fraction >= IMPLICIT_1 && f->fraction < IMPLICIT_2);
1289 return status;
1290 }
1291 }
1292
1293
1294 INLINE_SIM_FPU (int)
1295 sim_fpu_mul (sim_fpu *f,
1296 const sim_fpu *l,
1297 const sim_fpu *r)
1298 {
1299 if (sim_fpu_is_snan (l))
1300 {
1301 *f = *l;
1302 f->class = sim_fpu_class_qnan;
1303 return sim_fpu_status_invalid_snan;
1304 }
1305 if (sim_fpu_is_snan (r))
1306 {
1307 *f = *r;
1308 f->class = sim_fpu_class_qnan;
1309 return sim_fpu_status_invalid_snan;
1310 }
1311 if (sim_fpu_is_qnan (l))
1312 {
1313 *f = *l;
1314 return 0;
1315 }
1316 if (sim_fpu_is_qnan (r))
1317 {
1318 *f = *r;
1319 return 0;
1320 }
1321 if (sim_fpu_is_infinity (l))
1322 {
1323 if (sim_fpu_is_zero (r))
1324 {
1325 *f = sim_fpu_qnan;
1326 return sim_fpu_status_invalid_imz;
1327 }
1328 *f = *l;
1329 f->sign = l->sign ^ r->sign;
1330 return 0;
1331 }
1332 if (sim_fpu_is_infinity (r))
1333 {
1334 if (sim_fpu_is_zero (l))
1335 {
1336 *f = sim_fpu_qnan;
1337 return sim_fpu_status_invalid_imz;
1338 }
1339 *f = *r;
1340 f->sign = l->sign ^ r->sign;
1341 return 0;
1342 }
1343 if (sim_fpu_is_zero (l) || sim_fpu_is_zero (r))
1344 {
1345 *f = sim_fpu_zero;
1346 f->sign = l->sign ^ r->sign;
1347 return 0;
1348 }
1349 /* Calculate the mantissa by multiplying both 64bit numbers to get a
1350 128 bit number */
1351 {
1352 unsigned64 low;
1353 unsigned64 high;
1354 unsigned64 nl = l->fraction & 0xffffffff;
1355 unsigned64 nh = l->fraction >> 32;
1356 unsigned64 ml = r->fraction & 0xffffffff;
1357 unsigned64 mh = r->fraction >>32;
1358 unsigned64 pp_ll = ml * nl;
1359 unsigned64 pp_hl = mh * nl;
1360 unsigned64 pp_lh = ml * nh;
1361 unsigned64 pp_hh = mh * nh;
1362 unsigned64 res2 = 0;
1363 unsigned64 res0 = 0;
1364 unsigned64 ps_hh__ = pp_hl + pp_lh;
1365 if (ps_hh__ < pp_hl)
1366 res2 += UNSIGNED64 (0x100000000);
1367 pp_hl = (ps_hh__ << 32) & UNSIGNED64 (0xffffffff00000000);
1368 res0 = pp_ll + pp_hl;
1369 if (res0 < pp_ll)
1370 res2++;
1371 res2 += ((ps_hh__ >> 32) & 0xffffffff) + pp_hh;
1372 high = res2;
1373 low = res0;
1374
1375 f->normal_exp = l->normal_exp + r->normal_exp;
1376 f->sign = l->sign ^ r->sign;
1377 f->class = sim_fpu_class_number;
1378
1379 /* Input is bounded by [1,2) ; [2^60,2^61)
1380 Output is bounded by [1,4) ; [2^120,2^122) */
1381
1382 /* Adjust the exponent according to where the binary point ended
1383 up in the high 64 bit word. In the source operands the binary
1384 point was at NR_FRAC_GUARD. */
1385 f->normal_exp += NR_FRAC_GUARD + 64 - (NR_FRAC_GUARD * 2);
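/* Worked example (illustrative): for 1.5 * 1.5 both fractions are
   3 * 2^59, the 128 bit product is 9 * 2^118 and its high word is
   9 * 2^54; the +4 adjustment above followed by three normalizing
   shifts below leaves NORMAL_EXP == 1 and a fraction of
   1.125 * IMPLICIT_1, i.e. exactly 2.25.  */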
1386
1387 /* The high word is bounded according to the above. Consequently
1388 it has never overflowed into IMPLICIT_2. */
1389 ASSERT (high < LSBIT64 (((NR_FRAC_GUARD + 1) * 2) - 64));
1390 ASSERT (high >= LSBIT64 ((NR_FRAC_GUARD * 2) - 64));
1391 ASSERT (LSBIT64 (((NR_FRAC_GUARD + 1) * 2) - 64) < IMPLICIT_1);
1392
1393 /* normalize */
1394 do
1395 {
1396 f->normal_exp--;
1397 high <<= 1;
1398 if (low & LSBIT64 (63))
1399 high |= 1;
1400 low <<= 1;
1401 }
1402 while (high < IMPLICIT_1);
1403
1404 ASSERT (high >= IMPLICIT_1 && high < IMPLICIT_2);
1405 if (low != 0)
1406 {
1407 f->fraction = (high | 1); /* sticky */
1408 return sim_fpu_status_inexact;
1409 }
1410 else
1411 {
1412 f->fraction = high;
1413 return 0;
1414 }
1415 return 0;
1416 }
1417 }
1418
1419 INLINE_SIM_FPU (int)
1420 sim_fpu_div (sim_fpu *f,
1421 const sim_fpu *l,
1422 const sim_fpu *r)
1423 {
1424 if (sim_fpu_is_snan (l))
1425 {
1426 *f = *l;
1427 f->class = sim_fpu_class_qnan;
1428 return sim_fpu_status_invalid_snan;
1429 }
1430 if (sim_fpu_is_snan (r))
1431 {
1432 *f = *r;
1433 f->class = sim_fpu_class_qnan;
1434 return sim_fpu_status_invalid_snan;
1435 }
1436 if (sim_fpu_is_qnan (l))
1437 {
1438 *f = *l;
1439 f->class = sim_fpu_class_qnan;
1440 return 0;
1441 }
1442 if (sim_fpu_is_qnan (r))
1443 {
1444 *f = *r;
1445 f->class = sim_fpu_class_qnan;
1446 return 0;
1447 }
1448 if (sim_fpu_is_infinity (l))
1449 {
1450 if (sim_fpu_is_infinity (r))
1451 {
1452 *f = sim_fpu_qnan;
1453 return sim_fpu_status_invalid_idi;
1454 }
1455 else
1456 {
1457 *f = *l;
1458 f->sign = l->sign ^ r->sign;
1459 return 0;
1460 }
1461 }
1462 if (sim_fpu_is_zero (l))
1463 {
1464 if (sim_fpu_is_zero (r))
1465 {
1466 *f = sim_fpu_qnan;
1467 return sim_fpu_status_invalid_zdz;
1468 }
1469 else
1470 {
1471 *f = *l;
1472 f->sign = l->sign ^ r->sign;
1473 return 0;
1474 }
1475 }
1476 if (sim_fpu_is_infinity (r))
1477 {
1478 *f = sim_fpu_zero;
1479 f->sign = l->sign ^ r->sign;
1480 return 0;
1481 }
1482 if (sim_fpu_is_zero (r))
1483 {
1484 f->class = sim_fpu_class_infinity;
1485 f->sign = l->sign ^ r->sign;
1486 return sim_fpu_status_invalid_div0;
1487 }
1488
1489 /* Calculate the mantissa by dividing the two 64bit fractions, one
1490 bit at a time */
1491 {
1492 /* quotient = (numerator / denominator)
1493 x 2^(numerator exponent - denominator exponent)
1494 */
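
/* Worked example (illustrative): for 3.0 / 2.0 the numerator is
   3 * 2^59, the denominator is 2^60 and the exponent difference is 0;
   the bit-by-bit loop below produces the quotient 1.5 * IMPLICIT_1 with
   a zero remainder, so the result 1.5 is exact.  */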
1495 unsigned64 numerator;
1496 unsigned64 denominator;
1497 unsigned64 quotient;
1498 unsigned64 bit;
1499
1500 f->class = sim_fpu_class_number;
1501 f->sign = l->sign ^ r->sign;
1502 f->normal_exp = l->normal_exp - r->normal_exp;
1503
1504 numerator = l->fraction;
1505 denominator = r->fraction;
1506
1507 /* If the quotient's fraction would be less than 1.0, scale the numerator up */
1508 if (numerator < denominator)
1509 {
1510 numerator <<= 1;
1511 f->normal_exp--;
1512 }
1513 ASSERT (numerator >= denominator);
1514
1515 /* Gain extra precision, already used one spare bit */
1516 numerator <<= NR_SPARE;
1517 denominator <<= NR_SPARE;
1518
1519 /* Does divide one bit at a time. Optimize??? */
1520 quotient = 0;
1521 bit = (IMPLICIT_1 << NR_SPARE);
1522 while (bit)
1523 {
1524 if (numerator >= denominator)
1525 {
1526 quotient |= bit;
1527 numerator -= denominator;
1528 }
1529 bit >>= 1;
1530 numerator <<= 1;
1531 }
1532
1533 /* discard (but save) the extra bits */
1534 if ((quotient & LSMASK64 (NR_SPARE -1, 0)))
1535 quotient = (quotient >> NR_SPARE) | 1;
1536 else
1537 quotient = (quotient >> NR_SPARE);
1538
1539 f->fraction = quotient;
1540 ASSERT (f->fraction >= IMPLICIT_1 && f->fraction < IMPLICIT_2);
1541 if (numerator != 0)
1542 {
1543 f->fraction |= 1; /* stick remaining bits */
1544 return sim_fpu_status_inexact;
1545 }
1546 else
1547 return 0;
1548 }
1549 }
1550
1551
1552 INLINE_SIM_FPU (int)
1553 sim_fpu_max (sim_fpu *f,
1554 const sim_fpu *l,
1555 const sim_fpu *r)
1556 {
1557 if (sim_fpu_is_snan (l))
1558 {
1559 *f = *l;
1560 f->class = sim_fpu_class_qnan;
1561 return sim_fpu_status_invalid_snan;
1562 }
1563 if (sim_fpu_is_snan (r))
1564 {
1565 *f = *r;
1566 f->class = sim_fpu_class_qnan;
1567 return sim_fpu_status_invalid_snan;
1568 }
1569 if (sim_fpu_is_qnan (l))
1570 {
1571 *f = *l;
1572 return 0;
1573 }
1574 if (sim_fpu_is_qnan (r))
1575 {
1576 *f = *r;
1577 return 0;
1578 }
1579 if (sim_fpu_is_infinity (l))
1580 {
1581 if (sim_fpu_is_infinity (r)
1582 && l->sign == r->sign)
1583 {
1584 *f = sim_fpu_qnan;
1585 return sim_fpu_status_invalid_isi;
1586 }
1587 if (l->sign)
1588 *f = *r; /* -inf < anything */
1589 else
1590 *f = *l; /* +inf > anything */
1591 return 0;
1592 }
1593 if (sim_fpu_is_infinity (r))
1594 {
1595 if (r->sign)
1596 *f = *l; /* anything > -inf */
1597 else
1598 *f = *r; /* anything < +inf */
1599 return 0;
1600 }
1601 if (l->sign > r->sign)
1602 {
1603 *f = *r; /* -ve < +ve */
1604 return 0;
1605 }
1606 if (l->sign < r->sign)
1607 {
1608 *f = *l; /* +ve > -ve */
1609 return 0;
1610 }
1611 ASSERT (l->sign == r->sign);
1612 if (l->normal_exp > r->normal_exp
1613 || (l->normal_exp == r->normal_exp &&
1614 l->fraction > r->fraction))
1615 {
1616 /* |l| > |r| */
1617 if (l->sign)
1618 *f = *r; /* -ve < -ve */
1619 else
1620 *f = *l; /* +ve > +ve */
1621 return 0;
1622 }
1623 else
1624 {
1625 /* |l| <= |r| */
1626 if (l->sign)
1627 *f = *l; /* -ve > -ve */
1628 else
1629 *f = *r; /* +ve < +ve */
1630 return 0;
1631 }
1632 }
1633
1634
1635 INLINE_SIM_FPU (int)
1636 sim_fpu_min (sim_fpu *f,
1637 const sim_fpu *l,
1638 const sim_fpu *r)
1639 {
1640 if (sim_fpu_is_snan (l))
1641 {
1642 *f = *l;
1643 f->class = sim_fpu_class_qnan;
1644 return sim_fpu_status_invalid_snan;
1645 }
1646 if (sim_fpu_is_snan (r))
1647 {
1648 *f = *r;
1649 f->class = sim_fpu_class_qnan;
1650 return sim_fpu_status_invalid_snan;
1651 }
1652 if (sim_fpu_is_qnan (l))
1653 {
1654 *f = *l;
1655 return 0;
1656 }
1657 if (sim_fpu_is_qnan (r))
1658 {
1659 *f = *r;
1660 return 0;
1661 }
1662 if (sim_fpu_is_infinity (l))
1663 {
1664 if (sim_fpu_is_infinity (r)
1665 && l->sign == r->sign)
1666 {
1667 *f = sim_fpu_qnan;
1668 return sim_fpu_status_invalid_isi;
1669 }
1670 if (l->sign)
1671 *f = *l; /* -inf < anything */
1672 else
1673 *f = *r; /* +inf > anything */
1674 return 0;
1675 }
1676 if (sim_fpu_is_infinity (r))
1677 {
1678 if (r->sign)
1679 *f = *r; /* anything > -inf */
1680 else
1681 *f = *l; /* anything < +inf */
1682 return 0;
1683 }
1684 if (l->sign > r->sign)
1685 {
1686 *f = *l; /* -ve < +ve */
1687 return 0;
1688 }
1689 if (l->sign < r->sign)
1690 {
1691 *f = *r; /* +ve > -ve */
1692 return 0;
1693 }
1694 ASSERT (l->sign == r->sign);
1695 if (l->normal_exp > r->normal_exp
1696 || (l->normal_exp == r->normal_exp &&
1697 l->fraction > r->fraction))
1698 {
1699 /* |l| > |r| */
1700 if (l->sign)
1701 *f = *l; /* -ve < -ve */
1702 else
1703 *f = *r; /* +ve > +ve */
1704 return 0;
1705 }
1706 else
1707 {
1708 /* |l| <= |r| */
1709 if (l->sign)
1710 *f = *r; /* -ve > -ve */
1711 else
1712 *f = *l; /* +ve < +ve */
1713 return 0;
1714 }
1715 }
1716
1717
1718 INLINE_SIM_FPU (int)
1719 sim_fpu_neg (sim_fpu *f,
1720 const sim_fpu *r)
1721 {
1722 if (sim_fpu_is_snan (r))
1723 {
1724 *f = *r;
1725 f->class = sim_fpu_class_qnan;
1726 return sim_fpu_status_invalid_snan;
1727 }
1728 if (sim_fpu_is_qnan (r))
1729 {
1730 *f = *r;
1731 return 0;
1732 }
1733 *f = *r;
1734 f->sign = !r->sign;
1735 return 0;
1736 }
1737
1738
1739 INLINE_SIM_FPU (int)
1740 sim_fpu_abs (sim_fpu *f,
1741 const sim_fpu *r)
1742 {
1743 *f = *r;
1744 f->sign = 0;
1745 if (sim_fpu_is_snan (r))
1746 {
1747 f->class = sim_fpu_class_qnan;
1748 return sim_fpu_status_invalid_snan;
1749 }
1750 return 0;
1751 }
1752
1753
1754 INLINE_SIM_FPU (int)
1755 sim_fpu_inv (sim_fpu *f,
1756 const sim_fpu *r)
1757 {
1758 return sim_fpu_div (f, &sim_fpu_one, r);
1759 }
1760
1761
1762 INLINE_SIM_FPU (int)
1763 sim_fpu_sqrt (sim_fpu *f,
1764 const sim_fpu *r)
1765 {
1766 if (sim_fpu_is_snan (r))
1767 {
1768 *f = sim_fpu_qnan;
1769 return sim_fpu_status_invalid_snan;
1770 }
1771 if (sim_fpu_is_qnan (r))
1772 {
1773 *f = sim_fpu_qnan;
1774 return 0;
1775 }
1776 if (sim_fpu_is_zero (r))
1777 {
1778 f->class = sim_fpu_class_zero;
1779 f->sign = r->sign;
1780 f->normal_exp = 0;
1781 return 0;
1782 }
1783 if (sim_fpu_is_infinity (r))
1784 {
1785 if (r->sign)
1786 {
1787 *f = sim_fpu_qnan;
1788 return sim_fpu_status_invalid_sqrt;
1789 }
1790 else
1791 {
1792 f->class = sim_fpu_class_infinity;
1793 f->sign = 0;
1795 return 0;
1796 }
1797 }
1798 if (r->sign)
1799 {
1800 *f = sim_fpu_qnan;
1801 return sim_fpu_status_invalid_sqrt;
1802 }
1803
1804 /* @(#)e_sqrt.c 5.1 93/09/24 */
1805 /*
1806 * ====================================================
1807 * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
1808 *
1809 * Developed at SunPro, a Sun Microsystems, Inc. business.
1810 * Permission to use, copy, modify, and distribute this
1811 * software is freely granted, provided that this notice
1812 * is preserved.
1813 * ====================================================
1814 */
1815
1816 /* __ieee754_sqrt(x)
1817 * Return correctly rounded sqrt.
1818 * ------------------------------------------
1819 * | Use the hardware sqrt if you have one |
1820 * ------------------------------------------
1821 * Method:
1822 * Bit by bit method using integer arithmetic. (Slow, but portable)
1823 * 1. Normalization
1824 * Scale x to y in [1,4) with even powers of 2:
1825 * find an integer k such that 1 <= (y=x*2^(2k)) < 4, then
1826 * sqrt(x) = 2^k * sqrt(y)
1827 -
1828 - Since:
1829 - sqrt ( x*2^(2m) ) = sqrt(x).2^m ; m even
1830 - sqrt ( x*2^(2m + 1) ) = sqrt(2.x).2^m ; m odd
1831 - Define:
1832 - y = ((m even) ? x : 2.x)
1833 - Then:
1834 - y in [1, 4) ; [IMPLICIT_1,IMPLICIT_4)
1835 - And:
1836 - sqrt (y) in [1, 2) ; [IMPLICIT_1,IMPLICIT_2)
1837 -
1838 * 2. Bit by bit computation
1839 * Let q = sqrt(y) truncated to i bit after binary point (q = 1),
1840 * i 0
1841 * i+1 2
1842 * s = 2*q , and y = 2 * ( y - q ). (1)
1843 * i i i i
1844 *
1845 * To compute q from q , one checks whether
1846 * i+1 i
1847 *
1848 * -(i+1) 2
1849 * (q + 2 ) <= y. (2)
1850 * i
1851 * -(i+1)
1852 * If (2) is false, then q = q ; otherwise q = q + 2 .
1853 * i+1 i i+1 i
1854 *
1855 * With some algebraic manipulation, it is not difficult to see
1856 * that (2) is equivalent to
1857 * -(i+1)
1858 * s + 2 <= y (3)
1859 * i i
1860 *
1861 * The advantage of (3) is that s and y can be computed by
1862 * i i
1863 * the following recurrence formula:
1864 * if (3) is false
1865 *
1866 * s = s , y = y ; (4)
1867 * i+1 i i+1 i
1868 *
1869 -
1870 - NOTE: y = 2*y
1871 - i+1 i
1872 -
1873 * otherwise,
1874 * -i -(i+1)
1875 * s = s + 2 , y = y - s - 2 (5)
1876 * i+1 i i+1 i i
1877 *
1878 -
1879 - -(i+1)
1880 - NOTE: y = 2 (y - s - 2 )
1881 - i+1 i i
1882 -
1883 * One may easily use induction to prove (4) and (5).
1884 * Note. Since the left hand side of (3) contains only i+2 bits,
1885 * it is not necessary to do a full (53-bit) comparison
1886 * in (3).
1887 * 3. Final rounding
1888 * After generating the 53 bits result, we compute one more bit.
1889 * Together with the remainder, we can decide whether the
1890 * result is exact, bigger than 1/2ulp, or less than 1/2ulp
1891 * (it will never be equal to 1/2 ulp).
1892 * The rounding mode can be detected by checking whether
1893 * huge + tiny is equal to huge, and whether huge - tiny is
1894 * equal to huge for some floating point number "huge" and "tiny".
1895 *
1896 * Special cases:
1897 * sqrt(+-0) = +-0 ... exact
1898 * sqrt(inf) = inf
1899 * sqrt(-ve) = NaN ... with invalid signal
1900 * sqrt(NaN) = NaN ... with invalid signal for signaling NaN
1901 *
1902 * Other methods : see the appended file at the end of the program below.
1903 *---------------
1904 */
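
/* Worked example (illustrative): for r == 4.0 the fraction y starts at
   IMPLICIT_1 with an even exponent, the first loop iteration sets
   q == IMPLICIT_1 and drives y to zero, and every later trial fails;
   the result is exactly 2.0 with no inexact status.  */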
1905
1906 {
1907 /* generate sqrt(x) bit by bit */
1908 unsigned64 y;
1909 unsigned64 q;
1910 unsigned64 s;
1911 unsigned64 b;
1912
1913 f->class = sim_fpu_class_number;
1914 f->sign = 0;
1915 y = r->fraction;
1916 f->normal_exp = (r->normal_exp >> 1); /* exp = [exp/2] */
1917
1918 /* odd exp, double x to make it even */
1919 ASSERT (y >= IMPLICIT_1 && y < IMPLICIT_4);
1920 if ((r->normal_exp & 1))
1921 {
1922 y += y;
1923 }
1924 ASSERT (y >= IMPLICIT_1 && y < (IMPLICIT_2 << 1));
1925
1926 /* Let loop determine first value of s (either 1 or 2) */
1927 b = IMPLICIT_1;
1928 q = 0;
1929 s = 0;
1930
1931 while (b)
1932 {
1933 unsigned64 t = s + b;
1934 if (t <= y)
1935 {
1936 s |= (b << 1);
1937 y -= t;
1938 q |= b;
1939 }
1940 y <<= 1;
1941 b >>= 1;
1942 }
1943
1944 ASSERT (q >= IMPLICIT_1 && q < IMPLICIT_2);
1945 f->fraction = q;
1946 if (y != 0)
1947 {
1948 f->fraction |= 1; /* stick remaining bits */
1949 return sim_fpu_status_inexact;
1950 }
1951 else
1952 return 0;
1953 }
1954 }
1955
1956
1957 /* int/long <-> sim_fpu */
1958
1959 INLINE_SIM_FPU (int)
1960 sim_fpu_i32to (sim_fpu *f,
1961 signed32 i,
1962 sim_fpu_round round)
1963 {
1964 i2fpu (f, i, 0);
1965 return 0;
1966 }
1967
1968 INLINE_SIM_FPU (int)
1969 sim_fpu_u32to (sim_fpu *f,
1970 unsigned32 u,
1971 sim_fpu_round round)
1972 {
1973 u2fpu (f, u, 0);
1974 return 0;
1975 }
1976
1977 INLINE_SIM_FPU (int)
1978 sim_fpu_i64to (sim_fpu *f,
1979 signed64 i,
1980 sim_fpu_round round)
1981 {
1982 i2fpu (f, i, 1);
1983 return 0;
1984 }
1985
1986 INLINE_SIM_FPU (int)
1987 sim_fpu_u64to (sim_fpu *f,
1988 unsigned64 u,
1989 sim_fpu_round round)
1990 {
1991 u2fpu (f, u, 1);
1992 return 0;
1993 }
1994
1995
1996 INLINE_SIM_FPU (int)
1997 sim_fpu_to32i (signed32 *i,
1998 const sim_fpu *f,
1999 sim_fpu_round round)
2000 {
2001 signed64 i64;
2002 int status = fpu2i (&i64, f, 0, round);
2003 *i = i64;
2004 return status;
2005 }
2006
2007 INLINE_SIM_FPU (int)
2008 sim_fpu_to32u (unsigned32 *u,
2009 const sim_fpu *f,
2010 sim_fpu_round round)
2011 {
2012 unsigned64 u64;
2013 int status = fpu2u (&u64, f, 0);
2014 *u = u64;
2015 return status;
2016 }
2017
2018 INLINE_SIM_FPU (int)
2019 sim_fpu_to64i (signed64 *i,
2020 const sim_fpu *f,
2021 sim_fpu_round round)
2022 {
2023 return fpu2i (i, f, 1, round);
2024 }
2025
2026
2027 INLINE_SIM_FPU (int)
2028 sim_fpu_to64u (unsigned64 *u,
2029 const sim_fpu *f,
2030 sim_fpu_round round)
2031 {
2032 return fpu2u (u, f, 1);
2033 }
2034
2035
2036
2037 /* sim_fpu -> host format */
2038
2039 #if 0
2040 INLINE_SIM_FPU (float)
2041 sim_fpu_2f (const sim_fpu *f)
2042 {
2043 return fval.d;
2044 }
2045 #endif
2046
2047
2048 INLINE_SIM_FPU (double)
2049 sim_fpu_2d (const sim_fpu *s)
2050 {
2051 sim_fpu_map val;
2052 if (sim_fpu_is_snan (s))
2053 {
2054 /* gag SNaN's */
2055 sim_fpu n = *s;
2056 n.class = sim_fpu_class_qnan;
2057 val.i = pack_fpu (&n, 1);
2058 }
2059 else
2060 {
2061 val.i = pack_fpu (s, 1);
2062 }
2063 return val.d;
2064 }
2065
2066
2067 #if 0
2068 INLINE_SIM_FPU (void)
2069 sim_fpu_f2 (sim_fpu *f,
2070 float s)
2071 {
2072 sim_fpu_map val;
2073 val.d = s;
2074 unpack_fpu (f, val.i, 1);
2075 }
2076 #endif
2077
2078
2079 INLINE_SIM_FPU (void)
2080 sim_fpu_d2 (sim_fpu *f,
2081 double d)
2082 {
2083 sim_fpu_map val;
2084 val.d = d;
2085 unpack_fpu (f, val.i, 1);
2086 }
2087
2088
2089 /* General */
2090
2091 INLINE_SIM_FPU (int)
2092 sim_fpu_is_nan (const sim_fpu *d)
2093 {
2094 switch (d->class)
2095 {
2096 case sim_fpu_class_qnan:
2097 case sim_fpu_class_snan:
2098 return 1;
2099 default:
2100 return 0;
2101 }
2102 }
2103
2104 INLINE_SIM_FPU (int)
2105 sim_fpu_is_qnan (const sim_fpu *d)
2106 {
2107 switch (d->class)
2108 {
2109 case sim_fpu_class_qnan:
2110 return 1;
2111 default:
2112 return 0;
2113 }
2114 }
2115
2116 INLINE_SIM_FPU (int)
2117 sim_fpu_is_snan (const sim_fpu *d)
2118 {
2119 switch (d->class)
2120 {
2121 case sim_fpu_class_snan:
2122 return 1;
2123 default:
2124 return 0;
2125 }
2126 }
2127
2128 INLINE_SIM_FPU (int)
2129 sim_fpu_is_zero (const sim_fpu *d)
2130 {
2131 switch (d->class)
2132 {
2133 case sim_fpu_class_zero:
2134 return 1;
2135 default:
2136 return 0;
2137 }
2138 }
2139
2140 INLINE_SIM_FPU (int)
2141 sim_fpu_is_infinity (const sim_fpu *d)
2142 {
2143 switch (d->class)
2144 {
2145 case sim_fpu_class_infinity:
2146 return 1;
2147 default:
2148 return 0;
2149 }
2150 }
2151
2152 INLINE_SIM_FPU (int)
2153 sim_fpu_is_number (const sim_fpu *d)
2154 {
2155 switch (d->class)
2156 {
2157 case sim_fpu_class_denorm:
2158 case sim_fpu_class_number:
2159 return 1;
2160 default:
2161 return 0;
2162 }
2163 }
2164
2165 INLINE_SIM_FPU (int)
2166 sim_fpu_is_denorm (const sim_fpu *d)
2167 {
2168 switch (d->class)
2169 {
2170 case sim_fpu_class_denorm:
2171 return 1;
2172 default:
2173 return 0;
2174 }
2175 }
2176
2177
2178 INLINE_SIM_FPU (int)
2179 sim_fpu_sign (const sim_fpu *d)
2180 {
2181 return d->sign;
2182 }
2183
2184
2185 INLINE_SIM_FPU (int)
2186 sim_fpu_exp (const sim_fpu *d)
2187 {
2188 return d->normal_exp;
2189 }
2190
2191
2192 INLINE_SIM_FPU (unsigned64)
2193 sim_fpu_fraction (const sim_fpu *d)
2194 {
2195 return d->fraction;
2196 }
2197
2198
2199 INLINE_SIM_FPU (unsigned64)
2200 sim_fpu_guard (const sim_fpu *d, int is_double)
2201 {
2202 unsigned64 rv;
2203 unsigned64 guardmask = LSMASK64 (NR_GUARDS - 1, 0);
2204 rv = (d->fraction & guardmask) >> NR_PAD;
2205 return rv;
2206 }
2207
2208
2209 INLINE_SIM_FPU (int)
2210 sim_fpu_is (const sim_fpu *d)
2211 {
2212 switch (d->class)
2213 {
2214 case sim_fpu_class_qnan:
2215 return SIM_FPU_IS_QNAN;
2216 case sim_fpu_class_snan:
2217 return SIM_FPU_IS_SNAN;
2218 case sim_fpu_class_infinity:
2219 if (d->sign)
2220 return SIM_FPU_IS_NINF;
2221 else
2222 return SIM_FPU_IS_PINF;
2223 case sim_fpu_class_number:
2224 if (d->sign)
2225 return SIM_FPU_IS_NNUMBER;
2226 else
2227 return SIM_FPU_IS_PNUMBER;
2228 case sim_fpu_class_denorm:
2229 if (d->sign)
2230 return SIM_FPU_IS_NDENORM;
2231 else
2232 return SIM_FPU_IS_PDENORM;
2233 case sim_fpu_class_zero:
2234 if (d->sign)
2235 return SIM_FPU_IS_NZERO;
2236 else
2237 return SIM_FPU_IS_PZERO;
2238 default:
2239 return -1;
2240 abort ();
2241 }
2242 }
2243
2244 INLINE_SIM_FPU (int)
2245 sim_fpu_cmp (const sim_fpu *l, const sim_fpu *r)
2246 {
2247 sim_fpu res;
2248 sim_fpu_sub (&res, l, r);
2249 return sim_fpu_is (&res);
2250 }
2251
2252 INLINE_SIM_FPU (int)
2253 sim_fpu_is_lt (const sim_fpu *l, const sim_fpu *r)
2254 {
2255 int status;
2256 sim_fpu_lt (&status, l, r);
2257 return status;
2258 }
2259
2260 INLINE_SIM_FPU (int)
2261 sim_fpu_is_le (const sim_fpu *l, const sim_fpu *r)
2262 {
2263 int is;
2264 sim_fpu_le (&is, l, r);
2265 return is;
2266 }
2267
2268 INLINE_SIM_FPU (int)
2269 sim_fpu_is_eq (const sim_fpu *l, const sim_fpu *r)
2270 {
2271 int is;
2272 sim_fpu_eq (&is, l, r);
2273 return is;
2274 }
2275
2276 INLINE_SIM_FPU (int)
2277 sim_fpu_is_ne (const sim_fpu *l, const sim_fpu *r)
2278 {
2279 int is;
2280 sim_fpu_ne (&is, l, r);
2281 return is;
2282 }
2283
2284 INLINE_SIM_FPU (int)
2285 sim_fpu_is_ge (const sim_fpu *l, const sim_fpu *r)
2286 {
2287 int is;
2288 sim_fpu_ge (&is, l, r);
2289 return is;
2290 }
2291
2292 INLINE_SIM_FPU (int)
2293 sim_fpu_is_gt (const sim_fpu *l, const sim_fpu *r)
2294 {
2295 int is;
2296 sim_fpu_gt (&is, l, r);
2297 return is;
2298 }
2299
2300
2301 /* Compare operators */
2302
2303 INLINE_SIM_FPU (int)
2304 sim_fpu_lt (int *is,
2305 const sim_fpu *l,
2306 const sim_fpu *r)
2307 {
2308 if (!sim_fpu_is_nan (l) && !sim_fpu_is_nan (r))
2309 {
2310 sim_fpu_map lval;
2311 sim_fpu_map rval;
2312 lval.i = pack_fpu (l, 1);
2313 rval.i = pack_fpu (r, 1);
2314 (*is) = (lval.d < rval.d);
2315 return 0;
2316 }
2317 else if (sim_fpu_is_snan (l) || sim_fpu_is_snan (r))
2318 {
2319 *is = 0;
2320 return sim_fpu_status_invalid_snan;
2321 }
2322 else
2323 {
2324 *is = 0;
2325 return sim_fpu_status_invalid_qnan;
2326 }
2327 }
2328
2329 INLINE_SIM_FPU (int)
2330 sim_fpu_le (int *is,
2331 const sim_fpu *l,
2332 const sim_fpu *r)
2333 {
2334 if (!sim_fpu_is_nan (l) && !sim_fpu_is_nan (r))
2335 {
2336 sim_fpu_map lval;
2337 sim_fpu_map rval;
2338 lval.i = pack_fpu (l, 1);
2339 rval.i = pack_fpu (r, 1);
2340 *is = (lval.d <= rval.d);
2341 return 0;
2342 }
2343 else if (sim_fpu_is_snan (l) || sim_fpu_is_snan (r))
2344 {
2345 *is = 0;
2346 return sim_fpu_status_invalid_snan;
2347 }
2348 else
2349 {
2350 *is = 0;
2351 return sim_fpu_status_invalid_qnan;
2352 }
2353 }
2354
2355 INLINE_SIM_FPU (int)
2356 sim_fpu_eq (int *is,
2357 const sim_fpu *l,
2358 const sim_fpu *r)
2359 {
2360 if (!sim_fpu_is_nan (l) && !sim_fpu_is_nan (r))
2361 {
2362 sim_fpu_map lval;
2363 sim_fpu_map rval;
2364 lval.i = pack_fpu (l, 1);
2365 rval.i = pack_fpu (r, 1);
2366 (*is) = (lval.d == rval.d);
2367 return 0;
2368 }
2369 else if (sim_fpu_is_snan (l) || sim_fpu_is_snan (r))
2370 {
2371 *is = 0;
2372 return sim_fpu_status_invalid_snan;
2373 }
2374 else
2375 {
2376 *is = 0;
2377 return sim_fpu_status_invalid_qnan;
2378 }
2379 }
2380
2381 INLINE_SIM_FPU (int)
2382 sim_fpu_ne (int *is,
2383 const sim_fpu *l,
2384 const sim_fpu *r)
2385 {
2386 if (!sim_fpu_is_nan (l) && !sim_fpu_is_nan (r))
2387 {
2388 sim_fpu_map lval;
2389 sim_fpu_map rval;
2390 lval.i = pack_fpu (l, 1);
2391 rval.i = pack_fpu (r, 1);
2392 (*is) = (lval.d != rval.d);
2393 return 0;
2394 }
2395 else if (sim_fpu_is_snan (l) || sim_fpu_is_snan (r))
2396 {
2397 *is = 0;
2398 return sim_fpu_status_invalid_snan;
2399 }
2400 else
2401 {
2402 *is = 0;
2403 return sim_fpu_status_invalid_qnan;
2404 }
2405 }
2406
2407 INLINE_SIM_FPU (int)
2408 sim_fpu_ge (int *is,
2409 const sim_fpu *l,
2410 const sim_fpu *r)
2411 {
2412 return sim_fpu_le (is, r, l);
2413 }
2414
2415 INLINE_SIM_FPU (int)
2416 sim_fpu_gt (int *is,
2417 const sim_fpu *l,
2418 const sim_fpu *r)
2419 {
2420 return sim_fpu_lt (is, r, l);
2421 }
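
/* Usage sketch (illustrative only; kept under #if 0 so it is never
   compiled): the comparison helpers report NaN operands through their
   status result rather than through *IS, so an "ordered and strictly
   less than" test looks like this.  */
#if 0
static int
example_is_ordered_lt (const sim_fpu *a, const sim_fpu *b)
{
  int is;
  int status = sim_fpu_lt (&is, a, b);
  return status == 0 && is;   /* 0 when either operand is a NaN */
}
#endif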
2422
2423
2424 /* A number of useful constants */
2425
2426 #if EXTERN_SIM_FPU_P
2427 const sim_fpu sim_fpu_zero = {
2428 sim_fpu_class_zero,
2429 };
2430 const sim_fpu sim_fpu_qnan = {
2431 sim_fpu_class_qnan,
2432 };
2433 const sim_fpu sim_fpu_one = {
2434 sim_fpu_class_number, 0, IMPLICIT_1, 0
2435 };
2436 const sim_fpu sim_fpu_two = {
2437 sim_fpu_class_number, 0, IMPLICIT_1, 1
2438 };
2439 const sim_fpu sim_fpu_max32 = {
2440 sim_fpu_class_number, 0, LSMASK64 (NR_FRAC_GUARD, NR_GUARDS32), NORMAL_EXPMAX32
2441 };
2442 const sim_fpu sim_fpu_max64 = {
2443 sim_fpu_class_number, 0, LSMASK64 (NR_FRAC_GUARD, NR_GUARDS64), NORMAL_EXPMAX64
2444 };
2445 #endif
2446
2447
2448 /* For debugging */
2449
2450 INLINE_SIM_FPU (void)
2451 sim_fpu_print_fpu (const sim_fpu *f,
2452 sim_fpu_print_func *print,
2453 void *arg)
2454 {
2455 sim_fpu_printn_fpu (f, print, -1, arg);
2456 }
2457
2458 INLINE_SIM_FPU (void)
2459 sim_fpu_printn_fpu (const sim_fpu *f,
2460 sim_fpu_print_func *print,
2461 int digits,
2462 void *arg)
2463 {
2464 print (arg, "%s", f->sign ? "-" : "+");
2465 switch (f->class)
2466 {
2467 case sim_fpu_class_qnan:
2468 print (arg, "0.");
2469 print_bits (f->fraction, NR_FRAC_GUARD - 1, digits, print, arg);
2470 print (arg, "*QuietNaN");
2471 break;
2472 case sim_fpu_class_snan:
2473 print (arg, "0.");
2474 print_bits (f->fraction, NR_FRAC_GUARD - 1, digits, print, arg);
2475 print (arg, "*SignalNaN");
2476 break;
2477 case sim_fpu_class_zero:
2478 print (arg, "0.0");
2479 break;
2480 case sim_fpu_class_infinity:
2481 print (arg, "INF");
2482 break;
2483 case sim_fpu_class_number:
2484 case sim_fpu_class_denorm:
2485 print (arg, "1.");
2486 print_bits (f->fraction, NR_FRAC_GUARD - 1, digits, print, arg);
2487 print (arg, "*2^%+d", f->normal_exp);
2488 ASSERT (f->fraction >= IMPLICIT_1);
2489 ASSERT (f->fraction < IMPLICIT_2);
2490 }
2491 }
2492
2493
2494 INLINE_SIM_FPU (void)
2495 sim_fpu_print_status (int status,
2496 sim_fpu_print_func *print,
2497 void *arg)
2498 {
2499 int i = 1;
2500 char *prefix = "";
2501 while (status >= i)
2502 {
2503 switch ((sim_fpu_status) (status & i))
2504 {
2505 case sim_fpu_status_denorm:
2506 print (arg, "%sD", prefix);
2507 break;
2508 case sim_fpu_status_invalid_snan:
2509 print (arg, "%sSNaN", prefix);
2510 break;
2511 case sim_fpu_status_invalid_qnan:
2512 print (arg, "%sQNaN", prefix);
2513 break;
2514 case sim_fpu_status_invalid_isi:
2515 print (arg, "%sISI", prefix);
2516 break;
2517 case sim_fpu_status_invalid_idi:
2518 print (arg, "%sIDI", prefix);
2519 break;
2520 case sim_fpu_status_invalid_zdz:
2521 print (arg, "%sZDZ", prefix);
2522 break;
2523 case sim_fpu_status_invalid_imz:
2524 print (arg, "%sIMZ", prefix);
2525 break;
2526 case sim_fpu_status_invalid_cvi:
2527 print (arg, "%sCVI", prefix);
2528 break;
2529 case sim_fpu_status_invalid_cmp:
2530 print (arg, "%sCMP", prefix);
2531 break;
2532 case sim_fpu_status_invalid_sqrt:
2533 print (arg, "%sSQRT", prefix);
2534 break;
2536 case sim_fpu_status_inexact:
2537 print (arg, "%sX", prefix);
2538 break;
2540 case sim_fpu_status_overflow:
2541 print (arg, "%sO", prefix);
2542 break;
2544 case sim_fpu_status_underflow:
2545 print (arg, "%sU", prefix);
2546 break;
2548 case sim_fpu_status_invalid_div0:
2549 print (arg, "%s/", prefix);
2550 break;
2552 case sim_fpu_status_rounded:
2553 print (arg, "%sR", prefix);
2554 break;
2556 }
2557 i <<= 1;
2558 prefix = ",";
2559 }
2560 }
2561
2562 #endif