1 /* This is a software floating point library which can be used instead
2 of the floating point routines in libgcc1.c for targets without
3 hardware floating point. */
4
5 /* Copyright 1994-2021 Free Software Foundation, Inc.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 /* As a special exception, if you link this library with other files,
21 some of which are compiled with GCC, to produce an executable,
22 this library does not by itself cause the resulting executable
23 to be covered by the GNU General Public License.
24 This exception does not however invalidate any other reasons why
25 the executable file might be covered by the GNU General Public License. */
26
27 /* This implements IEEE 754 format arithmetic, but does not provide a
28 mechanism for setting the rounding mode, or for generating or handling
29 exceptions.
30
31 The original code by Steve Chamberlain, hacked by Mark Eichin and Jim
32 Wilson, all of Cygnus Support. */
33
34
35 #ifndef SIM_FPU_C
36 #define SIM_FPU_C
37
38 #include "sim-basics.h"
39 #include "sim-fpu.h"
40
41 #include "sim-io.h"
42 #include "sim-assert.h"
43
44 #ifdef HAVE_STDLIB_H
45 #include <stdlib.h>
46 #endif
47
48 /* Debugging support.
49 If digits is -1, then print all digits. */
50
51 static void
52 print_bits (unsigned64 x,
53 int msbit,
54 int digits,
55 sim_fpu_print_func print,
56 void *arg)
57 {
58 unsigned64 bit = LSBIT64 (msbit);
59 int i = 4;
60 while (bit && digits)
61 {
62 if (i == 0)
63 print (arg, ",");
64
65 if ((x & bit))
66 print (arg, "1");
67 else
68 print (arg, "0");
69 bit >>= 1;
70
71 if (digits > 0)
72 digits--;
73 i = (i + 1) % 4;
74 }
75 }
76
77
78
79 /* Quick and dirty conversion between a host double and host 64bit int. */
80
81 typedef union
82 {
83 double d;
84 unsigned64 i;
85 } sim_fpu_map;
86
87
88 /* A packed IEEE floating point number.
89
90 Form is <SIGN:1><BIASEDEXP:NR_EXPBITS><FRAC:NR_FRACBITS> for both
91 32 and 64 bit numbers. This number is interpreted as:
92
93 Normalized (0 < BIASEDEXP && BIASEDEXP < EXPMAX):
94 (sign ? '-' : '+') 1.<FRAC> x 2 ^ (BIASEDEXP - EXPBIAS)
95
96 Denormalized (0 == BIASEDEXP && FRAC != 0):
97 (sign ? "-" : "+") 0.<FRAC> x 2 ^ (1 - EXPBIAS)
98
99 Zero (0 == BIASEDEXP && FRAC == 0):
100 (sign ? "-" : "+") 0.0
101
102 Infinity (BIASEDEXP == EXPMAX && FRAC == 0):
103 (sign ? "-" : "+") "infinity"
104
105 SignalingNaN (BIASEDEXP == EXPMAX && FRAC > 0 && FRAC < QUIET_NAN):
106 SNaN.FRAC
107
108 QuietNaN (BIASEDEXP == EXPMAX && FRAC >= QUIET_NAN):
109 QNaN.FRAC
110
111 */
112
113 #define NR_EXPBITS (is_double ? 11 : 8)
114 #define NR_FRACBITS (is_double ? 52 : 23)
115 #define SIGNBIT (is_double ? MSBIT64 (0) : MSBIT64 (32))
116
117 #define EXPMAX32 (255)
118 #define EXPMAX64 (2047)
119 #define EXPMAX ((unsigned) (is_double ? EXPMAX64 : EXPMAX32))
120
121 #define EXPBIAS32 (127)
122 #define EXPBIAS64 (1023)
123 #define EXPBIAS (is_double ? EXPBIAS64 : EXPBIAS32)
124
125 #define QUIET_NAN LSBIT64 (NR_FRACBITS - 1)
126
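/* Editor's sketch (not part of the library): how a host float's bits map
   onto the <SIGN:1><BIASEDEXP:8><FRAC:23> layout described above, assuming
   the host uses IEEE 754 single precision.  The helper name is made up for
   illustration and the block is kept disabled, like the trace code below.  */
#if 0
#include <stdio.h>
#include <stdint.h>
#include <string.h>

static void
show_packed_fields (float x)
{
  uint32_t bits;
  memcpy (&bits, &x, sizeof bits);
  printf ("sign=%u biasedexp=%u frac=0x%06x\n",
          (unsigned) (bits >> 31),              /* SIGNBIT */
          (unsigned) ((bits >> 23) & 0xff),     /* BIASEDEXP, EXPBIAS32 == 127 */
          (unsigned) (bits & 0x7fffff));        /* FRAC, NR_FRACBITS == 23 */
  /* show_packed_fields (1.5f) prints sign=0 biasedexp=127 frac=0x400000,
     i.e. +1.1b x 2^(127 - 127).  */
}
#endif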
127
128
129 /* An unpacked floating point number.
130
131 When unpacked, the fraction of both a 32 and 64 bit floating point
132 number is stored using the same format:
133
134 64 bit - <IMPLICIT_1:1><FRACBITS:52><GUARDS:8><PAD:00>
135 32 bit - <IMPLICIT_1:1><FRACBITS:23><GUARDS:7><PAD:30> */
136
137 #define NR_PAD32 (30)
138 #define NR_PAD64 (0)
139 #define NR_PAD (is_double ? NR_PAD64 : NR_PAD32)
140 #define PADMASK (is_double ? 0 : LSMASK64 (NR_PAD32 - 1, 0))
141
142 #define NR_GUARDS32 (7 + NR_PAD32)
143 #define NR_GUARDS64 (8 + NR_PAD64)
144 #define NR_GUARDS (is_double ? NR_GUARDS64 : NR_GUARDS32)
145 #define GUARDMASK LSMASK64 (NR_GUARDS - 1, 0)
146
147 #define GUARDMSB LSBIT64 (NR_GUARDS - 1)
148 #define GUARDLSB LSBIT64 (NR_PAD)
149 #define GUARDROUND LSMASK64 (NR_GUARDS - 2, 0)
150
151 #define NR_FRAC_GUARD (60)
152 #define IMPLICIT_1 LSBIT64 (NR_FRAC_GUARD)
153 #define IMPLICIT_2 LSBIT64 (NR_FRAC_GUARD + 1)
154 #define IMPLICIT_4 LSBIT64 (NR_FRAC_GUARD + 2)
155 #define NR_SPARE 2
156
157 #define FRAC32MASK LSMASK64 (63, NR_FRAC_GUARD - 32 + 1)
158
159 #define NORMAL_EXPMIN (-(EXPBIAS)+1)
160
161 #define NORMAL_EXPMAX32 (EXPBIAS32)
162 #define NORMAL_EXPMAX64 (EXPBIAS64)
163 #define NORMAL_EXPMAX (EXPBIAS)
164
165
166 /* Integer constants */
167
168 #define MAX_INT32 ((signed64) LSMASK64 (30, 0))
169 #define MAX_UINT32 LSMASK64 (31, 0)
170 #define MIN_INT32 ((signed64) LSMASK64 (63, 31))
171
172 #define MAX_INT64 ((signed64) LSMASK64 (62, 0))
173 #define MAX_UINT64 LSMASK64 (63, 0)
174 #define MIN_INT64 ((signed64) LSMASK64 (63, 63))
175
176 #define MAX_INT (is_64bit ? MAX_INT64 : MAX_INT32)
177 #define MIN_INT (is_64bit ? MIN_INT64 : MIN_INT32)
178 #define MAX_UINT (is_64bit ? MAX_UINT64 : MAX_UINT32)
179 #define NR_INTBITS (is_64bit ? 64 : 32)
180
181 /* Squeeze an unpacked sim_fpu struct into a 32/64 bit integer. */
182 STATIC_INLINE_SIM_FPU (unsigned64)
183 pack_fpu (const sim_fpu *src,
184 int is_double)
185 {
186 int sign;
187 unsigned64 exp;
188 unsigned64 fraction;
189 unsigned64 packed;
190
191 switch (src->class)
192 {
193 /* Create a NaN. */
194 case sim_fpu_class_qnan:
195 sign = src->sign;
196 exp = EXPMAX;
197 /* Force fraction to correct class. */
198 fraction = src->fraction;
199 fraction >>= NR_GUARDS;
200 #ifdef SIM_QUIET_NAN_NEGATED
201 fraction |= QUIET_NAN - 1;
202 #else
203 fraction |= QUIET_NAN;
204 #endif
205 break;
206 case sim_fpu_class_snan:
207 sign = src->sign;
208 exp = EXPMAX;
209 /* Force fraction to correct class. */
210 fraction = src->fraction;
211 fraction >>= NR_GUARDS;
212 #ifdef SIM_QUIET_NAN_NEGATED
213 fraction |= QUIET_NAN;
214 #else
215 fraction &= ~QUIET_NAN;
216 #endif
217 break;
218 case sim_fpu_class_infinity:
219 sign = src->sign;
220 exp = EXPMAX;
221 fraction = 0;
222 break;
223 case sim_fpu_class_zero:
224 sign = src->sign;
225 exp = 0;
226 fraction = 0;
227 break;
228 case sim_fpu_class_number:
229 case sim_fpu_class_denorm:
230 ASSERT (src->fraction >= IMPLICIT_1);
231 ASSERT (src->fraction < IMPLICIT_2);
232 if (src->normal_exp < NORMAL_EXPMIN)
233 {
234 /* This number's exponent is too low to fit into the bits
235 available in the number.  We'll denormalize the number by
236 storing zero in the exponent and shifting the fraction to
237 the right to make up for it. */
238 int nr_shift = NORMAL_EXPMIN - src->normal_exp;
239 if (nr_shift > NR_FRACBITS)
240 {
241 /* Underflow, just make the number zero. */
242 sign = src->sign;
243 exp = 0;
244 fraction = 0;
245 }
246 else
247 {
248 sign = src->sign;
249 exp = 0;
250 /* Shift by the value. */
251 fraction = src->fraction;
252 fraction >>= NR_GUARDS;
253 fraction >>= nr_shift;
254 }
255 }
256 else if (src->normal_exp > NORMAL_EXPMAX)
257 {
258 /* Infinity */
259 sign = src->sign;
260 exp = EXPMAX;
261 fraction = 0;
262 }
263 else
264 {
265 exp = (src->normal_exp + EXPBIAS);
266 sign = src->sign;
267 fraction = src->fraction;
268 /* FIXME: Need to round according to WITH_SIM_FPU_ROUNDING
269 or some such. */
270 /* Round to nearest: if only the most significant guard bit is
271 set (all lower guard bits zero), we are exactly half way between
272 two numbers; choose the one which makes the lsb of the answer 0. */
273 if ((fraction & GUARDMASK) == GUARDMSB)
274 {
275 if ((fraction & (GUARDMSB << 1)))
276 fraction += (GUARDMSB << 1);
277 }
278 else
279 {
280 /* Add a one to the guards to force round to nearest. */
281 fraction += GUARDROUND;
282 }
283 if ((fraction & IMPLICIT_2)) /* Rounding resulted in carry. */
284 {
285 exp += 1;
286 fraction >>= 1;
287 }
288 fraction >>= NR_GUARDS;
289 /* When exp == EXPMAX (overflow from carry) fraction must
290 have been made zero. */
291 ASSERT ((exp == EXPMAX) <= ((fraction & ~IMPLICIT_1) == 0));
292 }
293 break;
294 default:
295 abort ();
296 }
297
298 packed = ((sign ? SIGNBIT : 0)
299 | (exp << NR_FRACBITS)
300 | LSMASKED64 (fraction, NR_FRACBITS - 1, 0));
301
302 /* Trace operation. */
303 #if 0
304 if (is_double)
305 {
306 }
307 else
308 {
309 printf ("pack_fpu: ");
310 printf ("-> %c%0lX.%06lX\n",
311 LSMASKED32 (packed, 31, 31) ? '8' : '0',
312 (long) LSEXTRACTED32 (packed, 30, 23),
313 (long) LSEXTRACTED32 (packed, 23 - 1, 0));
314 }
315 #endif
316
317 return packed;
318 }
319
320
321 /* Unpack a 32/64 bit integer into a sim_fpu structure. */
322 STATIC_INLINE_SIM_FPU (void)
323 unpack_fpu (sim_fpu *dst, unsigned64 packed, int is_double)
324 {
325 unsigned64 fraction = LSMASKED64 (packed, NR_FRACBITS - 1, 0);
326 unsigned exp = LSEXTRACTED64 (packed, NR_EXPBITS + NR_FRACBITS - 1, NR_FRACBITS);
327 int sign = (packed & SIGNBIT) != 0;
328
329 if (exp == 0)
330 {
331 /* Hmm. Looks like 0 */
332 if (fraction == 0)
333 {
334 /* Tastes like zero. */
335 dst->class = sim_fpu_class_zero;
336 dst->sign = sign;
337 dst->normal_exp = 0;
338 }
339 else
340 {
341 /* Zero exponent with non zero fraction - it's denormalized,
342 so there isn't a leading implicit one - we'll shift it so
343 it gets one. */
344 dst->normal_exp = exp - EXPBIAS + 1;
345 dst->class = sim_fpu_class_denorm;
346 dst->sign = sign;
347 fraction <<= NR_GUARDS;
348 while (fraction < IMPLICIT_1)
349 {
350 fraction <<= 1;
351 dst->normal_exp--;
352 }
353 dst->fraction = fraction;
354 }
355 }
356 else if (exp == EXPMAX)
357 {
358 /* Huge exponent.  */
359 if (fraction == 0)
360 {
361 /* Attached to a zero fraction - means infinity. */
362 dst->class = sim_fpu_class_infinity;
363 dst->sign = sign;
364 /* dst->normal_exp = EXPBIAS; */
365 /* dst->fraction = 0; */
366 }
367 else
368 {
369 int qnan;
370
371 /* Non zero fraction, means NaN. */
372 dst->sign = sign;
373 dst->fraction = (fraction << NR_GUARDS);
374 #ifdef SIM_QUIET_NAN_NEGATED
375 qnan = (fraction & QUIET_NAN) == 0;
376 #else
377 qnan = fraction >= QUIET_NAN;
378 #endif
379 if (qnan)
380 dst->class = sim_fpu_class_qnan;
381 else
382 dst->class = sim_fpu_class_snan;
383 }
384 }
385 else
386 {
387 /* Nothing strange about this number. */
388 dst->class = sim_fpu_class_number;
389 dst->sign = sign;
390 dst->fraction = ((fraction << NR_GUARDS) | IMPLICIT_1);
391 dst->normal_exp = exp - EXPBIAS;
392 }
393
394 /* Trace operation. */
395 #if 0
396 if (is_double)
397 {
398 }
399 else
400 {
401 printf ("unpack_fpu: %c%02lX.%06lX ->\n",
402 LSMASKED32 (packed, 31, 31) ? '8' : '0',
403 (long) LSEXTRACTED32 (packed, 30, 23),
404 (long) LSEXTRACTED32 (packed, 23 - 1, 0));
405 }
406 #endif
407
408 /* sanity checks */
409 {
410 sim_fpu_map val;
411 val.i = pack_fpu (dst, 1);
412 if (is_double)
413 {
414 ASSERT (val.i == packed);
415 }
416 else
417 {
418 unsigned32 val = pack_fpu (dst, 0);
419 unsigned32 org = packed;
420 ASSERT (val == org);
421 }
422 }
423 }
424
425
426 /* Convert a floating point into an integer. */
427 STATIC_INLINE_SIM_FPU (int)
428 fpu2i (signed64 *i,
429 const sim_fpu *s,
430 int is_64bit,
431 sim_fpu_round round)
432 {
433 unsigned64 tmp;
434 int shift;
435 int status = 0;
436 if (sim_fpu_is_zero (s))
437 {
438 *i = 0;
439 return 0;
440 }
441 if (sim_fpu_is_snan (s))
442 {
443 *i = MIN_INT; /* FIXME */
444 return sim_fpu_status_invalid_cvi;
445 }
446 if (sim_fpu_is_qnan (s))
447 {
448 *i = MIN_INT; /* FIXME */
449 return sim_fpu_status_invalid_cvi;
450 }
451 /* Map infinity onto MAX_INT... */
452 if (sim_fpu_is_infinity (s))
453 {
454 *i = s->sign ? MIN_INT : MAX_INT;
455 return sim_fpu_status_invalid_cvi;
456 }
457 /* It is a number, but a small one. */
458 if (s->normal_exp < 0)
459 {
460 *i = 0;
461 return sim_fpu_status_inexact;
462 }
463 /* Is the floating point MIN_INT or just close? */
464 if (s->sign && s->normal_exp == (NR_INTBITS - 1))
465 {
466 *i = MIN_INT;
467 ASSERT (s->fraction >= IMPLICIT_1);
468 if (s->fraction == IMPLICIT_1)
469 return 0; /* exact */
470 if (is_64bit) /* can't round */
471 return sim_fpu_status_invalid_cvi; /* must be overflow */
472 /* For a 32 bit value near MIN_INT, rounding is possible. */
473 switch (round)
474 {
475 case sim_fpu_round_default:
476 abort ();
477 case sim_fpu_round_zero:
478 if ((s->fraction & FRAC32MASK) != IMPLICIT_1)
479 return sim_fpu_status_invalid_cvi;
480 else
481 return sim_fpu_status_inexact;
482 break;
483 case sim_fpu_round_near:
484 {
485 if ((s->fraction & FRAC32MASK) != IMPLICIT_1)
486 return sim_fpu_status_invalid_cvi;
487 else if ((s->fraction & ~FRAC32MASK) >= (~FRAC32MASK >> 1))
488 return sim_fpu_status_invalid_cvi;
489 else
490 return sim_fpu_status_inexact;
491 }
492 case sim_fpu_round_up:
493 if ((s->fraction & FRAC32MASK) == IMPLICIT_1)
494 return sim_fpu_status_inexact;
495 else
496 return sim_fpu_status_invalid_cvi;
497 case sim_fpu_round_down:
498 return sim_fpu_status_invalid_cvi;
499 }
500 }
501 /* Would right shifting result in the FRAC being shifted into
502 (through) the integer's sign bit? */
503 if (s->normal_exp > (NR_INTBITS - 2))
504 {
505 *i = s->sign ? MIN_INT : MAX_INT;
506 return sim_fpu_status_invalid_cvi;
507 }
508 /* Normal number, shift it into place. */
509 tmp = s->fraction;
510 shift = (s->normal_exp - (NR_FRAC_GUARD));
511 if (shift > 0)
512 {
513 tmp <<= shift;
514 }
515 else
516 {
517 shift = -shift;
518 if (tmp & ((SIGNED64 (1) << shift) - 1))
519 status |= sim_fpu_status_inexact;
520 tmp >>= shift;
521 }
522 *i = s->sign ? (-tmp) : (tmp);
523 return status;
524 }
525
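/* Editor's sketch (not part of the library): the shift at the end of
   fpu2i in isolation.  A value of 1.25 has normal_exp == 0 and fraction
   == 1.01b scaled up to IMPLICIT_1, so converting it discards non zero
   bits (the inexact case) and truncates toward zero.  */
#if 0
#include <stdint.h>
#include <assert.h>

static void
fpu2i_shift_sketch (void)
{
  const int nr_frac_guard = 60;
  const uint64_t implicit_1 = (uint64_t) 1 << nr_frac_guard;
  uint64_t fraction = implicit_1 | (implicit_1 >> 2);   /* 1.25 */
  int shift = 0 - nr_frac_guard;                        /* normal_exp == 0 */
  uint64_t discarded = fraction & (((uint64_t) 1 << -shift) - 1);
  assert (discarded != 0);              /* sim_fpu_status_inexact */
  assert ((fraction >> -shift) == 1);   /* truncated result */
}
#endif
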
526 /* Convert an integer into a floating point. */
527 STATIC_INLINE_SIM_FPU (int)
528 i2fpu (sim_fpu *f, signed64 i, int is_64bit)
529 {
530 int status = 0;
531 if (i == 0)
532 {
533 f->class = sim_fpu_class_zero;
534 f->sign = 0;
535 f->normal_exp = 0;
536 }
537 else
538 {
539 f->class = sim_fpu_class_number;
540 f->sign = (i < 0);
541 f->normal_exp = NR_FRAC_GUARD;
542
543 if (f->sign)
544 {
545 /* Special case for minint, since there is no corresponding
546 +ve integer representation for it. */
547 if (i == MIN_INT)
548 {
549 f->fraction = IMPLICIT_1;
550 f->normal_exp = NR_INTBITS - 1;
551 }
552 else
553 f->fraction = (-i);
554 }
555 else
556 f->fraction = i;
557
558 if (f->fraction >= IMPLICIT_2)
559 {
560 do
561 {
562 f->fraction = (f->fraction >> 1) | (f->fraction & 1);
563 f->normal_exp += 1;
564 }
565 while (f->fraction >= IMPLICIT_2);
566 }
567 else if (f->fraction < IMPLICIT_1)
568 {
569 do
570 {
571 f->fraction <<= 1;
572 f->normal_exp -= 1;
573 }
574 while (f->fraction < IMPLICIT_1);
575 }
576 }
577
578 /* trace operation */
579 #if 0
580 {
581 printf ("i2fpu: 0x%08lX ->\n", (long) i);
582 }
583 #endif
584
585 /* sanity check */
586 {
587 signed64 val;
588 fpu2i (&val, f, is_64bit, sim_fpu_round_zero);
589 if (i >= MIN_INT32 && i <= MAX_INT32)
590 {
591 ASSERT (val == i);
592 }
593 }
594
595 return status;
596 }
597
598
599 /* Convert a floating point into an unsigned integer. */
600 STATIC_INLINE_SIM_FPU (int)
601 fpu2u (unsigned64 *u, const sim_fpu *s, int is_64bit)
602 {
603 const int is_double = 1;
604 unsigned64 tmp;
605 int shift;
606 if (sim_fpu_is_zero (s))
607 {
608 *u = 0;
609 return 0;
610 }
611 if (sim_fpu_is_nan (s))
612 {
613 *u = 0;
614 return 0;
615 }
616 /* It is a negative number. */
617 if (s->sign)
618 {
619 *u = 0;
620 return 0;
621 }
622 /* Map infinity onto MAX_UINT... */
623 if (sim_fpu_is_infinity (s))
624 {
625 *u = MAX_UINT;
626 return 0;
627 }
628 /* It is a number, but a small one. */
629 if (s->normal_exp < 0)
630 {
631 *u = 0;
632 return 0;
633 }
634 /* overflow */
635 if (s->normal_exp > (NR_INTBITS - 1))
636 {
637 *u = MAX_UINT;
638 return 0;
639 }
640 /* normal number */
641 tmp = (s->fraction & ~PADMASK);
642 shift = (s->normal_exp - (NR_FRACBITS + NR_GUARDS));
643 if (shift > 0)
644 {
645 tmp <<= shift;
646 }
647 else
648 {
649 shift = -shift;
650 tmp >>= shift;
651 }
652 *u = tmp;
653 return 0;
654 }
655
656 /* Convert an unsigned integer into a floating point. */
657 STATIC_INLINE_SIM_FPU (int)
658 u2fpu (sim_fpu *f, unsigned64 u, int is_64bit)
659 {
660 if (u == 0)
661 {
662 f->class = sim_fpu_class_zero;
663 f->sign = 0;
664 f->normal_exp = 0;
665 }
666 else
667 {
668 f->class = sim_fpu_class_number;
669 f->sign = 0;
670 f->normal_exp = NR_FRAC_GUARD;
671 f->fraction = u;
672
673 while (f->fraction < IMPLICIT_1)
674 {
675 f->fraction <<= 1;
676 f->normal_exp -= 1;
677 }
678 }
679 return 0;
680 }
681
682
683 /* register <-> sim_fpu */
684
685 INLINE_SIM_FPU (void)
686 sim_fpu_32to (sim_fpu *f, unsigned32 s)
687 {
688 unpack_fpu (f, s, 0);
689 }
690
691
692 INLINE_SIM_FPU (void)
693 sim_fpu_232to (sim_fpu *f, unsigned32 h, unsigned32 l)
694 {
695 unsigned64 s = h;
696 s = (s << 32) | l;
697 unpack_fpu (f, s, 1);
698 }
699
700
701 INLINE_SIM_FPU (void)
702 sim_fpu_64to (sim_fpu *f, unsigned64 s)
703 {
704 unpack_fpu (f, s, 1);
705 }
706
707
708 INLINE_SIM_FPU (void)
709 sim_fpu_to32 (unsigned32 *s,
710 const sim_fpu *f)
711 {
712 *s = pack_fpu (f, 0);
713 }
714
715
716 INLINE_SIM_FPU (void)
717 sim_fpu_to232 (unsigned32 *h, unsigned32 *l,
718 const sim_fpu *f)
719 {
720 unsigned64 s = pack_fpu (f, 1);
721 *l = s;
722 *h = (s >> 32);
723 }
724
725
726 INLINE_SIM_FPU (void)
727 sim_fpu_to64 (unsigned64 *u,
728 const sim_fpu *f)
729 {
730 *u = pack_fpu (f, 1);
731 }
732
733
734 INLINE_SIM_FPU (void)
735 sim_fpu_fractionto (sim_fpu *f,
736 int sign,
737 int normal_exp,
738 unsigned64 fraction,
739 int precision)
740 {
741 int shift = (NR_FRAC_GUARD - precision);
742 f->class = sim_fpu_class_number;
743 f->sign = sign;
744 f->normal_exp = normal_exp;
745 /* Shift the fraction to where sim-fpu expects it. */
746 if (shift >= 0)
747 f->fraction = (fraction << shift);
748 else
749 f->fraction = (fraction >> -shift);
750 f->fraction |= IMPLICIT_1;
751 }
752
753
754 INLINE_SIM_FPU (unsigned64)
755 sim_fpu_tofraction (const sim_fpu *d,
756 int precision)
757 {
758 /* We have NR_FRAC_GUARD bits, we want only PRECISION bits. */
759 int shift = (NR_FRAC_GUARD - precision);
760 unsigned64 fraction = (d->fraction & ~IMPLICIT_1);
761 if (shift >= 0)
762 return fraction >> shift;
763 else
764 return fraction << -shift;
765 }
766
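/* Editor's usage sketch (not part of the library): unpack two single
   precision bit patterns, add them, and pack the result back.  1.5f is
   0x3FC00000; 1.5 + 1.5 is exact, so the result should pack to 3.0f
   (0x40400000).  The helper name is made up for illustration.  */
#if 0
static void
round_trip_sketch (void)
{
  sim_fpu a, b, sum;
  unsigned32 out;
  sim_fpu_32to (&a, 0x3FC00000);        /* 1.5 */
  sim_fpu_32to (&b, 0x3FC00000);        /* 1.5 */
  sim_fpu_add (&sum, &a, &b);
  sim_fpu_to32 (&out, &sum);            /* expect 0x40400000, i.e. 3.0 */
}
#endif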
767
768 /* Rounding */
769
770 STATIC_INLINE_SIM_FPU (int)
771 do_normal_overflow (sim_fpu *f,
772 int is_double,
773 sim_fpu_round round)
774 {
775 switch (round)
776 {
777 case sim_fpu_round_default:
778 return 0;
779 case sim_fpu_round_near:
780 f->class = sim_fpu_class_infinity;
781 break;
782 case sim_fpu_round_up:
783 if (!f->sign)
784 f->class = sim_fpu_class_infinity;
785 break;
786 case sim_fpu_round_down:
787 if (f->sign)
788 f->class = sim_fpu_class_infinity;
789 break;
790 case sim_fpu_round_zero:
791 break;
792 }
793 f->normal_exp = NORMAL_EXPMAX;
794 f->fraction = LSMASK64 (NR_FRAC_GUARD, NR_GUARDS);
795 return (sim_fpu_status_overflow | sim_fpu_status_inexact);
796 }
797
798 STATIC_INLINE_SIM_FPU (int)
799 do_normal_underflow (sim_fpu *f,
800 int is_double,
801 sim_fpu_round round)
802 {
803 switch (round)
804 {
805 case sim_fpu_round_default:
806 return 0;
807 case sim_fpu_round_near:
808 f->class = sim_fpu_class_zero;
809 break;
810 case sim_fpu_round_up:
811 if (f->sign)
812 f->class = sim_fpu_class_zero;
813 break;
814 case sim_fpu_round_down:
815 if (!f->sign)
816 f->class = sim_fpu_class_zero;
817 break;
818 case sim_fpu_round_zero:
819 f->class = sim_fpu_class_zero;
820 break;
821 }
822 f->normal_exp = NORMAL_EXPMIN - NR_FRACBITS;
823 f->fraction = IMPLICIT_1;
824 return (sim_fpu_status_inexact | sim_fpu_status_underflow);
825 }
826
827
828
829 /* Round a number using NR_GUARDS.
830 Will return the rounded number or F->FRACTION == 0 when underflow. */
831
832 STATIC_INLINE_SIM_FPU (int)
833 do_normal_round (sim_fpu *f,
834 int nr_guards,
835 sim_fpu_round round)
836 {
837 unsigned64 guardmask = LSMASK64 (nr_guards - 1, 0);
838 unsigned64 guardmsb = LSBIT64 (nr_guards - 1);
839 unsigned64 fraclsb = guardmsb << 1;
840 if ((f->fraction & guardmask))
841 {
842 int status = sim_fpu_status_inexact;
843 switch (round)
844 {
845 case sim_fpu_round_default:
846 return 0;
847 case sim_fpu_round_near:
848 if ((f->fraction & guardmsb))
849 {
850 if ((f->fraction & fraclsb))
851 {
852 status |= sim_fpu_status_rounded;
853 }
854 else if ((f->fraction & (guardmask >> 1)))
855 {
856 status |= sim_fpu_status_rounded;
857 }
858 }
859 break;
860 case sim_fpu_round_up:
861 if (!f->sign)
862 status |= sim_fpu_status_rounded;
863 break;
864 case sim_fpu_round_down:
865 if (f->sign)
866 status |= sim_fpu_status_rounded;
867 break;
868 case sim_fpu_round_zero:
869 break;
870 }
871 f->fraction &= ~guardmask;
872 /* Round if needed, handle resulting overflow. */
873 if ((status & sim_fpu_status_rounded))
874 {
875 f->fraction += fraclsb;
876 if ((f->fraction & IMPLICIT_2))
877 {
878 f->fraction >>= 1;
879 f->normal_exp += 1;
880 }
881 }
882 return status;
883 }
884 else
885 return 0;
886 }
887
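/* Editor's sketch (not part of the library): the round-to-nearest,
   ties-to-even decision used by do_normal_round, reduced to a 3 bit
   guard field on plain integers.  A tie keeps an even low bit and rounds
   an odd one up.  */
#if 0
#include <stdint.h>
#include <assert.h>

static uint64_t
round_near_sketch (uint64_t fraction, int nr_guards)
{
  uint64_t guardmask = ((uint64_t) 1 << nr_guards) - 1;
  uint64_t guardmsb = (uint64_t) 1 << (nr_guards - 1);
  uint64_t fraclsb = guardmsb << 1;
  int round_up = 0;
  if (fraction & guardmsb)
    {
      if (fraction & fraclsb)                   /* low bit odd: round up */
        round_up = 1;
      else if (fraction & (guardmask >> 1))     /* strictly above half way */
        round_up = 1;
    }
  fraction &= ~guardmask;
  return round_up ? fraction + fraclsb : fraction;
}

static void
round_near_check (void)
{
  assert (round_near_sketch (0x14, 3) == 0x10); /* tie, even: stays */
  assert (round_near_sketch (0x1C, 3) == 0x20); /* tie, odd: rounds up */
  assert (round_near_sketch (0x16, 3) == 0x18); /* above half: rounds up */
}
#endif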
888
889 STATIC_INLINE_SIM_FPU (int)
890 do_round (sim_fpu *f,
891 int is_double,
892 sim_fpu_round round,
893 sim_fpu_denorm denorm)
894 {
895 switch (f->class)
896 {
897 case sim_fpu_class_qnan:
898 case sim_fpu_class_zero:
899 case sim_fpu_class_infinity:
900 return 0;
901 break;
902 case sim_fpu_class_snan:
903 /* Quieten a SignalingNaN. */
904 f->class = sim_fpu_class_qnan;
905 return sim_fpu_status_invalid_snan;
906 break;
907 case sim_fpu_class_number:
908 case sim_fpu_class_denorm:
909 {
910 int status;
911 ASSERT (f->fraction < IMPLICIT_2);
912 ASSERT (f->fraction >= IMPLICIT_1);
913 if (f->normal_exp < NORMAL_EXPMIN)
914 {
915 /* This number's exponent is too low to fit into the bits
916 available in the number. Round off any bits that will be
917 discarded as a result of denormalization. Edge case is
918 the implicit bit shifted to GUARD0 and then rounded
919 up. */
920 int shift = NORMAL_EXPMIN - f->normal_exp;
921 if (shift + NR_GUARDS <= NR_FRAC_GUARD + 1
922 && !(denorm & sim_fpu_denorm_zero))
923 {
924 status = do_normal_round (f, shift + NR_GUARDS, round);
925 if (f->fraction == 0) /* Rounding underflowed. */
926 {
927 status |= do_normal_underflow (f, is_double, round);
928 }
929 else if (f->normal_exp < NORMAL_EXPMIN) /* still underflow? */
930 {
931 status |= sim_fpu_status_denorm;
932 /* Any loss of precision when denormalizing is
933 underflow. Some processors check for underflow
934 before rounding, some after! */
935 if (status & sim_fpu_status_inexact)
936 status |= sim_fpu_status_underflow;
937 /* Flag that resultant value has been denormalized. */
938 f->class = sim_fpu_class_denorm;
939 }
940 else if ((denorm & sim_fpu_denorm_underflow_inexact))
941 {
942 if ((status & sim_fpu_status_inexact))
943 status |= sim_fpu_status_underflow;
944 }
945 }
946 else
947 {
948 status = do_normal_underflow (f, is_double, round);
949 }
950 }
951 else if (f->normal_exp > NORMAL_EXPMAX)
952 {
953 /* Infinity */
954 status = do_normal_overflow (f, is_double, round);
955 }
956 else
957 {
958 status = do_normal_round (f, NR_GUARDS, round);
959 if (f->fraction == 0)
960 /* f->class = sim_fpu_class_zero; */
961 status |= do_normal_underflow (f, is_double, round);
962 else if (f->normal_exp > NORMAL_EXPMAX)
963 /* Oops! rounding caused overflow. */
964 status |= do_normal_overflow (f, is_double, round);
965 }
966 ASSERT ((f->class == sim_fpu_class_number
967 || f->class == sim_fpu_class_denorm)
968 <= (f->fraction < IMPLICIT_2 && f->fraction >= IMPLICIT_1));
969 return status;
970 }
971 }
972 return 0;
973 }
974
975 INLINE_SIM_FPU (int)
976 sim_fpu_round_32 (sim_fpu *f,
977 sim_fpu_round round,
978 sim_fpu_denorm denorm)
979 {
980 return do_round (f, 0, round, denorm);
981 }
982
983 INLINE_SIM_FPU (int)
984 sim_fpu_round_64 (sim_fpu *f,
985 sim_fpu_round round,
986 sim_fpu_denorm denorm)
987 {
988 return do_round (f, 1, round, denorm);
989 }
990
991
992
993 /* Arithmetic ops */
994
995 INLINE_SIM_FPU (int)
996 sim_fpu_add (sim_fpu *f,
997 const sim_fpu *l,
998 const sim_fpu *r)
999 {
1000 if (sim_fpu_is_snan (l))
1001 {
1002 *f = *l;
1003 f->class = sim_fpu_class_qnan;
1004 return sim_fpu_status_invalid_snan;
1005 }
1006 if (sim_fpu_is_snan (r))
1007 {
1008 *f = *r;
1009 f->class = sim_fpu_class_qnan;
1010 return sim_fpu_status_invalid_snan;
1011 }
1012 if (sim_fpu_is_qnan (l))
1013 {
1014 *f = *l;
1015 return 0;
1016 }
1017 if (sim_fpu_is_qnan (r))
1018 {
1019 *f = *r;
1020 return 0;
1021 }
1022 if (sim_fpu_is_infinity (l))
1023 {
1024 if (sim_fpu_is_infinity (r)
1025 && l->sign != r->sign)
1026 {
1027 *f = sim_fpu_qnan;
1028 return sim_fpu_status_invalid_isi;
1029 }
1030 *f = *l;
1031 return 0;
1032 }
1033 if (sim_fpu_is_infinity (r))
1034 {
1035 *f = *r;
1036 return 0;
1037 }
1038 if (sim_fpu_is_zero (l))
1039 {
1040 if (sim_fpu_is_zero (r))
1041 {
1042 *f = sim_fpu_zero;
1043 f->sign = l->sign & r->sign;
1044 }
1045 else
1046 *f = *r;
1047 return 0;
1048 }
1049 if (sim_fpu_is_zero (r))
1050 {
1051 *f = *l;
1052 return 0;
1053 }
1054 {
1055 int status = 0;
1056 int shift = l->normal_exp - r->normal_exp;
1057 unsigned64 lfraction;
1058 unsigned64 rfraction;
1059 /* use exp of larger */
1060 if (shift >= NR_FRAC_GUARD)
1061 {
1062 /* left has much bigger magnitude */
1063 *f = *l;
1064 return sim_fpu_status_inexact;
1065 }
1066 if (shift <= - NR_FRAC_GUARD)
1067 {
1068 /* right has much bigger magnitude */
1069 *f = *r;
1070 return sim_fpu_status_inexact;
1071 }
1072 lfraction = l->fraction;
1073 rfraction = r->fraction;
1074 if (shift > 0)
1075 {
1076 f->normal_exp = l->normal_exp;
1077 if (rfraction & LSMASK64 (shift - 1, 0))
1078 {
1079 status |= sim_fpu_status_inexact;
1080 rfraction |= LSBIT64 (shift); /* Stick LSBit. */
1081 }
1082 rfraction >>= shift;
1083 }
1084 else if (shift < 0)
1085 {
1086 f->normal_exp = r->normal_exp;
1087 if (lfraction & LSMASK64 (- shift - 1, 0))
1088 {
1089 status |= sim_fpu_status_inexact;
1090 lfraction |= LSBIT64 (- shift); /* Stick LSBit. */
1091 }
1092 lfraction >>= -shift;
1093 }
1094 else
1095 {
1096 f->normal_exp = r->normal_exp;
1097 }
1098
1099 /* Perform the addition. */
1100 if (l->sign)
1101 lfraction = - lfraction;
1102 if (r->sign)
1103 rfraction = - rfraction;
1104 f->fraction = lfraction + rfraction;
1105
1106 /* zero? */
1107 if (f->fraction == 0)
1108 {
1109 *f = sim_fpu_zero;
1110 return 0;
1111 }
1112
1113 /* sign? */
1114 f->class = sim_fpu_class_number;
1115 if (((signed64) f->fraction) >= 0)
1116 f->sign = 0;
1117 else
1118 {
1119 f->sign = 1;
1120 f->fraction = - f->fraction;
1121 }
1122
1123 /* Normalize it. */
1124 if ((f->fraction & IMPLICIT_2))
1125 {
1126 f->fraction = (f->fraction >> 1) | (f->fraction & 1);
1127 f->normal_exp ++;
1128 }
1129 else if (f->fraction < IMPLICIT_1)
1130 {
1131 do
1132 {
1133 f->fraction <<= 1;
1134 f->normal_exp --;
1135 }
1136 while (f->fraction < IMPLICIT_1);
1137 }
1138 ASSERT (f->fraction >= IMPLICIT_1 && f->fraction < IMPLICIT_2);
1139 return status;
1140 }
1141 }
1142
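/* Editor's sketch (not part of the library): the "stick LSBit" alignment
   used by sim_fpu_add and sim_fpu_sub.  When the smaller operand is
   shifted right to match exponents, any discarded non zero bits are
   folded into bit `shift' first, so later rounding still sees that the
   value was inexact.  */
#if 0
#include <stdint.h>
#include <assert.h>

static uint64_t
align_with_sticky (uint64_t fraction, int shift, int *inexact)
{
  if (fraction & (((uint64_t) 1 << shift) - 1))
    {
      *inexact = 1;
      fraction |= (uint64_t) 1 << shift;        /* sticky bit */
    }
  return fraction >> shift;
}

static void
align_check (void)
{
  int inexact = 0;
  /* 1011b shifted right by 2 loses 11b; the result keeps a sticky 1.  */
  assert (align_with_sticky (0xB, 2, &inexact) == 0x3);
  assert (inexact == 1);
}
#endif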
1143
1144 INLINE_SIM_FPU (int)
1145 sim_fpu_sub (sim_fpu *f,
1146 const sim_fpu *l,
1147 const sim_fpu *r)
1148 {
1149 if (sim_fpu_is_snan (l))
1150 {
1151 *f = *l;
1152 f->class = sim_fpu_class_qnan;
1153 return sim_fpu_status_invalid_snan;
1154 }
1155 if (sim_fpu_is_snan (r))
1156 {
1157 *f = *r;
1158 f->class = sim_fpu_class_qnan;
1159 return sim_fpu_status_invalid_snan;
1160 }
1161 if (sim_fpu_is_qnan (l))
1162 {
1163 *f = *l;
1164 return 0;
1165 }
1166 if (sim_fpu_is_qnan (r))
1167 {
1168 *f = *r;
1169 return 0;
1170 }
1171 if (sim_fpu_is_infinity (l))
1172 {
1173 if (sim_fpu_is_infinity (r)
1174 && l->sign == r->sign)
1175 {
1176 *f = sim_fpu_qnan;
1177 return sim_fpu_status_invalid_isi;
1178 }
1179 *f = *l;
1180 return 0;
1181 }
1182 if (sim_fpu_is_infinity (r))
1183 {
1184 *f = *r;
1185 f->sign = !r->sign;
1186 return 0;
1187 }
1188 if (sim_fpu_is_zero (l))
1189 {
1190 if (sim_fpu_is_zero (r))
1191 {
1192 *f = sim_fpu_zero;
1193 f->sign = l->sign & !r->sign;
1194 }
1195 else
1196 {
1197 *f = *r;
1198 f->sign = !r->sign;
1199 }
1200 return 0;
1201 }
1202 if (sim_fpu_is_zero (r))
1203 {
1204 *f = *l;
1205 return 0;
1206 }
1207 {
1208 int status = 0;
1209 int shift = l->normal_exp - r->normal_exp;
1210 unsigned64 lfraction;
1211 unsigned64 rfraction;
1212 /* use exp of larger */
1213 if (shift >= NR_FRAC_GUARD)
1214 {
1215 /* left has much bigger magnitude */
1216 *f = *l;
1217 return sim_fpu_status_inexact;
1218 }
1219 if (shift <= - NR_FRAC_GUARD)
1220 {
1221 /* right has much bigger magnitude */
1222 *f = *r;
1223 f->sign = !r->sign;
1224 return sim_fpu_status_inexact;
1225 }
1226 lfraction = l->fraction;
1227 rfraction = r->fraction;
1228 if (shift > 0)
1229 {
1230 f->normal_exp = l->normal_exp;
1231 if (rfraction & LSMASK64 (shift - 1, 0))
1232 {
1233 status |= sim_fpu_status_inexact;
1234 rfraction |= LSBIT64 (shift); /* Stick LSBit. */
1235 }
1236 rfraction >>= shift;
1237 }
1238 else if (shift < 0)
1239 {
1240 f->normal_exp = r->normal_exp;
1241 if (lfraction & LSMASK64 (- shift - 1, 0))
1242 {
1243 status |= sim_fpu_status_inexact;
1244 lfraction |= LSBIT64 (- shift); /* Stick LSBit. */
1245 }
1246 lfraction >>= -shift;
1247 }
1248 else
1249 {
1250 f->normal_exp = r->normal_exp;
1251 }
1252
1253 /* Perform the subtraction. */
1254 if (l->sign)
1255 lfraction = - lfraction;
1256 if (!r->sign)
1257 rfraction = - rfraction;
1258 f->fraction = lfraction + rfraction;
1259
1260 /* zero? */
1261 if (f->fraction == 0)
1262 {
1263 *f = sim_fpu_zero;
1264 return 0;
1265 }
1266
1267 /* sign? */
1268 f->class = sim_fpu_class_number;
1269 if (((signed64) f->fraction) >= 0)
1270 f->sign = 0;
1271 else
1272 {
1273 f->sign = 1;
1274 f->fraction = - f->fraction;
1275 }
1276
1277 /* Normalize it. */
1278 if ((f->fraction & IMPLICIT_2))
1279 {
1280 f->fraction = (f->fraction >> 1) | (f->fraction & 1);
1281 f->normal_exp ++;
1282 }
1283 else if (f->fraction < IMPLICIT_1)
1284 {
1285 do
1286 {
1287 f->fraction <<= 1;
1288 f->normal_exp --;
1289 }
1290 while (f->fraction < IMPLICIT_1);
1291 }
1292 ASSERT (f->fraction >= IMPLICIT_1 && f->fraction < IMPLICIT_2);
1293 return status;
1294 }
1295 }
1296
1297
1298 INLINE_SIM_FPU (int)
1299 sim_fpu_mul (sim_fpu *f,
1300 const sim_fpu *l,
1301 const sim_fpu *r)
1302 {
1303 if (sim_fpu_is_snan (l))
1304 {
1305 *f = *l;
1306 f->class = sim_fpu_class_qnan;
1307 return sim_fpu_status_invalid_snan;
1308 }
1309 if (sim_fpu_is_snan (r))
1310 {
1311 *f = *r;
1312 f->class = sim_fpu_class_qnan;
1313 return sim_fpu_status_invalid_snan;
1314 }
1315 if (sim_fpu_is_qnan (l))
1316 {
1317 *f = *l;
1318 return 0;
1319 }
1320 if (sim_fpu_is_qnan (r))
1321 {
1322 *f = *r;
1323 return 0;
1324 }
1325 if (sim_fpu_is_infinity (l))
1326 {
1327 if (sim_fpu_is_zero (r))
1328 {
1329 *f = sim_fpu_qnan;
1330 return sim_fpu_status_invalid_imz;
1331 }
1332 *f = *l;
1333 f->sign = l->sign ^ r->sign;
1334 return 0;
1335 }
1336 if (sim_fpu_is_infinity (r))
1337 {
1338 if (sim_fpu_is_zero (l))
1339 {
1340 *f = sim_fpu_qnan;
1341 return sim_fpu_status_invalid_imz;
1342 }
1343 *f = *r;
1344 f->sign = l->sign ^ r->sign;
1345 return 0;
1346 }
1347 if (sim_fpu_is_zero (l) || sim_fpu_is_zero (r))
1348 {
1349 *f = sim_fpu_zero;
1350 f->sign = l->sign ^ r->sign;
1351 return 0;
1352 }
1353 /* Calculate the mantissa by multiplying both 64bit numbers to get a
1354 128 bit number. */
1355 {
1356 unsigned64 low;
1357 unsigned64 high;
1358 unsigned64 nl = l->fraction & 0xffffffff;
1359 unsigned64 nh = l->fraction >> 32;
1360 unsigned64 ml = r->fraction & 0xffffffff;
1361 unsigned64 mh = r->fraction >>32;
1362 unsigned64 pp_ll = ml * nl;
1363 unsigned64 pp_hl = mh * nl;
1364 unsigned64 pp_lh = ml * nh;
1365 unsigned64 pp_hh = mh * nh;
1366 unsigned64 res2 = 0;
1367 unsigned64 res0 = 0;
1368 unsigned64 ps_hh__ = pp_hl + pp_lh;
1369 if (ps_hh__ < pp_hl)
1370 res2 += UNSIGNED64 (0x100000000);
1371 pp_hl = (ps_hh__ << 32) & UNSIGNED64 (0xffffffff00000000);
1372 res0 = pp_ll + pp_hl;
1373 if (res0 < pp_ll)
1374 res2++;
1375 res2 += ((ps_hh__ >> 32) & 0xffffffff) + pp_hh;
1376 high = res2;
1377 low = res0;
1378
1379 f->normal_exp = l->normal_exp + r->normal_exp;
1380 f->sign = l->sign ^ r->sign;
1381 f->class = sim_fpu_class_number;
1382
1383 /* Input is bounded by [1,2) ; [2^60,2^61)
1384 Output is bounded by [1,4) ; [2^120,2^122) */
1385
1386 /* Adjust the exponent according to where the decimal point ended
1387 up in the high 64 bit word. In the source the decimal point
1388 was at NR_FRAC_GUARD. */
1389 f->normal_exp += NR_FRAC_GUARD + 64 - (NR_FRAC_GUARD * 2);
1390
1391 /* The high word is bounded according to the above. Consequently
1392 it has never overflowed into IMPLICIT_2. */
1393 ASSERT (high < LSBIT64 (((NR_FRAC_GUARD + 1) * 2) - 64));
1394 ASSERT (high >= LSBIT64 ((NR_FRAC_GUARD * 2) - 64));
1395 ASSERT (LSBIT64 (((NR_FRAC_GUARD + 1) * 2) - 64) < IMPLICIT_1);
1396
1397 /* Normalize. */
1398 do
1399 {
1400 f->normal_exp--;
1401 high <<= 1;
1402 if (low & LSBIT64 (63))
1403 high |= 1;
1404 low <<= 1;
1405 }
1406 while (high < IMPLICIT_1);
1407
1408 ASSERT (high >= IMPLICIT_1 && high < IMPLICIT_2);
1409 if (low != 0)
1410 {
1411 f->fraction = (high | 1); /* sticky */
1412 return sim_fpu_status_inexact;
1413 }
1414 else
1415 {
1416 f->fraction = high;
1417 return 0;
1418 }
1419 return 0;
1420 }
1421 }
1422
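/* Editor's sketch (not part of the library): the 64 x 64 -> 128 bit
   multiply used by sim_fpu_mul, written with the same four 32 bit
   partial products, and checked against a product that also fits in
   64 bits.  */
#if 0
#include <stdint.h>
#include <assert.h>

static void
mul64_sketch (uint64_t n, uint64_t m, uint64_t *high, uint64_t *low)
{
  uint64_t nl = n & 0xffffffff, nh = n >> 32;
  uint64_t ml = m & 0xffffffff, mh = m >> 32;
  uint64_t pp_ll = ml * nl, pp_hl = mh * nl;
  uint64_t pp_lh = ml * nh, pp_hh = mh * nh;
  uint64_t mid = pp_hl + pp_lh;                 /* may wrap */
  uint64_t carry = (mid < pp_hl) ? ((uint64_t) 1 << 32) : 0;
  uint64_t lo = pp_ll + (mid << 32);
  *high = pp_hh + (mid >> 32) + carry + (lo < pp_ll);
  *low = lo;
}

static void
mul64_check (void)
{
  uint64_t hi, lo;
  mul64_sketch ((uint64_t) 1 << 32, 3, &hi, &lo);       /* 3 * 2^32 */
  assert (hi == 0 && lo == (uint64_t) 3 << 32);
}
#endif
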
1423 INLINE_SIM_FPU (int)
1424 sim_fpu_div (sim_fpu *f,
1425 const sim_fpu *l,
1426 const sim_fpu *r)
1427 {
1428 if (sim_fpu_is_snan (l))
1429 {
1430 *f = *l;
1431 f->class = sim_fpu_class_qnan;
1432 return sim_fpu_status_invalid_snan;
1433 }
1434 if (sim_fpu_is_snan (r))
1435 {
1436 *f = *r;
1437 f->class = sim_fpu_class_qnan;
1438 return sim_fpu_status_invalid_snan;
1439 }
1440 if (sim_fpu_is_qnan (l))
1441 {
1442 *f = *l;
1443 f->class = sim_fpu_class_qnan;
1444 return 0;
1445 }
1446 if (sim_fpu_is_qnan (r))
1447 {
1448 *f = *r;
1449 f->class = sim_fpu_class_qnan;
1450 return 0;
1451 }
1452 if (sim_fpu_is_infinity (l))
1453 {
1454 if (sim_fpu_is_infinity (r))
1455 {
1456 *f = sim_fpu_qnan;
1457 return sim_fpu_status_invalid_idi;
1458 }
1459 else
1460 {
1461 *f = *l;
1462 f->sign = l->sign ^ r->sign;
1463 return 0;
1464 }
1465 }
1466 if (sim_fpu_is_zero (l))
1467 {
1468 if (sim_fpu_is_zero (r))
1469 {
1470 *f = sim_fpu_qnan;
1471 return sim_fpu_status_invalid_zdz;
1472 }
1473 else
1474 {
1475 *f = *l;
1476 f->sign = l->sign ^ r->sign;
1477 return 0;
1478 }
1479 }
1480 if (sim_fpu_is_infinity (r))
1481 {
1482 *f = sim_fpu_zero;
1483 f->sign = l->sign ^ r->sign;
1484 return 0;
1485 }
1486 if (sim_fpu_is_zero (r))
1487 {
1488 f->class = sim_fpu_class_infinity;
1489 f->sign = l->sign ^ r->sign;
1490 return sim_fpu_status_invalid_div0;
1491 }
1492
1493 /* Calculate the quotient mantissa by dividing the two 64 bit
1494 fractions, one bit at a time. */
1495 {
1496 /* quotient = (numerator / denominator)
1497 x 2^(numerator exponent - denominator exponent)
1498 */
1499 unsigned64 numerator;
1500 unsigned64 denominator;
1501 unsigned64 quotient;
1502 unsigned64 bit;
1503
1504 f->class = sim_fpu_class_number;
1505 f->sign = l->sign ^ r->sign;
1506 f->normal_exp = l->normal_exp - r->normal_exp;
1507
1508 numerator = l->fraction;
1509 denominator = r->fraction;
1510
1511 /* Fraction will be less than 1.0 */
1512 if (numerator < denominator)
1513 {
1514 numerator <<= 1;
1515 f->normal_exp--;
1516 }
1517 ASSERT (numerator >= denominator);
1518
1519 /* Gain extra precision, already used one spare bit. */
1520 numerator <<= NR_SPARE;
1521 denominator <<= NR_SPARE;
1522
1523 /* Do the divide one bit at a time.  Optimize??? */
1524 quotient = 0;
1525 bit = (IMPLICIT_1 << NR_SPARE);
1526 while (bit)
1527 {
1528 if (numerator >= denominator)
1529 {
1530 quotient |= bit;
1531 numerator -= denominator;
1532 }
1533 bit >>= 1;
1534 numerator <<= 1;
1535 }
1536
1537 /* Discard (but save) the extra bits. */
1538 if ((quotient & LSMASK64 (NR_SPARE -1, 0)))
1539 quotient = (quotient >> NR_SPARE) | 1;
1540 else
1541 quotient = (quotient >> NR_SPARE);
1542
1543 f->fraction = quotient;
1544 ASSERT (f->fraction >= IMPLICIT_1 && f->fraction < IMPLICIT_2);
1545 if (numerator != 0)
1546 {
1547 f->fraction |= 1; /* Stick remaining bits. */
1548 return sim_fpu_status_inexact;
1549 }
1550 else
1551 return 0;
1552 }
1553 }
1554
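/* Editor's sketch (not part of the library): the one-bit-at-a-time
   restoring division used by sim_fpu_div, reduced to plain integers.
   Each iteration that manages to subtract the denominator contributes
   one quotient bit.  */
#if 0
#include <stdint.h>
#include <assert.h>

static uint64_t
divide_bitwise (uint64_t numerator, uint64_t denominator, int nr_bits)
{
  uint64_t quotient = 0;
  uint64_t bit = (uint64_t) 1 << (nr_bits - 1);
  while (bit)
    {
      if (numerator >= denominator)
        {
          quotient |= bit;
          numerator -= denominator;
        }
      bit >>= 1;
      numerator <<= 1;
    }
  return quotient;
}

static void
divide_check (void)
{
  /* 3/2 with a leading one and three fraction bits: 1.100b == 0xC.  */
  assert (divide_bitwise (3, 2, 4) == 0xC);
}
#endif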
1555
1556 INLINE_SIM_FPU (int)
1557 sim_fpu_rem (sim_fpu *f,
1558 const sim_fpu *l,
1559 const sim_fpu *r)
1560 {
1561 if (sim_fpu_is_snan (l))
1562 {
1563 *f = *l;
1564 f->class = sim_fpu_class_qnan;
1565 return sim_fpu_status_invalid_snan;
1566 }
1567 if (sim_fpu_is_snan (r))
1568 {
1569 *f = *r;
1570 f->class = sim_fpu_class_qnan;
1571 return sim_fpu_status_invalid_snan;
1572 }
1573 if (sim_fpu_is_qnan (l))
1574 {
1575 *f = *l;
1576 f->class = sim_fpu_class_qnan;
1577 return 0;
1578 }
1579 if (sim_fpu_is_qnan (r))
1580 {
1581 *f = *r;
1582 f->class = sim_fpu_class_qnan;
1583 return 0;
1584 }
1585 if (sim_fpu_is_infinity (l))
1586 {
1587 *f = sim_fpu_qnan;
1588 return sim_fpu_status_invalid_irx;
1589 }
1590 if (sim_fpu_is_zero (r))
1591 {
1592 *f = sim_fpu_qnan;
1593 return sim_fpu_status_invalid_div0;
1594 }
1595 if (sim_fpu_is_zero (l))
1596 {
1597 *f = *l;
1598 return 0;
1599 }
1600 if (sim_fpu_is_infinity (r))
1601 {
1602 *f = *l;
1603 return 0;
1604 }
1605 {
1606 sim_fpu n, tmp;
1607
1608 /* Remainder is calculated as l-n*r, where n is l/r rounded to the
1609 nearest integer. The variable n is rounded half even. */
1610
1611 sim_fpu_div (&n, l, r);
1612 sim_fpu_round_64 (&n, 0, 0);
1613
1614 if (n.normal_exp < -1) /* If n looks like zero just return l. */
1615 {
1616 *f = *l;
1617 return 0;
1618 }
1619 else if (n.class == sim_fpu_class_number
1620 && n.normal_exp <= (NR_FRAC_GUARD)) /* If not too large, round. */
1621 do_normal_round (&n, (NR_FRAC_GUARD) - n.normal_exp, sim_fpu_round_near);
1622
1623 /* Mark 0's as zero so multiply can detect zero. */
1624 if (n.fraction == 0)
1625 n.class = sim_fpu_class_zero;
1626
1627 /* Calculate n*r. */
1628 sim_fpu_mul (&tmp, &n, r);
1629 sim_fpu_round_64 (&tmp, 0, 0);
1630
1631 /* Finally calculate l-n*r. */
1632 sim_fpu_sub (f, l, &tmp);
1633
1634 return 0;
1635 }
1636 }
1637
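/* Editor's sketch (not part of the library): the IEEE style remainder
   described above, reproduced with the host's remainder(), which also
   rounds the quotient to the nearest integer, ties to even.  */
#if 0
#include <math.h>
#include <assert.h>

static void
rem_check (void)
{
  /* 5.5/2 = 2.75 rounds to n = 3, so 5.5 - 3*2 = -0.5.  */
  assert (remainder (5.5, 2.0) == -0.5);
  /* 7/2 = 3.5 is a tie and rounds half even to n = 4: 7 - 4*2 = -1.  */
  assert (remainder (7.0, 2.0) == -1.0);
}
#endif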
1638
1639 INLINE_SIM_FPU (int)
1640 sim_fpu_max (sim_fpu *f,
1641 const sim_fpu *l,
1642 const sim_fpu *r)
1643 {
1644 if (sim_fpu_is_snan (l))
1645 {
1646 *f = *l;
1647 f->class = sim_fpu_class_qnan;
1648 return sim_fpu_status_invalid_snan;
1649 }
1650 if (sim_fpu_is_snan (r))
1651 {
1652 *f = *r;
1653 f->class = sim_fpu_class_qnan;
1654 return sim_fpu_status_invalid_snan;
1655 }
1656 if (sim_fpu_is_qnan (l))
1657 {
1658 *f = *l;
1659 return 0;
1660 }
1661 if (sim_fpu_is_qnan (r))
1662 {
1663 *f = *r;
1664 return 0;
1665 }
1666 if (sim_fpu_is_infinity (l))
1667 {
1668 if (sim_fpu_is_infinity (r)
1669 && l->sign == r->sign)
1670 {
1671 *f = sim_fpu_qnan;
1672 return sim_fpu_status_invalid_isi;
1673 }
1674 if (l->sign)
1675 *f = *r; /* -inf < anything */
1676 else
1677 *f = *l; /* +inf > anything */
1678 return 0;
1679 }
1680 if (sim_fpu_is_infinity (r))
1681 {
1682 if (r->sign)
1683 *f = *l; /* anything > -inf */
1684 else
1685 *f = *r; /* anything < +inf */
1686 return 0;
1687 }
1688 if (l->sign > r->sign)
1689 {
1690 *f = *r; /* -ve < +ve */
1691 return 0;
1692 }
1693 if (l->sign < r->sign)
1694 {
1695 *f = *l; /* +ve > -ve */
1696 return 0;
1697 }
1698 ASSERT (l->sign == r->sign);
1699 if (l->normal_exp > r->normal_exp
1700 || (l->normal_exp == r->normal_exp
1701 && l->fraction > r->fraction))
1702 {
1703 /* |l| > |r| */
1704 if (l->sign)
1705 *f = *r; /* -ve < -ve */
1706 else
1707 *f = *l; /* +ve > +ve */
1708 return 0;
1709 }
1710 else
1711 {
1712 /* |l| <= |r| */
1713 if (l->sign)
1714 *f = *l; /* -ve > -ve */
1715 else
1716 *f = *r; /* +ve < +ve */
1717 return 0;
1718 }
1719 }
1720
1721
1722 INLINE_SIM_FPU (int)
1723 sim_fpu_min (sim_fpu *f,
1724 const sim_fpu *l,
1725 const sim_fpu *r)
1726 {
1727 if (sim_fpu_is_snan (l))
1728 {
1729 *f = *l;
1730 f->class = sim_fpu_class_qnan;
1731 return sim_fpu_status_invalid_snan;
1732 }
1733 if (sim_fpu_is_snan (r))
1734 {
1735 *f = *r;
1736 f->class = sim_fpu_class_qnan;
1737 return sim_fpu_status_invalid_snan;
1738 }
1739 if (sim_fpu_is_qnan (l))
1740 {
1741 *f = *l;
1742 return 0;
1743 }
1744 if (sim_fpu_is_qnan (r))
1745 {
1746 *f = *r;
1747 return 0;
1748 }
1749 if (sim_fpu_is_infinity (l))
1750 {
1751 if (sim_fpu_is_infinity (r)
1752 && l->sign == r->sign)
1753 {
1754 *f = sim_fpu_qnan;
1755 return sim_fpu_status_invalid_isi;
1756 }
1757 if (l->sign)
1758 *f = *l; /* -inf < anything */
1759 else
1760 *f = *r; /* +inf > anything */
1761 return 0;
1762 }
1763 if (sim_fpu_is_infinity (r))
1764 {
1765 if (r->sign)
1766 *f = *r; /* anything > -inf */
1767 else
1768 *f = *l; /* anything < +inf */
1769 return 0;
1770 }
1771 if (l->sign > r->sign)
1772 {
1773 *f = *l; /* -ve < +ve */
1774 return 0;
1775 }
1776 if (l->sign < r->sign)
1777 {
1778 *f = *r; /* +ve > -ve */
1779 return 0;
1780 }
1781 ASSERT (l->sign == r->sign);
1782 if (l->normal_exp > r->normal_exp
1783 || (l->normal_exp == r->normal_exp
1784 && l->fraction > r->fraction))
1785 {
1786 /* |l| > |r| */
1787 if (l->sign)
1788 *f = *l; /* -ve < -ve */
1789 else
1790 *f = *r; /* +ve > +ve */
1791 return 0;
1792 }
1793 else
1794 {
1795 /* |l| <= |r| */
1796 if (l->sign)
1797 *f = *r; /* -ve > -ve */
1798 else
1799 *f = *l; /* +ve < +ve */
1800 return 0;
1801 }
1802 }
1803
1804
1805 INLINE_SIM_FPU (int)
1806 sim_fpu_neg (sim_fpu *f,
1807 const sim_fpu *r)
1808 {
1809 if (sim_fpu_is_snan (r))
1810 {
1811 *f = *r;
1812 f->class = sim_fpu_class_qnan;
1813 return sim_fpu_status_invalid_snan;
1814 }
1815 if (sim_fpu_is_qnan (r))
1816 {
1817 *f = *r;
1818 return 0;
1819 }
1820 *f = *r;
1821 f->sign = !r->sign;
1822 return 0;
1823 }
1824
1825
1826 INLINE_SIM_FPU (int)
1827 sim_fpu_abs (sim_fpu *f,
1828 const sim_fpu *r)
1829 {
1830 *f = *r;
1831 f->sign = 0;
1832 if (sim_fpu_is_snan (r))
1833 {
1834 f->class = sim_fpu_class_qnan;
1835 return sim_fpu_status_invalid_snan;
1836 }
1837 return 0;
1838 }
1839
1840
1841 INLINE_SIM_FPU (int)
1842 sim_fpu_inv (sim_fpu *f,
1843 const sim_fpu *r)
1844 {
1845 return sim_fpu_div (f, &sim_fpu_one, r);
1846 }
1847
1848
1849 INLINE_SIM_FPU (int)
1850 sim_fpu_sqrt (sim_fpu *f,
1851 const sim_fpu *r)
1852 {
1853 if (sim_fpu_is_snan (r))
1854 {
1855 *f = sim_fpu_qnan;
1856 return sim_fpu_status_invalid_snan;
1857 }
1858 if (sim_fpu_is_qnan (r))
1859 {
1860 *f = sim_fpu_qnan;
1861 return 0;
1862 }
1863 if (sim_fpu_is_zero (r))
1864 {
1865 f->class = sim_fpu_class_zero;
1866 f->sign = r->sign;
1867 f->normal_exp = 0;
1868 return 0;
1869 }
1870 if (sim_fpu_is_infinity (r))
1871 {
1872 if (r->sign)
1873 {
1874 *f = sim_fpu_qnan;
1875 return sim_fpu_status_invalid_sqrt;
1876 }
1877 else
1878 {
1879 f->class = sim_fpu_class_infinity;
1880 f->sign = 0;
1882 return 0;
1883 }
1884 }
1885 if (r->sign)
1886 {
1887 *f = sim_fpu_qnan;
1888 return sim_fpu_status_invalid_sqrt;
1889 }
1890
1891 /* @(#)e_sqrt.c 5.1 93/09/24 */
1892 /*
1893 * ====================================================
1894 * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
1895 *
1896 * Developed at SunPro, a Sun Microsystems, Inc. business.
1897 * Permission to use, copy, modify, and distribute this
1898 * software is freely granted, provided that this notice
1899 * is preserved.
1900 * ====================================================
1901 */
1902
1903 /* __ieee754_sqrt(x)
1904 * Return correctly rounded sqrt.
1905 * ------------------------------------------
1906 * | Use the hardware sqrt if you have one |
1907 * ------------------------------------------
1908 * Method:
1909 * Bit by bit method using integer arithmetic. (Slow, but portable)
1910 * 1. Normalization
1911 * Scale x to y in [1,4) with even powers of 2:
1912 * find an integer k such that 1 <= (y=x*2^(2k)) < 4, then
1913 * sqrt(x) = 2^(-k) * sqrt(y)
1914 -
1915 - Since:
1916 - sqrt ( x*2^(2m) ) = sqrt(x).2^m ; m even
1917 - sqrt ( x*2^(2m + 1) ) = sqrt(2.x).2^m ; m odd
1918 - Define:
1919 - y = ((m even) ? x : 2.x)
1920 - Then:
1921 - y in [1, 4) ; [IMPLICIT_1,IMPLICIT_4)
1922 - And:
1923 - sqrt (y) in [1, 2) ; [IMPLICIT_1,IMPLICIT_2)
1924 -
1925 * 2. Bit by bit computation
1926 * Let q(i) = sqrt(y) truncated to i bits after the binary point
1927 * (q(0) = 1), and define
1928 *
1929 * s(i) = 2*q(i), y(i) = 2^(i+1) * (y - q(i)^2). (1)
1930 *
1931 * To compute q(i+1) from q(i), one checks whether
1932 *
1933 * (q(i) + 2^-(i+1))^2 <= y. (2)
1934 *
1935 * If (2) is false, then q(i+1) = q(i); otherwise q(i+1) = q(i) + 2^-(i+1).
1936 *
1937 * With some algebraic manipulation, it is not difficult to see
1938 * that (2) is equivalent to
1939 *
1940 * s(i) + 2^-(i+1) <= y(i) (3)
1941 *
1942 * The advantage of (3) is that s(i) and y(i) can be computed by
1943 * the following recurrence formula:
1944 * if (3) is false
1945 *
1946 * s(i+1) = s(i), y(i+1) = y(i); (4)
1947 *
1948 -
1949 - NOTE: y(i+1) = 2*y(i)
1950 -
1951 * otherwise,
1952 *
1953 * s(i+1) = s(i) + 2^-i, y(i+1) = y(i) - s(i) - 2^-(i+1) (5)
1954 *
1955 -
1956 - NOTE: y(i+1) = 2*(y(i) - s(i) - 2^-(i+1))
1957 -
1958 * One may easily use induction to prove (4) and (5).
1971 * Note. Since the left hand side of (3) contains only i+2 bits,
1972 * it is not necessary to do a full (53-bit) comparison
1973 * in (3).
1974 * 3. Final rounding
1975 * After generating the 53 bits result, we compute one more bit.
1976 * Together with the remainder, we can decide whether the
1977 * result is exact, bigger than 1/2ulp, or less than 1/2ulp
1978 * (it will never equal to 1/2ulp).
1979 * The rounding mode can be detected by checking whether
1980 * huge + tiny is equal to huge, and whether huge - tiny is
1981 * equal to huge for some floating point number "huge" and "tiny".
1982 *
1983 * Special cases:
1984 * sqrt(+-0) = +-0 ... exact
1985 * sqrt(inf) = inf
1986 * sqrt(-ve) = NaN ... with invalid signal
1987 * sqrt(NaN) = NaN ... with invalid signal for signalling NaN
1988 *
1989 * Other methods : see the appended file at the end of the program below.
1990 *---------------
1991 */
1992
1993 {
1994 /* Generate sqrt(x) bit by bit. */
1995 unsigned64 y;
1996 unsigned64 q;
1997 unsigned64 s;
1998 unsigned64 b;
1999
2000 f->class = sim_fpu_class_number;
2001 f->sign = 0;
2002 y = r->fraction;
2003 f->normal_exp = (r->normal_exp >> 1); /* exp = [exp/2] */
2004
2005 /* Odd exp, double x to make it even. */
2006 ASSERT (y >= IMPLICIT_1 && y < IMPLICIT_4);
2007 if ((r->normal_exp & 1))
2008 {
2009 y += y;
2010 }
2011 ASSERT (y >= IMPLICIT_1 && y < (IMPLICIT_2 << 1));
2012
2013 /* Let loop determine first value of s (either 1 or 2) */
2014 b = IMPLICIT_1;
2015 q = 0;
2016 s = 0;
2017
2018 while (b)
2019 {
2020 unsigned64 t = s + b;
2021 if (t <= y)
2022 {
2023 s |= (b << 1);
2024 y -= t;
2025 q |= b;
2026 }
2027 y <<= 1;
2028 b >>= 1;
2029 }
2030
2031 ASSERT (q >= IMPLICIT_1 && q < IMPLICIT_2);
2032 f->fraction = q;
2033 if (y != 0)
2034 {
2035 f->fraction |= 1; /* Stick remaining bits. */
2036 return sim_fpu_status_inexact;
2037 }
2038 else
2039 return 0;
2040 }
2041 }
2042
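/* Editor's sketch (not part of the library): the bit by bit square root
   loop above, applied to a plain integer scaled so the answer carries
   `nr_bits' fraction bits.  sqrt(2) to 8 fraction bits truncates to
   1.01101010b, i.e. 0x16A with the leading one included.  */
#if 0
#include <stdint.h>
#include <assert.h>

static uint64_t
sqrt_bitwise (uint64_t y, int nr_bits)
{
  uint64_t b = (uint64_t) 1 << nr_bits;  /* plays the role of IMPLICIT_1 */
  uint64_t q = 0, s = 0;
  while (b)
    {
      uint64_t t = s + b;
      if (t <= y)
        {
          s |= (b << 1);
          y -= t;
          q |= b;
        }
      y <<= 1;
      b >>= 1;
    }
  return q;
}

static void
sqrt_check (void)
{
  assert (sqrt_bitwise ((uint64_t) 2 << 8, 8) == 0x16A);
}
#endif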
2043
2044 /* int/long <-> sim_fpu */
2045
2046 INLINE_SIM_FPU (int)
2047 sim_fpu_i32to (sim_fpu *f,
2048 signed32 i,
2049 sim_fpu_round round)
2050 {
2051 i2fpu (f, i, 0);
2052 return 0;
2053 }
2054
2055 INLINE_SIM_FPU (int)
2056 sim_fpu_u32to (sim_fpu *f,
2057 unsigned32 u,
2058 sim_fpu_round round)
2059 {
2060 u2fpu (f, u, 0);
2061 return 0;
2062 }
2063
2064 INLINE_SIM_FPU (int)
2065 sim_fpu_i64to (sim_fpu *f,
2066 signed64 i,
2067 sim_fpu_round round)
2068 {
2069 i2fpu (f, i, 1);
2070 return 0;
2071 }
2072
2073 INLINE_SIM_FPU (int)
2074 sim_fpu_u64to (sim_fpu *f,
2075 unsigned64 u,
2076 sim_fpu_round round)
2077 {
2078 u2fpu (f, u, 1);
2079 return 0;
2080 }
2081
2082
2083 INLINE_SIM_FPU (int)
2084 sim_fpu_to32i (signed32 *i,
2085 const sim_fpu *f,
2086 sim_fpu_round round)
2087 {
2088 signed64 i64;
2089 int status = fpu2i (&i64, f, 0, round);
2090 *i = i64;
2091 return status;
2092 }
2093
2094 INLINE_SIM_FPU (int)
2095 sim_fpu_to32u (unsigned32 *u,
2096 const sim_fpu *f,
2097 sim_fpu_round round)
2098 {
2099 unsigned64 u64;
2100 int status = fpu2u (&u64, f, 0);
2101 *u = u64;
2102 return status;
2103 }
2104
2105 INLINE_SIM_FPU (int)
2106 sim_fpu_to64i (signed64 *i,
2107 const sim_fpu *f,
2108 sim_fpu_round round)
2109 {
2110 return fpu2i (i, f, 1, round);
2111 }
2112
2113
2114 INLINE_SIM_FPU (int)
2115 sim_fpu_to64u (unsigned64 *u,
2116 const sim_fpu *f,
2117 sim_fpu_round round)
2118 {
2119 return fpu2u (u, f, 1);
2120 }
2121
2122
2123
2124 /* sim_fpu -> host format */
2125
2126 #if 0
2127 INLINE_SIM_FPU (float)
2128 sim_fpu_2f (const sim_fpu *f)
2129 {
2130 return fval.d;
2131 }
2132 #endif
2133
2134
2135 INLINE_SIM_FPU (double)
2136 sim_fpu_2d (const sim_fpu *s)
2137 {
2138 sim_fpu_map val;
2139 if (sim_fpu_is_snan (s))
2140 {
2141 /* gag SNaN's */
2142 sim_fpu n = *s;
2143 n.class = sim_fpu_class_qnan;
2144 val.i = pack_fpu (&n, 1);
2145 }
2146 else
2147 {
2148 val.i = pack_fpu (s, 1);
2149 }
2150 return val.d;
2151 }
2152
2153
2154 #if 0
2155 INLINE_SIM_FPU (void)
2156 sim_fpu_f2 (sim_fpu *f,
2157 float s)
2158 {
2159 sim_fpu_map val;
2160 val.d = s;
2161 unpack_fpu (f, val.i, 1);
2162 }
2163 #endif
2164
2165
2166 INLINE_SIM_FPU (void)
2167 sim_fpu_d2 (sim_fpu *f,
2168 double d)
2169 {
2170 sim_fpu_map val;
2171 val.d = d;
2172 unpack_fpu (f, val.i, 1);
2173 }
2174
2175
2176 /* General */
2177
2178 INLINE_SIM_FPU (int)
2179 sim_fpu_is_nan (const sim_fpu *d)
2180 {
2181 switch (d->class)
2182 {
2183 case sim_fpu_class_qnan:
2184 case sim_fpu_class_snan:
2185 return 1;
2186 default:
2187 return 0;
2188 }
2189 }
2190
2191 INLINE_SIM_FPU (int)
2192 sim_fpu_is_qnan (const sim_fpu *d)
2193 {
2194 switch (d->class)
2195 {
2196 case sim_fpu_class_qnan:
2197 return 1;
2198 default:
2199 return 0;
2200 }
2201 }
2202
2203 INLINE_SIM_FPU (int)
2204 sim_fpu_is_snan (const sim_fpu *d)
2205 {
2206 switch (d->class)
2207 {
2208 case sim_fpu_class_snan:
2209 return 1;
2210 default:
2211 return 0;
2212 }
2213 }
2214
2215 INLINE_SIM_FPU (int)
2216 sim_fpu_is_zero (const sim_fpu *d)
2217 {
2218 switch (d->class)
2219 {
2220 case sim_fpu_class_zero:
2221 return 1;
2222 default:
2223 return 0;
2224 }
2225 }
2226
2227 INLINE_SIM_FPU (int)
2228 sim_fpu_is_infinity (const sim_fpu *d)
2229 {
2230 switch (d->class)
2231 {
2232 case sim_fpu_class_infinity:
2233 return 1;
2234 default:
2235 return 0;
2236 }
2237 }
2238
2239 INLINE_SIM_FPU (int)
2240 sim_fpu_is_number (const sim_fpu *d)
2241 {
2242 switch (d->class)
2243 {
2244 case sim_fpu_class_denorm:
2245 case sim_fpu_class_number:
2246 return 1;
2247 default:
2248 return 0;
2249 }
2250 }
2251
2252 INLINE_SIM_FPU (int)
2253 sim_fpu_is_denorm (const sim_fpu *d)
2254 {
2255 switch (d->class)
2256 {
2257 case sim_fpu_class_denorm:
2258 return 1;
2259 default:
2260 return 0;
2261 }
2262 }
2263
2264
2265 INLINE_SIM_FPU (int)
2266 sim_fpu_sign (const sim_fpu *d)
2267 {
2268 return d->sign;
2269 }
2270
2271
2272 INLINE_SIM_FPU (int)
2273 sim_fpu_exp (const sim_fpu *d)
2274 {
2275 return d->normal_exp;
2276 }
2277
2278
2279 INLINE_SIM_FPU (unsigned64)
2280 sim_fpu_fraction (const sim_fpu *d)
2281 {
2282 return d->fraction;
2283 }
2284
2285
2286 INLINE_SIM_FPU (unsigned64)
2287 sim_fpu_guard (const sim_fpu *d, int is_double)
2288 {
2289 unsigned64 rv;
2290 unsigned64 guardmask = LSMASK64 (NR_GUARDS - 1, 0);
2291 rv = (d->fraction & guardmask) >> NR_PAD;
2292 return rv;
2293 }
2294
2295
2296 INLINE_SIM_FPU (int)
2297 sim_fpu_is (const sim_fpu *d)
2298 {
2299 switch (d->class)
2300 {
2301 case sim_fpu_class_qnan:
2302 return SIM_FPU_IS_QNAN;
2303 case sim_fpu_class_snan:
2304 return SIM_FPU_IS_SNAN;
2305 case sim_fpu_class_infinity:
2306 if (d->sign)
2307 return SIM_FPU_IS_NINF;
2308 else
2309 return SIM_FPU_IS_PINF;
2310 case sim_fpu_class_number:
2311 if (d->sign)
2312 return SIM_FPU_IS_NNUMBER;
2313 else
2314 return SIM_FPU_IS_PNUMBER;
2315 case sim_fpu_class_denorm:
2316 if (d->sign)
2317 return SIM_FPU_IS_NDENORM;
2318 else
2319 return SIM_FPU_IS_PDENORM;
2320 case sim_fpu_class_zero:
2321 if (d->sign)
2322 return SIM_FPU_IS_NZERO;
2323 else
2324 return SIM_FPU_IS_PZERO;
2325 default:
2326 return -1;
2327 abort ();
2328 }
2329 }
2330
2331 INLINE_SIM_FPU (int)
2332 sim_fpu_cmp (const sim_fpu *l, const sim_fpu *r)
2333 {
2334 sim_fpu res;
2335 sim_fpu_sub (&res, l, r);
2336 return sim_fpu_is (&res);
2337 }
2338
2339 INLINE_SIM_FPU (int)
2340 sim_fpu_is_lt (const sim_fpu *l, const sim_fpu *r)
2341 {
2342 int status;
2343 sim_fpu_lt (&status, l, r);
2344 return status;
2345 }
2346
2347 INLINE_SIM_FPU (int)
2348 sim_fpu_is_le (const sim_fpu *l, const sim_fpu *r)
2349 {
2350 int is;
2351 sim_fpu_le (&is, l, r);
2352 return is;
2353 }
2354
2355 INLINE_SIM_FPU (int)
2356 sim_fpu_is_eq (const sim_fpu *l, const sim_fpu *r)
2357 {
2358 int is;
2359 sim_fpu_eq (&is, l, r);
2360 return is;
2361 }
2362
2363 INLINE_SIM_FPU (int)
2364 sim_fpu_is_ne (const sim_fpu *l, const sim_fpu *r)
2365 {
2366 int is;
2367 sim_fpu_ne (&is, l, r);
2368 return is;
2369 }
2370
2371 INLINE_SIM_FPU (int)
2372 sim_fpu_is_ge (const sim_fpu *l, const sim_fpu *r)
2373 {
2374 int is;
2375 sim_fpu_ge (&is, l, r);
2376 return is;
2377 }
2378
2379 INLINE_SIM_FPU (int)
2380 sim_fpu_is_gt (const sim_fpu *l, const sim_fpu *r)
2381 {
2382 int is;
2383 sim_fpu_gt (&is, l, r);
2384 return is;
2385 }
2386
2387
2388 /* Compare operators */
2389
2390 INLINE_SIM_FPU (int)
2391 sim_fpu_lt (int *is,
2392 const sim_fpu *l,
2393 const sim_fpu *r)
2394 {
2395 if (!sim_fpu_is_nan (l) && !sim_fpu_is_nan (r))
2396 {
2397 sim_fpu_map lval;
2398 sim_fpu_map rval;
2399 lval.i = pack_fpu (l, 1);
2400 rval.i = pack_fpu (r, 1);
2401 (*is) = (lval.d < rval.d);
2402 return 0;
2403 }
2404 else if (sim_fpu_is_snan (l) || sim_fpu_is_snan (r))
2405 {
2406 *is = 0;
2407 return sim_fpu_status_invalid_snan;
2408 }
2409 else
2410 {
2411 *is = 0;
2412 return sim_fpu_status_invalid_qnan;
2413 }
2414 }
2415
2416 INLINE_SIM_FPU (int)
2417 sim_fpu_le (int *is,
2418 const sim_fpu *l,
2419 const sim_fpu *r)
2420 {
2421 if (!sim_fpu_is_nan (l) && !sim_fpu_is_nan (r))
2422 {
2423 sim_fpu_map lval;
2424 sim_fpu_map rval;
2425 lval.i = pack_fpu (l, 1);
2426 rval.i = pack_fpu (r, 1);
2427 *is = (lval.d <= rval.d);
2428 return 0;
2429 }
2430 else if (sim_fpu_is_snan (l) || sim_fpu_is_snan (r))
2431 {
2432 *is = 0;
2433 return sim_fpu_status_invalid_snan;
2434 }
2435 else
2436 {
2437 *is = 0;
2438 return sim_fpu_status_invalid_qnan;
2439 }
2440 }
2441
2442 INLINE_SIM_FPU (int)
2443 sim_fpu_eq (int *is,
2444 const sim_fpu *l,
2445 const sim_fpu *r)
2446 {
2447 if (!sim_fpu_is_nan (l) && !sim_fpu_is_nan (r))
2448 {
2449 sim_fpu_map lval;
2450 sim_fpu_map rval;
2451 lval.i = pack_fpu (l, 1);
2452 rval.i = pack_fpu (r, 1);
2453 (*is) = (lval.d == rval.d);
2454 return 0;
2455 }
2456 else if (sim_fpu_is_snan (l) || sim_fpu_is_snan (r))
2457 {
2458 *is = 0;
2459 return sim_fpu_status_invalid_snan;
2460 }
2461 else
2462 {
2463 *is = 0;
2464 return sim_fpu_status_invalid_qnan;
2465 }
2466 }
2467
2468 INLINE_SIM_FPU (int)
2469 sim_fpu_ne (int *is,
2470 const sim_fpu *l,
2471 const sim_fpu *r)
2472 {
2473 if (!sim_fpu_is_nan (l) && !sim_fpu_is_nan (r))
2474 {
2475 sim_fpu_map lval;
2476 sim_fpu_map rval;
2477 lval.i = pack_fpu (l, 1);
2478 rval.i = pack_fpu (r, 1);
2479 (*is) = (lval.d != rval.d);
2480 return 0;
2481 }
2482 else if (sim_fpu_is_snan (l) || sim_fpu_is_snan (r))
2483 {
2484 *is = 0;
2485 return sim_fpu_status_invalid_snan;
2486 }
2487 else
2488 {
2489 *is = 0;
2490 return sim_fpu_status_invalid_qnan;
2491 }
2492 }
2493
2494 INLINE_SIM_FPU (int)
2495 sim_fpu_ge (int *is,
2496 const sim_fpu *l,
2497 const sim_fpu *r)
2498 {
2499 return sim_fpu_le (is, r, l);
2500 }
2501
2502 INLINE_SIM_FPU (int)
2503 sim_fpu_gt (int *is,
2504 const sim_fpu *l,
2505 const sim_fpu *r)
2506 {
2507 return sim_fpu_lt (is, r, l);
2508 }
2509
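/* Editor's usage sketch (not part of the library): the comparisons above
   report unordered operands (NaNs) through their return status, not
   through *is, so callers usually look at both.  The helper name is made
   up for illustration.  */
#if 0
static int
fpu_lt_or_unordered (const sim_fpu *l, const sim_fpu *r, int *unordered)
{
  int is;
  int status = sim_fpu_lt (&is, l, r);
  *unordered = (status != 0);   /* invalid_snan or invalid_qnan */
  return is;
}
#endif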
2510
2511 /* A number of useful constants */
2512
2513 #if EXTERN_SIM_FPU_P
2514 const sim_fpu sim_fpu_zero = {
2515 sim_fpu_class_zero, 0, 0, 0
2516 };
2517 const sim_fpu sim_fpu_qnan = {
2518 sim_fpu_class_qnan, 0, 0, 0
2519 };
2520 const sim_fpu sim_fpu_one = {
2521 sim_fpu_class_number, 0, IMPLICIT_1, 0
2522 };
2523 const sim_fpu sim_fpu_two = {
2524 sim_fpu_class_number, 0, IMPLICIT_1, 1
2525 };
2526 const sim_fpu sim_fpu_max32 = {
2527 sim_fpu_class_number, 0, LSMASK64 (NR_FRAC_GUARD, NR_GUARDS32), NORMAL_EXPMAX32
2528 };
2529 const sim_fpu sim_fpu_max64 = {
2530 sim_fpu_class_number, 0, LSMASK64 (NR_FRAC_GUARD, NR_GUARDS64), NORMAL_EXPMAX64
2531 };
2532 #endif
2533
2534
2535 /* For debugging */
2536
2537 INLINE_SIM_FPU (void)
2538 sim_fpu_print_fpu (const sim_fpu *f,
2539 sim_fpu_print_func *print,
2540 void *arg)
2541 {
2542 sim_fpu_printn_fpu (f, print, -1, arg);
2543 }
2544
2545 INLINE_SIM_FPU (void)
2546 sim_fpu_printn_fpu (const sim_fpu *f,
2547 sim_fpu_print_func *print,
2548 int digits,
2549 void *arg)
2550 {
2551 print (arg, "%s", f->sign ? "-" : "+");
2552 switch (f->class)
2553 {
2554 case sim_fpu_class_qnan:
2555 print (arg, "0.");
2556 print_bits (f->fraction, NR_FRAC_GUARD - 1, digits, print, arg);
2557 print (arg, "*QuietNaN");
2558 break;
2559 case sim_fpu_class_snan:
2560 print (arg, "0.");
2561 print_bits (f->fraction, NR_FRAC_GUARD - 1, digits, print, arg);
2562 print (arg, "*SignalNaN");
2563 break;
2564 case sim_fpu_class_zero:
2565 print (arg, "0.0");
2566 break;
2567 case sim_fpu_class_infinity:
2568 print (arg, "INF");
2569 break;
2570 case sim_fpu_class_number:
2571 case sim_fpu_class_denorm:
2572 print (arg, "1.");
2573 print_bits (f->fraction, NR_FRAC_GUARD - 1, digits, print, arg);
2574 print (arg, "*2^%+d", f->normal_exp);
2575 ASSERT (f->fraction >= IMPLICIT_1);
2576 ASSERT (f->fraction < IMPLICIT_2);
2577 }
2578 }
2579
2580
2581 INLINE_SIM_FPU (void)
2582 sim_fpu_print_status (int status,
2583 sim_fpu_print_func *print,
2584 void *arg)
2585 {
2586 int i = 1;
2587 const char *prefix = "";
2588 while (status >= i)
2589 {
2590 switch ((sim_fpu_status) (status & i))
2591 {
2592 case sim_fpu_status_denorm:
2593 print (arg, "%sD", prefix);
2594 break;
2595 case sim_fpu_status_invalid_snan:
2596 print (arg, "%sSNaN", prefix);
2597 break;
2598 case sim_fpu_status_invalid_qnan:
2599 print (arg, "%sQNaN", prefix);
2600 break;
2601 case sim_fpu_status_invalid_isi:
2602 print (arg, "%sISI", prefix);
2603 break;
2604 case sim_fpu_status_invalid_idi:
2605 print (arg, "%sIDI", prefix);
2606 break;
2607 case sim_fpu_status_invalid_zdz:
2608 print (arg, "%sZDZ", prefix);
2609 break;
2610 case sim_fpu_status_invalid_imz:
2611 print (arg, "%sIMZ", prefix);
2612 break;
2613 case sim_fpu_status_invalid_cvi:
2614 print (arg, "%sCVI", prefix);
2615 break;
2616 case sim_fpu_status_invalid_cmp:
2617 print (arg, "%sCMP", prefix);
2618 break;
2619 case sim_fpu_status_invalid_sqrt:
2620 print (arg, "%sSQRT", prefix);
2621 break;
2622 case sim_fpu_status_invalid_irx:
2623 print (arg, "%sIRX", prefix);
2624 break;
2625 case sim_fpu_status_inexact:
2626 print (arg, "%sX", prefix);
2627 break;
2628 case sim_fpu_status_overflow:
2629 print (arg, "%sO", prefix);
2630 break;
2631 case sim_fpu_status_underflow:
2632 print (arg, "%sU", prefix);
2633 break;
2634 case sim_fpu_status_invalid_div0:
2635 print (arg, "%s/", prefix);
2636 break;
2637 case sim_fpu_status_rounded:
2638 print (arg, "%sR", prefix);
2639 break;
2640 }
2641 i <<= 1;
2642 prefix = ",";
2643 }
2644 }
2645
2646 #endif