1/* Copyright (C) 2007 Free Software Foundation, Inc.
2
3This file is part of GCC.
4
5GCC is free software; you can redistribute it and/or modify it under
6the terms of the GNU General Public License as published by the Free
7Software Foundation; either version 2, or (at your option) any later
8version.
9
10In addition to the permissions in the GNU General Public License, the
11Free Software Foundation gives you unlimited permission to link the
12compiled version of this file into combinations with other programs,
13and to distribute those combinations without any restriction coming
14from the use of this file. (The General Public License restrictions
15do apply in other respects; for example, they cover modification of
16the file, and distribution when not linked into a combine
17executable.)
18
19GCC is distributed in the hope that it will be useful, but WITHOUT ANY
20WARRANTY; without even the implied warranty of MERCHANTABILITY or
21FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
22for more details.
23
24You should have received a copy of the GNU General Public License
25along with GCC; see the file COPYING. If not, write to the Free
26Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
2702110-1301, USA. */
28
29#define BID_128RES
30
31#include "bid_internal.h"
32
33/*****************************************************************************
34 * BID128_round_integral_exact
35 ****************************************************************************/
36
37BID128_FUNCTION_ARG1(__bid128_round_integral_exact, x)
38
39 UINT128 res = {{ 0xbaddbaddbaddbaddull, 0xbaddbaddbaddbaddull }};
40 UINT64 x_sign;
41 UINT64 x_exp;
42 int exp; // unbiased exponent
43 // Note: C1.w[1], C1.w[0] represent x_signif_hi, x_signif_lo (all are UINT64)
44 UINT64 tmp64;
45 BID_UI64DOUBLE tmp1;
46 unsigned int x_nr_bits;
47 int q, ind, shift;
48 UINT128 C1;
49 UINT256 fstar;
50 UINT256 P256;
51
52 // check for NaN or Infinity
53 if ((x.w[1] & MASK_SPECIAL) == MASK_SPECIAL) {
54 // x is special
55 if ((x.w[1] & MASK_NAN) == MASK_NAN) { // x is NAN
56 if ((x.w[1] & MASK_SNAN) == MASK_SNAN) { // x is SNAN
57 // set invalid flag
58 *pfpsf |= INVALID_EXCEPTION;
59 // return Quiet (SNaN)
60 res.w[1] = x.w[1] & 0xfdffffffffffffffull;
61 res.w[0] = x.w[0];
62 } else { // x is QNaN
63 // return the input QNaN
64 res.w[1] = x.w[1];
65 res.w[0] = x.w[0];
66 }
67 BID_RETURN (res);
68 } else { // x is not a NaN, so it must be infinity
69 if ((x.w[1] & MASK_SIGN) == 0x0ull) { // x is +inf
70 // return +inf
71 res.w[1] = 0x7800000000000000ull;
72 res.w[0] = 0x0000000000000000ull;
73 } else { // x is -inf
74 // return -inf
75 res.w[1] = 0xf800000000000000ull;
76 res.w[0] = 0x0000000000000000ull;
77 }
78 BID_RETURN (res);
79 }
80 }
81 // unpack x
82 x_sign = x.w[1] & MASK_SIGN; // 0 for positive, MASK_SIGN for negative
83 x_exp = x.w[1] & MASK_EXP; // biased and shifted left 49 bit positions
84 C1.w[1] = x.w[1] & MASK_COEFF;
85 C1.w[0] = x.w[0];
86
87 // test for non-canonical values:
88 // - values whose encoding begins with x00, x01, or x10 and whose
89 // coefficient is larger than 10^34 -1, or
90 // - values whose encoding begins with x1100, x1101, x1110 (if NaNs
91 // and infinities were eliminated already this test is reduced to
92 // checking for x10x)
93
94 // test for non-canonical values of the argument x
95 if ((((C1.w[1] > 0x0001ed09bead87c0ull) ||
96 ((C1.w[1] == 0x0001ed09bead87c0ull) &&
97 (C1.w[0] > 0x378d8e63ffffffffull))) &&
98 ((x.w[1] & 0x6000000000000000ull) != 0x6000000000000000ull)) ||
99 ((x.w[1] & 0x6000000000000000ull) == 0x6000000000000000ull)) {
100 x.w[1] = x.w[1] & 0x8000000000000000ull; // preserve the sign bit
101 x.w[0] = 0;
102 x_exp = 0x1820ull << 49;
103 C1.w[1] = 0;
104 C1.w[0] = 0;
105 }
106 // test for input equal to zero
107 if ((C1.w[1] == 0x0ull) && (C1.w[0] == 0x0ull)) {
108 // x is 0
109 // return 0 preserving the sign bit and the preferred exponent
110 // of MAX(Q(x), 0)
111 if (x_exp <= (0x1820ull << 49)) {
112 res.w[1] =
113 (x.w[1] & 0x8000000000000000ull) | 0x3040000000000000ull;
114 } else {
115 res.w[1] = x.w[1] & 0xfffe000000000000ull;
116 }
117 res.w[0] = 0x0000000000000000ull;
118 BID_RETURN (res);
119 }
120 // x is not special and is not zero
121
122 switch (rnd_mode) {
123 case ROUNDING_TO_NEAREST:
124 case ROUNDING_TIES_AWAY:
125 // if (exp <= -(p+1)) return 0.0
126 if (x_exp <= 0x2ffa000000000000ull) { // 0x2ffa000000000000ull == -35
127 res.w[1] = x_sign | 0x3040000000000000ull;
128 res.w[0] = 0x0000000000000000ull;
129 *pfpsf |= INEXACT_EXCEPTION;
130 BID_RETURN (res);
131 }
132 break;
133 case ROUNDING_DOWN:
134 // if (exp <= -p) return -1.0 or +0.0
135 if (x_exp <= 0x2ffc000000000000ull) { // 0x2ffc000000000000ull == -34
136 if (x_sign) {
137 // if negative, return negative 1, because we know coefficient
138 // is non-zero (would have been caught above)
139 res.w[1] = 0xb040000000000000ull;
140 res.w[0] = 0x0000000000000001ull;
141 } else {
142 // if positive, return positive 0, because we know coefficient is
143 // non-zero (would have been caught above)
144 res.w[1] = 0x3040000000000000ull;
145 res.w[0] = 0x0000000000000000ull;
146 }
147 *pfpsf |= INEXACT_EXCEPTION;
148 BID_RETURN (res);
149 }
150 break;
151 case ROUNDING_UP:
152 // if (exp <= -p) return -0.0 or +1.0
153 if (x_exp <= 0x2ffc000000000000ull) { // 0x2ffc000000000000ull == -34
154 if (x_sign) {
155 // if negative, return negative 0, because we know the coefficient
156 // is non-zero (would have been caught above)
157 res.w[1] = 0xb040000000000000ull;
158 res.w[0] = 0x0000000000000000ull;
159 } else {
160 // if positive, return positive 1, because we know coefficient is
161 // non-zero (would have been caught above)
162 res.w[1] = 0x3040000000000000ull;
163 res.w[0] = 0x0000000000000001ull;
164 }
165 *pfpsf |= INEXACT_EXCEPTION;
166 BID_RETURN (res);
167 }
168 break;
169 case ROUNDING_TO_ZERO:
170 // if (exp <= -p) return -0.0 or +0.0
171 if (x_exp <= 0x2ffc000000000000ull) { // 0x2ffc000000000000ull == -34
172 res.w[1] = x_sign | 0x3040000000000000ull;
173 res.w[0] = 0x0000000000000000ull;
174 *pfpsf |= INEXACT_EXCEPTION;
175 BID_RETURN (res);
176 }
177 break;
178 }
179
180 // q = nr. of decimal digits in x
181 // determine first the nr. of bits in x
182 if (C1.w[1] == 0) {
183 if (C1.w[0] >= 0x0020000000000000ull) { // x >= 2^53
184 // split the 64-bit value in two 32-bit halves to avoid rounding errors
185 if (C1.w[0] >= 0x0000000100000000ull) { // x >= 2^32
186 tmp1.d = (double) (C1.w[0] >> 32); // exact conversion
187 x_nr_bits =
188 33 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
189 } else { // x < 2^32
190 tmp1.d = (double) (C1.w[0]); // exact conversion
191 x_nr_bits =
192 1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
193 }
194 } else { // if x < 2^53
195 tmp1.d = (double) C1.w[0]; // exact conversion
196 x_nr_bits =
197 1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
198 }
199 } else { // C1.w[1] != 0 => nr. bits = 64 + nr_bits (C1.w[1])
200 tmp1.d = (double) C1.w[1]; // exact conversion
201 x_nr_bits =
202 65 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
203 }
204 q = __bid_nr_digits[x_nr_bits - 1].digits;
205 if (q == 0) {
206 q = __bid_nr_digits[x_nr_bits - 1].digits1;
207 if (C1.w[1] > __bid_nr_digits[x_nr_bits - 1].threshold_hi
208 || (C1.w[1] == __bid_nr_digits[x_nr_bits - 1].threshold_hi
209 && C1.w[0] >= __bid_nr_digits[x_nr_bits - 1].threshold_lo))
210 q++;
211 }
212 exp = (x_exp >> 49) - 6176;
213 if (exp >= 0) { // -exp <= 0
214 // the argument is an integer already
215 res.w[1] = x.w[1];
216 res.w[0] = x.w[0];
217 BID_RETURN (res);
218 }
219 // exp < 0
220 switch (rnd_mode) {
221 case ROUNDING_TO_NEAREST:
222 if ((q + exp) >= 0) { // exp < 0 and 1 <= -exp <= q
223 // need to shift right -exp digits from the coefficient; exp will be 0
224 ind = -exp; // 1 <= ind <= 34; ind is a synonym for 'x'
225 // chop off ind digits from the lower part of C1
226 // C1 = C1 + 1/2 * 10^x where the result C1 fits in 127 bits
227 tmp64 = C1.w[0];
228 if (ind <= 19) {
229 C1.w[0] = C1.w[0] + __bid_midpoint64[ind - 1];
230 } else {
231 C1.w[0] = C1.w[0] + __bid_midpoint128[ind - 20].w[0];
232 C1.w[1] = C1.w[1] + __bid_midpoint128[ind - 20].w[1];
233 }
234 if (C1.w[0] < tmp64)
235 C1.w[1]++;
236 // calculate C* and f*
237 // C* is actually floor(C*) in this case
238 // C* and f* need shifting and masking, as shown by
239 // __bid_shiftright128[] and __bid_maskhigh128[]
240 // 1 <= x <= 34
241 // kx = 10^(-x) = __bid_ten2mk128[ind - 1]
242 // C* = (C1 + 1/2 * 10^x) * 10^(-x)
243 // the approximation of 10^(-x) was rounded up to 118 bits
244 __mul_128x128_to_256 (P256, C1, __bid_ten2mk128[ind - 1]);
245 // determine the value of res and fstar
246
247 // determine inexactness of the rounding of C*
248 // if (0 < f* - 1/2 < 10^(-x)) then
249 // the result is exact
250 // else // if (f* - 1/2 > T*) then
251 // the result is inexact
252 // Note: we are going to use __bid_ten2mk128[] instead of __bid_ten2mk128trunc[]
253
254 if (ind - 1 <= 2) { // 0 <= ind - 1 <= 2 => shift = 0
255 // redundant shift = __bid_shiftright128[ind - 1]; // shift = 0
256 res.w[1] = P256.w[3];
257 res.w[0] = P256.w[2];
258 // redundant fstar.w[3] = 0;
259 // redundant fstar.w[2] = 0;
260 fstar.w[1] = P256.w[1];
261 fstar.w[0] = P256.w[0];
262 // fraction f* < 10^(-x) <=> midpoint
263 // f* is in the right position to be compared with
264 // 10^(-x) from __bid_ten2mk128[]
265 // if 0 < fstar < 10^(-x), subtract 1 if odd (for rounding to even)
266 if ((res.w[0] & 0x0000000000000001ull) && // is result odd?
267 ((fstar.w[1] < (__bid_ten2mk128[ind - 1].w[1]))
268 || ((fstar.w[1] == __bid_ten2mk128[ind - 1].w[1])
269 && (fstar.w[0] < __bid_ten2mk128[ind - 1].w[0])))) {
270 // subtract 1 to make even
271 if (res.w[0]-- == 0) {
272 res.w[1]--;
273 }
274 }
275 if (fstar.w[1] > 0x8000000000000000ull ||
276 (fstar.w[1] == 0x8000000000000000ull && fstar.w[0] > 0x0ull)) {
277 // f* > 1/2 and the result may be exact
278 tmp64 = fstar.w[1] - 0x8000000000000000ull; // f* - 1/2
279 if ((tmp64 > __bid_ten2mk128[ind - 1].w[1]
280 || (tmp64 == __bid_ten2mk128[ind - 1].w[1]
281 && fstar.w[0] >= __bid_ten2mk128[ind - 1].w[0]))) {
282 // set the inexact flag
283 *pfpsf |= INEXACT_EXCEPTION;
284 } // else the result is exact
285 } else { // the result is inexact; f2* <= 1/2
286 // set the inexact flag
287 *pfpsf |= INEXACT_EXCEPTION;
288 }
289 } else if (ind - 1 <= 21) { // 3 <= ind - 1 <= 21 => 3 <= shift <= 63
290 shift = __bid_shiftright128[ind - 1]; // 3 <= shift <= 63
291 res.w[1] = (P256.w[3] >> shift);
292 res.w[0] = (P256.w[3] << (64 - shift)) | (P256.w[2] >> shift);
293 // redundant fstar.w[3] = 0;
294 fstar.w[2] = P256.w[2] & __bid_maskhigh128[ind - 1];
295 fstar.w[1] = P256.w[1];
296 fstar.w[0] = P256.w[0];
297 // fraction f* < 10^(-x) <=> midpoint
298 // f* is in the right position to be compared with
299 // 10^(-x) from __bid_ten2mk128[]
300 if ((res.w[0] & 0x0000000000000001ull) && // is result odd?
301 fstar.w[2] == 0 && (fstar.w[1] < __bid_ten2mk128[ind - 1].w[1]
302 || (fstar.w[1] == __bid_ten2mk128[ind - 1].w[1]
303 && fstar.w[0] < __bid_ten2mk128[ind - 1].w[0]))) {
304 // subtract 1 to make even
305 if (res.w[0]-- == 0) {
306 res.w[1]--;
307 }
308 }
309 if (fstar.w[2] > __bid_one_half128[ind - 1]
310 || (fstar.w[2] == __bid_one_half128[ind - 1]
311 && (fstar.w[1] || fstar.w[0]))) {
312 // f2* > 1/2 and the result may be exact
313 // Calculate f2* - 1/2
314 tmp64 = fstar.w[2] - __bid_one_half128[ind - 1];
315 if (tmp64 || fstar.w[1] > __bid_ten2mk128[ind - 1].w[1]
316 || (fstar.w[1] == __bid_ten2mk128[ind - 1].w[1]
317 && fstar.w[0] >= __bid_ten2mk128[ind - 1].w[0])) {
318 // set the inexact flag
319 *pfpsf |= INEXACT_EXCEPTION;
320 } // else the result is exact
321 } else { // the result is inexact; f2* <= 1/2
322 // set the inexact flag
323 *pfpsf |= INEXACT_EXCEPTION;
324 }
325 } else { // 22 <= ind - 1 <= 33
326 shift = __bid_shiftright128[ind - 1] - 64; // 2 <= shift <= 38
327 res.w[1] = 0;
328 res.w[0] = P256.w[3] >> shift;
329 fstar.w[3] = P256.w[3] & __bid_maskhigh128[ind - 1];
330 fstar.w[2] = P256.w[2];
331 fstar.w[1] = P256.w[1];
332 fstar.w[0] = P256.w[0];
333 // fraction f* < 10^(-x) <=> midpoint
334 // f* is in the right position to be compared with
335 // 10^(-x) from __bid_ten2mk128[]
336 if ((res.w[0] & 0x0000000000000001ull) && // is result odd?
337 fstar.w[3] == 0 && fstar.w[2] == 0
338 && (fstar.w[1] < __bid_ten2mk128[ind - 1].w[1]
339 || (fstar.w[1] == __bid_ten2mk128[ind - 1].w[1]
340 && fstar.w[0] < __bid_ten2mk128[ind - 1].w[0]))) {
341 // subtract 1 to make even
342 if (res.w[0]-- == 0) {
343 res.w[1]--;
344 }
345 }
346 if (fstar.w[3] > __bid_one_half128[ind - 1]
347 || (fstar.w[3] == __bid_one_half128[ind - 1]
348 && (fstar.w[2] || fstar.w[1] || fstar.w[0]))) {
349 // f2* > 1/2 and the result may be exact
350 // Calculate f2* - 1/2
351 tmp64 = fstar.w[3] - __bid_one_half128[ind - 1];
352 if (tmp64 || fstar.w[2]
353 || fstar.w[1] > __bid_ten2mk128[ind - 1].w[1]
354 || (fstar.w[1] == __bid_ten2mk128[ind - 1].w[1]
355 && fstar.w[0] >= __bid_ten2mk128[ind - 1].w[0])) {
356 // set the inexact flag
357 *pfpsf |= INEXACT_EXCEPTION;
358 } // else the result is exact
359 } else { // the result is inexact; f2* <= 1/2
360 // set the inexact flag
361 *pfpsf |= INEXACT_EXCEPTION;
362 }
363 }
364 res.w[1] = x_sign | 0x3040000000000000ull | res.w[1];
365 BID_RETURN (res);
366 } else { // if ((q + exp) < 0) <=> q < -exp
367 // the result is +0 or -0
368 res.w[1] = x_sign | 0x3040000000000000ull;
369 res.w[0] = 0x0000000000000000ull;
370 *pfpsf |= INEXACT_EXCEPTION;
371 BID_RETURN (res);
372 }
373 break;
374 case ROUNDING_TIES_AWAY:
375 if ((q + exp) >= 0) { // exp < 0 and 1 <= -exp <= q
376 // need to shift right -exp digits from the coefficient; exp will be 0
377 ind = -exp; // 1 <= ind <= 34; ind is a synonym for 'x'
378 // chop off ind digits from the lower part of C1
379 // C1 = C1 + 1/2 * 10^x where the result C1 fits in 127 bits
380 tmp64 = C1.w[0];
381 if (ind <= 19) {
382 C1.w[0] = C1.w[0] + __bid_midpoint64[ind - 1];
383 } else {
384 C1.w[0] = C1.w[0] + __bid_midpoint128[ind - 20].w[0];
385 C1.w[1] = C1.w[1] + __bid_midpoint128[ind - 20].w[1];
386 }
387 if (C1.w[0] < tmp64)
388 C1.w[1]++;
389 // calculate C* and f*
390 // C* is actually floor(C*) in this case
391 // C* and f* need shifting and masking, as shown by
392 // __bid_shiftright128[] and __bid_maskhigh128[]
393 // 1 <= x <= 34
394 // kx = 10^(-x) = __bid_ten2mk128[ind - 1]
395 // C* = (C1 + 1/2 * 10^x) * 10^(-x)
396 // the approximation of 10^(-x) was rounded up to 118 bits
397 __mul_128x128_to_256 (P256, C1, __bid_ten2mk128[ind - 1]);
398 // the top Ex bits of 10^(-x) are T* = __bid_ten2mk128trunc[ind - 1], e.g.
399 // if x=1, T*=__bid_ten2mk128trunc[0]=0x19999999999999999999999999999999
400 // if (0 < f* < 10^(-x)) then the result is a midpoint
401 //   if floor(C*) is even then C* = floor(C*) (logical right
402 //       shift; C* has p decimal digits, correct by Property 1)
403 //   else if floor(C*) is odd then C* = floor(C*) - 1 (logical right
404 //       shift; C* has p decimal digits, correct by Property 1)
405 // else
406 //   C* = floor(C*) (logical right shift; C* has p decimal digits,
407 //       correct by Property 1)
408 // n = C* * 10^(e+x)
409
410 // determine also the inexactness of the rounding of C*
411 // if (0 < f* - 1/2 < 10^(-x)) then
412 // the result is exact
413 // else // if (f* - 1/2 > T*) then
414 // the result is inexact
415 // Note: we are going to use __bid_ten2mk128[] instead of __bid_ten2mk128trunc[]
416 // shift right C* by Ex-128 = __bid_shiftright128[ind - 1]
417 if (ind - 1 <= 2) { // 0 <= ind - 1 <= 2 => shift = 0
418 // redundant shift = __bid_shiftright128[ind - 1]; // shift = 0
419 res.w[1] = P256.w[3];
420 res.w[0] = P256.w[2];
421 // redundant fstar.w[3] = 0;
422 // redundant fstar.w[2] = 0;
423 fstar.w[1] = P256.w[1];
424 fstar.w[0] = P256.w[0];
425 if (fstar.w[1] > 0x8000000000000000ull ||
426 (fstar.w[1] == 0x8000000000000000ull && fstar.w[0] > 0x0ull)) {
427 // f* > 1/2 and the result may be exact
428 tmp64 = fstar.w[1] - 0x8000000000000000ull; // f* - 1/2
429 if ((tmp64 > __bid_ten2mk128[ind - 1].w[1]
430 || (tmp64 == __bid_ten2mk128[ind - 1].w[1]
431 && fstar.w[0] >= __bid_ten2mk128[ind - 1].w[0]))) {
432 // set the inexact flag
433 *pfpsf |= INEXACT_EXCEPTION;
434 } // else the result is exact
435 } else { // the result is inexact; f2* <= 1/2
436 // set the inexact flag
437 *pfpsf |= INEXACT_EXCEPTION;
438 }
439 } else if (ind - 1 <= 21) { // 3 <= ind - 1 <= 21 => 3 <= shift <= 63
440 shift = __bid_shiftright128[ind - 1]; // 3 <= shift <= 63
441 res.w[1] = (P256.w[3] >> shift);
442 res.w[0] = (P256.w[3] << (64 - shift)) | (P256.w[2] >> shift);
443 // redundant fstar.w[3] = 0;
444 fstar.w[2] = P256.w[2] & __bid_maskhigh128[ind - 1];
445 fstar.w[1] = P256.w[1];
446 fstar.w[0] = P256.w[0];
447 if (fstar.w[2] > __bid_one_half128[ind - 1]
448 || (fstar.w[2] == __bid_one_half128[ind - 1]
449 && (fstar.w[1] || fstar.w[0]))) {
450 // f2* > 1/2 and the result may be exact
451 // Calculate f2* - 1/2
452 tmp64 = fstar.w[2] - __bid_one_half128[ind - 1];
453 if (tmp64 || fstar.w[1] > __bid_ten2mk128[ind - 1].w[1]
454 || (fstar.w[1] == __bid_ten2mk128[ind - 1].w[1]
455 && fstar.w[0] >= __bid_ten2mk128[ind - 1].w[0])) {
456 // set the inexact flag
457 *pfpsf |= INEXACT_EXCEPTION;
458 } // else the result is exact
459 } else { // the result is inexact; f2* <= 1/2
460 // set the inexact flag
461 *pfpsf |= INEXACT_EXCEPTION;
462 }
463 } else { // 22 <= ind - 1 <= 33
464 shift = __bid_shiftright128[ind - 1] - 64; // 2 <= shift <= 38
465 res.w[1] = 0;
466 res.w[0] = P256.w[3] >> shift;
467 fstar.w[3] = P256.w[3] & __bid_maskhigh128[ind - 1];
468 fstar.w[2] = P256.w[2];
469 fstar.w[1] = P256.w[1];
470 fstar.w[0] = P256.w[0];
471 if (fstar.w[3] > __bid_one_half128[ind - 1]
472 || (fstar.w[3] == __bid_one_half128[ind - 1]
473 && (fstar.w[2] || fstar.w[1] || fstar.w[0]))) {
474 // f2* > 1/2 and the result may be exact
475 // Calculate f2* - 1/2
476 tmp64 = fstar.w[3] - __bid_one_half128[ind - 1];
477 if (tmp64 || fstar.w[2]
478 || fstar.w[1] > __bid_ten2mk128[ind - 1].w[1]
479 || (fstar.w[1] == __bid_ten2mk128[ind - 1].w[1]
480 && fstar.w[0] >= __bid_ten2mk128[ind - 1].w[0])) {
481 // set the inexact flag
482 *pfpsf |= INEXACT_EXCEPTION;
483 } // else the result is exact
484 } else { // the result is inexact; f2* <= 1/2
485 // set the inexact flag
486 *pfpsf |= INEXACT_EXCEPTION;
487 }
488 }
489 // if the result was a midpoint, it was already rounded away from zero
490 res.w[1] |= x_sign | 0x3040000000000000ull;
491 BID_RETURN (res);
492 } else { // if ((q + exp) < 0) <=> q < -exp
493 // the result is +0 or -0
494 res.w[1] = x_sign | 0x3040000000000000ull;
495 res.w[0] = 0x0000000000000000ull;
496 *pfpsf |= INEXACT_EXCEPTION;
497 BID_RETURN (res);
498 }
499 break;
500 case ROUNDING_DOWN:
501 if ((q + exp) > 0) { // exp < 0 and 1 <= -exp < q
502 // need to shift right -exp digits from the coefficient; exp will be 0
503 ind = -exp; // 1 <= ind <= 34; ind is a synonym for 'x'
504 // (number of digits to be chopped off)
505 // chop off ind digits from the lower part of C1
506 // FOR ROUND_TO_NEAREST, WE ADD 1/2 ULP(y) then truncate
507 // FOR ROUND_TO_ZERO, WE DON'T NEED TO ADD 1/2 ULP
508 // FOR ROUND_TO_POSITIVE_INFINITY, WE TRUNCATE, THEN ADD 1 IF POSITIVE
509 // FOR ROUND_TO_NEGATIVE_INFINITY, WE TRUNCATE, THEN ADD 1 IF NEGATIVE
510 // tmp64 = C1.w[0];
511 // if (ind <= 19) {
512 // C1.w[0] = C1.w[0] + __bid_midpoint64[ind - 1];
513 // } else {
514 // C1.w[0] = C1.w[0] + __bid_midpoint128[ind - 20].w[0];
515 // C1.w[1] = C1.w[1] + __bid_midpoint128[ind - 20].w[1];
516 // }
517 // if (C1.w[0] < tmp64) C1.w[1]++;
518 // if carry-out from C1.w[0], increment C1.w[1]
519 // calculate C* and f*
520 // C* is actually floor(C*) in this case
521 // C* and f* need shifting and masking, as shown by
522 // __bid_shiftright128[] and __bid_maskhigh128[]
523 // 1 <= x <= 34
524 // kx = 10^(-x) = __bid_ten2mk128[ind - 1]
525 // C* = (C1 + 1/2 * 10^x) * 10^(-x)
526 // the approximation of 10^(-x) was rounded up to 118 bits
527 __mul_128x128_to_256 (P256, C1, __bid_ten2mk128[ind - 1]);
528 if (ind - 1 <= 2) { // 0 <= ind - 1 <= 2 => shift = 0
529 res.w[1] = P256.w[3];
530 res.w[0] = P256.w[2];
531 // redundant fstar.w[3] = 0;
532 // redundant fstar.w[2] = 0;
533 // redundant fstar.w[1] = P256.w[1];
534 // redundant fstar.w[0] = P256.w[0];
535 // fraction f* > 10^(-x) <=> inexact
536 // f* is in the right position to be compared with
537 // 10^(-x) from __bid_ten2mk128[]
538 if ((P256.w[1] > __bid_ten2mk128[ind - 1].w[1])
539 || (P256.w[1] == __bid_ten2mk128[ind - 1].w[1]
540 && (P256.w[0] >= __bid_ten2mk128[ind - 1].w[0]))) {
541 *pfpsf |= INEXACT_EXCEPTION;
542 // if positive, the truncated value is already the correct result
543 if (x_sign) { // if negative
544 if (++res.w[0] == 0) {
545 res.w[1]++;
546 }
547 }
548 }
549 } else if (ind - 1 <= 21) { // 3 <= ind - 1 <= 21 => 3 <= shift <= 63
550 shift = __bid_shiftright128[ind - 1]; // 3 <= shift <= 63
551 res.w[1] = (P256.w[3] >> shift);
552 res.w[0] = (P256.w[3] << (64 - shift)) | (P256.w[2] >> shift);
553 // redundant fstar.w[3] = 0;
554 fstar.w[2] = P256.w[2] & __bid_maskhigh128[ind - 1];
555 fstar.w[1] = P256.w[1];
556 fstar.w[0] = P256.w[0];
557 // fraction f* > 10^(-x) <=> inexact
558 // f* is in the right position to be compared with
559 // 10^(-x) from __bid_ten2mk128[]
560 if (fstar.w[2] || fstar.w[1] > __bid_ten2mk128[ind - 1].w[1]
561 || (fstar.w[1] == __bid_ten2mk128[ind - 1].w[1]
562 && fstar.w[0] >= __bid_ten2mk128[ind - 1].w[0])) {
563 *pfpsf |= INEXACT_EXCEPTION;
564 // if positive, the truncated value is already the correct result
565 if (x_sign) { // if negative
566 if (++res.w[0] == 0) {
567 res.w[1]++;
568 }
569 }
570 }
571 } else { // 22 <= ind - 1 <= 33
572 shift = __bid_shiftright128[ind - 1] - 64; // 2 <= shift <= 38
573 res.w[1] = 0;
574 res.w[0] = P256.w[3] >> shift;
575 fstar.w[3] = P256.w[3] & __bid_maskhigh128[ind - 1];
576 fstar.w[2] = P256.w[2];
577 fstar.w[1] = P256.w[1];
578 fstar.w[0] = P256.w[0];
579 // fraction f* > 10^(-x) <=> inexact
580 // f* is in the right position to be compared with
581 // 10^(-x) from __bid_ten2mk128[]
582 if (fstar.w[3] || fstar.w[2]
583 || fstar.w[1] > __bid_ten2mk128[ind - 1].w[1]
584 || (fstar.w[1] == __bid_ten2mk128[ind - 1].w[1]
585 && fstar.w[0] >= __bid_ten2mk128[ind - 1].w[0])) {
586 *pfpsf |= INEXACT_EXCEPTION;
587 // if positive, the truncated value is already the correct result
588 if (x_sign) { // if negative
589 if (++res.w[0] == 0) {
590 res.w[1]++;
591 }
592 }
593 }
594 }
595 res.w[1] = x_sign | 0x3040000000000000ull | res.w[1];
596 BID_RETURN (res);
597 } else { // if exp < 0 and q + exp <= 0
598 if (x_sign) { // negative rounds down to -1.0
599 res.w[1] = 0xb040000000000000ull;
600 res.w[0] = 0x0000000000000001ull;
601 } else { // positive rounds down to +0.0
602 res.w[1] = 0x3040000000000000ull;
603 res.w[0] = 0x0000000000000000ull;
604 }
605 *pfpsf |= INEXACT_EXCEPTION;
606 BID_RETURN (res);
607 }
608 break;
609 case ROUNDING_UP:
610 if ((q + exp) > 0) { // exp < 0 and 1 <= -exp < q
611 // need to shift right -exp digits from the coefficient; exp will be 0
612 ind = -exp; // 1 <= ind <= 34; ind is a synonym for 'x'
613 // (number of digits to be chopped off)
614 // chop off ind digits from the lower part of C1
615 // FOR ROUND_TO_NEAREST, WE ADD 1/2 ULP(y) then truncate
616 // FOR ROUND_TO_ZERO, WE DON'T NEED TO ADD 1/2 ULP
617 // FOR ROUND_TO_POSITIVE_INFINITY, WE TRUNCATE, THEN ADD 1 IF POSITIVE
618 // FOR ROUND_TO_NEGATIVE_INFINITY, WE TRUNCATE, THEN ADD 1 IF NEGATIVE
619 // tmp64 = C1.w[0];
620 // if (ind <= 19) {
621 // C1.w[0] = C1.w[0] + __bid_midpoint64[ind - 1];
622 // } else {
623 // C1.w[0] = C1.w[0] + __bid_midpoint128[ind - 20].w[0];
624 // C1.w[1] = C1.w[1] + __bid_midpoint128[ind - 20].w[1];
625 // }
626 // if (C1.w[0] < tmp64) C1.w[1]++;
627 // if carry-out from C1.w[0], increment C1.w[1]
628 // calculate C* and f*
629 // C* is actually floor(C*) in this case
630 // C* and f* need shifting and masking, as shown by
631 // __bid_shiftright128[] and __bid_maskhigh128[]
632 // 1 <= x <= 34
633 // kx = 10^(-x) = __bid_ten2mk128[ind - 1]
634 // C* = C1 * 10^(-x)
635 // the approximation of 10^(-x) was rounded up to 118 bits
636 __mul_128x128_to_256 (P256, C1, __bid_ten2mk128[ind - 1]);
637 if (ind - 1 <= 2) { // 0 <= ind - 1 <= 2 => shift = 0
638 res.w[1] = P256.w[3];
639 res.w[0] = P256.w[2];
640 // redundant fstar.w[3] = 0;
641 // redundant fstar.w[2] = 0;
642 // redundant fstar.w[1] = P256.w[1];
643 // redundant fstar.w[0] = P256.w[0];
644 // fraction f* > 10^(-x) <=> inexact
645 // f* is in the right position to be compared with
646 // 10^(-x) from __bid_ten2mk128[]
647 if ((P256.w[1] > __bid_ten2mk128[ind - 1].w[1])
648 || (P256.w[1] == __bid_ten2mk128[ind - 1].w[1]
649 && (P256.w[0] >= __bid_ten2mk128[ind - 1].w[0]))) {
650 *pfpsf |= INEXACT_EXCEPTION;
651 // if negative, the truncated value is already the correct result
652 if (!x_sign) { // if positive
653 if (++res.w[0] == 0) {
654 res.w[1]++;
655 }
656 }
657 }
658 } else if (ind - 1 <= 21) { // 3 <= ind - 1 <= 21 => 3 <= shift <= 63
659 shift = __bid_shiftright128[ind - 1]; // 3 <= shift <= 63
660 res.w[1] = (P256.w[3] >> shift);
661 res.w[0] = (P256.w[3] << (64 - shift)) | (P256.w[2] >> shift);
662 // redundant fstar.w[3] = 0;
663 fstar.w[2] = P256.w[2] & __bid_maskhigh128[ind - 1];
664 fstar.w[1] = P256.w[1];
665 fstar.w[0] = P256.w[0];
666 // fraction f* > 10^(-x) <=> inexact
667 // f* is in the right position to be compared with
668 // 10^(-x) from __bid_ten2mk128[]
669 if (fstar.w[2] || fstar.w[1] > __bid_ten2mk128[ind - 1].w[1]
670 || (fstar.w[1] == __bid_ten2mk128[ind - 1].w[1]
671 && fstar.w[0] >= __bid_ten2mk128[ind - 1].w[0])) {
672 *pfpsf |= INEXACT_EXCEPTION;
673 // if negative, the truncated value is already the correct result
674 if (!x_sign) { // if positive
675 if (++res.w[0] == 0) {
676 res.w[1]++;
677 }
678 }
679 }
680 } else { // 22 <= ind - 1 <= 33
681 shift = __bid_shiftright128[ind - 1] - 64; // 2 <= shift <= 38
682 res.w[1] = 0;
683 res.w[0] = P256.w[3] >> shift;
684 fstar.w[3] = P256.w[3] & __bid_maskhigh128[ind - 1];
685 fstar.w[2] = P256.w[2];
686 fstar.w[1] = P256.w[1];
687 fstar.w[0] = P256.w[0];
688 // fraction f* > 10^(-x) <=> inexact
689 // f* is in the right position to be compared with
690 // 10^(-x) from __bid_ten2mk128[]
691 if (fstar.w[3] || fstar.w[2]
692 || fstar.w[1] > __bid_ten2mk128[ind - 1].w[1]
693 || (fstar.w[1] == __bid_ten2mk128[ind - 1].w[1]
694 && fstar.w[0] >= __bid_ten2mk128[ind - 1].w[0])) {
695 *pfpsf |= INEXACT_EXCEPTION;
696 // if negative, the truncated value is already the correct result
697 if (!x_sign) { // if positive
698 if (++res.w[0] == 0) {
699 res.w[1]++;
700 }
701 }
702 }
703 }
704 res.w[1] = x_sign | 0x3040000000000000ull | res.w[1];
705 BID_RETURN (res);
706 } else { // if exp < 0 and q + exp <= 0
707 if (x_sign) { // negative rounds up to -0.0
708 res.w[1] = 0xb040000000000000ull;
709 res.w[0] = 0x0000000000000000ull;
710 } else { // positive rounds up to +1.0
711 res.w[1] = 0x3040000000000000ull;
712 res.w[0] = 0x0000000000000001ull;
713 }
714 *pfpsf |= INEXACT_EXCEPTION;
715 BID_RETURN (res);
716 }
717 break;
718 case ROUNDING_TO_ZERO:
719 if ((q + exp) > 0) { // exp < 0 and 1 <= -exp < q
720 // need to shift right -exp digits from the coefficient; exp will be 0
721 ind = -exp; // 1 <= ind <= 34; ind is a synonym for 'x'
722 // (number of digits to be chopped off)
723 // chop off ind digits from the lower part of C1
724 // FOR ROUND_TO_NEAREST, WE ADD 1/2 ULP(y) then truncate
725 // FOR ROUND_TO_ZERO, WE DON'T NEED TO ADD 1/2 ULP
726 // FOR ROUND_TO_POSITIVE_INFINITY, WE TRUNCATE, THEN ADD 1 IF POSITIVE
727 // FOR ROUND_TO_NEGATIVE_INFINITY, WE TRUNCATE, THEN ADD 1 IF NEGATIVE
728 //tmp64 = C1.w[0];
729 // if (ind <= 19) {
730 // C1.w[0] = C1.w[0] + __bid_midpoint64[ind - 1];
731 // } else {
732 // C1.w[0] = C1.w[0] + __bid_midpoint128[ind - 20].w[0];
733 // C1.w[1] = C1.w[1] + __bid_midpoint128[ind - 20].w[1];
734 // }
735 // if (C1.w[0] < tmp64) C1.w[1]++;
736 // if carry-out from C1.w[0], increment C1.w[1]
737 // calculate C* and f*
738 // C* is actually floor(C*) in this case
739 // C* and f* need shifting and masking, as shown by
740 // __bid_shiftright128[] and __bid_maskhigh128[]
741 // 1 <= x <= 34
742 // kx = 10^(-x) = __bid_ten2mk128[ind - 1]
743 // C* = (C1 + 1/2 * 10^x) * 10^(-x)
744 // the approximation of 10^(-x) was rounded up to 118 bits
745 __mul_128x128_to_256 (P256, C1, __bid_ten2mk128[ind - 1]);
746 if (ind - 1 <= 2) { // 0 <= ind - 1 <= 2 => shift = 0
747 res.w[1] = P256.w[3];
748 res.w[0] = P256.w[2];
749 // redundant fstar.w[3] = 0;
750 // redundant fstar.w[2] = 0;
751 // redundant fstar.w[1] = P256.w[1];
752 // redundant fstar.w[0] = P256.w[0];
753 // fraction f* > 10^(-x) <=> inexact
754 // f* is in the right position to be compared with
755 // 10^(-x) from __bid_ten2mk128[]
756 if ((P256.w[1] > __bid_ten2mk128[ind - 1].w[1])
757 || (P256.w[1] == __bid_ten2mk128[ind - 1].w[1]
758 && (P256.w[0] >= __bid_ten2mk128[ind - 1].w[0]))) {
759 *pfpsf |= INEXACT_EXCEPTION;
760 }
761 } else if (ind - 1 <= 21) { // 3 <= ind - 1 <= 21 => 3 <= shift <= 63
762 shift = __bid_shiftright128[ind - 1]; // 3 <= shift <= 63
763 res.w[1] = (P256.w[3] >> shift);
764 res.w[0] = (P256.w[3] << (64 - shift)) | (P256.w[2] >> shift);
765 // redundant fstar.w[3] = 0;
766 fstar.w[2] = P256.w[2] & __bid_maskhigh128[ind - 1];
767 fstar.w[1] = P256.w[1];
768 fstar.w[0] = P256.w[0];
769 // fraction f* > 10^(-x) <=> inexact
770 // f* is in the right position to be compared with
771 // 10^(-x) from __bid_ten2mk128[]
772 if (fstar.w[2] || fstar.w[1] > __bid_ten2mk128[ind - 1].w[1]
773 || (fstar.w[1] == __bid_ten2mk128[ind - 1].w[1]
774 && fstar.w[0] >= __bid_ten2mk128[ind - 1].w[0])) {
775 *pfpsf |= INEXACT_EXCEPTION;
776 }
777 } else { // 22 <= ind - 1 <= 33
778 shift = __bid_shiftright128[ind - 1] - 64; // 2 <= shift <= 38
779 res.w[1] = 0;
780 res.w[0] = P256.w[3] >> shift;
781 fstar.w[3] = P256.w[3] & __bid_maskhigh128[ind - 1];
782 fstar.w[2] = P256.w[2];
783 fstar.w[1] = P256.w[1];
784 fstar.w[0] = P256.w[0];
785 // fraction f* > 10^(-x) <=> inexact
786 // f* is in the right position to be compared with
787 // 10^(-x) from __bid_ten2mk128[]
788 if (fstar.w[3] || fstar.w[2]
789 || fstar.w[1] > __bid_ten2mk128[ind - 1].w[1]
790 || (fstar.w[1] == __bid_ten2mk128[ind - 1].w[1]
791 && fstar.w[0] >= __bid_ten2mk128[ind - 1].w[0])) {
792 *pfpsf |= INEXACT_EXCEPTION;
793 }
794 }
795 res.w[1] = x_sign | 0x3040000000000000ull | res.w[1];
796 BID_RETURN (res);
797 } else { // if exp < 0 and q + exp <= 0 the result is +0 or -0
798 res.w[1] = x_sign | 0x3040000000000000ull;
799 res.w[0] = 0x0000000000000000ull;
800 *pfpsf |= INEXACT_EXCEPTION;
801 BID_RETURN (res);
802 }
803 break;
804 }
805 BID_RETURN (res);
806}
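
/* Illustrative sketch (not compiled; for exposition only): the
   ROUNDING_TO_NEAREST path above computes
   C* = floor((C1 + 1/2 * 10^x) * 10^(-x)) with the __bid_midpoint64 /
   __bid_midpoint128 and __bid_ten2mk128 tables and a 128x128->256-bit
   multiply, then inspects the fraction f* to detect midpoints and
   inexactness.  The helper below shows the same round-to-nearest-even
   step with plain 64-bit division, for coefficients and digit counts
   small enough to fit; the function name and its pow10 table are
   illustrative assumptions, not part of the library interface.  */
#if 0
static UINT64
round_digits_nearest_even_64 (UINT64 c1, int ind, int *is_inexact)
{
  // 10^1 .. 10^9; chopping more digits needs the 128/256-bit path above
  static const UINT64 pow10[] = {
    10ull, 100ull, 1000ull, 10000ull, 100000ull,
    1000000ull, 10000000ull, 100000000ull, 1000000000ull
  };
  UINT64 ten_x = pow10[ind - 1];       // 10^x, x = ind = digits to chop off
  UINT64 half = ten_x >> 1;            // 1/2 * 10^x (assumes c1 + half fits in 64 bits)
  UINT64 rem = c1 % ten_x;             // the digits being chopped off
  UINT64 cstar = (c1 + half) / ten_x;  // floor((C1 + 1/2 * 10^x) * 10^(-x))

  *is_inexact = (rem != 0);            // some non-zero digit was discarded
  if (rem == half && (cstar & 1ull))   // exact midpoint landed on an odd value:
    cstar--;                           // subtract 1 to make even (ties to even)
  return cstar;                        // caller re-attaches sign and exponent 0
}
#endif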
807
808/*****************************************************************************
809 * BID128_round_integral_nearest_even
810 ****************************************************************************/
811
812BID128_FUNCTION_ARG1_NORND(__bid128_round_integral_nearest_even, x)
813
814 UINT128 res;
815 UINT64 x_sign;
816 UINT64 x_exp;
817 int exp; // unbiased exponent
818 // Note: C1.w[1], C1.w[0] represent x_signif_hi, x_signif_lo (all are UINT64)
819 UINT64 tmp64;
820 BID_UI64DOUBLE tmp1;
821 unsigned int x_nr_bits;
822 int q, ind, shift;
823 UINT128 C1;
824 // UINT128 res is C* at first - represents up to 34 decimal digits ~ 113 bits
825 UINT256 fstar;
826 UINT256 P256;
827
828 // check for NaN or Infinity
829 if ((x.w[1] & MASK_SPECIAL) == MASK_SPECIAL) {
830 // x is special
831 if ((x.w[1] & MASK_NAN) == MASK_NAN) { // x is NAN
832 if ((x.w[1] & MASK_SNAN) == MASK_SNAN) { // x is SNAN
833 // set invalid flag
834 *pfpsf |= INVALID_EXCEPTION;
835 // return Quiet (SNaN)
836 res.w[1] = x.w[1] & 0xfdffffffffffffffull;
837 res.w[0] = x.w[0];
838 } else { // x is QNaN
839 // return the input QNaN
840 res.w[1] = x.w[1];
841 res.w[0] = x.w[0];
842 }
843 BID_RETURN (res);
844 } else { // x is not a NaN, so it must be infinity
845 if ((x.w[1] & MASK_SIGN) == 0x0ull) { // x is +inf
846 // return +inf
847 res.w[1] = 0x7800000000000000ull;
848 res.w[0] = 0x0000000000000000ull;
849 } else { // x is -inf
850 // return -inf
851 res.w[1] = 0xf800000000000000ull;
852 res.w[0] = 0x0000000000000000ull;
853 }
854 BID_RETURN (res);
855 }
856 }
857 // unpack x
858 x_sign = x.w[1] & MASK_SIGN; // 0 for positive, MASK_SIGN for negative
859 x_exp = x.w[1] & MASK_EXP; // biased and shifted left 49 bit positions
860 C1.w[1] = x.w[1] & MASK_COEFF;
861 C1.w[0] = x.w[0];
862
863 // test for non-canonical values:
864 // - values whose encoding begins with x00, x01, or x10 and whose
865 // coefficient is larger than 10^34 -1, or
866 // - values whose encoding begins with x1100, x1101, x1110 (if NaNs
867 // and infinities were eliminated already this test is reduced to
868 // checking for x10x)
869
870 // test for non-canonical values of the argument x
871 if ((((C1.w[1] > 0x0001ed09bead87c0ull)
872 || ((C1.w[1] == 0x0001ed09bead87c0ull)
873 && (C1.w[0] > 0x378d8e63ffffffffull)))
874 && ((x.w[1] & 0x6000000000000000ull) != 0x6000000000000000ull))
875 || ((x.w[1] & 0x6000000000000000ull) == 0x6000000000000000ull)) {
876 x.w[1] = x.w[1] & 0x8000000000000000ull; // preserve the sign bit
877 x.w[0] = 0;
878 x_exp = 0x1820ull << 49;
879 C1.w[1] = 0;
880 C1.w[0] = 0;
881 }
882 // test for input equal to zero
883 if ((C1.w[1] == 0x0ull) && (C1.w[0] == 0x0ull)) {
884 // x is 0
885 // return 0 preserving the sign bit and the preferred exponent
886 // of MAX(Q(x), 0)
887 if (x_exp <= (0x1820ull << 49)) {
888 res.w[1] =
889 (x.w[1] & 0x8000000000000000ull) | 0x3040000000000000ull;
890 } else {
891 res.w[1] = x.w[1] & 0xfffe000000000000ull;
892 }
893 res.w[0] = 0x0000000000000000ull;
894 BID_RETURN (res);
895 }
896 // x is not special and is not zero
897
898 // if (exp <= -(p+1)) return 0
899 if (x_exp <= 0x2ffa000000000000ull) { // 0x2ffa000000000000ull == -35
900 res.w[1] = x_sign | 0x3040000000000000ull;
901 res.w[0] = 0x0000000000000000ull;
902 BID_RETURN (res);
903 }
904 // q = nr. of decimal digits in x
905 // determine first the nr. of bits in x
906 if (C1.w[1] == 0) {
907 if (C1.w[0] >= 0x0020000000000000ull) { // x >= 2^53
908 // split the 64-bit value in two 32-bit halves to avoid rounding errors
909 if (C1.w[0] >= 0x0000000100000000ull) { // x >= 2^32
910 tmp1.d = (double) (C1.w[0] >> 32); // exact conversion
911 x_nr_bits =
912 33 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
913 } else { // x < 2^32
914 tmp1.d = (double) (C1.w[0]); // exact conversion
915 x_nr_bits =
916 1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
917 }
918 } else { // if x < 2^53
919 tmp1.d = (double) C1.w[0]; // exact conversion
920 x_nr_bits =
921 1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
922 }
923 } else { // C1.w[1] != 0 => nr. bits = 64 + nr_bits (C1.w[1])
924 tmp1.d = (double) C1.w[1]; // exact conversion
925 x_nr_bits =
926 65 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
927 }
928 q = __bid_nr_digits[x_nr_bits - 1].digits;
929 if (q == 0) {
930 q = __bid_nr_digits[x_nr_bits - 1].digits1;
931 if (C1.w[1] > __bid_nr_digits[x_nr_bits - 1].threshold_hi
932 || (C1.w[1] == __bid_nr_digits[x_nr_bits - 1].threshold_hi
933 && C1.w[0] >= __bid_nr_digits[x_nr_bits - 1].threshold_lo))
934 q++;
935 }
936 exp = (x_exp >> 49) - 6176;
937 if (exp >= 0) { // -exp <= 0
938 // the argument is an integer already
939 res.w[1] = x.w[1];
940 res.w[0] = x.w[0];
941 BID_RETURN (res);
942 } else if ((q + exp) >= 0) { // exp < 0 and 1 <= -exp <= q
943 // need to shift right -exp digits from the coefficient; the exp will be 0
944 ind = -exp; // 1 <= ind <= 34; ind is a synonym for 'x'
945 // chop off ind digits from the lower part of C1
946 // C1 = C1 + 1/2 * 10^x where the result C1 fits in 127 bits
947 tmp64 = C1.w[0];
948 if (ind <= 19) {
949 C1.w[0] = C1.w[0] + __bid_midpoint64[ind - 1];
950 } else {
951 C1.w[0] = C1.w[0] + __bid_midpoint128[ind - 20].w[0];
952 C1.w[1] = C1.w[1] + __bid_midpoint128[ind - 20].w[1];
953 }
954 if (C1.w[0] < tmp64)
955 C1.w[1]++;
956 // calculate C* and f*
957 // C* is actually floor(C*) in this case
958 // C* and f* need shifting and masking, as shown by
959 // __bid_shiftright128[] and __bid_maskhigh128[]
960 // 1 <= x <= 34
961 // kx = 10^(-x) = __bid_ten2mk128[ind - 1]
962 // C* = (C1 + 1/2 * 10^x) * 10^(-x)
963 // the approximation of 10^(-x) was rounded up to 118 bits
964 __mul_128x128_to_256 (P256, C1, __bid_ten2mk128[ind - 1]);
965 // determine the value of res and fstar
966 if (ind - 1 <= 2) { // 0 <= ind - 1 <= 2 => shift = 0
967 // redundant shift = __bid_shiftright128[ind - 1]; // shift = 0
968 res.w[1] = P256.w[3];
969 res.w[0] = P256.w[2];
970 // redundant fstar.w[3] = 0;
971 // redundant fstar.w[2] = 0;
972 // redundant fstar.w[1] = P256.w[1];
973 // redundant fstar.w[0] = P256.w[0];
974 // fraction f* < 10^(-x) <=> midpoint
975 // f* is in the right position to be compared with
976 // 10^(-x) from __bid_ten2mk128[]
977 // if 0 < fstar < 10^(-x), subtract 1 if odd (for rounding to even)
978 if ((res.w[0] & 0x0000000000000001ull) && // is result odd?
979 ((P256.w[1] < (__bid_ten2mk128[ind - 1].w[1]))
980 || ((P256.w[1] == __bid_ten2mk128[ind - 1].w[1])
981 && (P256.w[0] < __bid_ten2mk128[ind - 1].w[0])))) {
982 // subtract 1 to make even
983 if (res.w[0]-- == 0) {
984 res.w[1]--;
985 }
986 }
987 } else if (ind - 1 <= 21) { // 3 <= ind - 1 <= 21 => 3 <= shift <= 63
988 shift = __bid_shiftright128[ind - 1]; // 3 <= shift <= 63
989 res.w[1] = (P256.w[3] >> shift);
990 res.w[0] = (P256.w[3] << (64 - shift)) | (P256.w[2] >> shift);
991 // redundant fstar.w[3] = 0;
992 fstar.w[2] = P256.w[2] & __bid_maskhigh128[ind - 1];
993 fstar.w[1] = P256.w[1];
994 fstar.w[0] = P256.w[0];
995 // fraction f* < 10^(-x) <=> midpoint
996 // f* is in the right position to be compared with
997 // 10^(-x) from __bid_ten2mk128[]
998 if ((res.w[0] & 0x0000000000000001ull) && // is result odd?
999 fstar.w[2] == 0 && (fstar.w[1] < __bid_ten2mk128[ind - 1].w[1]
1000 || (fstar.w[1] == __bid_ten2mk128[ind - 1].w[1]
1001 && fstar.w[0] < __bid_ten2mk128[ind - 1].w[0]))) {
1002 // subtract 1 to make even
1003 if (res.w[0]-- == 0) {
1004 res.w[1]--;
1005 }
1006 }
1007 } else { // 22 <= ind - 1 <= 33
1008 shift = __bid_shiftright128[ind - 1] - 64; // 2 <= shift <= 38
1009 res.w[1] = 0;
1010 res.w[0] = P256.w[3] >> shift;
1011 fstar.w[3] = P256.w[3] & __bid_maskhigh128[ind - 1];
1012 fstar.w[2] = P256.w[2];
1013 fstar.w[1] = P256.w[1];
1014 fstar.w[0] = P256.w[0];
1015 // fraction f* < 10^(-x) <=> midpoint
1016 // f* is in the right position to be compared with
1017 // 10^(-x) from __bid_ten2mk128[]
1018 if ((res.w[0] & 0x0000000000000001ull) && // is result odd?
1019 fstar.w[3] == 0 && fstar.w[2] == 0
1020 && (fstar.w[1] < __bid_ten2mk128[ind - 1].w[1]
1021 || (fstar.w[1] == __bid_ten2mk128[ind - 1].w[1]
1022 && fstar.w[0] < __bid_ten2mk128[ind - 1].w[0]))) {
1023 // subtract 1 to make even
1024 if (res.w[0]-- == 0) {
1025 res.w[1]--;
1026 }
1027 }
1028 }
1029 res.w[1] = x_sign | 0x3040000000000000ull | res.w[1];
1030 BID_RETURN (res);
1031 } else { // if ((q + exp) < 0) <=> q < -exp
1032 // the result is +0 or -0
1033 res.w[1] = x_sign | 0x3040000000000000ull;
1034 res.w[0] = 0x0000000000000000ull;
1035 BID_RETURN (res);
1036 }
1037}
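
/* Illustrative sketch (not compiled; for exposition only): the functions
   above derive q, the number of decimal digits in the coefficient, from
   its bit count (obtained through an exact conversion to double) followed
   by a lookup in __bid_nr_digits[] with a threshold comparison.  The
   helper below computes the same q for a 64-bit value directly; its name,
   the 30103/100000 approximation of log10(2) and the pow10 table are
   illustrative assumptions, not the library's tables.  */
#if 0
static int
decimal_digit_count_64 (UINT64 c)
{
  static const UINT64 pow10[20] = {
    1ull, 10ull, 100ull, 1000ull, 10000ull, 100000ull, 1000000ull,
    10000000ull, 100000000ull, 1000000000ull, 10000000000ull,
    100000000000ull, 1000000000000ull, 10000000000000ull,
    100000000000000ull, 1000000000000000ull, 10000000000000000ull,
    100000000000000000ull, 1000000000000000000ull,
    10000000000000000000ull
  };
  int nbits = 0;                   // position of the highest set bit, plus one
  int q;
  UINT64 t = c;

  while (t) { nbits++; t >>= 1; }  // callers never reach this with c == 0
  // nbits bits give at least floor((nbits-1)*log10(2)) + 1 digits; one
  // comparison with a power of ten settles the remaining ambiguity, which
  // is what the digits/digits1/threshold fields of __bid_nr_digits[] encode
  q = ((nbits - 1) * 30103) / 100000 + 1;
  if (q < 20 && c >= pow10[q])
    q++;
  return q;
}
#endif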
1038
1039/*****************************************************************************
1040 * BID128_round_integral_negative
1041 ****************************************************************************/
1042
1043BID128_FUNCTION_ARG1_NORND(__bid128_round_integral_negative, x)
1044
1045 UINT128 res;
1046 UINT64 x_sign;
1047 UINT64 x_exp;
1048 int exp; // unbiased exponent
1049 // Note: C1.w[1], C1.w[0] represent x_signif_hi, x_signif_lo
1050 // (all are UINT64)
1051 BID_UI64DOUBLE tmp1;
1052 unsigned int x_nr_bits;
1053 int q, ind, shift;
1054 UINT128 C1;
1055 // UINT128 res is C* at first - represents up to 34 decimal digits ~
1056 // 113 bits
1057 UINT256 fstar;
1058 UINT256 P256;
1059
1060 // check for NaN or Infinity
1061 if ((x.w[1] & MASK_SPECIAL) == MASK_SPECIAL) {
1062 // x is special
1063 if ((x.w[1] & MASK_NAN) == MASK_NAN) { // x is NAN
1064 if ((x.w[1] & MASK_SNAN) == MASK_SNAN) { // x is SNAN
1065 // set invalid flag
1066 *pfpsf |= INVALID_EXCEPTION;
1067 // return Quiet (SNaN)
1068 res.w[1] = x.w[1] & 0xfdffffffffffffffull;
1069 res.w[0] = x.w[0];
1070 } else { // x is QNaN
1071 // return the input QNaN
1072 res.w[1] = x.w[1];
1073 res.w[0] = x.w[0];
1074 }
1075 BID_RETURN (res);
1076 } else { // x is not a NaN, so it must be infinity
1077 if ((x.w[1] & MASK_SIGN) == 0x0ull) { // x is +inf
1078 // return +inf
1079 res.w[1] = 0x7800000000000000ull;
1080 res.w[0] = 0x0000000000000000ull;
1081 } else { // x is -inf
1082 // return -inf
1083 res.w[1] = 0xf800000000000000ull;
1084 res.w[0] = 0x0000000000000000ull;
1085 }
1086 BID_RETURN (res);
1087 }
1088 }
1089 // unpack x
1090 x_sign = x.w[1] & MASK_SIGN; // 0 for positive, MASK_SIGN for negative
1091 x_exp = x.w[1] & MASK_EXP; // biased and shifted left 49 bit positions
1092 C1.w[1] = x.w[1] & MASK_COEFF;
1093 C1.w[0] = x.w[0];
1094
1095 // test for non-canonical values:
1096 // - values whose encoding begins with x00, x01, or x10 and whose
1097 // coefficient is larger than 10^34 -1, or
1098 // - values whose encoding begins with x1100, x1101, x1110 (if NaNs
1099 // and infinities were eliminated already this test is reduced to
1100 // checking for x10x)
1101
1102 // test for non-canonical values of the argument x
1103 if ((((C1.w[1] > 0x0001ed09bead87c0ull)
1104 || ((C1.w[1] == 0x0001ed09bead87c0ull)
1105 && (C1.w[0] > 0x378d8e63ffffffffull)))
1106 && ((x.w[1] & 0x6000000000000000ull) != 0x6000000000000000ull))
1107 || ((x.w[1] & 0x6000000000000000ull) == 0x6000000000000000ull)) {
1108 x.w[1] = x.w[1] & 0x8000000000000000ull; // preserve the sign bit
1109 x.w[0] = 0;
1110 x_exp = 0x1820ull << 49;
1111 C1.w[1] = 0;
1112 C1.w[0] = 0;
1113 }
1114 // test for input equal to zero
1115 if ((C1.w[1] == 0x0ull) && (C1.w[0] == 0x0ull)) {
1116 // x is 0
1117 // return 0 preserving the sign bit and the preferred exponent
1118 // of MAX(Q(x), 0)
1119 if (x_exp <= (0x1820ull << 49)) {
1120 res.w[1] =
1121 (x.w[1] & 0x8000000000000000ull) | 0x3040000000000000ull;
1122 } else {
1123 res.w[1] = x.w[1] & 0xfffe000000000000ull;
1124 }
1125 res.w[0] = 0x0000000000000000ull;
1126 BID_RETURN (res);
1127 }
1128 // x is not special and is not zero
1129
1130 // if (exp <= -p) return -1.0 or +0.0
1131 if (x_exp <= 0x2ffc000000000000ull) { // 0x2ffc000000000000ull == -34
1132 if (x_sign) {
1133 // if negative, return negative 1, because we know the coefficient
1134 // is non-zero (would have been caught above)
1135 res.w[1] = 0xb040000000000000ull;
1136 res.w[0] = 0x0000000000000001ull;
1137 } else {
1138 // if positive, return positive 0, because we know coefficient is
1139 // non-zero (would have been caught above)
1140 res.w[1] = 0x3040000000000000ull;
1141 res.w[0] = 0x0000000000000000ull;
1142 }
1143 BID_RETURN (res);
1144 }
1145 // q = nr. of decimal digits in x
1146 // determine first the nr. of bits in x
1147 if (C1.w[1] == 0) {
1148 if (C1.w[0] >= 0x0020000000000000ull) { // x >= 2^53
1149 // split the 64-bit value in two 32-bit halves to avoid rounding errors
1150 if (C1.w[0] >= 0x0000000100000000ull) { // x >= 2^32
1151 tmp1.d = (double) (C1.w[0] >> 32); // exact conversion
1152 x_nr_bits =
1153 33 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
1154 } else { // x < 2^32
1155 tmp1.d = (double) (C1.w[0]); // exact conversion
1156 x_nr_bits =
1157 1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
1158 }
1159 } else { // if x < 2^53
1160 tmp1.d = (double) C1.w[0]; // exact conversion
1161 x_nr_bits =
1162 1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
1163 }
1164 } else { // C1.w[1] != 0 => nr. bits = 64 + nr_bits (C1.w[1])
1165 tmp1.d = (double) C1.w[1]; // exact conversion
1166 x_nr_bits =
1167 65 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
1168 }
1169 q = __bid_nr_digits[x_nr_bits - 1].digits;
1170 if (q == 0) {
1171 q = __bid_nr_digits[x_nr_bits - 1].digits1;
1172 if (C1.w[1] > __bid_nr_digits[x_nr_bits - 1].threshold_hi
1173 || (C1.w[1] == __bid_nr_digits[x_nr_bits - 1].threshold_hi
1174 && C1.w[0] >= __bid_nr_digits[x_nr_bits - 1].threshold_lo))
1175 q++;
1176 }
1177 exp = (x_exp >> 49) - 6176;
1178 if (exp >= 0) { // -exp <= 0
1179 // the argument is an integer already
1180 res.w[1] = x.w[1];
1181 res.w[0] = x.w[0];
1182 BID_RETURN (res);
1183 } else if ((q + exp) > 0) { // exp < 0 and 1 <= -exp < q
1184 // need to shift right -exp digits from the coefficient; the exp will be 0
1185 ind = -exp; // 1 <= ind <= 34; ind is a synonym for 'x'
1186 // (number of digits to be chopped off)
1187 // chop off ind digits from the lower part of C1
1188 // FOR ROUND_TO_NEAREST, WE ADD 1/2 ULP(y) then truncate
1189 // FOR ROUND_TO_ZERO, WE DON'T NEED TO ADD 1/2 ULP
1190 // FOR ROUND_TO_POSITIVE_INFINITY, WE TRUNCATE, THEN ADD 1 IF POSITIVE
1191 // FOR ROUND_TO_NEGATIVE_INFINITY, WE TRUNCATE, THEN ADD 1 IF NEGATIVE
1192 //tmp64 = C1.w[0];
1193 // if (ind <= 19) {
1194 // C1.w[0] = C1.w[0] + __bid_midpoint64[ind - 1];
1195 // } else {
1196 // C1.w[0] = C1.w[0] + __bid_midpoint128[ind - 20].w[0];
1197 // C1.w[1] = C1.w[1] + __bid_midpoint128[ind - 20].w[1];
1198 // }
1199 // if (C1.w[0] < tmp64) C1.w[1]++;
1200 // if carry-out from C1.w[0], increment C1.w[1]
1201 // calculate C* and f*
1202 // C* is actually floor(C*) in this case
1203 // C* and f* need shifting and masking, as shown by
1204 // __bid_shiftright128[] and __bid_maskhigh128[]
1205 // 1 <= x <= 34
1206 // kx = 10^(-x) = __bid_ten2mk128[ind - 1]
1207 // C* = (C1 + 1/2 * 10^x) * 10^(-x)
1208 // the approximation of 10^(-x) was rounded up to 118 bits
1209 __mul_128x128_to_256 (P256, C1, __bid_ten2mk128[ind - 1]);
1210 if (ind - 1 <= 2) { // 0 <= ind - 1 <= 2 => shift = 0
1211 res.w[1] = P256.w[3];
1212 res.w[0] = P256.w[2];
1213 // if positive, the truncated value is already the correct result
1214 if (x_sign) { // if negative
1215 // redundant fstar.w[3] = 0;
1216 // redundant fstar.w[2] = 0;
1217 // redundant fstar.w[1] = P256.w[1];
1218 // redundant fstar.w[0] = P256.w[0];
1219 // fraction f* > 10^(-x) <=> inexact
1220 // f* is in the right position to be compared with
1221 // 10^(-x) from __bid_ten2mk128[]
1222 if ((P256.w[1] > __bid_ten2mk128[ind - 1].w[1])
1223 || (P256.w[1] == __bid_ten2mk128[ind - 1].w[1]
1224 && (P256.w[0] >= __bid_ten2mk128[ind - 1].w[0]))) {
1225 if (++res.w[0] == 0) {
1226 res.w[1]++;
1227 }
1228 }
1229 }
1230 } else if (ind - 1 <= 21) { // 3 <= ind - 1 <= 21 => 3 <= shift <= 63
1231 shift = __bid_shiftright128[ind - 1]; // 3 <= shift <= 63
1232 res.w[1] = (P256.w[3] >> shift);
1233 res.w[0] = (P256.w[3] << (64 - shift)) | (P256.w[2] >> shift);
1234 // if positive, the truncated value is already the correct result
1235 if (x_sign) { // if negative
1236 // redundant fstar.w[3] = 0;
1237 fstar.w[2] = P256.w[2] & __bid_maskhigh128[ind - 1];
1238 fstar.w[1] = P256.w[1];
1239 fstar.w[0] = P256.w[0];
1240 // fraction f* > 10^(-x) <=> inexact
1241 // f* is in the right position to be compared with
1242 // 10^(-x) from __bid_ten2mk128[]
1243 if (fstar.w[2] || fstar.w[1] > __bid_ten2mk128[ind - 1].w[1]
1244 || (fstar.w[1] == __bid_ten2mk128[ind - 1].w[1]
1245 && fstar.w[0] >= __bid_ten2mk128[ind - 1].w[0])) {
1246 if (++res.w[0] == 0) {
1247 res.w[1]++;
1248 }
1249 }
1250 }
1251 } else { // 22 <= ind - 1 <= 33
1252 shift = __bid_shiftright128[ind - 1] - 64; // 2 <= shift <= 38
1253 res.w[1] = 0;
1254 res.w[0] = P256.w[3] >> shift;
1255 // if positive, the truncated value is already the correct result
1256 if (x_sign) { // if negative
1257 fstar.w[3] = P256.w[3] & __bid_maskhigh128[ind - 1];
1258 fstar.w[2] = P256.w[2];
1259 fstar.w[1] = P256.w[1];
1260 fstar.w[0] = P256.w[0];
1261 // fraction f* > 10^(-x) <=> inexact
1262 // f* is in the right position to be compared with
1263 // 10^(-x) from __bid_ten2mk128[]
1264 if (fstar.w[3] || fstar.w[2]
1265 || fstar.w[1] > __bid_ten2mk128[ind - 1].w[1]
1266 || (fstar.w[1] == __bid_ten2mk128[ind - 1].w[1]
1267 && fstar.w[0] >= __bid_ten2mk128[ind - 1].w[0])) {
1268 if (++res.w[0] == 0) {
1269 res.w[1]++;
1270 }
1271 }
1272 }
1273 }
1274 res.w[1] = x_sign | 0x3040000000000000ull | res.w[1];
1275 BID_RETURN (res);
1276 } else { // if exp < 0 and q + exp <= 0
1277 if (x_sign) { // negative rounds down to -1.0
1278 res.w[1] = 0xb040000000000000ull;
1279 res.w[0] = 0x0000000000000001ull;
1280 } else { // positive rounds down to +0.0
1281 res.w[1] = 0x3040000000000000ull;
1282 res.w[0] = 0x0000000000000000ull;
1283 }
1284 BID_RETURN (res);
1285 }
1286}
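
/* Illustrative sketch (not compiled; for exposition only): for rounding
   toward negative infinity the code above truncates the low 'ind' digits
   and, only when the value is negative and some discarded digit was
   non-zero (the f* > 10^(-x) test), adds one unit to the magnitude.  The
   helper below is the 64-bit version of that rule; its name and pow10
   table are illustrative assumptions.  */
#if 0
static UINT64
round_digits_toward_neg_inf_64 (UINT64 c1, int ind, UINT64 sign)
{
  static const UINT64 pow10[] = {
    10ull, 100ull, 1000ull, 10000ull, 100000ull,
    1000000ull, 10000000ull, 100000000ull, 1000000000ull
  };
  UINT64 ten_x = pow10[ind - 1];   // 10^x, 1 <= ind <= 9 in this sketch
  UINT64 cstar = c1 / ten_x;       // truncate toward zero

  if (sign && (c1 % ten_x) != 0)   // negative and inexact: floor moves the
    cstar++;                       // magnitude up by one unit in the last place
  return cstar;                    // caller attaches the sign and exponent 0
}
#endif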
1287
1288/*****************************************************************************
1289 * BID128_round_integral_positive
1290 ****************************************************************************/
1291
1292BID128_FUNCTION_ARG1_NORND(__bid128_round_integral_positive, x)
1293
1294 UINT128 res;
1295 UINT64 x_sign;
1296 UINT64 x_exp;
1297 int exp; // unbiased exponent
1298 // Note: C1.w[1], C1.w[0] represent x_signif_hi, x_signif_lo
1299 // (all are UINT64)
1300 BID_UI64DOUBLE tmp1;
1301 unsigned int x_nr_bits;
1302 int q, ind, shift;
1303 UINT128 C1;
1304 // UINT128 res is C* at first - represents up to 34 decimal digits ~
1305 // 113 bits
1306 UINT256 fstar;
1307 UINT256 P256;
1308
1309 // check for NaN or Infinity
1310 if ((x.w[1] & MASK_SPECIAL) == MASK_SPECIAL) {
1311 // x is special
1312 if ((x.w[1] & MASK_NAN) == MASK_NAN) { // x is NAN
1313 if ((x.w[1] & MASK_SNAN) == MASK_SNAN) { // x is SNAN
1314 // set invalid flag
1315 *pfpsf |= INVALID_EXCEPTION;
1316 // return Quiet (SNaN)
1317 res.w[1] = x.w[1] & 0xfdffffffffffffffull;
1318 res.w[0] = x.w[0];
1319 } else { // x is QNaN
1320 // return the input QNaN
1321 res.w[1] = x.w[1];
1322 res.w[0] = x.w[0];
1323 }
1324 BID_RETURN (res);
1325 } else { // x is not a NaN, so it must be infinity
1326 if ((x.w[1] & MASK_SIGN) == 0x0ull) { // x is +inf
1327 // return +inf
1328 res.w[1] = 0x7800000000000000ull;
1329 res.w[0] = 0x0000000000000000ull;
1330 } else { // x is -inf
1331 // return -inf
1332 res.w[1] = 0xf800000000000000ull;
1333 res.w[0] = 0x0000000000000000ull;
1334 }
1335 BID_RETURN (res);
1336 }
1337 }
1338 // unpack x
1339 x_sign = x.w[1] & MASK_SIGN; // 0 for positive, MASK_SIGN for negative
1340 x_exp = x.w[1] & MASK_EXP; // biased and shifted left 49 bit positions
1341 C1.w[1] = x.w[1] & MASK_COEFF;
1342 C1.w[0] = x.w[0];
1343
1344 // test for non-canonical values:
1345 // - values whose encoding begins with x00, x01, or x10 and whose
1346 // coefficient is larger than 10^34 -1, or
1347 // - values whose encoding begins with x1100, x1101, x1110 (if NaNs
1348 // and infinities were eliminated already this test is reduced to
1349 // checking for x10x)
1350
1351 // test for non-canonical values of the argument x
1352 if ((((C1.w[1] > 0x0001ed09bead87c0ull)
1353 || ((C1.w[1] == 0x0001ed09bead87c0ull)
1354 && (C1.w[0] > 0x378d8e63ffffffffull)))
1355 && ((x.w[1] & 0x6000000000000000ull) != 0x6000000000000000ull))
1356 || ((x.w[1] & 0x6000000000000000ull) == 0x6000000000000000ull)) {
1357 x.w[1] = x.w[1] & 0x8000000000000000ull; // preserve the sign bit
1358 x.w[0] = 0;
1359 x_exp = 0x1820ull << 49;
1360 C1.w[1] = 0;
1361 C1.w[0] = 0;
1362 }
1363 // test for input equal to zero
1364 if ((C1.w[1] == 0x0ull) && (C1.w[0] == 0x0ull)) {
1365 // x is 0
1366 // return 0 preserving the sign bit and the preferred exponent
1367 // of MAX(Q(x), 0)
1368 if (x_exp <= (0x1820ull << 49)) {
1369 res.w[1] =
1370 (x.w[1] & 0x8000000000000000ull) | 0x3040000000000000ull;
1371 } else {
1372 res.w[1] = x.w[1] & 0xfffe000000000000ull;
1373 }
1374 res.w[0] = 0x0000000000000000ull;
1375 BID_RETURN (res);
1376 }
1377 // x is not special and is not zero
1378
1379 // if (exp <= -p) return -0.0 or +1.0
1380 if (x_exp <= 0x2ffc000000000000ull) { // 0x2ffc000000000000ull == -34
1381 if (x_sign) {
1382 // if negative, return negative 0, because we know the coefficient
1383 // is non-zero (would have been caught above)
1384 res.w[1] = 0xb040000000000000ull;
1385 res.w[0] = 0x0000000000000000ull;
1386 } else {
1387 // if positive, return positive 1, because we know coefficient is
1388 // non-zero (would have been caught above)
1389 res.w[1] = 0x3040000000000000ull;
1390 res.w[0] = 0x0000000000000001ull;
1391 }
1392 BID_RETURN (res);
1393 }
1394 // q = nr. of decimal digits in x
1395 // determine first the nr. of bits in x
1396 if (C1.w[1] == 0) {
1397 if (C1.w[0] >= 0x0020000000000000ull) { // x >= 2^53
1398 // split 64-bit value in two 32-bit halves to avoid rounding errors
1399 if (C1.w[0] >= 0x0000000100000000ull) { // x >= 2^32
1400 tmp1.d = (double) (C1.w[0] >> 32); // exact conversion
1401 x_nr_bits =
1402 33 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
1403 } else { // x < 2^32
1404 tmp1.d = (double) (C1.w[0]); // exact conversion
1405 x_nr_bits =
1406 1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
1407 }
1408 } else { // if x < 2^53
1409 tmp1.d = (double) C1.w[0]; // exact conversion
1410 x_nr_bits =
1411 1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
1412 }
1413 } else { // C1.w[1] != 0 => nr. bits = 64 + nr_bits (C1.w[1])
1414 tmp1.d = (double) C1.w[1]; // exact conversion
1415 x_nr_bits =
1416 65 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
1417 }
1418 q = __bid_nr_digits[x_nr_bits - 1].digits;
1419 if (q == 0) {
1420 q = __bid_nr_digits[x_nr_bits - 1].digits1;
1421 if (C1.w[1] > __bid_nr_digits[x_nr_bits - 1].threshold_hi
1422 || (C1.w[1] == __bid_nr_digits[x_nr_bits - 1].threshold_hi
1423 && C1.w[0] >= __bid_nr_digits[x_nr_bits - 1].threshold_lo))
1424 q++;
1425 }
1426 exp = (x_exp >> 49) - 6176;
1427 if (exp >= 0) { // -exp <= 0
1428 // the argument is an integer already
1429 res.w[1] = x.w[1];
1430 res.w[0] = x.w[0];
1431 BID_RETURN (res);
1432 } else if ((q + exp) > 0) { // exp < 0 and 1 <= -exp < q
1433 // need to shift right -exp digits from the coefficient; exp will be 0
1434 ind = -exp; // 1 <= ind <= 34; ind is a synonym for 'x'
1435 // (number of digits to be chopped off)
1436 // chop off ind digits from the lower part of C1
1437 // FOR ROUND_TO_NEAREST, WE ADD 1/2 ULP(y) then truncate
1438 // FOR ROUND_TO_ZERO, WE DON'T NEED TO ADD 1/2 ULP
1439 // FOR ROUND_TO_POSITIVE_INFINITY, WE TRUNCATE, THEN ADD 1 IF POSITIVE
1440 // FOR ROUND_TO_NEGATIVE_INFINITY, WE TRUNCATE, THEN ADD 1 IF NEGATIVE
1441 // tmp64 = C1.w[0];
1442 // if (ind <= 19) {
1443 // C1.w[0] = C1.w[0] + __bid_midpoint64[ind - 1];
1444 // } else {
1445 // C1.w[0] = C1.w[0] + __bid_midpoint128[ind - 20].w[0];
1446 // C1.w[1] = C1.w[1] + __bid_midpoint128[ind - 20].w[1];
1447 // }
1448 // if (C1.w[0] < tmp64) C1.w[1]++;
1449 // if carry-out from C1.w[0], increment C1.w[1]
1450 // calculate C* and f*
1451 // C* is actually floor(C*) in this case
1452 // C* and f* need shifting and masking, as shown by
1453 // __bid_shiftright128[] and __bid_maskhigh128[]
1454 // 1 <= x <= 34
1455 // kx = 10^(-x) = __bid_ten2mk128[ind - 1]
1456 // C* = C1 * 10^(-x)
1457 // the approximation of 10^(-x) was rounded up to 118 bits
1458 __mul_128x128_to_256 (P256, C1, __bid_ten2mk128[ind - 1]);
1459 if (ind - 1 <= 2) { // 0 <= ind - 1 <= 2 => shift = 0
1460 res.w[1] = P256.w[3];
1461 res.w[0] = P256.w[2];
1462 // if negative, the truncated value is already the correct result
1463 if (!x_sign) { // if positive
1464 // redundant fstar.w[3] = 0;
1465 // redundant fstar.w[2] = 0;
1466 // redundant fstar.w[1] = P256.w[1];
1467 // redundant fstar.w[0] = P256.w[0];
1468 // fraction f* > 10^(-x) <=> inexact
1469 // f* is in the right position to be compared with
1470 // 10^(-x) from __bid_ten2mk128[]
1471 if ((P256.w[1] > __bid_ten2mk128[ind - 1].w[1])
1472 || (P256.w[1] == __bid_ten2mk128[ind - 1].w[1]
1473 && (P256.w[0] >= __bid_ten2mk128[ind - 1].w[0]))) {
1474 if (++res.w[0] == 0) {
1475 res.w[1]++;
1476 }
1477 }
1478 }
1479 } else if (ind - 1 <= 21) { // 3 <= ind - 1 <= 21 => 3 <= shift <= 63
1480 shift = __bid_shiftright128[ind - 1]; // 3 <= shift <= 63
1481 res.w[1] = (P256.w[3] >> shift);
1482 res.w[0] = (P256.w[3] << (64 - shift)) | (P256.w[2] >> shift);
1483 // if negative, the truncated value is already the correct result
1484 if (!x_sign) { // if positive
1485 // redundant fstar.w[3] = 0;
1486 fstar.w[2] = P256.w[2] & __bid_maskhigh128[ind - 1];
1487 fstar.w[1] = P256.w[1];
1488 fstar.w[0] = P256.w[0];
1489 // fraction f* > 10^(-x) <=> inexact
1490 // f* is in the right position to be compared with
1491 // 10^(-x) from __bid_ten2mk128[]
1492 if (fstar.w[2] || fstar.w[1] > __bid_ten2mk128[ind - 1].w[1]
1493 || (fstar.w[1] == __bid_ten2mk128[ind - 1].w[1]
1494 && fstar.w[0] >= __bid_ten2mk128[ind - 1].w[0])) {
1495 if (++res.w[0] == 0) {
1496 res.w[1]++;
1497 }
1498 }
1499 }
1500 } else { // 22 <= ind - 1 <= 33
1501 shift = __bid_shiftright128[ind - 1] - 64; // 2 <= shift <= 38
1502 res.w[1] = 0;
1503 res.w[0] = P256.w[3] >> shift;
1504 // if negative, the truncated value is already the correct result
1505 if (!x_sign) { // if positive
1506 fstar.w[3] = P256.w[3] & __bid_maskhigh128[ind - 1];
1507 fstar.w[2] = P256.w[2];
1508 fstar.w[1] = P256.w[1];
1509 fstar.w[0] = P256.w[0];
1510 // fraction f* > 10^(-x) <=> inexact
1511 // f* is in the right position to be compared with
1512 // 10^(-x) from __bid_ten2mk128[]
1513 if (fstar.w[3] || fstar.w[2]
1514 || fstar.w[1] > __bid_ten2mk128[ind - 1].w[1]
1515 || (fstar.w[1] == __bid_ten2mk128[ind - 1].w[1]
1516 && fstar.w[0] >= __bid_ten2mk128[ind - 1].w[0])) {
1517 if (++res.w[0] == 0) {
1518 res.w[1]++;
1519 }
1520 }
1521 }
1522 }
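  // 0x3040000000000000ull sets the biased exponent 6176 (unbiased 0) in
  // bits 49-62, so the result is the integer (-1)^s * C* * 10^0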
1523 res.w[1] = x_sign | 0x3040000000000000ull | res.w[1];
1524 BID_RETURN (res);
1525 } else { // if exp < 0 and q + exp <= 0
1526 if (x_sign) { // negative rounds up to -0.0
1527 res.w[1] = 0xb040000000000000ull;
1528 res.w[0] = 0x0000000000000000ull;
1529 } else { // positive rounds up to +1.0
1530 res.w[1] = 0x3040000000000000ull;
1531 res.w[0] = 0x0000000000000001ull;
1532 }
1533 BID_RETURN (res);
1534 }
1535}
1536
1537/*****************************************************************************
1538 * BID128_round_integral_zero
1539 ****************************************************************************/
1540
1541BID128_FUNCTION_ARG1_NORND(__bid128_round_integral_zero, x)
1542
1543 UINT128 res;
1544 UINT64 x_sign;
1545 UINT64 x_exp;
1546 int exp; // unbiased exponent
1547 // Note: C1.w[1], C1.w[0] represent x_signif_hi, x_signif_lo
1548 // (all are UINT64)
1549 BID_UI64DOUBLE tmp1;
1550 unsigned int x_nr_bits;
1551 int q, ind, shift;
1552 UINT128 C1;
1553 // UINT128 res is C* at first - represents up to 34 decimal digits ~
1554 // 113 bits
1555 UINT256 P256;
1556
1557 // check for NaN or Infinity
1558 if ((x.w[1] & MASK_SPECIAL) == MASK_SPECIAL) {
1559 // x is special
1560 if ((x.w[1] & MASK_NAN) == MASK_NAN) { // x is NAN
1561 if ((x.w[1] & MASK_SNAN) == MASK_SNAN) { // x is SNAN
1562 // set invalid flag
1563 *pfpsf |= INVALID_EXCEPTION;
1564 // return Quiet (SNaN)
1565 res.w[1] = x.w[1] & 0xfdffffffffffffffull;
1566 res.w[0] = x.w[0];
1567 } else { // x is QNaN
1568 // return the input QNaN
1569 res.w[1] = x.w[1];
1570 res.w[0] = x.w[0];
1571 }
1572 BID_RETURN (res);
1573 } else { // x is not a NaN, so it must be infinity
1574 if ((x.w[1] & MASK_SIGN) == 0x0ull) { // x is +inf
1575 // return +inf
1576 res.w[1] = 0x7800000000000000ull;
1577 res.w[0] = 0x0000000000000000ull;
1578 } else { // x is -inf
1579 // return -inf
1580 res.w[1] = 0xf800000000000000ull;
1581 res.w[0] = 0x0000000000000000ull;
1582 }
1583 BID_RETURN (res);
1584 }
1585 }
1586 // unpack x
1587 x_sign = x.w[1] & MASK_SIGN; // 0 for positive, MASK_SIGN for negative
1588 x_exp = x.w[1] & MASK_EXP; // biased and shifted left 49 bit positions
1589 C1.w[1] = x.w[1] & MASK_COEFF;
1590 C1.w[0] = x.w[0];
1591
1592 // test for non-canonical values:
1593 // - values whose encoding begins with x00, x01, or x10 and whose
1594 // coefficient is larger than 10^34 -1, or
1595 // - values whose encoding begins with x1100, x1101, x1110 (if NaNs
1596 // and infinities were eliminated already this test is reduced to
1597 // checking for x11x)
1598
1599 // test for non-canonical values of the argument x
1600 if ((((C1.w[1] > 0x0001ed09bead87c0ull)
1601 || ((C1.w[1] == 0x0001ed09bead87c0ull)
1602 && (C1.w[0] > 0x378d8e63ffffffffull)))
1603 && ((x.w[1] & 0x6000000000000000ull) != 0x6000000000000000ull))
1604 || ((x.w[1] & 0x6000000000000000ull) == 0x6000000000000000ull)) {
1605 x.w[1] = x.w[1] & 0x8000000000000000ull; // preserve the sign bit
1606 x.w[0] = 0;
1607 x_exp = 0x1820ull << 49;
1608 C1.w[1] = 0;
1609 C1.w[0] = 0;
1610 }
1611 // test for input equal to zero
1612 if ((C1.w[1] == 0x0ull) && (C1.w[0] == 0x0ull)) {
1613 // x is 0
1614 // return 0 preserving the sign bit and the preferred exponent
1615 // of MAX(Q(x), 0)
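  // (e.g., a zero with exponent -5 becomes 0E+0, while a zero with
  // exponent +3 keeps its exponent)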
1616 if (x_exp <= (0x1820ull << 49)) {
1617 res.w[1] =
1618 (x.w[1] & 0x8000000000000000ull) | 0x3040000000000000ull;
1619 } else {
1620 res.w[1] = x.w[1] & 0xfffe000000000000ull;
1621 }
1622 res.w[0] = 0x0000000000000000ull;
1623 BID_RETURN (res);
1624 }
1625 // x is not special and is not zero
1626
1627 // if (exp <= -p) return -0.0 or +0.0
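  // (with p = 34 coefficient digits, exp <= -34 implies |x| < 1, so
  // truncation toward zero always yields a zero with the sign of x)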
1628 if (x_exp <= 0x2ffc000000000000ull) { // 0x2ffc000000000000ull == -34
1629 res.w[1] = x_sign | 0x3040000000000000ull;
1630 res.w[0] = 0x0000000000000000ull;
1631 BID_RETURN (res);
1632 }
1633 // q = nr. of decimal digits in x
1634 // determine first the nr. of bits in x
1635 if (C1.w[1] == 0) {
1636 if (C1.w[0] >= 0x0020000000000000ull) { // x >= 2^53
1637 // split the 64-bit value in two 32-bit halves to avoid rounding errors
1638 if (C1.w[0] >= 0x0000000100000000ull) { // x >= 2^32
1639 tmp1.d = (double) (C1.w[0] >> 32); // exact conversion
1640 x_nr_bits =
1641 33 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
1642 } else { // x < 2^32
1643 tmp1.d = (double) (C1.w[0]); // exact conversion
1644 x_nr_bits =
1645 1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
1646 }
1647 } else { // if x < 2^53
1648 tmp1.d = (double) C1.w[0]; // exact conversion
1649 x_nr_bits =
1650 1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
1651 }
1652 } else { // C1.w[1] != 0 => nr. bits = 64 + nr_bits (C1.w[1])
1653 tmp1.d = (double) C1.w[1]; // exact conversion
1654 x_nr_bits =
1655 65 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
1656 }
1657 q = __bid_nr_digits[x_nr_bits - 1].digits;
1658 if (q == 0) {
1659 q = __bid_nr_digits[x_nr_bits - 1].digits1;
1660 if (C1.w[1] > __bid_nr_digits[x_nr_bits - 1].threshold_hi
1661 || (C1.w[1] == __bid_nr_digits[x_nr_bits - 1].threshold_hi
1662 && C1.w[0] >= __bid_nr_digits[x_nr_bits - 1].threshold_lo))
1663 q++;
1664 }
1665 exp = (x_exp >> 49) - 6176;
1666 if (exp >= 0) { // -exp <= 0
1667 // the argument is an integer already
1668 res.w[1] = x.w[1];
1669 res.w[0] = x.w[0];
1670 BID_RETURN (res);
1671 } else if ((q + exp) > 0) { // exp < 0 and 1 <= -exp < q
1672 // need to shift right -exp digits from the coefficient; the exp will be 0
1673 ind = -exp; // 1 <= ind <= 34; ind is a synonym for 'x'
1674 // (number of digits to be chopped off)
1675 // chop off ind digits from the lower part of C1
1676 // FOR ROUND_TO_NEAREST, WE ADD 1/2 ULP(y) then truncate
1677 // FOR ROUND_TO_ZERO, WE DON'T NEED TO ADD 1/2 ULP
1678 // FOR ROUND_TO_POSITIVE_INFINITY, WE TRUNCATE, THEN ADD 1 IF POSITIVE
1679 // FOR ROUND_TO_NEGATIVE_INFINITY, WE TRUNCATE, THEN ADD 1 IF NEGATIVE
1680 //tmp64 = C1.w[0];
1681 // if (ind <= 19) {
1682 // C1.w[0] = C1.w[0] + __bid_midpoint64[ind - 1];
1683 // } else {
1684 // C1.w[0] = C1.w[0] + __bid_midpoint128[ind - 20].w[0];
1685 // C1.w[1] = C1.w[1] + __bid_midpoint128[ind - 20].w[1];
1686 // }
1687 // if (C1.w[0] < tmp64) C1.w[1]++;
1688 // if carry-out from C1.w[0], increment C1.w[1]
1689 // calculate C* and f*
1690 // C* is actually floor(C*) in this case
1691 // C* and f* need shifting and masking, as shown by
1692 // __bid_shiftright128[] and __bid_maskhigh128[]
1693 // 1 <= x <= 34
1694 // kx = 10^(-x) = __bid_ten2mk128[ind - 1]
1695 // C* = C1 * 10^(-x) (no 1/2 ulp is added when truncating toward zero)
1696 // the approximation of 10^(-x) was rounded up to 118 bits
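  // illustrative example: x = +25.7 (C1 = 257, exp = -1) gives ind = 1
  // and floor(C*) = 25, x = -25.7 gives -25; no fraction test is needed
  // because truncation toward zero ignores the inexact fraction f*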
1697 __mul_128x128_to_256 (P256, C1, __bid_ten2mk128[ind - 1]);
1698 if (ind - 1 <= 2) { // 0 <= ind - 1 <= 2 => shift = 0
1699 res.w[1] = P256.w[3];
1700 res.w[0] = P256.w[2];
1701 } else if (ind - 1 <= 21) { // 3 <= ind - 1 <= 21 => 3 <= shift <= 63
1702 shift = __bid_shiftright128[ind - 1]; // 3 <= shift <= 63
1703 res.w[1] = (P256.w[3] >> shift);
1704 res.w[0] = (P256.w[3] << (64 - shift)) | (P256.w[2] >> shift);
1705 } else { // 22 <= ind - 1 <= 33
1706 shift = __bid_shiftright128[ind - 1] - 64; // 2 <= shift <= 38
1707 res.w[1] = 0;
1708 res.w[0] = P256.w[3] >> shift;
1709 }
1710 res.w[1] = x_sign | 0x3040000000000000ull | res.w[1];
1711 BID_RETURN (res);
1712 } else { // if exp < 0 and q + exp <= 0 the result is +0 or -0
1713 res.w[1] = x_sign | 0x3040000000000000ull;
1714 res.w[0] = 0x0000000000000000ull;
1715 BID_RETURN (res);
1716 }
1717}
1718
1719/*****************************************************************************
1720 * BID128_round_integral_nearest_away
1721 ****************************************************************************/
1722
1723BID128_FUNCTION_ARG1_NORND(__bid128_round_integral_nearest_away, x)
1724
1725 UINT128 res;
1726 UINT64 x_sign;
1727 UINT64 x_exp;
1728 int exp; // unbiased exponent
1729 // Note: C1.w[1], C1.w[0] represent x_signif_hi, x_signif_lo
1730 // (all are UINT64)
1731 UINT64 tmp64;
1732 BID_UI64DOUBLE tmp1;
1733 unsigned int x_nr_bits;
1734 int q, ind, shift;
1735 UINT128 C1;
1736 // UINT128 res is C* at first - represents up to 34 decimal digits ~
1737 // 113 bits
1738 // UINT256 fstar;
1739 UINT256 P256;
1740
1741 // check for NaN or Infinity
1742 if ((x.w[1] & MASK_SPECIAL) == MASK_SPECIAL) {
1743 // x is special
1744 if ((x.w[1] & MASK_NAN) == MASK_NAN) { // x is NAN
1745 if ((x.w[1] & MASK_SNAN) == MASK_SNAN) { // x is SNAN
1746 // set invalid flag
1747 *pfpsf |= INVALID_EXCEPTION;
1748 // return Quiet (SNaN)
1749 res.w[1] = x.w[1] & 0xfdffffffffffffffull;
1750 res.w[0] = x.w[0];
1751 } else { // x is QNaN
1752 // return the input QNaN
1753 res.w[1] = x.w[1];
1754 res.w[0] = x.w[0];
1755 }
1756 BID_RETURN (res);
1757 } else { // x is not a NaN, so it must be infinity
1758 if ((x.w[1] & MASK_SIGN) == 0x0ull) { // x is +inf
1759 // return +inf
1760 res.w[1] = 0x7800000000000000ull;
1761 res.w[0] = 0x0000000000000000ull;
1762 } else { // x is -inf
1763 // return -inf
1764 res.w[1] = 0xf800000000000000ull;
1765 res.w[0] = 0x0000000000000000ull;
1766 }
1767 BID_RETURN (res);
1768 }
1769 }
1770 // unpack x
1771 x_sign = x.w[1] & MASK_SIGN; // 0 for positive, MASK_SIGN for negative
1772 x_exp = x.w[1] & MASK_EXP; // biased and shifted left 49 bit positions
1773 C1.w[1] = x.w[1] & MASK_COEFF;
1774 C1.w[0] = x.w[0];
1775
1776 // test for non-canonical values:
1777 // - values whose encoding begins with x00, x01, or x10 and whose
1778 // coefficient is larger than 10^34 -1, or
1779 // - values whose encoding begins with x1100, x1101, x1110 (if NaNs
1780 // and infinities were eliminated already this test is reduced to
1781 // checking for x11x)
1782
1783 // test for non-canonical values of the argument x
1784 if ((((C1.w[1] > 0x0001ed09bead87c0ull)
1785 || ((C1.w[1] == 0x0001ed09bead87c0ull)
1786 && (C1.w[0] > 0x378d8e63ffffffffull)))
1787 && ((x.w[1] & 0x6000000000000000ull) != 0x6000000000000000ull))
1788 || ((x.w[1] & 0x6000000000000000ull) == 0x6000000000000000ull)) {
1789 x.w[1] = x.w[1] & 0x8000000000000000ull; // preserve the sign bit
1790 x.w[0] = 0;
1791 x_exp = 0x1820ull << 49;
1792 C1.w[1] = 0;
1793 C1.w[0] = 0;
1794 }
1795 // test for input equal to zero
1796 if ((C1.w[1] == 0x0ull) && (C1.w[0] == 0x0ull)) {
1797 // x is 0
1798 // return 0 preserving the sign bit and the preferred exponent
1799 // of MAX(Q(x), 0)
1800 if (x_exp <= (0x1820ull << 49)) {
1801 res.w[1] =
1802 (x.w[1] & 0x8000000000000000ull) | 0x3040000000000000ull;
1803 } else {
1804 res.w[1] = x.w[1] & 0xfffe000000000000ull;
1805 }
1806 res.w[0] = 0x0000000000000000ull;
1807 BID_RETURN (res);
1808 }
1809 // x is not special and is not zero
1810
1811 // if (exp <= -(p+1)) return 0.0
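  // (with p = 34 coefficient digits, exp <= -35 implies |x| < 0.1 < 1/2,
  // so rounding to nearest-away always yields a zero with the sign of x;
  // exp == -34 may still round to +/-1 and is handled below)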
1812 if (x_exp <= 0x2ffa000000000000ull) { // 0x2ffa000000000000ull == -35
1813 res.w[1] = x_sign | 0x3040000000000000ull;
1814 res.w[0] = 0x0000000000000000ull;
1815 BID_RETURN (res);
1816 }
1817 // q = nr. of decimal digits in x
1818 // determine first the nr. of bits in x
1819 if (C1.w[1] == 0) {
1820 if (C1.w[0] >= 0x0020000000000000ull) { // x >= 2^53
1821 // split the 64-bit value in two 32-bit halves to avoid rounding errors
1822 if (C1.w[0] >= 0x0000000100000000ull) { // x >= 2^32
1823 tmp1.d = (double) (C1.w[0] >> 32); // exact conversion
1824 x_nr_bits =
1825 33 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
1826 } else { // x < 2^32
1827 tmp1.d = (double) (C1.w[0]); // exact conversion
1828 x_nr_bits =
1829 1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
1830 }
1831 } else { // if x < 2^53
1832 tmp1.d = (double) C1.w[0]; // exact conversion
1833 x_nr_bits =
1834 1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
1835 }
1836 } else { // C1.w[1] != 0 => nr. bits = 64 + nr_bits (C1.w[1])
1837 tmp1.d = (double) C1.w[1]; // exact conversion
1838 x_nr_bits =
1839 65 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
1840 }
1841 q = __bid_nr_digits[x_nr_bits - 1].digits;
1842 if (q == 0) {
1843 q = __bid_nr_digits[x_nr_bits - 1].digits1;
1844 if (C1.w[1] > __bid_nr_digits[x_nr_bits - 1].threshold_hi
1845 || (C1.w[1] == __bid_nr_digits[x_nr_bits - 1].threshold_hi
1846 && C1.w[0] >= __bid_nr_digits[x_nr_bits - 1].threshold_lo))
1847 q++;
1848 }
1849 exp = (x_exp >> 49) - 6176;
1850 if (exp >= 0) { // -exp <= 0
1851 // the argument is an integer already
1852 res.w[1] = x.w[1];
1853 res.w[0] = x.w[0];
1854 BID_RETURN (res);
1855 } else if ((q + exp) >= 0) { // exp < 0 and 1 <= -exp <= q
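    // the case q + exp == 0 (i.e. 0.1 <= |x| < 1) is included here
    // because such values round to +/-1 when |x| >= 1/2 and to +/-0
    // otherwise, so they must take the general path below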
1856 // need to shift right -exp digits from the coefficient; the exp will be 0
1857 ind = -exp; // 1 <= ind <= 34; ind is a synonym for 'x'
1858 // chop off ind digits from the lower part of C1
1859 // C1 = C1 + 1/2 * 10^x where the result C1 fits in 127 bits
1860 tmp64 = C1.w[0];
1861 if (ind <= 19) {
1862 C1.w[0] = C1.w[0] + __bid_midpoint64[ind - 1];
1863 } else {
1864 C1.w[0] = C1.w[0] + __bid_midpoint128[ind - 20].w[0];
1865 C1.w[1] = C1.w[1] + __bid_midpoint128[ind - 20].w[1];
1866 }
1867 if (C1.w[0] < tmp64)
1868 C1.w[1]++;
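  // illustrative example: x = +25.5 has C1 = 255 and ind = 1; adding
  // 1/2 * 10^1 = 5 gives 260, and chopping one digit below yields 26,
  // the midpoint rounded away from zero; x = +25.4 becomes 259 and
  // yields 25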
1869 // calculate C* and f*
1870 // C* is actually floor(C*) in this case
1871 // C* and f* need shifting and masking, as shown by
1872 // __bid_shiftright128[] and __bid_maskhigh128[]
1873 // 1 <= x <= 34
1874 // kx = 10^(-x) = __bid_ten2mk128[ind - 1]
1875 // C* = (C1 + 1/2 * 10^x) * 10^(-x)
1876 // the approximation of 10^(-x) was rounded up to 118 bits
1877 __mul_128x128_to_256 (P256, C1, __bid_ten2mk128[ind - 1]);
1878 // the top Ex bits of 10^(-x) are T* = __bid_ten2mk128trunc[ind - 1], e.g.
1879 // if x=1, T*=__bid_ten2mk128trunc[0]=0x19999999999999999999999999999999
1880 // if (0 < f* < 10^(-x)) then the result is a midpoint; because
1881 //     1/2 ulp was added to C1 before the multiplication, floor(C*)
1882 //     is already the midpoint rounded away from zero, so no parity
1883 //     correction (as would be needed for rounding to nearest-even)
1884 //     is applied here
1885 // else
1886 //   C* = floor(C*) (logical right shift; C* has p decimal digits,
1887 //       correct by Property 1)
1888 // n = C* * 10^(e+x)
1889
1890 // shift right C* by Ex-128 = __bid_shiftright128[ind]
1891 if (ind - 1 <= 2) { // 0 <= ind - 1 <= 2 => shift = 0
1892 res.w[1] = P256.w[3];
1893 res.w[0] = P256.w[2];
1894 } else if (ind - 1 <= 21) { // 3 <= ind - 1 <= 21 => 3 <= shift <= 63
1895 shift = __bid_shiftright128[ind - 1]; // 3 <= shift <= 63
1896 res.w[0] = (P256.w[3] << (64 - shift)) | (P256.w[2] >> shift);
1897 res.w[1] = (P256.w[3] >> shift);
1898 } else { // 22 <= ind - 1 <= 33
1899 shift = __bid_shiftright128[ind - 1]; // 66 <= shift <= 102
1900 res.w[1] = 0;
1901 res.w[0] = (P256.w[3] >> (shift - 64)); // 2 <= shift - 64 <= 38
1902 }
1903 // if the result was a midpoint, it was already rounded away from zero
1904 res.w[1] |= x_sign | 0x3040000000000000ull;
1905 BID_RETURN (res);
1906 } else { // if ((q + exp) < 0) <=> q < -exp
1907 // the result is +0 or -0
1908 res.w[1] = x_sign | 0x3040000000000000ull;
1909 res.w[0] = 0x0000000000000000ull;
1910 BID_RETURN (res);
1911 }
1912}