2013-03-29  Siddhesh Poyarekar  <siddhesh@redhat.com>
+ * sysdeps/ieee754/dbl-64/mpa.h (ZERO, MZERO): Remove defines.
+ (__pow_mp): Replace ZERO and MZERO with their values.
+ * sysdeps/ieee754/dbl-64/e_atan2.c (__ieee754_atan2): Likewise.
+ * sysdeps/ieee754/dbl-64/e_log.c (__ieee754_log): Likewise.
+ * sysdeps/ieee754/dbl-64/mpatan2.c (__mpatan2): Likewise.
+ * sysdeps/ieee754/dbl-64/mpexp.c (__mpexp): Likewise.
+ * sysdeps/ieee754/dbl-64/s_atan.c (atan): Likewise.
+ * sysdeps/powerpc/power4/fpu/mpa.c (__mul): Likewise.
+ (__sqr): Likewise.
+
* sysdeps/ieee754/dbl-64/s_atan.c: Fix formatting.
* sysdeps/ieee754/dbl-64/e_log.c: Fix formatting.
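The replacement is semantics-preserving: in a double context the integer literal 0 converts to +0.0 (the value ZERO expanded to), -0.0 is exactly what MZERO expanded to, and both compare equal to zero in the x == 0 style tests below. A minimal standalone sketch, not part of the patch and purely illustrative, checking those properties:

/* Illustrative only: the literals are drop-in replacements for the
   removed macros.  0 converts to +0.0, -0.0 carries the sign bit,
   and the two compare equal.  */
#include <assert.h>
#include <math.h>

int
main (void)
{
  double pz = 0;      /* was ZERO  */
  double mz = -0.0;   /* was MZERO */

  assert (!signbit (pz));      /* +0.0: sign bit clear */
  assert (signbit (mz));       /* -0.0: sign bit set   */
  assert (pz == mz);           /* +0.0 and -0.0 compare equal */
  assert (pz == 0 && mz == 0); /* so x == 0 catches both      */
  return 0;
}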
if (dy == 0x00000000)
{
if ((ux & 0x80000000) == 0x00000000)
- return ZERO;
+ return 0;
else
return opi.d;
}
if (dy == 0x00000000)
{
if ((ux & 0x80000000) == 0x00000000)
- return MZERO;
+ return -0.0;
else
return mopi.d;
}
}
/* x=+-0 */
- if (x == ZERO)
+ if (x == 0)
{
if ((uy & 0x80000000) == 0x00000000)
return hpi.d;
else
{
if ((uy & 0x80000000) == 0x00000000)
- return ZERO;
+ return 0;
else
- return MZERO;
+ return -0.0;
}
}
}
}
/* either x/y or y/x is very close to zero */
- ax = (x < ZERO) ? -x : x;
- ay = (y < ZERO) ? -y : y;
+ ax = (x < 0) ? -x : x;
+ ay = (y < 0) ? -y : y;
de = (uy & 0x7ff00000) - (ux & 0x7ff00000);
if (de >= ep)
{
- return ((y > ZERO) ? hpi.d : mhpi.d);
+ return ((y > 0) ? hpi.d : mhpi.d);
}
else if (de <= em)
{
- if (x > ZERO)
+ if (x > 0)
{
if ((z = ay / ax) < TWOM1022)
return normalized (ax, ay, y, z);
}
else
{
- return ((y > ZERO) ? opi.d : mopi.d);
+ return ((y > 0) ? opi.d : mopi.d);
}
}
du = ((ax - v) - vv) / ay;
}
- if (x > ZERO)
+ if (x > 0)
{
/* (i) x>0, abs(y)< abs(x): atan(ay/ax) */
if (ay < ax)
MUL2 (u, du, u, du, v, vv, t1, t2, t3, t4, t5, t6, t7, t8);
s1 = v * (f11.d + v * (f13.d
+ v * (f15.d + v * (f17.d + v * f19.d))));
- ADD2 (f9.d, ff9.d, s1, ZERO, s2, ss2, t1, t2);
+ ADD2 (f9.d, ff9.d, s1, 0, s2, ss2, t1, t2);
MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
ADD2 (f7.d, ff7.d, s1, ss1, s2, ss2, t1, t2);
MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
+ v * (hij[i][13].d
+ v * (hij[i][14].d
+ v * hij[i][15].d))));
- ADD2 (hij[i][9].d, hij[i][10].d, s1, ZERO, s2, ss2, t1, t2);
+ ADD2 (hij[i][9].d, hij[i][10].d, s1, 0, s2, ss2, t1, t2);
MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
ADD2 (hij[i][7].d, hij[i][8].d, s1, ss1, s2, ss2, t1, t2);
MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
s1 = v * (f11.d
+ v * (f13.d
+ v * (f15.d + v * (f17.d + v * f19.d))));
- ADD2 (f9.d, ff9.d, s1, ZERO, s2, ss2, t1, t2);
+ ADD2 (f9.d, ff9.d, s1, 0, s2, ss2, t1, t2);
MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
ADD2 (f7.d, ff7.d, s1, ss1, s2, ss2, t1, t2);
MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
+ v * (hij[i][14].d
+ v * hij[i][15].d))));
- ADD2 (hij[i][9].d, hij[i][10].d, s1, ZERO, s2, ss2, t1, t2);
+ ADD2 (hij[i][9].d, hij[i][10].d, s1, 0, s2, ss2, t1, t2);
MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
ADD2 (hij[i][7].d, hij[i][8].d, s1, ss1, s2, ss2, t1, t2);
MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
MUL2 (u, du, u, du, v, vv, t1, t2, t3, t4, t5, t6, t7, t8);
s1 = v * (f11.d
+ v * (f13.d + v * (f15.d + v * (f17.d + v * f19.d))));
- ADD2 (f9.d, ff9.d, s1, ZERO, s2, ss2, t1, t2);
+ ADD2 (f9.d, ff9.d, s1, 0, s2, ss2, t1, t2);
MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
ADD2 (f7.d, ff7.d, s1, ss1, s2, ss2, t1, t2);
MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
+ v * (hij[i][13].d
+ v * (hij[i][14].d
+ v * hij[i][15].d))));
- ADD2 (hij[i][9].d, hij[i][10].d, s1, ZERO, s2, ss2, t1, t2);
+ ADD2 (hij[i][9].d, hij[i][10].d, s1, 0, s2, ss2, t1, t2);
MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
ADD2 (hij[i][7].d, hij[i][8].d, s1, ss1, s2, ss2, t1, t2);
MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
MUL2 (u, du, u, du, v, vv, t1, t2, t3, t4, t5, t6, t7, t8);
s1 = v * (f11.d + v * (f13.d + v * (f15.d + v * (f17.d + v * f19.d))));
- ADD2 (f9.d, ff9.d, s1, ZERO, s2, ss2, t1, t2);
+ ADD2 (f9.d, ff9.d, s1, 0, s2, ss2, t1, t2);
MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
ADD2 (f7.d, ff7.d, s1, ss1, s2, ss2, t1, t2);
MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
+ v * (hij[i][13].d
+ v * (hij[i][14].d + v * hij[i][15].d))));
- ADD2 (hij[i][9].d, hij[i][10].d, s1, ZERO, s2, ss2, t1, t2);
+ ADD2 (hij[i][9].d, hij[i][10].d, s1, 0, s2, ss2, t1, t2);
MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
ADD2 (hij[i][7].d, hij[i][8].d, s1, ss1, s2, ss2, t1, t2);
MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
if (__builtin_expect (ux < 0x00100000, 0))
{
if (__builtin_expect (((ux & 0x7fffffff) | dx) == 0, 0))
- return MHALF / ZERO; /* return -INF */
+ return MHALF / 0.0; /* return -INF */
if (__builtin_expect (ux < 0, 0))
- return (x - x) / ZERO; /* return NaN */
+ return (x - x) / 0.0; /* return NaN */
n -= 54;
x *= two54.d; /* scale x */
num.d = x;
EMULV (w, a, s2, ss2, t1, t2, t3, t4, t5);
ADD2 (d10.d, dd10.d, s2, ss2, s3, ss3, t1, t2);
- MUL2 (w, ZERO, s3, ss3, s2, ss2, t1, t2, t3, t4, t5, t6, t7, t8);
+ MUL2 (w, 0, s3, ss3, s2, ss2, t1, t2, t3, t4, t5, t6, t7, t8);
ADD2 (d9.d, dd9.d, s2, ss2, s3, ss3, t1, t2);
- MUL2 (w, ZERO, s3, ss3, s2, ss2, t1, t2, t3, t4, t5, t6, t7, t8);
+ MUL2 (w, 0, s3, ss3, s2, ss2, t1, t2, t3, t4, t5, t6, t7, t8);
ADD2 (d8.d, dd8.d, s2, ss2, s3, ss3, t1, t2);
- MUL2 (w, ZERO, s3, ss3, s2, ss2, t1, t2, t3, t4, t5, t6, t7, t8);
+ MUL2 (w, 0, s3, ss3, s2, ss2, t1, t2, t3, t4, t5, t6, t7, t8);
ADD2 (d7.d, dd7.d, s2, ss2, s3, ss3, t1, t2);
- MUL2 (w, ZERO, s3, ss3, s2, ss2, t1, t2, t3, t4, t5, t6, t7, t8);
+ MUL2 (w, 0, s3, ss3, s2, ss2, t1, t2, t3, t4, t5, t6, t7, t8);
ADD2 (d6.d, dd6.d, s2, ss2, s3, ss3, t1, t2);
- MUL2 (w, ZERO, s3, ss3, s2, ss2, t1, t2, t3, t4, t5, t6, t7, t8);
+ MUL2 (w, 0, s3, ss3, s2, ss2, t1, t2, t3, t4, t5, t6, t7, t8);
ADD2 (d5.d, dd5.d, s2, ss2, s3, ss3, t1, t2);
- MUL2 (w, ZERO, s3, ss3, s2, ss2, t1, t2, t3, t4, t5, t6, t7, t8);
+ MUL2 (w, 0, s3, ss3, s2, ss2, t1, t2, t3, t4, t5, t6, t7, t8);
ADD2 (d4.d, dd4.d, s2, ss2, s3, ss3, t1, t2);
- MUL2 (w, ZERO, s3, ss3, s2, ss2, t1, t2, t3, t4, t5, t6, t7, t8);
+ MUL2 (w, 0, s3, ss3, s2, ss2, t1, t2, t3, t4, t5, t6, t7, t8);
ADD2 (d3.d, dd3.d, s2, ss2, s3, ss3, t1, t2);
- MUL2 (w, ZERO, s3, ss3, s2, ss2, t1, t2, t3, t4, t5, t6, t7, t8);
+ MUL2 (w, 0, s3, ss3, s2, ss2, t1, t2, t3, t4, t5, t6, t7, t8);
ADD2 (d2.d, dd2.d, s2, ss2, s3, ss3, t1, t2);
- MUL2 (w, ZERO, s3, ss3, s2, ss2, t1, t2, t3, t4, t5, t6, t7, t8);
- MUL2 (w, ZERO, s2, ss2, s3, ss3, t1, t2, t3, t4, t5, t6, t7, t8);
- ADD2 (w, ZERO, s3, ss3, b, bb, t1, t2);
+ MUL2 (w, 0, s3, ss3, s2, ss2, t1, t2, t3, t4, t5, t6, t7, t8);
+ MUL2 (w, 0, s2, ss2, s3, ss3, t1, t2, t3, t4, t5, t6, t7, t8);
+ ADD2 (w, 0, s3, ss3, b, bb, t1, t2);
/* End stage II, case abs(x-1) < 0.03 */
if ((y = b + (bb + b * E4)) == b + (bb - b * E4))
EADD (r0, t, ra, rb);
/* Compute w */
- MUL2 (q, ZERO, ra, rb, w, ww, t1, t2, t3, t4, t5, t6, t7, t8);
+ MUL2 (q, 0, ra, rb, w, ww, t1, t2, t3, t4, t5, t6, t7, t8);
EADD (A, B0, a0, aa0);
# define TWO52 0x1.0p52 /* 2^52 */
#endif
-#define ZERO 0.0 /* 0 */
-#define MZERO -0.0 /* 0 with the sign bit set */
#define ONE 1.0 /* 1 */
#define MONE -1.0 /* -1 */
#define TWO 2.0 /* 2 */
Y[0] = ONE;
Y[1] = 1 << rem;
- /* Everything else is ZERO. */
+ /* Everything else is 0. */
for (i = 2; i <= p; i++)
- Y[i] = ZERO;
+ Y[i] = 0;
}
{
mp_no mpt1, mpt2, mpt3;
- if (X[0] <= ZERO)
+ if (X[0] <= 0)
{
__dvd (x, y, &mpt1, p);
__mul (&mpt1, &mpt1, &mpt2, p);
- if (mpt1.d[0] != ZERO)
+ if (mpt1.d[0] != 0)
mpt1.d[0] = ONE;
__add (&mpt2, &mpone, &mpt3, p);
__mpsqrt (&mpt3, &mpt2, p);
{
for (i = 2; i <= p; i++)
{
- if (X[i] != ZERO)
+ if (X[i] != 0)
break;
}
if (i == p + 1)
return x + x;
/* Regular values of x, including denormals +-0 and +-INF */
- u = (x < ZERO) ? -x : x;
+ u = (x < 0) ? -x : x;
if (u < C)
{
if (u < B)
s1 = f11.d + v * s1;
s1 *= v;
- ADD2 (f9.d, ff9.d, s1, ZERO, s2, ss2, t1, t2);
+ ADD2 (f9.d, ff9.d, s1, 0, s2, ss2, t1, t2);
MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
ADD2 (f7.d, ff7.d, s1, ss1, s2, ss2, t1, t2);
MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
ADD2 (f3.d, ff3.d, s1, ss1, s2, ss2, t1, t2);
MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
- MUL2 (x, ZERO, s1, ss1, s2, ss2, t1, t2, t3, t4, t5, t6, t7,
+ MUL2 (x, 0, s1, ss1, s2, ss2, t1, t2, t3, t4, t5, t6, t7,
t8);
- ADD2 (x, ZERO, s2, ss2, s1, ss1, t1, t2);
+ ADD2 (x, 0, s2, ss2, s1, ss1, t1, t2);
if ((y = s1 + (ss1 - U5 * s1)) == s1 + (ss1 + U5 * s1))
return y;
s1 = hij[i][11].d + z * s1;
s1 *= z;
- ADD2 (hij[i][9].d, hij[i][10].d, s1, ZERO, s2, ss2, t1, t2);
- MUL2 (z, ZERO, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
+ ADD2 (hij[i][9].d, hij[i][10].d, s1, 0, s2, ss2, t1, t2);
+ MUL2 (z, 0, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
ADD2 (hij[i][7].d, hij[i][8].d, s1, ss1, s2, ss2, t1, t2);
- MUL2 (z, ZERO, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
+ MUL2 (z, 0, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
ADD2 (hij[i][5].d, hij[i][6].d, s1, ss1, s2, ss2, t1, t2);
- MUL2 (z, ZERO, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
+ MUL2 (z, 0, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
ADD2 (hij[i][3].d, hij[i][4].d, s1, ss1, s2, ss2, t1, t2);
- MUL2 (z, ZERO, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
+ MUL2 (z, 0, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
ADD2 (hij[i][1].d, hij[i][2].d, s1, ss1, s2, ss2, t1, t2);
if ((y = s2 + (ss2 - U6 * s2)) == s2 + (ss2 + U6 * s2))
return __signArctan (x, y);
if ((y = t1 + (yy - u3)) == t1 + (yy + u3))
return __signArctan (x, y);
- DIV2 (ONE, ZERO, u, ZERO, w, ww, t1, t2, t3, t4, t5, t6, t7, t8, t9,
+ DIV2 (ONE, 0, u, 0, w, ww, t1, t2, t3, t4, t5, t6, t7, t8, t9,
t10);
t1 = w - hij[i][0].d;
EADD (t1, ww, z, zz);
s1 = hij[i][11].d + z * s1;
s1 *= z;
- ADD2 (hij[i][9].d, hij[i][10].d, s1, ZERO, s2, ss2, t1, t2);
+ ADD2 (hij[i][9].d, hij[i][10].d, s1, 0, s2, ss2, t1, t2);
MUL2 (z, zz, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
ADD2 (hij[i][7].d, hij[i][8].d, s1, ss1, s2, ss2, t1, t2);
MUL2 (z, zz, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
if ((y = t3 + (yy - U4)) == t3 + (yy + U4))
return __signArctan (x, y);
- DIV2 (ONE, ZERO, u, ZERO, w, ww, t1, t2, t3, t4, t5, t6, t7, t8,
+ DIV2 (ONE, 0, u, 0, w, ww, t1, t2, t3, t4, t5, t6, t7, t8,
t9, t10);
MUL2 (w, ww, w, ww, v, vv, t1, t2, t3, t4, t5, t6, t7, t8);
s1 = f11.d + v * s1;
s1 *= v;
- ADD2 (f9.d, ff9.d, s1, ZERO, s2, ss2, t1, t2);
+ ADD2 (f9.d, ff9.d, s1, 0, s2, ss2, t1, t2);
MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
ADD2 (f7.d, ff7.d, s1, ss1, s2, ss2, t1, t2);
MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
double u, zk, zk2;
/* Is z=0? */
- if (__glibc_unlikely (X[0] * Y[0] == ZERO))
+ if (__glibc_unlikely (X[0] * Y[0] == 0))
{
- Z[0] = ZERO;
+ Z[0] = 0;
return;
}
/* Multiply, add and carry */
k2 = (p2 < 3) ? p2 + p2 : p2 + 3;
- zk = Z[k2] = ZERO;
+ zk = Z[k2] = 0;
for (k = k2; k > 1;)
{
if (k > p2)
int e = EX + EY;
/* Is there a carry beyond the most significant digit? */
- if (Z[1] == ZERO)
+ if (Z[1] == 0)
{
for (i = 1; i <= p2; i++)
Z[i] = Z[i + 1];
double u, yk;
/* Is z=0? */
- if (__glibc_unlikely (X[0] == ZERO))
+ if (__glibc_unlikely (X[0] == 0))
{
- Y[0] = ZERO;
+ Y[0] = 0;
return;
}
/* We need not iterate through all X's since it's pointless to
multiply zeroes. */
for (ip = p; ip > 0; ip--)
- if (X[ip] != ZERO)
+ if (X[ip] != 0)
break;
k = (__glibc_unlikely (p < 3)) ? p + p : p + 3;
while (k > 2 * ip + 1)
- Y[k--] = ZERO;
+ Y[k--] = 0;
- yk = ZERO;
+ yk = 0;
while (k > p)
{
int e = EX * 2;
/* Is there a carry beyond the most significant digit? */
- if (__glibc_unlikely (Y[1] == ZERO))
+ if (__glibc_unlikely (Y[1] == 0))
{
for (i = 1; i <= p; i++)
Y[i] = Y[i + 1];