From: Wilco Dijkstra Date: Tue, 11 Nov 2025 17:46:19 +0000 (+0000) Subject: math: Remove float_t and double_t [BZ #33563] X-Git-Tag: glibc-2.43~231 X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=989e538224de5eea508ceda6d273cb64df6cca56;p=thirdparty%2Fglibc.git math: Remove float_t and double_t [BZ #33563] Remove uses of float_t and double_t. This is not useful on modern machines, and does not help given GCC defaults to -fexcess-precision=fast. One use of double_t remains to allow forcing the precision to double on targets where FLT_EVAL_METHOD=2. This fixes BZ #33563 on i486-pc-linux-gnu. Reviewed-by: Adhemerval Zanella  --- diff --git a/sysdeps/aarch64/fpu/math_private.h b/sysdeps/aarch64/fpu/math_private.h index 1030d9780c..d727c241d1 100644 --- a/sysdeps/aarch64/fpu/math_private.h +++ b/sysdeps/aarch64/fpu/math_private.h @@ -25,14 +25,14 @@ /* Use inline round and lround instructions. */ #define TOINT_INTRINSICS 1 -static inline double_t -roundtoint (double_t x) +static inline double +roundtoint (double x) { return round (x); } static inline int32_t -converttoint (double_t x) +converttoint (double x) { return lround (x); } diff --git a/sysdeps/aarch64/fpu/pow_sve.c b/sysdeps/aarch64/fpu/pow_sve.c index b8c1b39dca..8fe51b7d4b 100644 --- a/sysdeps/aarch64/fpu/pow_sve.c +++ b/sysdeps/aarch64/fpu/pow_sve.c @@ -374,7 +374,7 @@ pow_sc (double x, double y) } if (__glibc_unlikely (zeroinfnan (ix))) { - double_t x2 = x * x; + double x2 = x * x; if (ix >> 63 && checkint (iy) == 1) x2 = -x2; return (iy >> 63) ? 1 / x2 : x2; diff --git a/sysdeps/aarch64/fpu/powf_sve.c b/sysdeps/aarch64/fpu/powf_sve.c index 65e9bd29d9..22e6cc54fb 100644 --- a/sysdeps/aarch64/fpu/powf_sve.c +++ b/sysdeps/aarch64/fpu/powf_sve.c @@ -137,7 +137,7 @@ powf_specialcase (float x, float y, float z) } if (__glibc_unlikely (zeroinfnan (ix))) { - float_t x2 = x * x; + float x2 = x * x; if (ix & 0x80000000 && checkint (iy) == 1) x2 = -x2; return iy & 0x80000000 ? 1 / x2 : x2; diff --git a/sysdeps/ieee754/dbl-64/e_exp.c b/sysdeps/ieee754/dbl-64/e_exp.c index b4f4cb97b8..f1ecbe122b 100644 --- a/sysdeps/ieee754/dbl-64/e_exp.c +++ b/sysdeps/ieee754/dbl-64/e_exp.c @@ -44,9 +44,9 @@ adjustment of scale, positive k here means the result may overflow and negative k means the result may underflow. */ static inline double -specialcase (double_t tmp, uint64_t sbits, uint64_t ki) +specialcase (double tmp, uint64_t sbits, uint64_t ki) { - double_t scale, y; + double scale, y; if ((ki & 0x80000000) == 0) { @@ -66,7 +66,7 @@ specialcase (double_t tmp, uint64_t sbits, uint64_t ki) range to avoid double rounding that can cause 0.5+E/2 ulp error where E is the worst-case ulp error outside the subnormal range. So this is only useful if the goal is better than 1 ulp worst-case error. */ - double_t hi, lo; + double hi, lo; lo = scale - y + scale * tmp; hi = 1.0 + y; lo = 1.0 - hi + y + lo; @@ -98,8 +98,7 @@ __exp (double x) { uint32_t abstop; uint64_t ki, idx, top, sbits; - /* double_t for better performance on targets with FLT_EVAL_METHOD==2. 
*/ - double_t kd, z, r, r2, scale, tail, tmp; + double kd, z, r, r2, scale, tail, tmp; abstop = top12 (x) & 0x7ff; if (__glibc_unlikely (abstop - top12 (0x1p-54) diff --git a/sysdeps/ieee754/dbl-64/e_exp10.c b/sysdeps/ieee754/dbl-64/e_exp10.c index cd7ecfa752..f1f831e2cd 100644 --- a/sysdeps/ieee754/dbl-64/e_exp10.c +++ b/sysdeps/ieee754/dbl-64/e_exp10.c @@ -34,9 +34,9 @@ #define C(i) __exp_data.exp10_poly[i] static double -special_case (uint64_t sbits, double_t tmp, uint64_t ki) +special_case (uint64_t sbits, double tmp, uint64_t ki) { - double_t scale, y; + double scale, y; if ((ki & 0x80000000) == 0) { @@ -58,8 +58,8 @@ special_case (uint64_t sbits, double_t tmp, uint64_t ki) range to avoid double rounding that can cause 0.5+E/2 ulp error where E is the worst-case ulp error outside the subnormal range. So this is only useful if the goal is better than 1 ulp worst-case error. */ - double_t lo = scale - y + scale * tmp; - double_t hi = 1.0 + y; + double lo = scale - y + scale * tmp; + double hi = 1.0 + y; lo = 1.0 - hi + y + lo; y = math_narrow_eval (hi + lo) - 1.0; /* Avoid -0.0 with downward rounding. */ @@ -98,8 +98,8 @@ __exp10 (double x) } /* Reduce x: z = x * N / log10(2), k = round(z). */ - double_t z = __exp_data.invlog10_2N * x; - double_t kd; + double z = __exp_data.invlog10_2N * x; + double kd; uint64_t ki; #if TOINT_INTRINSICS kd = roundtoint (z); @@ -111,7 +111,7 @@ __exp10 (double x) #endif /* r = x - k * log10(2), r in [-0.5, 0.5]. */ - double_t r = x; + double r = x; r = __exp_data.neglog10_2hiN * kd + r; r = __exp_data.neglog10_2loN * kd + r; @@ -124,12 +124,12 @@ __exp10 (double x) uint64_t u = __exp_data.tab[i + 1]; uint64_t sbits = u + e; - double_t tail = asdouble (__exp_data.tab[i]); + double tail = asdouble (__exp_data.tab[i]); /* 2^(r/N) ~= 1 + r * Poly(r). */ - double_t r2 = r * r; - double_t p = C (0) + r * C (1); - double_t y = C (2) + r * C (3); + double r2 = r * r; + double p = C (0) + r * C (1); + double y = C (2) + r * C (3); y = y + r2 * C (4); y = p + r2 * y; y = tail + y * r; @@ -140,7 +140,7 @@ __exp10 (double x) /* Assemble components: y = 2^(r/N) * 2^(k/N) ~= (y + 1) * s. */ - double_t s = asdouble (sbits); + double s = asdouble (sbits); return s * y + s; } diff --git a/sysdeps/ieee754/dbl-64/e_exp2.c b/sysdeps/ieee754/dbl-64/e_exp2.c index 53fb553730..ac041ad928 100644 --- a/sysdeps/ieee754/dbl-64/e_exp2.c +++ b/sysdeps/ieee754/dbl-64/e_exp2.c @@ -42,9 +42,9 @@ adjustment of scale, positive k here means the result may overflow and negative k means the result may underflow. */ static inline double -specialcase (double_t tmp, uint64_t sbits, uint64_t ki) +specialcase (double tmp, uint64_t sbits, uint64_t ki) { - double_t scale, y; + double scale, y; if ((ki & 0x80000000) == 0) { @@ -64,7 +64,7 @@ specialcase (double_t tmp, uint64_t sbits, uint64_t ki) range to avoid double rounding that can cause 0.5+E/2 ulp error where E is the worst-case ulp error outside the subnormal range. So this is only useful if the goal is better than 1 ulp worst-case error. */ - double_t hi, lo; + double hi, lo; lo = scale - y + scale * tmp; hi = 1.0 + y; lo = 1.0 - hi + y + lo; @@ -91,8 +91,7 @@ __exp2 (double x) { uint32_t abstop; uint64_t ki, idx, top, sbits; - /* double_t for better performance on targets with FLT_EVAL_METHOD==2. 
*/ - double_t kd, r, r2, scale, tail, tmp; + double kd, r, r2, scale, tail, tmp; abstop = top12 (x) & 0x7ff; if (__glibc_unlikely (abstop - top12 (0x1p-54) diff --git a/sysdeps/ieee754/dbl-64/e_log.c b/sysdeps/ieee754/dbl-64/e_log.c index 740db232b4..0cf661ca42 100644 --- a/sysdeps/ieee754/dbl-64/e_log.c +++ b/sysdeps/ieee754/dbl-64/e_log.c @@ -47,8 +47,7 @@ double SECTION __log (double x) { - /* double_t for better performance on targets with FLT_EVAL_METHOD==2. */ - double_t w, z, r, r2, r3, y, invc, logc, kd, hi, lo; + double w, z, r, r2, r3, y, invc, logc, kd, hi, lo; uint64_t ix, iz, tmp; uint32_t top; int k, i; @@ -72,8 +71,8 @@ __log (double x) + r3 * (B[7] + r * B[8] + r2 * B[9] + r3 * B[10]))); /* Worst-case error is around 0.507 ULP. */ w = r * 0x1p27; - double_t rhi = r + w - w; - double_t rlo = r - rhi; + double rhi = r + w - w; + double rlo = r - rhi; w = rhi * rhi * B[0]; /* B[0] == -0.5. */ hi = r + w; lo = r - hi + w; @@ -116,7 +115,7 @@ __log (double x) /* rounding error: 0x1p-55/N + 0x1p-66. */ r = (z - T2[i].chi - T2[i].clo) * invc; #endif - kd = (double_t) k; + kd = (double) k; /* hi + lo = r + log(c) + k*Ln2. */ w = kd * Ln2hi + logc; diff --git a/sysdeps/ieee754/dbl-64/e_log2.c b/sysdeps/ieee754/dbl-64/e_log2.c index b7762d0695..8f4c368196 100644 --- a/sysdeps/ieee754/dbl-64/e_log2.c +++ b/sysdeps/ieee754/dbl-64/e_log2.c @@ -42,8 +42,7 @@ top16 (double x) double __log2 (double x) { - /* double_t for better performance on targets with FLT_EVAL_METHOD==2. */ - double_t z, r, r2, r4, y, invc, logc, kd, hi, lo, t1, t2, t3, p; + double z, r, r2, r4, y, invc, logc, kd, hi, lo, t1, t2, t3, p; uint64_t ix, iz, tmp; uint32_t top; int k, i; @@ -64,7 +63,7 @@ __log2 (double x) hi = r * InvLn2hi; lo = r * InvLn2lo + __builtin_fma (r, InvLn2hi, -hi); #else - double_t rhi, rlo; + double rhi, rlo; rhi = asdouble (asuint64 (r) & -1ULL << 32); rlo = r - rhi; hi = rhi * InvLn2hi; @@ -105,7 +104,7 @@ __log2 (double x) invc = T[i].invc; logc = T[i].logc; z = asdouble (iz); - kd = (double_t) k; + kd = (double) k; /* log2(x) = log2(z/c) + log2(c) + k. */ /* r ~= z/c - 1, |r| < 1/(2*N). */ @@ -115,7 +114,7 @@ __log2 (double x) t1 = r * InvLn2hi; t2 = r * InvLn2lo + __builtin_fma (r, InvLn2hi, -t1); #else - double_t rhi, rlo; + double rhi, rlo; /* rounding error: 0x1p-55/N + 0x1p-65. */ r = (z - T2[i].chi - T2[i].clo) * invc; rhi = asdouble (asuint64 (r) & -1ULL << 32); diff --git a/sysdeps/ieee754/dbl-64/e_pow.c b/sysdeps/ieee754/dbl-64/e_pow.c index 3dc171885a..3252ac36aa 100644 --- a/sysdeps/ieee754/dbl-64/e_pow.c +++ b/sysdeps/ieee754/dbl-64/e_pow.c @@ -48,11 +48,10 @@ top12 (double x) /* Compute y+TAIL = log(x) where the rounded result is y and TAIL has about additional 15 bits precision. IX is the bit representation of x, but normalized in the subnormal range using the sign bit for the exponent. */ -static inline double_t -log_inline (uint64_t ix, double_t *tail) +static inline double +log_inline (uint64_t ix, double *tail) { - /* double_t for better performance on targets with FLT_EVAL_METHOD==2. */ - double_t z, r, y, invc, logc, logctail, kd, hi, t1, t2, lo, lo1, lo2, p; + double z, r, y, invc, logc, logctail, kd, hi, t1, t2, lo, lo1, lo2, p; uint64_t iz, tmp; int k, i; @@ -64,7 +63,7 @@ log_inline (uint64_t ix, double_t *tail) k = (int64_t) tmp >> 52; /* arithmetic shift */ iz = ix - (tmp & 0xfffULL << 52); z = asdouble (iz); - kd = (double_t) k; + kd = (double) k; /* log(x) = k*Ln2 + log(c) + log1p(z/c-1). 
*/ invc = T[i].invc; @@ -77,10 +76,10 @@ log_inline (uint64_t ix, double_t *tail) r = __builtin_fma (z, invc, -1.0); #else /* Split z such that rhi, rlo and rhi*rhi are exact and |rlo| <= |r|. */ - double_t zhi = asdouble ((iz + (1ULL << 31)) & (-1ULL << 32)); - double_t zlo = z - zhi; - double_t rhi = zhi * invc - 1.0; - double_t rlo = zlo * invc; + double zhi = asdouble ((iz + (1ULL << 31)) & (-1ULL << 32)); + double zlo = z - zhi; + double rhi = zhi * invc - 1.0; + double rlo = zlo * invc; r = rhi + rlo; #endif @@ -91,7 +90,7 @@ log_inline (uint64_t ix, double_t *tail) lo2 = t1 - t2 + r; /* Evaluation is optimized assuming superscalar pipelined execution. */ - double_t ar, ar2, ar3, lo3, lo4; + double ar, ar2, ar3, lo3, lo4; ar = A[0] * r; /* A[0] = -0.5. */ ar2 = r * ar; ar3 = r * ar2; @@ -101,8 +100,8 @@ log_inline (uint64_t ix, double_t *tail) lo3 = __builtin_fma (ar, r, -ar2); lo4 = t2 - hi + ar2; #else - double_t arhi = A[0] * rhi; - double_t arhi2 = rhi * arhi; + double arhi = A[0] * rhi; + double arhi2 = rhi * arhi; hi = t2 + arhi2; lo3 = rlo * (ar + arhi); lo4 = t2 - hi + arhi2; @@ -138,9 +137,9 @@ log_inline (uint64_t ix, double_t *tail) adjustment of scale, positive k here means the result may overflow and negative k means the result may underflow. */ static inline double -specialcase (double_t tmp, uint64_t sbits, uint64_t ki) +specialcase (double tmp, uint64_t sbits, uint64_t ki) { - double_t scale, y; + double scale, y; if ((ki & 0x80000000) == 0) { @@ -167,7 +166,7 @@ specialcase (double_t tmp, uint64_t sbits, uint64_t ki) range to avoid double rounding that can cause 0.5+E/2 ulp error where E is the worst-case ulp error outside the subnormal range. So this is only useful if the goal is better than 1 ulp worst-case error. */ - double_t hi, lo, one = 1.0; + double hi, lo, one = 1.0; if (y < 0.0) one = -1.0; lo = scale - y + scale * tmp; @@ -193,8 +192,7 @@ exp_inline (double x, double xtail, uint32_t sign_bias) { uint32_t abstop; uint64_t ki, idx, top, sbits; - /* double_t for better performance on targets with FLT_EVAL_METHOD==2. */ - double_t kd, z, r, r2, scale, tail, tmp; + double kd, z, r, r2, scale, tail, tmp; abstop = top12 (x) & 0x7ff; if (__glibc_unlikely (abstop - top12 (0x1p-54) @@ -204,7 +202,7 @@ exp_inline (double x, double xtail, uint32_t sign_bias) { /* Avoid spurious underflow for tiny x. */ /* Note: 0 is common input. */ - double_t one = WANT_ROUNDING ? 1.0 + x : 1.0; + double one = WANT_ROUNDING ? 1.0 + x : 1.0; return sign_bias ? -one : one; } if (abstop >= top12 (1024.0)) @@ -319,7 +317,7 @@ __pow (double x, double y) } if (__glibc_unlikely (zeroinfnan (ix))) { - double_t x2 = x * x; + double x2 = x * x; if (ix >> 63 && checkint (iy) == 1) { x2 = -x2; @@ -368,17 +366,17 @@ __pow (double x, double y) } } - double_t lo; - double_t hi = log_inline (ix, &lo); - double_t ehi, elo; + double lo; + double hi = log_inline (ix, &lo); + double ehi, elo; #ifdef __FP_FAST_FMA ehi = y * hi; elo = y * lo + __builtin_fma (y, hi, -ehi); #else - double_t yhi = asdouble (iy & -1ULL << 27); - double_t ylo = y - yhi; - double_t lhi = asdouble (asuint64 (hi) & -1ULL << 27); - double_t llo = hi - lhi + lo; + double yhi = asdouble (iy & -1ULL << 27); + double ylo = y - yhi; + double lhi = asdouble (asuint64 (hi) & -1ULL << 27); + double llo = hi - lhi + lo; ehi = yhi * lhi; elo = ylo * lhi + y * llo; /* |elo| < |ehi| * 2^-25. 
*/ #endif diff --git a/sysdeps/ieee754/dbl-64/math_config.h b/sysdeps/ieee754/dbl-64/math_config.h index 711af92a61..36a47ae7db 100644 --- a/sysdeps/ieee754/dbl-64/math_config.h +++ b/sysdeps/ieee754/dbl-64/math_config.h @@ -75,14 +75,14 @@ roundeven_finite (double x) /* Round x to nearest int in all rounding modes, ties have to be rounded consistently with converttoint so the results match. If the result would be outside of [-2^31, 2^31-1] then the semantics is unspecified. */ -static inline double_t -roundtoint (double_t x); +static inline double +roundtoint (double x); /* Convert x to nearest int in all rounding modes, ties have to be rounded consistently with roundtoint. If the result is not representible in an int32_t then the semantics is unspecified. */ static inline int32_t -converttoint (double_t x); +converttoint (double x); #endif static inline uint64_t diff --git a/sysdeps/ieee754/flt-32/e_exp2f.c b/sysdeps/ieee754/flt-32/e_exp2f.c index 8a61227f6b..7aec0c4cac 100644 --- a/sysdeps/ieee754/flt-32/e_exp2f.c +++ b/sysdeps/ieee754/flt-32/e_exp2f.c @@ -49,10 +49,9 @@ __exp2f (float x) { uint32_t abstop; uint64_t ki, t; - /* double_t for better performance on targets with FLT_EVAL_METHOD==2. */ - double_t kd, xd, z, r, r2, y, s; + double kd, xd, z, r, r2, y, s; - xd = (double_t) x; + xd = x; abstop = top12 (x) & 0x7ff; if (__glibc_unlikely (abstop >= top12 (128.0f))) { diff --git a/sysdeps/ieee754/flt-32/e_expf.c b/sysdeps/ieee754/flt-32/e_expf.c index 1d92828209..25ff01921e 100644 --- a/sysdeps/ieee754/flt-32/e_expf.c +++ b/sysdeps/ieee754/flt-32/e_expf.c @@ -54,10 +54,9 @@ __expf (float x) { uint32_t abstop; uint64_t ki, t; - /* double_t for better performance on targets with FLT_EVAL_METHOD==2. */ - double_t kd, xd, z, r, r2, y, s; + double kd, xd, z, r, r2, y, s; - xd = (double_t) x; + xd = x; abstop = top12 (x) & 0x7ff; if (__glibc_unlikely (abstop >= top12 (88.0f))) { diff --git a/sysdeps/ieee754/flt-32/e_log2f.c b/sysdeps/ieee754/flt-32/e_log2f.c index 65e2e59e69..a0e9f344c4 100644 --- a/sysdeps/ieee754/flt-32/e_log2f.c +++ b/sysdeps/ieee754/flt-32/e_log2f.c @@ -38,8 +38,7 @@ Relative error: 1.9 * 2^-26 (before rounding.) float __log2f (float x) { - /* double_t for better performance on targets with FLT_EVAL_METHOD==2. */ - double_t z, r, r2, p, y, y0, invc, logc; + double z, r, r2, p, y, y0, invc, logc; uint32_t ix, iz, top, tmp; int k, i; @@ -73,11 +72,11 @@ __log2f (float x) k = (int32_t) tmp >> 23; /* arithmetic shift */ invc = T[i].invc; logc = T[i].logc; - z = (double_t) asfloat (iz); + z = asfloat (iz); /* log2(x) = log1p(z/c-1)/ln2 + log2(c) + k */ r = z * invc - 1; - y0 = logc + (double_t) k; + y0 = logc + (double) k; /* Pipelined polynomial evaluation to approximate log1p(r)/ln2. */ r2 = r * r; diff --git a/sysdeps/ieee754/flt-32/e_logf.c b/sysdeps/ieee754/flt-32/e_logf.c index 207151c76a..783765057c 100644 --- a/sysdeps/ieee754/flt-32/e_logf.c +++ b/sysdeps/ieee754/flt-32/e_logf.c @@ -39,8 +39,7 @@ Relative error: 1.957 * 2^-26 (before rounding.) float __logf (float x) { - /* double_t for better performance on targets with FLT_EVAL_METHOD==2. 
*/ - double_t z, r, r2, y, y0, invc, logc; + double z, r, r2, y, y0, invc, logc; uint32_t ix, iz, tmp; int k, i; @@ -73,11 +72,11 @@ __logf (float x) iz = ix - (tmp & 0xff800000); invc = T[i].invc; logc = T[i].logc; - z = (double_t) asfloat (iz); + z = asfloat (iz); /* log(x) = log1p(z/c-1) + log(c) + k*Ln2 */ r = z * invc - 1; - y0 = logc + (double_t) k * Ln2; + y0 = logc + (double) k * Ln2; /* Pipelined polynomial evaluation to approximate log1p(r). */ r2 = r * r; diff --git a/sysdeps/ieee754/flt-32/e_powf.c b/sysdeps/ieee754/flt-32/e_powf.c index 07b68c2bac..4f60b02a84 100644 --- a/sysdeps/ieee754/flt-32/e_powf.c +++ b/sysdeps/ieee754/flt-32/e_powf.c @@ -41,11 +41,10 @@ relerr_exp2: 1.69 * 2^-34 (Relative error of exp2(ylogx).) /* Subnormal input is normalized so ix has negative biased exponent. Output is multiplied by N (POWF_SCALE) if TOINT_INTRINICS is set. */ -static inline double_t +static inline double log2_inline (uint32_t ix) { - /* double_t for better performance on targets with FLT_EVAL_METHOD==2. */ - double_t z, r, r2, r4, p, q, y, y0, invc, logc; + double z, r, r2, r4, p, q, y, y0, invc, logc; uint32_t iz, top, tmp; int k, i; @@ -59,11 +58,11 @@ log2_inline (uint32_t ix) k = (int32_t) top >> (23 - POWF_SCALE_BITS); /* arithmetic shift */ invc = T[i].invc; logc = T[i].logc; - z = (double_t) asfloat (iz); + z = asfloat (iz); /* log2(x) = log1p(z/c-1)/ln2 + log2(c) + k */ r = z * invc - 1; - y0 = logc + (double_t) k; + y0 = logc + (double) k; /* Pipelined polynomial evaluation to approximate log1p(r)/ln2. */ r2 = r * r; @@ -85,12 +84,11 @@ log2_inline (uint32_t ix) /* The output of log2 and thus the input of exp2 is either scaled by N (in case of fast toint intrinsics) or not. The unscaled xd must be in [-1021,1023], sign_bias sets the sign of the result. */ -static inline double_t +static inline double exp2_inline (double_t xd, uint32_t sign_bias) { uint64_t ki, ski, t; - /* double_t for better performance on targets with FLT_EVAL_METHOD==2. */ - double_t kd, z, r, r2, y, s; + double kd, z, r, r2, y, s; #if TOINT_INTRINSICS # define C __exp2f_data.poly_scaled @@ -101,6 +99,7 @@ exp2_inline (double_t xd, uint32_t sign_bias) # define C __exp2f_data.poly # define SHIFT __exp2f_data.shift_scaled /* x = k/N + r with r in [-1/(2N), 1/(2N)] */ + xd = (double) xd; /* Force double precision if FLT_EVAL_METHOD == 2. */ kd = (double) (xd + SHIFT); /* Rounding to double precision is required. */ ki = asuint64 (kd); kd -= SHIFT; /* k/N */ @@ -171,7 +170,7 @@ __powf (float x, float y) } if (__glibc_unlikely (zeroinfnan (ix))) { - float_t x2 = x * x; + float x2 = x * x; if (ix & 0x80000000 && checkint (iy) == 1) { x2 = -x2; @@ -203,7 +202,7 @@ __powf (float x, float y) } } /* y * log2(x) cannot overflow since y is single precision. */ - double_t ylogx = (double) y * log2_inline (ix); + double ylogx = (double) y * log2_inline (ix); /* Check whether |y*log(x)| >= 126. */ if (__glibc_unlikely ((asuint64 (ylogx) >> 47 & 0xffff) diff --git a/sysdeps/ieee754/flt-32/math_config.h b/sysdeps/ieee754/flt-32/math_config.h index e927bdb2f9..fb78ade9e2 100644 --- a/sysdeps/ieee754/flt-32/math_config.h +++ b/sysdeps/ieee754/flt-32/math_config.h @@ -48,14 +48,14 @@ /* Round x to nearest int in all rounding modes, ties have to be rounded consistently with converttoint so the results match. If the result would be outside of [-2^31, 2^31-1] then the semantics is unspecified. 
*/ -static inline double_t -roundtoint (double_t x); +static inline double +roundtoint (double x); /* Convert x to nearest int in all rounding modes, ties have to be rounded consistently with roundtoint. If the result is not representible in an int32_t then the semantics is unspecified. */ static inline int32_t -converttoint (double_t x); +converttoint (double x); #endif #ifndef ROUNDEVEN_INTRINSICS
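
Background note on the typedefs this change removes (for reference only, not part of the patch): C99 <math.h> defines float_t and double_t as the types in which float and double expressions are evaluated, so they only differ from plain float and double when FLT_EVAL_METHOD in <float.h> is nonzero, e.g. 2 on i386 with x87 arithmetic, where both typedefs are long double. On FLT_EVAL_METHOD == 0 targets such as x86-64 and AArch64 the substitution above is purely textual. A minimal standalone program to confirm the mapping on a given target could look like this (illustrative sketch only):

#include <float.h>
#include <math.h>
#include <stdio.h>

int
main (void)
{
  /* FLT_EVAL_METHOD == 0: float_t is float and double_t is double.
     FLT_EVAL_METHOD == 2: both typedefs are long double (x87).  */
  printf ("FLT_EVAL_METHOD   = %d\n", (int) FLT_EVAL_METHOD);
  printf ("sizeof (float_t)  = %zu\n", sizeof (float_t));
  printf ("sizeof (double_t) = %zu\n", sizeof (double_t));
  printf ("sizeof (double)   = %zu\n", sizeof (double));
  return 0;
}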
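
The roundtoint/converttoint declarations in the last hunk are only defined when TOINT_INTRINSICS is set (the AArch64 math_private.h hunk at the top of the patch maps them to round and lround). The generic paths instead round via a bias constant, as seen in the exp2_inline hunk of e_powf.c (kd = (double) (xd + SHIFT); ki = asuint64 (kd); kd -= SHIFT;). A minimal sketch of that technique follows; the names Shift, asuint64_sketch, roundtoint_sketch and converttoint_sketch are illustrative rather than glibc identifiers, and the code assumes round-to-nearest mode with |x| well below 2^31.

#include <stdint.h>
#include <string.h>

static const double Shift = 0x1.8p52;	/* 1.5 * 2^52 */

static inline uint64_t
asuint64_sketch (double d)
{
  uint64_t u;
  memcpy (&u, &d, sizeof u);
  return u;
}

static inline double
roundtoint_sketch (double x)
{
  /* Adding 1.5*2^52 moves x into a binade whose ulp is 1, so the addition
     itself rounds x to the nearest integer; the volatile store discards
     any x87 excess precision.  Ties are resolved the same way as in
     converttoint_sketch below, which is the consistency the callers need.  */
  volatile double kd = x + Shift;
  return kd - Shift;
}

static inline int32_t
converttoint_sketch (double x)
{
  volatile double kd = x + Shift;
  /* The low bits of the biased value hold the rounded integer in two's
     complement, so truncating the bit pattern recovers it.  */
  return (int32_t) (uint32_t) asuint64_sketch (kd);
}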