}
} fop_div;
+// Implement fold for a cast from float to another float: compute the
+// range of (TYPE) op1.
+bool
+operator_cast::fold_range (frange &r, tree type, const frange &op1,
+ const frange &, relation_trio) const
+{
+ REAL_VALUE_TYPE lb, ub;
+ enum machine_mode mode = TYPE_MODE (type);
+ bool mode_composite = MODE_COMPOSITE_P (mode);
+
+ if (empty_range_varying (r, type, op1, op1))
+ return true;
+ // If TYPE can't represent NANs but the operand may be one, we can't
+ // say anything useful about the result.
+ if (!MODE_HAS_NANS (mode) && op1.maybe_isnan ())
+ {
+ r.set_varying (type);
+ return true;
+ }
+ // A NAN operand converts to a NAN.
+ if (op1.known_isnan ())
+ {
+ r.set_nan (type);
+ return true;
+ }
+
+ // Round the operand's bounds into TYPE's mode.
+ const REAL_VALUE_TYPE &lh_lb = op1.lower_bound ();
+ const REAL_VALUE_TYPE &lh_ub = op1.upper_bound ();
+ real_convert (&lb, mode, &lh_lb);
+ real_convert (&ub, mode, &lh_ub);
+
+ // real_convert rounds to nearest, but with -frounding-math the
+ // runtime conversion may round either way; if a bound was rounded
+ // inwards (towards the interior of the range), widen it by one ulp
+ // in the outward direction so the range stays conservative.
+ if (flag_rounding_math)
+ {
+ if (real_less (&lh_lb, &lb))
+ {
+ if (mode_composite
+ && (real_isdenormal (&lb, mode) || real_iszero (&lb)))
+ {
+ // IBM extended denormals only have DFmode precision.
+ REAL_VALUE_TYPE tmp, tmp2;
+ real_convert (&tmp2, DFmode, &lh_lb);
+ real_nextafter (&tmp, REAL_MODE_FORMAT (DFmode), &tmp2,
+ &dconstninf);
+ real_convert (&lb, mode, &tmp);
+ }
+ else
+ frange_nextafter (mode, lb, dconstninf);
+ }
+ if (real_less (&ub, &lh_ub))
+ {
+ if (mode_composite
+ && (real_isdenormal (&ub, mode) || real_iszero (&ub)))
+ {
+ // IBM extended denormals only have DFmode precision.
+ REAL_VALUE_TYPE tmp, tmp2;
+ real_convert (&tmp2, DFmode, &lh_ub);
+ real_nextafter (&tmp, REAL_MODE_FORMAT (DFmode), &tmp2,
+ &dconstinf);
+ real_convert (&ub, mode, &tmp);
+ }
+ else
+ frange_nextafter (mode, ub, dconstinf);
+ }
+ }
+
+ // Carry the operand's NAN state over to the result.
+ r.set (type, lb, ub, op1.get_nan_state ());
+
+ // If a finite operand overflowed to a known infinity, don't narrow
+ // the result to just that infinity: with -ftrapping-math the
+ // overflowing conversion may raise an FP exception instead of
+ // quietly producing Inf, so also keep the largest (or smallest)
+ // representable finite value in the range.
+ if (flag_trapping_math
+ && MODE_HAS_INFINITIES (TYPE_MODE (type))
+ && r.known_isinf ()
+ && !op1.known_isinf ())
+ {
+ REAL_VALUE_TYPE inf = r.lower_bound ();
+ if (real_isneg (&inf))
+ {
+ REAL_VALUE_TYPE min = real_min_representable (type);
+ r.set (type, inf, min);
+ }
+ else
+ {
+ REAL_VALUE_TYPE max = real_max_representable (type);
+ r.set (type, max, inf);
+ }
+ }
+
+ // Account for targets that flush denormal results to zero.
+ r.flush_denormals_to_zero ();
+ return true;
+}
+
+// Implement op1_range for a cast from float to another float: given
+// the range of the result LHS, compute the range of the operand.
+bool
+operator_cast::op1_range (frange &r, tree type, const frange &lhs,
+ const frange &op2, relation_trio) const
+{
+ if (lhs.undefined_p ())
+ return false;
+ tree lhs_type = lhs.type ();
+ enum machine_mode mode = TYPE_MODE (type);
+ enum machine_mode lhs_mode = TYPE_MODE (lhs_type);
+ frange wlhs;
+ // RM is the flag_rounding_math value to force during the reverse
+ // fold below.
+ bool rm;
+ if (REAL_MODE_FORMAT (mode)->ieee_bits
+ && REAL_MODE_FORMAT (lhs_mode)->ieee_bits
+ && (REAL_MODE_FORMAT (lhs_mode)->ieee_bits
+ >= REAL_MODE_FORMAT (mode)->ieee_bits)
+ && pow2p_hwi (REAL_MODE_FORMAT (mode)->ieee_bits))
+ {
+ /* If the cast is widening from IEEE exchange mode to
+ wider exchange mode or extended mode, no need to extend
+ the range on reverse operation. */
+ rm = false;
+ wlhs = lhs;
+ }
+ else
+ {
+ // Otherwise the forward cast may have rounded, so widen the
+ // result range by one ulp in each direction before undoing it,
+ // and make the fold below widen its bounds as well.
+ rm = true;
+ wlhs = float_widen_lhs_range (lhs_type, lhs);
+ }
+ // Temporarily override -frounding-math so the inner fold_range
+ // widens its bounds exactly when the cast could have been inexact,
+ // independently of the user's actual setting.
+ auto save_flag_rounding_math = flag_rounding_math;
+ flag_rounding_math = rm;
+ bool ret = float_binary_op_range_finish (fold_range (r, type, wlhs, op2),
+ r, type, lhs);
+ flag_rounding_math = save_flag_rounding_math;
+ return ret;
+}
+
// Implement fold for a cast from float to an int.
bool
operator_cast::fold_range (irange &, tree, const frange &,
bool fold_range (prange &r, tree type,
const irange &op1, const prange &op2,
relation_trio rel = TRIO_VARYING) const final override;
+ bool fold_range (frange &r, tree type,
+ const frange &op1, const frange &op2,
+ relation_trio = TRIO_VARYING) const final override;
bool fold_range (irange &r, tree type,
- const frange &lh,
- const irange &rh,
- relation_trio = TRIO_VARYING) const;
+ const frange &op1, const irange &op2,
+ relation_trio = TRIO_VARYING) const final override;
bool fold_range (frange &r, tree type,
- const irange &lh,
- const frange &rh,
- relation_trio = TRIO_VARYING) const;
+ const irange &op1, const frange &op2,
+ relation_trio = TRIO_VARYING) const final override;
bool op1_range (irange &r, tree type,
const irange &lhs, const irange &op2,
const irange &lhs, const prange &op2,
relation_trio rel = TRIO_VARYING) const final override;
bool op1_range (frange &r, tree type,
- const irange &lhs,
- const irange &op2,
- relation_trio = TRIO_VARYING) const;
+ const frange &lhs, const frange &op2,
+ relation_trio = TRIO_VARYING) const final override;
+ bool op1_range (frange &r, tree type,
+ const irange &lhs, const irange &op2,
+ relation_trio = TRIO_VARYING) const final override;
bool op1_range (irange &r, tree type,
- const frange &lhs,
- const frange &op2,
- relation_trio = TRIO_VARYING) const;
+ const frange &lhs, const frange &op2,
+ relation_trio = TRIO_VARYING) const final override;
relation_kind lhs_op1_relation (const irange &lhs,
const irange &op1, const irange &op2,
--- /dev/null
+/* PR tree-optimization/120231 */
+/* { dg-do compile } */
+/* { dg-options "-O2 -fdump-tree-optimized" } */
+/* { dg-add-options float32 } */
+/* { dg-add-options float64 } */
+/* { dg-add-options float128 } */
+/* { dg-require-effective-target float32 } */
+/* { dg-require-effective-target float64 } */
+/* { dg-require-effective-target float128 } */
+/* { dg-final { scan-tree-dump-not "link_failure \\\(\\\);" "optimized" } } */
+
+void link_failure (void);
+
+/* Forward propagation through casts: from x in [-64.0, 0x1.p+140]
+   ranger should prove all the tests below are false and elide the
+   link_failure call.  Note z itself may be +Inf, since 2^140 exceeds
+   _Float32's maximum (~2^128), which is why there is no isinf (z)
+   check; the widening casts w and v are exact.  */
+void
+foo (_Float64 x)
+{
+ if (x >= -64.0f64 && x <= 0x1.p+140f64)
+ {
+ _Float32 z = x;
+ _Float128 w = z;
+ _Float128 v = x;
+ if (__builtin_isnan (z)
+ || __builtin_isnan (w)
+ || __builtin_isnan (v)
+ || z < -64.0f32
+ || w < -64.0f128
+ || __builtin_isinf (v)
+ || v < -64.0f128
+ || v > 0x1.p+140f128)
+ link_failure ();
+ }
+}
+
+/* Reverse propagation through a narrowing cast: from
+   z = (_Float32) x in [-64.0, 0x1.p+38], x must lie within those
+   bounds widened by one _Float32 ulp (0x1.000001p+6 = 64 + 2^-18,
+   0x1.000001p+38), since the narrowing conversion may have rounded.  */
+void
+bar (_Float64 x)
+{
+ _Float32 z = x;
+ if (z >= -64.0f32 && z <= 0x1.p+38f32)
+ {
+ if (__builtin_isnan (x)
+ || __builtin_isinf (x)
+ || x < -0x1.000001p+6f64
+ || x > 0x1.000001p+38f64)
+ link_failure ();
+ }
+}
+
+/* Reverse propagation through a widening cast: _Float64 -> _Float128
+   is exact, so the deduced range of x needs no ulp widening.  In the
+   first test the upper bound 2^1026 exceeds _Float64's maximum
+   (< 2^1024), so only the lower bound constrains x; in the second
+   both bounds are representable and transfer exactly.  */
+void
+baz (_Float64 x)
+{
+ _Float128 w = x;
+ if (w >= -64.0f128 && w <= 0x1.p+1026f128)
+ {
+ if (__builtin_isnan (x)
+ || __builtin_isinf (x)
+ || x < -64.0f64)
+ link_failure ();
+ }
+ if (w >= 128.25f128 && w <= 0x1.p+1020f128)
+ {
+ if (__builtin_isnan (x)
+ || __builtin_isinf (x)
+ || x < 128.25f64
+ || x > 0x1.p+1020f64)
+ link_failure ();
+ }
+}