/* Constant folding for calls to built-in and internal functions.
- Copyright (C) 1988-2015 Free Software Foundation, Inc.
+ Copyright (C) 1988-2021 Free Software Foundation, Inc.
This file is part of GCC.
#include "tree.h"
#include "stor-layout.h"
#include "options.h"
+#include "fold-const.h"
#include "fold-const-call.h"
+#include "case-cfn-macros.h"
+#include "tm.h" /* For C[LT]Z_DEFINED_AT_ZERO. */
+#include "builtins.h"
+#include "gimple-expr.h"
+#include "tree-vector-builder.h"
/* Functions that test for certain constant types, abstracting away the
decision about whether to check for overflow. */
return TREE_CODE (t) == COMPLEX_CST;
}
+/* Return true if T is an integer constant of type size_type_node whose
+   value fits in an unsigned HOST_WIDE_INT.  Store that value in
+   *SIZE_OUT if so.  */
+
+static inline bool
+size_t_cst_p (tree t, unsigned HOST_WIDE_INT *size_out)
+{
+  if (!types_compatible_p (size_type_node, TREE_TYPE (t))
+      || !integer_cst_p (t)
+      || !tree_fits_uhwi_p (t))
+    return false;
+  *size_out = tree_to_uhwi (t);
+  return true;
+}
+
+/* RES is the result of a comparison in which < 0 means "less", 0 means
+   "equal" and > 0 means "more".  Canonicalize it to -1, 0 or 1 and
+   return it in type TYPE.  */
+
+tree
+build_cmp_result (tree type, int res)
+{
+  /* (res > 0) - (res < 0) is the branch-free sign of RES.  */
+  int sign = (res > 0) - (res < 0);
+  return build_int_cst (type, sign);
+}
+
/* M is the result of trying to constant-fold an expression (starting
with clear MPFR flags) and INEXACT says whether the result in M is
exact or inexact. Return true if M can be used as a constant-folded
return false;
REAL_VALUE_TYPE tmp;
- real_from_mpfr (&tmp, m, format, GMP_RNDN);
+ real_from_mpfr (&tmp, m, format, MPFR_RNDN);
/* Proceed iff GCC's REAL_VALUE_TYPE can hold the MPFR values.
If the REAL_VALUE_TYPE is zero but the mpft_t is not, then we
return false;
int prec = format->p;
- mp_rnd_t rnd = format->round_towards_zero ? GMP_RNDZ : GMP_RNDN;
+ mpfr_rnd_t rnd = format->round_towards_zero ? MPFR_RNDZ : MPFR_RNDN;
mpfr_t m;
mpfr_init2 (m, prec);
- mpfr_from_real (m, arg, GMP_RNDN);
+ mpfr_from_real (m, arg, MPFR_RNDN);
mpfr_clear_flags ();
bool inexact = func (m, m, rnd);
bool ok = do_mpfr_ckconv (result, m, inexact, format);
return false;
int prec = format->p;
- mp_rnd_t rnd = format->round_towards_zero ? GMP_RNDZ : GMP_RNDN;
+ mpfr_rnd_t rnd = format->round_towards_zero ? MPFR_RNDZ : MPFR_RNDN;
mpfr_t m, ms, mc;
mpfr_inits2 (prec, m, ms, mc, NULL);
- mpfr_from_real (m, arg, GMP_RNDN);
+ mpfr_from_real (m, arg, MPFR_RNDN);
mpfr_clear_flags ();
bool inexact = mpfr_sin_cos (ms, mc, m, rnd);
bool ok = (do_mpfr_ckconv (result_sin, ms, inexact, format)
return false;
int prec = format->p;
- mp_rnd_t rnd = format->round_towards_zero ? GMP_RNDZ : GMP_RNDN;
+ mpfr_rnd_t rnd = format->round_towards_zero ? MPFR_RNDZ : MPFR_RNDN;
mpfr_t m0, m1;
mpfr_inits2 (prec, m0, m1, NULL);
- mpfr_from_real (m0, arg0, GMP_RNDN);
- mpfr_from_real (m1, arg1, GMP_RNDN);
+ mpfr_from_real (m0, arg0, MPFR_RNDN);
+ mpfr_from_real (m1, arg1, MPFR_RNDN);
mpfr_clear_flags ();
bool inexact = func (m0, m0, m1, rnd);
bool ok = do_mpfr_ckconv (result, m0, inexact, format);
static bool
do_mpfr_arg2 (real_value *result,
- int (*func) (mpfr_ptr, long, mpfr_srcptr, mp_rnd_t),
+ int (*func) (mpfr_ptr, long, mpfr_srcptr, mpfr_rnd_t),
const wide_int_ref &arg0, const real_value *arg1,
const real_format *format)
{
return false;
int prec = format->p;
- mp_rnd_t rnd = format->round_towards_zero ? GMP_RNDZ : GMP_RNDN;
+ mpfr_rnd_t rnd = format->round_towards_zero ? MPFR_RNDZ : MPFR_RNDN;
mpfr_t m;
mpfr_init2 (m, prec);
- mpfr_from_real (m, arg1, GMP_RNDN);
+ mpfr_from_real (m, arg1, MPFR_RNDN);
mpfr_clear_flags ();
bool inexact = func (m, arg0.to_shwi (), m, rnd);
bool ok = do_mpfr_ckconv (result, m, inexact, format);
return false;
int prec = format->p;
- mp_rnd_t rnd = format->round_towards_zero ? GMP_RNDZ : GMP_RNDN;
+ mpfr_rnd_t rnd = format->round_towards_zero ? MPFR_RNDZ : MPFR_RNDN;
mpfr_t m0, m1, m2;
mpfr_inits2 (prec, m0, m1, m2, NULL);
- mpfr_from_real (m0, arg0, GMP_RNDN);
- mpfr_from_real (m1, arg1, GMP_RNDN);
- mpfr_from_real (m2, arg2, GMP_RNDN);
+ mpfr_from_real (m0, arg0, MPFR_RNDN);
+ mpfr_from_real (m1, arg1, MPFR_RNDN);
+ mpfr_from_real (m2, arg2, MPFR_RNDN);
mpfr_clear_flags ();
bool inexact = func (m0, m0, m1, m2, rnd);
bool ok = do_mpfr_ckconv (result, m0, inexact, format);
return false;
REAL_VALUE_TYPE tmp_real, tmp_imag;
- real_from_mpfr (&tmp_real, mpc_realref (m), format, GMP_RNDN);
- real_from_mpfr (&tmp_imag, mpc_imagref (m), format, GMP_RNDN);
+ real_from_mpfr (&tmp_real, mpc_realref (m), format, MPFR_RNDN);
+ real_from_mpfr (&tmp_imag, mpc_imagref (m), format, MPFR_RNDN);
/* Proceed iff GCC's REAL_VALUE_TYPE can hold the MPFR values.
If the REAL_VALUE_TYPE is zero but the mpft_t is not, then we
mpc_t m;
mpc_init2 (m, prec);
- mpfr_from_real (mpc_realref (m), arg_real, GMP_RNDN);
- mpfr_from_real (mpc_imagref (m), arg_imag, GMP_RNDN);
+ mpfr_from_real (mpc_realref (m), arg_real, MPFR_RNDN);
+ mpfr_from_real (mpc_imagref (m), arg_imag, MPFR_RNDN);
mpfr_clear_flags ();
bool inexact = func (m, m, crnd);
bool ok = do_mpc_ckconv (result_real, result_imag, m, inexact, format);
mpc_init2 (m0, prec);
mpc_init2 (m1, prec);
- mpfr_from_real (mpc_realref (m0), arg0_real, GMP_RNDN);
- mpfr_from_real (mpc_imagref (m0), arg0_imag, GMP_RNDN);
- mpfr_from_real (mpc_realref (m1), arg1_real, GMP_RNDN);
- mpfr_from_real (mpc_imagref (m1), arg1_imag, GMP_RNDN);
+ mpfr_from_real (mpc_realref (m0), arg0_real, MPFR_RNDN);
+ mpfr_from_real (mpc_imagref (m0), arg0_imag, MPFR_RNDN);
+ mpfr_from_real (mpc_realref (m1), arg1_real, MPFR_RNDN);
+ mpfr_from_real (mpc_imagref (m1), arg1_imag, MPFR_RNDN);
mpfr_clear_flags ();
bool inexact = func (m0, m0, m1, crnd);
bool ok = do_mpc_ckconv (result_real, result_imag, m0, inexact, format);
|| !real_equal (arg0, &dconst0)))
{
bool inexact = real_powi (result, format, arg0, n1);
- if (flag_unsafe_math_optimizations || !inexact)
+ /* Avoid the folding if flag_signaling_nans is on. */
+ if (flag_unsafe_math_optimizations
+ || (!inexact
+ && !(flag_signaling_nans
+ && REAL_VALUE_ISSIGNALING_NAN (*arg0))))
return true;
}
return false;
}
+/* Try to evaluate:
+
+      *RESULT = nextafter (*ARG0, *ARG1)
+
+   or
+
+      *RESULT = nexttoward (*ARG0, *ARG1)
+
+   in format FORMAT.  Return true on success.  */
+
+static bool
+fold_const_nextafter (real_value *result, const real_value *arg0,
+		      const real_value *arg1, const real_format *format)
+{
+  /* Punt on signaling NaNs; folding them away would lose whatever
+     runtime effects the real call has on them.  */
+  if (REAL_VALUE_ISSIGNALING_NAN (*arg0)
+      || REAL_VALUE_ISSIGNALING_NAN (*arg1))
+    return false;
+
+  /* Don't handle composite modes, nor decimal, nor modes without
+     inf or denorm at least for now.  */
+  if (format->pnan < format->p
+      || format->b == 10
+      || !format->has_inf
+      || !format->has_denorm)
+    return false;
+
+  if (real_nextafter (result, format, arg0, arg1)
+      /* If raising underflow or overflow and setting errno to ERANGE,
+	 fail if we care about those side-effects.  */
+      && (flag_trapping_math || flag_errno_math))
+    return false;
+  /* Similarly for nextafter (0, 1) raising underflow.  */
+  else if (flag_trapping_math
+	   && arg0->cl == rvc_zero
+	   && result->cl != rvc_zero)
+    return false;
+
+  /* Round the result into FORMAT before handing it back.  */
+  real_convert (result, format, result);
+
+  return true;
+}
+
/* Try to evaluate:
*RESULT = ldexp (*ARG0, ARG1)
if (wi::les_p (arg1, -max_exp_adj) || wi::ges_p (arg1, max_exp_adj))
return false;
+ /* Don't perform operation if we honor signaling NaNs and
+ operand is a signaling NaN. */
+ if (!flag_unsafe_math_optimizations
+ && flag_signaling_nans
+ && REAL_VALUE_ISSIGNALING_NAN (*arg0))
+ return false;
+
REAL_VALUE_TYPE initial_result;
real_ldexp (&initial_result, arg0, arg1.to_shwi ());
return real_equal (&initial_result, result);
}
+/* Fold a call to __builtin_nan or __builtin_nans with argument ARG and
+   return type TYPE.  QUIET is true if a quiet rather than signalling
+   NaN is required.  */
+
+static tree
+fold_const_builtin_nan (tree type, tree arg, bool quiet)
+{
+  const char *tag = c_getstr (arg);
+  if (tag)
+    {
+      REAL_VALUE_TYPE real;
+      /* real_nan parses TAG as the NaN payload string.  */
+      if (real_nan (&real, tag, quiet, TYPE_MODE (type)))
+	return build_real (type, real);
+    }
+  return NULL_TREE;
+}
+
+/* Fold a call to IFN_REDUC_<CODE> (ARG), returning a value of type TYPE.  */
+
+static tree
+fold_const_reduction (tree type, tree arg, tree_code code)
+{
+  unsigned HOST_WIDE_INT count;
+  if (TREE_CODE (arg) != VECTOR_CST
+      || !VECTOR_CST_NELTS (arg).is_constant (&count))
+    return NULL_TREE;
+
+  /* Fold the elements left to right, starting from element 0.  */
+  tree acc = VECTOR_CST_ELT (arg, 0);
+  unsigned HOST_WIDE_INT i = 1;
+  while (i < count)
+    {
+      acc = const_binop (code, type, acc, VECTOR_CST_ELT (arg, i));
+      /* Give up as soon as a step fails to fold to a constant.  */
+      if (acc == NULL_TREE || !CONSTANT_CLASS_P (acc))
+	return NULL_TREE;
+      ++i;
+    }
+  return acc;
+}
+
+/* Fold a call to IFN_VEC_CONVERT (ARG) returning TYPE.  */
+
+static tree
+fold_const_vec_convert (tree ret_type, tree arg)
+{
+  enum tree_code code = NOP_EXPR;
+  tree arg_type = TREE_TYPE (arg);
+  if (TREE_CODE (arg) != VECTOR_CST)
+    return NULL_TREE;
+
+  gcc_checking_assert (VECTOR_TYPE_P (ret_type) && VECTOR_TYPE_P (arg_type));
+
+  /* Pick the scalar conversion from the element types: float -> int is
+     FIX_TRUNC_EXPR, int -> float is FLOAT_EXPR, anything else is a
+     plain NOP_EXPR conversion.  */
+  if (INTEGRAL_TYPE_P (TREE_TYPE (ret_type))
+      && SCALAR_FLOAT_TYPE_P (TREE_TYPE (arg_type)))
+    code = FIX_TRUNC_EXPR;
+  else if (INTEGRAL_TYPE_P (TREE_TYPE (arg_type))
+	   && SCALAR_FLOAT_TYPE_P (TREE_TYPE (ret_type)))
+    code = FLOAT_EXPR;
+
+  /* We can't handle steps directly when extending, since the
+     values need to wrap at the original precision first.  */
+  bool step_ok_p
+    = (INTEGRAL_TYPE_P (TREE_TYPE (ret_type))
+       && INTEGRAL_TYPE_P (TREE_TYPE (arg_type))
+       && (TYPE_PRECISION (TREE_TYPE (ret_type))
+	   <= TYPE_PRECISION (TREE_TYPE (arg_type))));
+  tree_vector_builder elts;
+  if (!elts.new_unary_operation (ret_type, arg, step_ok_p))
+    return NULL_TREE;
+
+  /* Only the encoded elements need converting; the builder derives the
+     remaining lanes from them.  */
+  unsigned int count = elts.encoded_nelts ();
+  for (unsigned int i = 0; i < count; ++i)
+    {
+      tree elt = fold_unary (code, TREE_TYPE (ret_type),
+			     VECTOR_CST_ELT (arg, i));
+      /* Give up if any element fails to fold to a constant.  */
+      if (elt == NULL_TREE || !CONSTANT_CLASS_P (elt))
+	return NULL_TREE;
+      elts.quick_push (elt);
+    }
+
+  return elts.build ();
+}
+
+/* Try to evaluate:
+
+      IFN_WHILE_ULT (ARG0, ARG1, (TYPE) { ... })
+
+   Return the value on success and null on failure.  */
+
+static tree
+fold_while_ult (tree type, poly_uint64 arg0, poly_uint64 arg1)
+{
+  /* If even the first lane (ARG0 + 0) is known not to be below ARG1,
+     the whole mask is false.  */
+  if (known_ge (arg0, arg1))
+    return build_zero_cst (type);
+
+  /* If the comparison is undecidable for some runtime values of the
+     poly_ints, we can't fold.  */
+  if (maybe_ge (arg0, arg1))
+    return NULL_TREE;
+
+  poly_uint64 diff = arg1 - arg0;
+  poly_uint64 nelts = TYPE_VECTOR_SUBPARTS (type);
+  /* Every lane index is known to be below DIFF: all-true mask.  */
+  if (known_ge (diff, nelts))
+    return build_all_ones_cst (type);
+
+  unsigned HOST_WIDE_INT const_diff;
+  if (known_le (diff, nelts) && diff.is_constant (&const_diff))
+    {
+      /* The first CONST_DIFF lanes are true, the rest are false.  */
+      tree minus_one = build_minus_one_cst (TREE_TYPE (type));
+      tree zero = build_zero_cst (TREE_TYPE (type));
+      return build_vector_a_then_b (type, const_diff, minus_one, zero);
+    }
+  return NULL_TREE;
+}
+
/* Try to evaluate:
*RESULT = FN (*ARG)
in format FORMAT. Return true on success. */
static bool
-fold_const_call_ss (real_value *result, built_in_function fn,
+fold_const_call_ss (real_value *result, combined_fn fn,
const real_value *arg, const real_format *format)
{
switch (fn)
{
- CASE_FLT_FN (BUILT_IN_SQRT):
+ CASE_CFN_SQRT:
+ CASE_CFN_SQRT_FN:
return (real_compare (GE_EXPR, arg, &dconst0)
&& do_mpfr_arg1 (result, mpfr_sqrt, arg, format));
- CASE_FLT_FN (BUILT_IN_CBRT):
+ CASE_CFN_CBRT:
return do_mpfr_arg1 (result, mpfr_cbrt, arg, format);
- CASE_FLT_FN (BUILT_IN_ASIN):
+ CASE_CFN_ASIN:
return (real_compare (GE_EXPR, arg, &dconstm1)
&& real_compare (LE_EXPR, arg, &dconst1)
&& do_mpfr_arg1 (result, mpfr_asin, arg, format));
- CASE_FLT_FN (BUILT_IN_ACOS):
+ CASE_CFN_ACOS:
return (real_compare (GE_EXPR, arg, &dconstm1)
&& real_compare (LE_EXPR, arg, &dconst1)
&& do_mpfr_arg1 (result, mpfr_acos, arg, format));
- CASE_FLT_FN (BUILT_IN_ATAN):
+ CASE_CFN_ATAN:
return do_mpfr_arg1 (result, mpfr_atan, arg, format);
- CASE_FLT_FN (BUILT_IN_ASINH):
+ CASE_CFN_ASINH:
return do_mpfr_arg1 (result, mpfr_asinh, arg, format);
- CASE_FLT_FN (BUILT_IN_ACOSH):
+ CASE_CFN_ACOSH:
return (real_compare (GE_EXPR, arg, &dconst1)
&& do_mpfr_arg1 (result, mpfr_acosh, arg, format));
- CASE_FLT_FN (BUILT_IN_ATANH):
+ CASE_CFN_ATANH:
return (real_compare (GE_EXPR, arg, &dconstm1)
&& real_compare (LE_EXPR, arg, &dconst1)
&& do_mpfr_arg1 (result, mpfr_atanh, arg, format));
- CASE_FLT_FN (BUILT_IN_SIN):
+ CASE_CFN_SIN:
return do_mpfr_arg1 (result, mpfr_sin, arg, format);
- CASE_FLT_FN (BUILT_IN_COS):
+ CASE_CFN_COS:
return do_mpfr_arg1 (result, mpfr_cos, arg, format);
- CASE_FLT_FN (BUILT_IN_TAN):
+ CASE_CFN_TAN:
return do_mpfr_arg1 (result, mpfr_tan, arg, format);
- CASE_FLT_FN (BUILT_IN_SINH):
+ CASE_CFN_SINH:
return do_mpfr_arg1 (result, mpfr_sinh, arg, format);
- CASE_FLT_FN (BUILT_IN_COSH):
+ CASE_CFN_COSH:
return do_mpfr_arg1 (result, mpfr_cosh, arg, format);
- CASE_FLT_FN (BUILT_IN_TANH):
+ CASE_CFN_TANH:
return do_mpfr_arg1 (result, mpfr_tanh, arg, format);
- CASE_FLT_FN (BUILT_IN_ERF):
+ CASE_CFN_ERF:
return do_mpfr_arg1 (result, mpfr_erf, arg, format);
- CASE_FLT_FN (BUILT_IN_ERFC):
+ CASE_CFN_ERFC:
return do_mpfr_arg1 (result, mpfr_erfc, arg, format);
- CASE_FLT_FN (BUILT_IN_TGAMMA):
+ CASE_CFN_TGAMMA:
return do_mpfr_arg1 (result, mpfr_gamma, arg, format);
- CASE_FLT_FN (BUILT_IN_EXP):
+ CASE_CFN_EXP:
return do_mpfr_arg1 (result, mpfr_exp, arg, format);
- CASE_FLT_FN (BUILT_IN_EXP2):
+ CASE_CFN_EXP2:
return do_mpfr_arg1 (result, mpfr_exp2, arg, format);
- CASE_FLT_FN (BUILT_IN_EXP10):
- CASE_FLT_FN (BUILT_IN_POW10):
+ CASE_CFN_EXP10:
+ CASE_CFN_POW10:
return do_mpfr_arg1 (result, mpfr_exp10, arg, format);
- CASE_FLT_FN (BUILT_IN_EXPM1):
+ CASE_CFN_EXPM1:
return do_mpfr_arg1 (result, mpfr_expm1, arg, format);
- CASE_FLT_FN (BUILT_IN_LOG):
+ CASE_CFN_LOG:
return (real_compare (GT_EXPR, arg, &dconst0)
&& do_mpfr_arg1 (result, mpfr_log, arg, format));
- CASE_FLT_FN (BUILT_IN_LOG2):
+ CASE_CFN_LOG2:
return (real_compare (GT_EXPR, arg, &dconst0)
&& do_mpfr_arg1 (result, mpfr_log2, arg, format));
- CASE_FLT_FN (BUILT_IN_LOG10):
+ CASE_CFN_LOG10:
return (real_compare (GT_EXPR, arg, &dconst0)
&& do_mpfr_arg1 (result, mpfr_log10, arg, format));
- CASE_FLT_FN (BUILT_IN_LOG1P):
+ CASE_CFN_LOG1P:
return (real_compare (GT_EXPR, arg, &dconstm1)
&& do_mpfr_arg1 (result, mpfr_log1p, arg, format));
- CASE_FLT_FN (BUILT_IN_J0):
+ CASE_CFN_J0:
return do_mpfr_arg1 (result, mpfr_j0, arg, format);
- CASE_FLT_FN (BUILT_IN_J1):
+ CASE_CFN_J1:
return do_mpfr_arg1 (result, mpfr_j1, arg, format);
- CASE_FLT_FN (BUILT_IN_Y0):
+ CASE_CFN_Y0:
return (real_compare (GT_EXPR, arg, &dconst0)
&& do_mpfr_arg1 (result, mpfr_y0, arg, format));
- CASE_FLT_FN (BUILT_IN_Y1):
+ CASE_CFN_Y1:
return (real_compare (GT_EXPR, arg, &dconst0)
&& do_mpfr_arg1 (result, mpfr_y1, arg, format));
- CASE_FLT_FN (BUILT_IN_FLOOR):
- if (!REAL_VALUE_ISNAN (*arg) || !flag_errno_math)
+ CASE_CFN_FLOOR:
+ CASE_CFN_FLOOR_FN:
+ if (!REAL_VALUE_ISSIGNALING_NAN (*arg))
{
real_floor (result, format, arg);
return true;
}
return false;
- CASE_FLT_FN (BUILT_IN_CEIL):
- if (!REAL_VALUE_ISNAN (*arg) || !flag_errno_math)
+ CASE_CFN_CEIL:
+ CASE_CFN_CEIL_FN:
+ if (!REAL_VALUE_ISSIGNALING_NAN (*arg))
{
real_ceil (result, format, arg);
return true;
}
return false;
- CASE_FLT_FN (BUILT_IN_TRUNC):
- real_trunc (result, format, arg);
- return true;
+ CASE_CFN_TRUNC:
+ CASE_CFN_TRUNC_FN:
+ if (!REAL_VALUE_ISSIGNALING_NAN (*arg))
+ {
+ real_trunc (result, format, arg);
+ return true;
+ }
+ return false;
- CASE_FLT_FN (BUILT_IN_ROUND):
- if (!REAL_VALUE_ISNAN (*arg) || !flag_errno_math)
+ CASE_CFN_ROUND:
+ CASE_CFN_ROUND_FN:
+ if (!REAL_VALUE_ISSIGNALING_NAN (*arg))
{
real_round (result, format, arg);
return true;
}
return false;
- CASE_FLT_FN (BUILT_IN_LOGB):
+ CASE_CFN_ROUNDEVEN:
+ CASE_CFN_ROUNDEVEN_FN:
+ if (!REAL_VALUE_ISSIGNALING_NAN (*arg))
+ {
+ real_roundeven (result, format, arg);
+ return true;
+ }
+ return false;
+
+ CASE_CFN_LOGB:
return fold_const_logb (result, arg, format);
- CASE_FLT_FN (BUILT_IN_SIGNIFICAND):
+ CASE_CFN_SIGNIFICAND:
return fold_const_significand (result, arg, format);
default:
significant bits in the result. Return true on success. */
static bool
-fold_const_call_ss (wide_int *result, built_in_function fn,
+fold_const_call_ss (wide_int *result, combined_fn fn,
const real_value *arg, unsigned int precision,
const real_format *format)
{
switch (fn)
{
- CASE_FLT_FN (BUILT_IN_SIGNBIT):
+ CASE_CFN_SIGNBIT:
if (real_isneg (arg))
*result = wi::one (precision);
else
*result = wi::zero (precision);
return true;
- CASE_FLT_FN (BUILT_IN_ILOGB):
+ CASE_CFN_ILOGB:
/* For ilogb we don't know FP_ILOGB0, so only handle normal values.
Proceed iff radix == 2. In GCC, normalized significands are in
the range [0.5, 1.0). We want the exponent as if they were
}
return false;
- CASE_FLT_FN (BUILT_IN_ICEIL):
- CASE_FLT_FN (BUILT_IN_LCEIL):
- CASE_FLT_FN (BUILT_IN_LLCEIL):
+ CASE_CFN_ICEIL:
+ CASE_CFN_LCEIL:
+ CASE_CFN_LLCEIL:
return fold_const_conversion (result, real_ceil, arg,
precision, format);
- CASE_FLT_FN (BUILT_IN_LFLOOR):
- CASE_FLT_FN (BUILT_IN_IFLOOR):
- CASE_FLT_FN (BUILT_IN_LLFLOOR):
+ CASE_CFN_LFLOOR:
+ CASE_CFN_IFLOOR:
+ CASE_CFN_LLFLOOR:
return fold_const_conversion (result, real_floor, arg,
precision, format);
- CASE_FLT_FN (BUILT_IN_IROUND):
- CASE_FLT_FN (BUILT_IN_LROUND):
- CASE_FLT_FN (BUILT_IN_LLROUND):
+ CASE_CFN_IROUND:
+ CASE_CFN_LROUND:
+ CASE_CFN_LLROUND:
return fold_const_conversion (result, real_round, arg,
precision, format);
- CASE_FLT_FN (BUILT_IN_IRINT):
- CASE_FLT_FN (BUILT_IN_LRINT):
- CASE_FLT_FN (BUILT_IN_LLRINT):
+ CASE_CFN_IRINT:
+ CASE_CFN_LRINT:
+ CASE_CFN_LLRINT:
/* Not yet folded to a constant. */
return false;
+ CASE_CFN_FINITE:
+ case CFN_BUILT_IN_FINITED32:
+ case CFN_BUILT_IN_FINITED64:
+ case CFN_BUILT_IN_FINITED128:
+ case CFN_BUILT_IN_ISFINITE:
+ *result = wi::shwi (real_isfinite (arg) ? 1 : 0, precision);
+ return true;
+
+ CASE_CFN_ISINF:
+ case CFN_BUILT_IN_ISINFD32:
+ case CFN_BUILT_IN_ISINFD64:
+ case CFN_BUILT_IN_ISINFD128:
+ if (real_isinf (arg))
+ *result = wi::shwi (arg->sign ? -1 : 1, precision);
+ else
+ *result = wi::shwi (0, precision);
+ return true;
+
+ CASE_CFN_ISNAN:
+ case CFN_BUILT_IN_ISNAND32:
+ case CFN_BUILT_IN_ISNAND64:
+ case CFN_BUILT_IN_ISNAND128:
+ *result = wi::shwi (real_isnan (arg) ? 1 : 0, precision);
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+/* Try to evaluate:
+
+ *RESULT = FN (ARG)
+
+ where ARG_TYPE is the type of ARG and PRECISION is the number of bits
+ in the result. Return true on success. */
+
+static bool
+fold_const_call_ss (wide_int *result, combined_fn fn, const wide_int_ref &arg,
+ unsigned int precision, tree arg_type)
+{
+ switch (fn)
+ {
+ CASE_CFN_FFS:
+ *result = wi::shwi (wi::ffs (arg), precision);
+ return true;
+
+ CASE_CFN_CLZ:
+ {
+ int tmp;
+ if (wi::ne_p (arg, 0))
+ tmp = wi::clz (arg);
+ else if (!CLZ_DEFINED_VALUE_AT_ZERO (SCALAR_INT_TYPE_MODE (arg_type),
+ tmp))
+ tmp = TYPE_PRECISION (arg_type);
+ *result = wi::shwi (tmp, precision);
+ return true;
+ }
+
+ CASE_CFN_CTZ:
+ {
+ int tmp;
+ if (wi::ne_p (arg, 0))
+ tmp = wi::ctz (arg);
+ else if (!CTZ_DEFINED_VALUE_AT_ZERO (SCALAR_INT_TYPE_MODE (arg_type),
+ tmp))
+ tmp = TYPE_PRECISION (arg_type);
+ *result = wi::shwi (tmp, precision);
+ return true;
+ }
+
+ CASE_CFN_CLRSB:
+ *result = wi::shwi (wi::clrsb (arg), precision);
+ return true;
+
+ CASE_CFN_POPCOUNT:
+ *result = wi::shwi (wi::popcount (arg), precision);
+ return true;
+
+ CASE_CFN_PARITY:
+ *result = wi::shwi (wi::parity (arg), precision);
+ return true;
+
+ case CFN_BUILT_IN_BSWAP16:
+ case CFN_BUILT_IN_BSWAP32:
+ case CFN_BUILT_IN_BSWAP64:
+ case CFN_BUILT_IN_BSWAP128:
+ *result = wide_int::from (arg, precision, TYPE_SIGN (arg_type)).bswap ();
+ return true;
+
default:
return false;
}
static bool
fold_const_call_cs (real_value *result_real, real_value *result_imag,
- built_in_function fn, const real_value *arg,
+ combined_fn fn, const real_value *arg,
const real_format *format)
{
switch (fn)
{
- CASE_FLT_FN (BUILT_IN_CEXPI):
+ CASE_CFN_CEXPI:
/* cexpi(x+yi) = cos(x)+sin(y)*i. */
return do_mpfr_sincos (result_imag, result_real, arg, format);
success. */
static bool
-fold_const_call_sc (real_value *result, built_in_function fn,
+fold_const_call_sc (real_value *result, combined_fn fn,
const real_value *arg_real, const real_value *arg_imag,
const real_format *format)
{
switch (fn)
{
- CASE_FLT_FN (BUILT_IN_CABS):
+ CASE_CFN_CABS:
return do_mpfr_arg2 (result, mpfr_hypot, arg_real, arg_imag, format);
default:
static bool
fold_const_call_cc (real_value *result_real, real_value *result_imag,
- built_in_function fn, const real_value *arg_real,
+ combined_fn fn, const real_value *arg_real,
const real_value *arg_imag, const real_format *format)
{
switch (fn)
{
- CASE_FLT_FN (BUILT_IN_CCOS):
+ CASE_CFN_CCOS:
return do_mpc_arg1 (result_real, result_imag, mpc_cos,
arg_real, arg_imag, format);
- CASE_FLT_FN (BUILT_IN_CCOSH):
+ CASE_CFN_CCOSH:
return do_mpc_arg1 (result_real, result_imag, mpc_cosh,
arg_real, arg_imag, format);
- CASE_FLT_FN (BUILT_IN_CPROJ):
+ CASE_CFN_CPROJ:
if (real_isinf (arg_real) || real_isinf (arg_imag))
{
real_inf (result_real);
}
return true;
- CASE_FLT_FN (BUILT_IN_CSIN):
+ CASE_CFN_CSIN:
return do_mpc_arg1 (result_real, result_imag, mpc_sin,
arg_real, arg_imag, format);
- CASE_FLT_FN (BUILT_IN_CSINH):
+ CASE_CFN_CSINH:
return do_mpc_arg1 (result_real, result_imag, mpc_sinh,
arg_real, arg_imag, format);
- CASE_FLT_FN (BUILT_IN_CTAN):
+ CASE_CFN_CTAN:
return do_mpc_arg1 (result_real, result_imag, mpc_tan,
arg_real, arg_imag, format);
- CASE_FLT_FN (BUILT_IN_CTANH):
+ CASE_CFN_CTANH:
return do_mpc_arg1 (result_real, result_imag, mpc_tanh,
arg_real, arg_imag, format);
- CASE_FLT_FN (BUILT_IN_CLOG):
+ CASE_CFN_CLOG:
return do_mpc_arg1 (result_real, result_imag, mpc_log,
arg_real, arg_imag, format);
- CASE_FLT_FN (BUILT_IN_CSQRT):
+ CASE_CFN_CSQRT:
return do_mpc_arg1 (result_real, result_imag, mpc_sqrt,
arg_real, arg_imag, format);
- CASE_FLT_FN (BUILT_IN_CASIN):
+ CASE_CFN_CASIN:
return do_mpc_arg1 (result_real, result_imag, mpc_asin,
arg_real, arg_imag, format);
- CASE_FLT_FN (BUILT_IN_CACOS):
+ CASE_CFN_CACOS:
return do_mpc_arg1 (result_real, result_imag, mpc_acos,
arg_real, arg_imag, format);
- CASE_FLT_FN (BUILT_IN_CATAN):
+ CASE_CFN_CATAN:
return do_mpc_arg1 (result_real, result_imag, mpc_atan,
arg_real, arg_imag, format);
- CASE_FLT_FN (BUILT_IN_CASINH):
+ CASE_CFN_CASINH:
return do_mpc_arg1 (result_real, result_imag, mpc_asinh,
arg_real, arg_imag, format);
- CASE_FLT_FN (BUILT_IN_CACOSH):
+ CASE_CFN_CACOSH:
return do_mpc_arg1 (result_real, result_imag, mpc_acosh,
arg_real, arg_imag, format);
- CASE_FLT_FN (BUILT_IN_CATANH):
+ CASE_CFN_CATANH:
return do_mpc_arg1 (result_real, result_imag, mpc_atanh,
arg_real, arg_imag, format);
- CASE_FLT_FN (BUILT_IN_CEXP):
+ CASE_CFN_CEXP:
return do_mpc_arg1 (result_real, result_imag, mpc_exp,
arg_real, arg_imag, format);
}
}
-/* Try to fold FN (ARG) to a constant. Return the constant on success,
- otherwise return null. TYPE is the type of the return value. */
+/* Subroutine of fold_const_call, with the same interface. Handle cases
+ where the arguments and result are numerical. */
-tree
-fold_const_call (built_in_function fn, tree type, tree arg)
+static tree
+fold_const_call_1 (combined_fn fn, tree type, tree arg)
{
machine_mode mode = TYPE_MODE (type);
machine_mode arg_mode = TYPE_MODE (TREE_TYPE (arg));
+ if (integer_cst_p (arg))
+ {
+ if (SCALAR_INT_MODE_P (mode))
+ {
+ wide_int result;
+ if (fold_const_call_ss (&result, fn, wi::to_wide (arg),
+ TYPE_PRECISION (type), TREE_TYPE (arg)))
+ return wide_int_to_tree (type, result);
+ }
+ return NULL_TREE;
+ }
+
if (real_cst_p (arg))
{
gcc_checking_assert (SCALAR_FLOAT_MODE_P (arg_mode));
return NULL_TREE;
}
+/* Try to fold FN (ARG) to a constant.  Return the constant on success,
+   otherwise return null.  TYPE is the type of the return value.  */
+
+tree
+fold_const_call (combined_fn fn, tree type, tree arg)
+{
+  switch (fn)
+    {
+    case CFN_BUILT_IN_STRLEN:
+      /* Fold strlen of a string literal using the host strlen.  */
+      if (const char *str = c_getstr (arg))
+	return build_int_cst (type, strlen (str));
+      return NULL_TREE;
+
+    /* nan ("n-char-sequence") and the type-specific variants:
+       build a quiet NaN from the string argument.  */
+    CASE_CFN_NAN:
+    CASE_FLT_FN_FLOATN_NX (CFN_BUILT_IN_NAN):
+    case CFN_BUILT_IN_NAND32:
+    case CFN_BUILT_IN_NAND64:
+    case CFN_BUILT_IN_NAND128:
+      return fold_const_builtin_nan (type, arg, true);
+
+    /* nans (...) variants: likewise, but a signalling NaN.  */
+    CASE_CFN_NANS:
+    CASE_FLT_FN_FLOATN_NX (CFN_BUILT_IN_NANS):
+    case CFN_BUILT_IN_NANSD32:
+    case CFN_BUILT_IN_NANSD64:
+    case CFN_BUILT_IN_NANSD128:
+      return fold_const_builtin_nan (type, arg, false);
+
+    /* Vector reductions fold element-by-element with the matching
+       binary tree code.  */
+    case CFN_REDUC_PLUS:
+      return fold_const_reduction (type, arg, PLUS_EXPR);
+
+    case CFN_REDUC_MAX:
+      return fold_const_reduction (type, arg, MAX_EXPR);
+
+    case CFN_REDUC_MIN:
+      return fold_const_reduction (type, arg, MIN_EXPR);
+
+    case CFN_REDUC_AND:
+      return fold_const_reduction (type, arg, BIT_AND_EXPR);
+
+    case CFN_REDUC_IOR:
+      return fold_const_reduction (type, arg, BIT_IOR_EXPR);
+
+    case CFN_REDUC_XOR:
+      return fold_const_reduction (type, arg, BIT_XOR_EXPR);
+
+    case CFN_VEC_CONVERT:
+      return fold_const_vec_convert (type, arg);
+
+    default:
+      /* Everything else goes through the numerical folder.  */
+      return fold_const_call_1 (fn, type, arg);
+    }
+}
+
+/* Fold a call to IFN_FOLD_LEFT_<CODE> (ARG0, ARG1), returning a value
+   of type TYPE.  */
+
+static tree
+fold_const_fold_left (tree type, tree arg0, tree arg1, tree_code code)
+{
+  unsigned HOST_WIDE_INT count;
+  if (TREE_CODE (arg1) != VECTOR_CST
+      || !VECTOR_CST_NELTS (arg1).is_constant (&count))
+    return NULL_TREE;
+
+  /* Fold ARG0 with each vector element in turn, left to right.  */
+  tree acc = arg0;
+  for (unsigned HOST_WIDE_INT i = 0; i < count; ++i)
+    {
+      acc = const_binop (code, type, acc, VECTOR_CST_ELT (arg1, i));
+      /* Stop as soon as a step fails to fold to a constant.  */
+      if (acc == NULL_TREE || !CONSTANT_CLASS_P (acc))
+	return NULL_TREE;
+    }
+  return acc;
+}
+
/* Try to evaluate:
*RESULT = FN (*ARG0, *ARG1)
in format FORMAT. Return true on success. */
static bool
-fold_const_call_sss (real_value *result, built_in_function fn,
+fold_const_call_sss (real_value *result, combined_fn fn,
const real_value *arg0, const real_value *arg1,
const real_format *format)
{
switch (fn)
{
- CASE_FLT_FN (BUILT_IN_DREM):
- CASE_FLT_FN (BUILT_IN_REMAINDER):
+ CASE_CFN_DREM:
+ CASE_CFN_REMAINDER:
return do_mpfr_arg2 (result, mpfr_remainder, arg0, arg1, format);
- CASE_FLT_FN (BUILT_IN_ATAN2):
+ CASE_CFN_ATAN2:
return do_mpfr_arg2 (result, mpfr_atan2, arg0, arg1, format);
- CASE_FLT_FN (BUILT_IN_FDIM):
+ CASE_CFN_FDIM:
return do_mpfr_arg2 (result, mpfr_dim, arg0, arg1, format);
- CASE_FLT_FN (BUILT_IN_HYPOT):
+ CASE_CFN_FMOD:
+ return do_mpfr_arg2 (result, mpfr_fmod, arg0, arg1, format);
+
+ CASE_CFN_HYPOT:
return do_mpfr_arg2 (result, mpfr_hypot, arg0, arg1, format);
- CASE_FLT_FN (BUILT_IN_COPYSIGN):
+ CASE_CFN_COPYSIGN:
+ CASE_CFN_COPYSIGN_FN:
*result = *arg0;
real_copysign (result, arg1);
return true;
- CASE_FLT_FN (BUILT_IN_FMIN):
+ CASE_CFN_FMIN:
+ CASE_CFN_FMIN_FN:
return do_mpfr_arg2 (result, mpfr_min, arg0, arg1, format);
- CASE_FLT_FN (BUILT_IN_FMAX):
+ CASE_CFN_FMAX:
+ CASE_CFN_FMAX_FN:
return do_mpfr_arg2 (result, mpfr_max, arg0, arg1, format);
- CASE_FLT_FN (BUILT_IN_POW):
+ CASE_CFN_POW:
return fold_const_pow (result, arg0, arg1, format);
+ CASE_CFN_NEXTAFTER:
+ CASE_CFN_NEXTTOWARD:
+ return fold_const_nextafter (result, arg0, arg1, format);
+
default:
return false;
}
success. */
static bool
-fold_const_call_sss (real_value *result, built_in_function fn,
+fold_const_call_sss (real_value *result, combined_fn fn,
const real_value *arg0, const wide_int_ref &arg1,
const real_format *format)
{
switch (fn)
{
- CASE_FLT_FN (BUILT_IN_LDEXP):
+ CASE_CFN_LDEXP:
return fold_const_builtin_load_exponent (result, arg0, arg1, format);
- CASE_FLT_FN (BUILT_IN_SCALBN):
- CASE_FLT_FN (BUILT_IN_SCALBLN):
+ CASE_CFN_SCALBN:
+ CASE_CFN_SCALBLN:
return (format->b == 2
&& fold_const_builtin_load_exponent (result, arg0, arg1,
format));
- CASE_FLT_FN (BUILT_IN_POWI):
+ CASE_CFN_POWI:
+ /* Avoid the folding if flag_signaling_nans is on and
+ operand is a signaling NaN. */
+ if (!flag_unsafe_math_optimizations
+ && flag_signaling_nans
+ && REAL_VALUE_ISSIGNALING_NAN (*arg0))
+ return false;
+
real_powi (result, format, arg0, arg1.to_shwi ());
return true;
success. */
static bool
-fold_const_call_sss (real_value *result, built_in_function fn,
+fold_const_call_sss (real_value *result, combined_fn fn,
const wide_int_ref &arg0, const real_value *arg1,
const real_format *format)
{
switch (fn)
{
- CASE_FLT_FN (BUILT_IN_JN):
+ CASE_CFN_JN:
return do_mpfr_arg2 (result, mpfr_jn, arg0, arg1, format);
- CASE_FLT_FN (BUILT_IN_YN):
+ CASE_CFN_YN:
return (real_compare (GT_EXPR, arg1, &dconst0)
&& do_mpfr_arg2 (result, mpfr_yn, arg0, arg1, format));
static bool
fold_const_call_ccc (real_value *result_real, real_value *result_imag,
- built_in_function fn, const real_value *arg0_real,
+ combined_fn fn, const real_value *arg0_real,
const real_value *arg0_imag, const real_value *arg1_real,
const real_value *arg1_imag, const real_format *format)
{
switch (fn)
{
- CASE_FLT_FN (BUILT_IN_CPOW):
+ CASE_CFN_CPOW:
return do_mpc_arg2 (result_real, result_imag, mpc_pow,
arg0_real, arg0_imag, arg1_real, arg1_imag, format);
}
}
-/* Try to fold FN (ARG0, ARG1) to a constant. Return the constant on success,
- otherwise return null. TYPE is the type of the return value. */
+/* Subroutine of fold_const_call, with the same interface. Handle cases
+ where the arguments and result are numerical. */
-tree
-fold_const_call (built_in_function fn, tree type, tree arg0, tree arg1)
+static tree
+fold_const_call_1 (combined_fn fn, tree type, tree arg0, tree arg1)
{
machine_mode mode = TYPE_MODE (type);
machine_mode arg0_mode = TYPE_MODE (TREE_TYPE (arg0));
machine_mode arg1_mode = TYPE_MODE (TREE_TYPE (arg1));
- if (arg0_mode == arg1_mode
+ if (mode == arg0_mode
&& real_cst_p (arg0)
&& real_cst_p (arg1))
{
gcc_checking_assert (SCALAR_FLOAT_MODE_P (arg0_mode));
- if (mode == arg0_mode)
+ REAL_VALUE_TYPE result;
+ if (arg0_mode == arg1_mode)
{
/* real, real -> real. */
- REAL_VALUE_TYPE result;
if (fold_const_call_sss (&result, fn, TREE_REAL_CST_PTR (arg0),
TREE_REAL_CST_PTR (arg1),
REAL_MODE_FORMAT (mode)))
return build_real (type, result);
}
+ else if (arg1_mode == TYPE_MODE (long_double_type_node))
+ switch (fn)
+ {
+ CASE_CFN_NEXTTOWARD:
+ /* real, long double -> real. */
+ if (fold_const_call_sss (&result, fn, TREE_REAL_CST_PTR (arg0),
+ TREE_REAL_CST_PTR (arg1),
+ REAL_MODE_FORMAT (mode)))
+ return build_real (type, result);
+ break;
+ default:
+ break;
+ }
return NULL_TREE;
}
/* real, int -> real. */
REAL_VALUE_TYPE result;
if (fold_const_call_sss (&result, fn, TREE_REAL_CST_PTR (arg0),
- arg1, REAL_MODE_FORMAT (mode)))
+ wi::to_wide (arg1),
+ REAL_MODE_FORMAT (mode)))
return build_real (type, result);
}
return NULL_TREE;
{
/* int, real -> real. */
REAL_VALUE_TYPE result;
- if (fold_const_call_sss (&result, fn, arg0,
+ if (fold_const_call_sss (&result, fn, wi::to_wide (arg0),
TREE_REAL_CST_PTR (arg1),
REAL_MODE_FORMAT (mode)))
return build_real (type, result);
return NULL_TREE;
}
+/* Try to fold FN (ARG0, ARG1) to a constant.  Return the constant on success,
+   otherwise return null.  TYPE is the type of the return value.  */
+
+tree
+fold_const_call (combined_fn fn, tree type, tree arg0, tree arg1)
+{
+  const char *p0, *p1;
+  char c;
+  switch (fn)
+    {
+    case CFN_BUILT_IN_STRSPN:
+      /* Both arguments must be string literals to fold on the host.  */
+      if ((p0 = c_getstr (arg0)) && (p1 = c_getstr (arg1)))
+	return build_int_cst (type, strspn (p0, p1));
+      return NULL_TREE;
+
+    case CFN_BUILT_IN_STRCSPN:
+      if ((p0 = c_getstr (arg0)) && (p1 = c_getstr (arg1)))
+	return build_int_cst (type, strcspn (p0, p1));
+      return NULL_TREE;
+
+    case CFN_BUILT_IN_STRCMP:
+      /* Canonicalize the host strcmp result to -1/0/1.  */
+      if ((p0 = c_getstr (arg0)) && (p1 = c_getstr (arg1)))
+	return build_cmp_result (type, strcmp (p0, p1));
+      return NULL_TREE;
+
+    case CFN_BUILT_IN_STRCASECMP:
+      /* Only fold when the case-sensitive comparison already says
+	 "equal"; then strcasecmp must be 0 too.  Other results may
+	 depend on host/locale behaviour, so leave them to runtime.  */
+      if ((p0 = c_getstr (arg0)) && (p1 = c_getstr (arg1)))
+	{
+	  int r = strcmp (p0, p1);
+	  if (r == 0)
+	    return build_cmp_result (type, r);
+	}
+      return NULL_TREE;
+
+    case CFN_BUILT_IN_INDEX:
+    case CFN_BUILT_IN_STRCHR:
+      if ((p0 = c_getstr (arg0)) && target_char_cst_p (arg1, &c))
+	{
+	  const char *r = strchr (p0, c);
+	  if (r == NULL)
+	    return build_int_cst (type, 0);
+	  /* Fold to ARG0 plus the offset of the match.  */
+	  return fold_convert (type,
+			       fold_build_pointer_plus_hwi (arg0, r - p0));
+	}
+      return NULL_TREE;
+
+    case CFN_BUILT_IN_RINDEX:
+    case CFN_BUILT_IN_STRRCHR:
+      if ((p0 = c_getstr (arg0)) && target_char_cst_p (arg1, &c))
+	{
+	  const char *r = strrchr (p0, c);
+	  if (r == NULL)
+	    return build_int_cst (type, 0);
+	  return fold_convert (type,
+			       fold_build_pointer_plus_hwi (arg0, r - p0));
+	}
+      return NULL_TREE;
+
+    case CFN_BUILT_IN_STRSTR:
+      if ((p1 = c_getstr (arg1)))
+	{
+	  if ((p0 = c_getstr (arg0)))
+	    {
+	      const char *r = strstr (p0, p1);
+	      if (r == NULL)
+		return build_int_cst (type, 0);
+	      return fold_convert (type,
+				   fold_build_pointer_plus_hwi (arg0, r - p0));
+	    }
+	  /* An empty needle matches at ARG0 itself, even when ARG0
+	     isn't a literal.  */
+	  if (*p1 == '\0')
+	    return fold_convert (type, arg0);
+	}
+      return NULL_TREE;
+
+    case CFN_FOLD_LEFT_PLUS:
+      return fold_const_fold_left (type, arg0, arg1, PLUS_EXPR);
+
+    default:
+      /* Numerical cases are handled separately.  */
+      return fold_const_call_1 (fn, type, arg0, arg1);
+    }
+}
+
/* Try to evaluate:
*RESULT = FN (*ARG0, *ARG1, *ARG2)
in format FORMAT. Return true on success. */
static bool
-fold_const_call_ssss (real_value *result, built_in_function fn,
+fold_const_call_ssss (real_value *result, combined_fn fn,
const real_value *arg0, const real_value *arg1,
const real_value *arg2, const real_format *format)
{
switch (fn)
{
- CASE_FLT_FN (BUILT_IN_FMA):
+ CASE_CFN_FMA:
+ CASE_CFN_FMA_FN:
return do_mpfr_arg3 (result, mpfr_fma, arg0, arg1, arg2, format);
+ /* fms (a, b, c) = fma (a, b, -c). */
+ case CFN_FMS:
+ {
+ real_value new_arg2 = real_value_negate (arg2);
+ return do_mpfr_arg3 (result, mpfr_fma, arg0, arg1, &new_arg2, format);
+ }
+
+ /* fnma (a, b, c) = fma (-a, b, c). */
+ case CFN_FNMA:
+ {
+ real_value new_arg0 = real_value_negate (arg0);
+ return do_mpfr_arg3 (result, mpfr_fma, &new_arg0, arg1, arg2, format);
+ }
+
+ /* fnms (a, b, c) = fma (-a, b, -c). */
+ case CFN_FNMS:
+ {
+ real_value new_arg0 = real_value_negate (arg0);
+ real_value new_arg2 = real_value_negate (arg2);
+ return do_mpfr_arg3 (result, mpfr_fma, &new_arg0, arg1,
+ &new_arg2, format);
+ }
+
default:
return false;
}
}
-/* Try to fold FN (ARG0, ARG1, ARG2) to a constant. Return the constant on
- success, otherwise return null. TYPE is the type of the return value. */
+/* Subroutine of fold_const_call, with the same interface. Handle cases
+ where the arguments and result are numerical; fold_const_call itself
+ handles the string and raw-memory built-ins before falling back
+ here. */
-tree
-fold_const_call (built_in_function fn, tree type, tree arg0, tree arg1,
- tree arg2)
+static tree
+fold_const_call_1 (combined_fn fn, tree type, tree arg0, tree arg1, tree arg2)
{
machine_mode mode = TYPE_MODE (type);
machine_mode arg0_mode = TYPE_MODE (TREE_TYPE (arg0));
return NULL_TREE;
}
-/* Fold a fma operation with arguments ARG[012]. */
+/* Try to fold FN (ARG0, ARG1, ARG2) to a constant. Return the constant on
+ success, otherwise return null. TYPE is the type of the return value. */
tree
-fold_fma (location_t, tree type, tree arg0, tree arg1, tree arg2)
+fold_const_call (combined_fn fn, tree type, tree arg0, tree arg1, tree arg2)
{
- REAL_VALUE_TYPE result;
- if (real_cst_p (arg0)
- && real_cst_p (arg1)
- && real_cst_p (arg2)
- && do_mpfr_arg3 (&result, mpfr_fma, TREE_REAL_CST_PTR (arg0),
- TREE_REAL_CST_PTR (arg1), TREE_REAL_CST_PTR (arg2),
- REAL_MODE_FORMAT (TYPE_MODE (type))))
- return build_real (type, result);
+ const char *p0, *p1;
+ char c;
+ unsigned HOST_WIDE_INT s0, s1, s2 = 0;
+ switch (fn)
+ {
+ case CFN_BUILT_IN_STRNCMP:
+ /* The length argument must be a known size_t constant (S2). */
+ if (!size_t_cst_p (arg2, &s2))
+ return NULL_TREE;
+ /* Comparing zero bytes always yields 0, provided discarding the
+ string operands loses no side effects. */
+ if (s2 == 0
+ && !TREE_SIDE_EFFECTS (arg0)
+ && !TREE_SIDE_EFFECTS (arg1))
+ return build_int_cst (type, 0);
+ /* Clamp S2 to SIZE_MAX for hosts on which unsigned HOST_WIDE_INT
+ is wider than size_t. */
+ else if ((p0 = c_getstr (arg0)) && (p1 = c_getstr (arg1)))
+ return build_int_cst (type, strncmp (p0, p1, MIN (s2, SIZE_MAX)));
+ return NULL_TREE;
- return NULL_TREE;
+ /* As with strcasecmp above, fold only the equality case: a zero
+ strncmp result implies a zero strncasecmp result, but nothing
+ else can be inferred about the target's case handling. */
+ case CFN_BUILT_IN_STRNCASECMP:
+ if (!size_t_cst_p (arg2, &s2))
+ return NULL_TREE;
+ if (s2 == 0
+ && !TREE_SIDE_EFFECTS (arg0)
+ && !TREE_SIDE_EFFECTS (arg1))
+ return build_int_cst (type, 0);
+ else if ((p0 = c_getstr (arg0))
+ && (p1 = c_getstr (arg1))
+ && strncmp (p0, p1, MIN (s2, SIZE_MAX)) == 0)
+ return build_int_cst (type, 0);
+ return NULL_TREE;
+
+ case CFN_BUILT_IN_BCMP:
+ case CFN_BUILT_IN_MEMCMP:
+ if (!size_t_cst_p (arg2, &s2))
+ return NULL_TREE;
+ if (s2 == 0
+ && !TREE_SIDE_EFFECTS (arg0)
+ && !TREE_SIDE_EFFECTS (arg1))
+ return build_int_cst (type, 0);
+ /* Fold only if both byte representations are available and each
+ covers at least the S2 bytes being compared. */
+ if ((p0 = getbyterep (arg0, &s0))
+ && (p1 = getbyterep (arg1, &s1))
+ && s2 <= s0
+ && s2 <= s1)
+ return build_cmp_result (type, memcmp (p0, p1, s2));
+ return NULL_TREE;
+
+ case CFN_BUILT_IN_MEMCHR:
+ if (!size_t_cst_p (arg2, &s2))
+ return NULL_TREE;
+ /* memchr with zero length finds nothing: fold to a null pointer
+ if the unused operands carry no side effects. */
+ if (s2 == 0
+ && !TREE_SIDE_EFFECTS (arg0)
+ && !TREE_SIDE_EFFECTS (arg1))
+ return build_int_cst (type, 0);
+ if ((p0 = getbyterep (arg0, &s0))
+ && s2 <= s0
+ && target_char_cst_p (arg1, &c))
+ {
+ const char *r = (const char *) memchr (p0, c, s2);
+ /* No match folds to a null pointer of TYPE. */
+ if (r == NULL)
+ return build_int_cst (type, 0);
+ return fold_convert (type,
+ fold_build_pointer_plus_hwi (arg0, r - p0));
+ }
+ return NULL_TREE;
+
+ case CFN_WHILE_ULT:
+ {
+ /* Fold when both bounds are compile-time (poly_int)
+ constants. */
+ poly_uint64 parg0, parg1;
+ if (poly_int_tree_p (arg0, &parg0) && poly_int_tree_p (arg1, &parg1))
+ return fold_while_ult (type, parg0, parg1);
+ return NULL_TREE;
+ }
+
+ default:
+ /* Numerical three-argument cases are handled by the subroutine. */
+ return fold_const_call_1 (fn, type, arg0, arg1, arg2);
+ }
}