#include "langhooks.h"
#include "md5.h"
#include "gimple.h"
-#include "tree-flow.h"
+#include "tree-dfa.h"
/* Nonzero if we are folding constants inside an initializer; zero
otherwise. */
HOST_WIDE_INT *,
enum machine_mode *, int *, int *,
tree *, tree *);
-static int all_ones_mask_p (const_tree, int);
static tree sign_bit_p (tree, const_tree);
static int simple_operand_p (const_tree);
static bool simple_operand_p_2 (tree);
return x;
}
\f
-/* If ARG2 divides ARG1 with zero remainder, carries out the division
- of type CODE and returns the quotient.
- Otherwise returns NULL_TREE. */
+/* If ARG2 divides ARG1 with zero remainder, carries out the exact
+ division and returns the quotient. Otherwise returns
+ NULL_TREE. */
tree
-div_if_zero_remainder (enum tree_code code, const_tree arg1, const_tree arg2)
+div_if_zero_remainder (const_tree arg1, const_tree arg2)
{
- double_int quo, rem;
- int uns;
+ wide_int quo;
+ wide_int warg1 = arg1;
+ wide_int warg2 = arg2;
+ signop sgn = TYPE_SIGN (TREE_TYPE (arg1));
+ signop sgn2 = TYPE_SIGN (TREE_TYPE (arg2));
- /* The sign of the division is according to operand two, that
- does the correct thing for POINTER_PLUS_EXPR where we want
- a signed division. */
- uns = TYPE_UNSIGNED (TREE_TYPE (arg2));
-
- quo = tree_to_double_int (arg1).divmod (tree_to_double_int (arg2),
- uns, code, &rem);
+ if (sgn != sgn2)
+ {
+ /* When signedness mismatches, we promote the unsigned value to
+ a signed value. We preserve the value by extending the
+ precision by 1 bit, iff the top bit is set. */
+ if (sgn == UNSIGNED)
+ {
+ if (wi::neg_p (warg1))
+ warg1 = wide_int::from (warg1, warg1.get_precision () + 1, sgn);
+ sgn = SIGNED;
+ }
+ else
+ {
+ if (wi::neg_p (warg2))
+ warg2 = wide_int::from (warg2, warg2.get_precision () + 1, sgn2);
+ }
+ }
- if (rem.is_zero ())
- return build_int_cst_wide (TREE_TYPE (arg1), quo.low, quo.high);
+ if (wi::multiple_of_p (warg1, warg2, sgn, &quo))
+ return wide_int_to_tree (TREE_TYPE (arg1), quo);
return NULL_TREE;
}
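To make the contract concrete, here is a minimal plain-C sketch of the new interface (the helper name and the use of host long long in place of wide_int are illustrative assumptions; the precision-extension step above has no analogue at a fixed 64-bit width):

#include <stdbool.h>

/* Illustrative sketch only: return true and store the quotient in *QUO
   iff ARG2 divides ARG1 with zero remainder.  */
static bool
div_if_zero_remainder_sketch (long long arg1, long long arg2,
                              long long *quo)
{
  if (arg2 == 0 || arg1 % arg2 != 0)
    return false;
  *quo = arg1 / arg2;
  return true;
}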
bool
may_negate_without_overflow_p (const_tree t)
{
- unsigned HOST_WIDE_INT val;
- unsigned int prec;
tree type;
gcc_assert (TREE_CODE (t) == INTEGER_CST);
if (TYPE_UNSIGNED (type))
return false;
- prec = TYPE_PRECISION (type);
- if (prec > HOST_BITS_PER_WIDE_INT)
- {
- if (TREE_INT_CST_LOW (t) != 0)
- return true;
- prec -= HOST_BITS_PER_WIDE_INT;
- val = TREE_INT_CST_HIGH (t);
- }
- else
- val = TREE_INT_CST_LOW (t);
- if (prec < HOST_BITS_PER_WIDE_INT)
- val &= ((unsigned HOST_WIDE_INT) 1 << prec) - 1;
- return val != ((unsigned HOST_WIDE_INT) 1 << (prec - 1));
+ return !wi::only_sign_bit_p (t);
}
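wi::only_sign_bit_p (t) is true exactly when T has only its sign bit set, i.e. is the most negative value of its type, the one signed value whose negation is not representable. A hedged standalone illustration:

#include <stdint.h>
#include <stdbool.h>

/* Illustrative sketch only: negating an N-bit signed value overflows
   exactly when the value is the lone sign-bit pattern, e.g. -128 for
   int8_t, because +128 does not fit.  */
static bool
may_negate_without_overflow_sketch (int8_t v)
{
  return v != INT8_MIN;
}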
/* Determine whether an expression T can be cheaply negated using
return negate_expr_p (TREE_REALPART (t))
&& negate_expr_p (TREE_IMAGPART (t));
+ case VECTOR_CST:
+ {
+ if (FLOAT_TYPE_P (TREE_TYPE (type)) || TYPE_OVERFLOW_WRAPS (type))
+ return true;
+
+ int count = TYPE_VECTOR_SUBPARTS (type), i;
+
+ for (i = 0; i < count; i++)
+ if (!negate_expr_p (VECTOR_CST_ELT (t, i)))
+ return false;
+
+ return true;
+ }
+
case COMPLEX_EXPR:
return negate_expr_p (TREE_OPERAND (t, 0))
&& negate_expr_p (TREE_OPERAND (t, 1));
and actually traps on some architectures. But if overflow is
undefined, we can negate, because - (INT_MIN / 1) is an
overflow. */
- if (INTEGRAL_TYPE_P (TREE_TYPE (t))
- && !TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (t)))
- break;
- return negate_expr_p (TREE_OPERAND (t, 1))
- || negate_expr_p (TREE_OPERAND (t, 0));
+ if (INTEGRAL_TYPE_P (TREE_TYPE (t)))
+ {
+ if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (t)))
+ break;
+ /* If overflow is undefined then we have to be careful because
+ we ask whether it's ok to associate the negate with the
+ division, which is not ok for example for
+ -((a - b) / c), where (-(a - b)) / c may invoke undefined
+ overflow because of negating INT_MIN.  So do not use
+ negate_expr_p here but open-code the two important cases. */
+ if (TREE_CODE (TREE_OPERAND (t, 0)) == NEGATE_EXPR
+ || (TREE_CODE (TREE_OPERAND (t, 0)) == INTEGER_CST
+ && may_negate_without_overflow_p (TREE_OPERAND (t, 0))))
+ return true;
+ }
+ else if (negate_expr_p (TREE_OPERAND (t, 0)))
+ return true;
+ return negate_expr_p (TREE_OPERAND (t, 1));
case NOP_EXPR:
/* Negate -((double)float) as (double)(-float). */
break;
case RSHIFT_EXPR:
- /* Optimize -((int)x >> 31) into (unsigned)x >> 31. */
+ /* Optimize -((int)x >> 31) into (unsigned)x >> 31 for int. */
if (TREE_CODE (TREE_OPERAND (t, 1)) == INTEGER_CST)
{
tree op1 = TREE_OPERAND (t, 1);
- if (TREE_INT_CST_HIGH (op1) == 0
- && (unsigned HOST_WIDE_INT) (TYPE_PRECISION (type) - 1)
- == TREE_INT_CST_LOW (op1))
+ if (wi::eq_p (op1, TYPE_PRECISION (type) - 1))
return true;
}
break;
}
break;
+ case VECTOR_CST:
+ {
+ int count = TYPE_VECTOR_SUBPARTS (type), i;
+ tree *elts = XALLOCAVEC (tree, count);
+
+ for (i = 0; i < count; i++)
+ {
+ elts[i] = fold_negate_expr (loc, VECTOR_CST_ELT (t, i));
+ if (elts[i] == NULL_TREE)
+ return NULL_TREE;
+ }
+
+ return build_vector (type, elts);
+ }
+
case COMPLEX_EXPR:
if (negate_expr_p (t))
return fold_build2_loc (loc, COMPLEX_EXPR, type,
return fold_build2_loc (loc, TREE_CODE (t), type,
TREE_OPERAND (t, 0), negate_expr (tem));
}
+ /* If overflow is undefined then we have to be careful because
+ we ask whether it's ok to associate the negate with the
+ division, which is not ok for example for
+ -((a - b) / c), where (-(a - b)) / c may invoke undefined
+ overflow because of negating INT_MIN.  So do not use
+ negate_expr_p here but open-code the two important cases. */
tem = TREE_OPERAND (t, 0);
- if (negate_expr_p (tem))
- {
- if (INTEGRAL_TYPE_P (type)
- && (TREE_CODE (tem) != INTEGER_CST
- || tree_int_cst_equal (tem, TYPE_MIN_VALUE (type))))
- fold_overflow_warning (warnmsg, WARN_STRICT_OVERFLOW_MISC);
- return fold_build2_loc (loc, TREE_CODE (t), type,
- negate_expr (tem), TREE_OPERAND (t, 1));
- }
+ if ((INTEGRAL_TYPE_P (type)
+ && (TREE_CODE (tem) == NEGATE_EXPR
+ || (TREE_CODE (tem) == INTEGER_CST
+ && may_negate_without_overflow_p (tem))))
+ || !INTEGRAL_TYPE_P (type))
+ return fold_build2_loc (loc, TREE_CODE (t), type,
+ negate_expr (tem), TREE_OPERAND (t, 1));
}
break;
break;
case RSHIFT_EXPR:
- /* Optimize -((int)x >> 31) into (unsigned)x >> 31. */
+ /* Optimize -((int)x >> 31) into (unsigned)x >> 31 for int. */
if (TREE_CODE (TREE_OPERAND (t, 1)) == INTEGER_CST)
{
tree op1 = TREE_OPERAND (t, 1);
- if (TREE_INT_CST_HIGH (op1) == 0
- && (unsigned HOST_WIDE_INT) (TYPE_PRECISION (type) - 1)
- == TREE_INT_CST_LOW (op1))
+ if (wi::eq_p (op1, TYPE_PRECISION (type) - 1))
{
tree ntype = TYPE_UNSIGNED (type)
? signed_type_for (type)
to evaluate CODE at compile-time. */
static tree
-int_const_binop_1 (enum tree_code code, const_tree arg1, const_tree arg2,
+int_const_binop_1 (enum tree_code code, const_tree arg1, const_tree parg2,
int overflowable)
{
- double_int op1, op2, res, tmp;
+ wide_int res;
tree t;
tree type = TREE_TYPE (arg1);
- bool uns = TYPE_UNSIGNED (type);
+ signop sign = TYPE_SIGN (type);
bool overflow = false;
- op1 = tree_to_double_int (arg1);
- op2 = tree_to_double_int (arg2);
+ wide_int arg2 = wide_int::from (parg2, TYPE_PRECISION (type),
+ TYPE_SIGN (TREE_TYPE (parg2)));
switch (code)
{
case BIT_IOR_EXPR:
- res = op1 | op2;
+ res = wi::bit_or (arg1, arg2);
break;
case BIT_XOR_EXPR:
- res = op1 ^ op2;
+ res = wi::bit_xor (arg1, arg2);
break;
case BIT_AND_EXPR:
- res = op1 & op2;
+ res = wi::bit_and (arg1, arg2);
break;
case RSHIFT_EXPR:
- res = op1.rshift (op2.to_shwi (), TYPE_PRECISION (type), !uns);
- break;
-
case LSHIFT_EXPR:
- /* It's unclear from the C standard whether shifts can overflow.
- The following code ignores overflow; perhaps a C standard
- interpretation ruling is needed. */
- res = op1.lshift (op2.to_shwi (), TYPE_PRECISION (type), !uns);
+ if (wi::neg_p (arg2))
+ {
+ arg2 = -arg2;
+ if (code == RSHIFT_EXPR)
+ code = LSHIFT_EXPR;
+ else
+ code = RSHIFT_EXPR;
+ }
+
+ if (code == RSHIFT_EXPR)
+ /* It's unclear from the C standard whether shifts can overflow.
+ The following code ignores overflow; perhaps a C standard
+ interpretation ruling is needed. */
+ res = wi::rshift (arg1, arg2, sign,
+ SHIFT_COUNT_TRUNCATED
+ ? GET_MODE_BITSIZE (TYPE_MODE (type)) : 0);
+ else
+ res = wi::lshift (arg1, arg2,
+ SHIFT_COUNT_TRUNCATED
+ ? GET_MODE_BITSIZE (TYPE_MODE (type)) : 0);
break;
-
+
case RROTATE_EXPR:
- res = op1.rrotate (op2.to_shwi (), TYPE_PRECISION (type));
- break;
-
case LROTATE_EXPR:
- res = op1.lrotate (op2.to_shwi (), TYPE_PRECISION (type));
+ if (wi::neg_p (arg2))
+ {
+ arg2 = -arg2;
+ if (code == RROTATE_EXPR)
+ code = LROTATE_EXPR;
+ else
+ code = RROTATE_EXPR;
+ }
+
+ if (code == RROTATE_EXPR)
+ res = wi::rrotate (arg1, arg2);
+ else
+ res = wi::lrotate (arg1, arg2);
break;
case PLUS_EXPR:
- res = op1.add_with_sign (op2, false, &overflow);
+ res = wi::add (arg1, arg2, sign, &overflow);
break;
case MINUS_EXPR:
- res = op1.sub_with_overflow (op2, &overflow);
+ res = wi::sub (arg1, arg2, sign, &overflow);
break;
-
+
case MULT_EXPR:
- res = op1.mul_with_sign (op2, false, &overflow);
+ res = wi::mul (arg1, arg2, sign, &overflow);
break;
case MULT_HIGHPART_EXPR:
- if (TYPE_PRECISION (type) > HOST_BITS_PER_WIDE_INT)
- {
- bool dummy_overflow;
- if (TYPE_PRECISION (type) != 2 * HOST_BITS_PER_WIDE_INT)
- return NULL_TREE;
- op1.wide_mul_with_sign (op2, uns, &res, &dummy_overflow);
- }
- else
- {
- bool dummy_overflow;
- /* MULT_HIGHPART_EXPR can't ever oveflow, as the multiplication
- is performed in twice the precision of arguments. */
- tmp = op1.mul_with_sign (op2, false, &dummy_overflow);
- res = tmp.rshift (TYPE_PRECISION (type),
- 2 * TYPE_PRECISION (type), !uns);
- }
+ res = wi::mul_high (arg1, arg2, sign);
break;
case TRUNC_DIV_EXPR:
- case FLOOR_DIV_EXPR: case CEIL_DIV_EXPR:
case EXACT_DIV_EXPR:
- /* This is a shortcut for a common special case. */
- if (op2.high == 0 && (HOST_WIDE_INT) op2.low > 0
- && !TREE_OVERFLOW (arg1)
- && !TREE_OVERFLOW (arg2)
- && op1.high == 0 && (HOST_WIDE_INT) op1.low >= 0)
- {
- if (code == CEIL_DIV_EXPR)
- op1.low += op2.low - 1;
+ res = wi::div_trunc (arg1, arg2, sign, &overflow);
+ if (overflow)
+ return NULL_TREE;
+ break;
- res.low = op1.low / op2.low, res.high = 0;
- break;
- }
+ case FLOOR_DIV_EXPR:
+ res = wi::div_floor (arg1, arg2, sign, &overflow);
+ if (overflow)
+ return NULL_TREE;
+ break;
- /* ... fall through ... */
+ case CEIL_DIV_EXPR:
+ res = wi::div_ceil (arg1, arg2, sign, &overflow);
+ if (overflow)
+ return NULL_TREE;
+ break;
case ROUND_DIV_EXPR:
- if (op2.is_zero ())
+ res = wi::div_round (arg1, arg2, sign, &overflow);
+ if (overflow)
return NULL_TREE;
- if (op2.is_one ())
- {
- res = op1;
- break;
- }
- if (op1 == op2 && !op1.is_zero ())
- {
- res = double_int_one;
- break;
- }
- res = op1.divmod_with_overflow (op2, uns, code, &tmp, &overflow);
break;
case TRUNC_MOD_EXPR:
- case FLOOR_MOD_EXPR: case CEIL_MOD_EXPR:
- /* This is a shortcut for a common special case. */
- if (op2.high == 0 && (HOST_WIDE_INT) op2.low > 0
- && !TREE_OVERFLOW (arg1)
- && !TREE_OVERFLOW (arg2)
- && op1.high == 0 && (HOST_WIDE_INT) op1.low >= 0)
- {
- if (code == CEIL_MOD_EXPR)
- op1.low += op2.low - 1;
- res.low = op1.low % op2.low, res.high = 0;
- break;
- }
+ res = wi::mod_trunc (arg1, arg2, sign, &overflow);
+ if (overflow)
+ return NULL_TREE;
+ break;
- /* ... fall through ... */
+ case FLOOR_MOD_EXPR:
+ res = wi::mod_floor (arg1, arg2, sign, &overflow);
+ if (overflow)
+ return NULL_TREE;
+ break;
+
+ case CEIL_MOD_EXPR:
+ res = wi::mod_ceil (arg1, arg2, sign, &overflow);
+ if (overflow)
+ return NULL_TREE;
+ break;
case ROUND_MOD_EXPR:
- if (op2.is_zero ())
+ res = wi::mod_round (arg1, arg2, sign, &overflow);
+ if (overflow)
return NULL_TREE;
- tmp = op1.divmod_with_overflow (op2, uns, code, &res, &overflow);
break;
case MIN_EXPR:
- res = op1.min (op2, uns);
+ res = wi::min (arg1, arg2, sign);
break;
case MAX_EXPR:
- res = op1.max (op2, uns);
+ res = wi::max (arg1, arg2, sign);
break;
default:
return NULL_TREE;
}
- t = force_fit_type_double (TREE_TYPE (arg1), res, overflowable,
- (!uns && overflow)
- | TREE_OVERFLOW (arg1) | TREE_OVERFLOW (arg2));
+ t = force_fit_type (type, res, overflowable,
+ (((sign == SIGNED || overflowable == -1)
+ && overflow)
+ | TREE_OVERFLOW (arg1) | TREE_OVERFLOW (parg2)));
return t;
}
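The shift and rotate cases above now fold a negative count into the opposite operation before dispatching. A plain-C sketch of that normalization, assuming a fixed 64-bit width in place of wide_int:

#include <stdbool.h>

/* Illustrative sketch only: a shift by a negative count N is treated
   as a shift by -N in the opposite direction, as the RSHIFT/LSHIFT
   case above does.  Out-of-range counts are not modeled.  */
static unsigned long long
shift_sketch (unsigned long long x, long long count, bool right)
{
  if (count < 0)
    {
      count = -count;
      right = !right;
    }
  if (count >= 64)
    return 0;
  return right ? x >> count : x << count;
}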
case LSHIFT_EXPR:
case RSHIFT_EXPR:
- f2.data.high = TREE_INT_CST_HIGH (arg2);
- f2.data.low = TREE_INT_CST_LOW (arg2);
- f2.mode = SImode;
+ {
+ wide_int w2 = arg2;
+ f2.data.high = w2.elt (1);
+ f2.data.low = w2.elt (0);
+ f2.mode = SImode;
+ }
break;
default:
if (code == VEC_LSHIFT_EXPR
|| code == VEC_RSHIFT_EXPR)
{
- if (!host_integerp (arg2, 1))
+ if (!tree_fits_uhwi_p (arg2))
return NULL_TREE;
- unsigned HOST_WIDE_INT shiftc = tree_low_cst (arg2, 1);
- unsigned HOST_WIDE_INT outerc = tree_low_cst (TYPE_SIZE (type), 1);
+ unsigned HOST_WIDE_INT shiftc = tree_to_uhwi (arg2);
+ unsigned HOST_WIDE_INT outerc = tree_to_uhwi (TYPE_SIZE (type));
unsigned HOST_WIDE_INT innerc
- = tree_low_cst (TYPE_SIZE (TREE_TYPE (type)), 1);
+ = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (type)));
if (shiftc >= outerc || (shiftc % innerc) != 0)
return NULL_TREE;
int offset = shiftc / innerc;
static tree
fold_convert_const_int_from_int (tree type, const_tree arg1)
{
- tree t;
-
/* Given an integer constant, make new constant with new type,
- appropriately sign-extended or truncated. */
- t = force_fit_type_double (type, tree_to_double_int (arg1),
- !POINTER_TYPE_P (TREE_TYPE (arg1)),
- (TREE_INT_CST_HIGH (arg1) < 0
- && (TYPE_UNSIGNED (type)
- < TYPE_UNSIGNED (TREE_TYPE (arg1))))
- | TREE_OVERFLOW (arg1));
-
- return t;
+ appropriately sign-extended or truncated. Use widest_int
+ so that any extension is done according to ARG1's type. */
+ return force_fit_type (type, wi::to_widest (arg1),
+ !POINTER_TYPE_P (TREE_TYPE (arg1)),
+ TREE_OVERFLOW (arg1));
}
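The switch to widest_int means the extension follows the source type's signedness before the target type truncates; a hedged plain-C illustration of the same rule:

#include <stdint.h>

/* Illustrative sketch only: the extension is done according to the
   SOURCE type, so (int8_t) -1 sign-extends first and the target then
   truncates: the result is 0xffff, not 0x00ff.  */
static uint16_t
convert_sketch (int8_t v)
{
  return (uint16_t) v;
}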
/* A subroutine of fold_convert_const handling conversions a REAL_CST
static tree
fold_convert_const_int_from_real (enum tree_code code, tree type, const_tree arg1)
{
- int overflow = 0;
+ bool overflow = false;
tree t;
/* The following code implements the floating point to integer
C and C++ standards that simply state that the behavior of
FP-to-integer conversion is unspecified upon overflow. */
- double_int val;
+ wide_int val;
REAL_VALUE_TYPE r;
REAL_VALUE_TYPE x = TREE_REAL_CST (arg1);
/* If R is NaN, return zero and show we have an overflow. */
if (REAL_VALUE_ISNAN (r))
{
- overflow = 1;
- val = double_int_zero;
+ overflow = true;
+ val = wi::zero (TYPE_PRECISION (type));
}
/* See if R is less than the lower bound or greater than the
REAL_VALUE_TYPE l = real_value_from_int_cst (NULL_TREE, lt);
if (REAL_VALUES_LESS (r, l))
{
- overflow = 1;
- val = tree_to_double_int (lt);
+ overflow = true;
+ val = lt;
}
}
REAL_VALUE_TYPE u = real_value_from_int_cst (NULL_TREE, ut);
if (REAL_VALUES_LESS (u, r))
{
- overflow = 1;
- val = tree_to_double_int (ut);
+ overflow = true;
+ val = ut;
}
}
}
if (! overflow)
- real_to_integer2 ((HOST_WIDE_INT *) &val.low, &val.high, &r);
+ val = real_to_integer (&r, &overflow, TYPE_PRECISION (type));
- t = force_fit_type_double (type, val, -1, overflow | TREE_OVERFLOW (arg1));
+ t = force_fit_type (type, val, -1, overflow | TREE_OVERFLOW (arg1));
return t;
}
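A hedged standalone analogue of the clamping behavior, for double to int32_t: NaN maps to zero, out-of-range values saturate to the type's bounds, and the overflow flag is set in all three cases:

#include <stdint.h>
#include <stdbool.h>

/* Illustrative sketch only: saturating FP-to-integer conversion with
   an overflow flag, mirroring the tree-level code above.  */
static int32_t
real_to_int32_sketch (double r, bool *overflow)
{
  *overflow = false;
  if (r != r)                     /* NaN */
    {
      *overflow = true;
      return 0;
    }
  if (r < (double) INT32_MIN)
    {
      *overflow = true;
      return INT32_MIN;
    }
  if (r > (double) INT32_MAX)
    {
      *overflow = true;
      return INT32_MAX;
    }
  return (int32_t) r;             /* truncates toward zero */
}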
/* Given a fixed-point constant, make new constant with new type,
appropriately sign-extended or truncated. */
- t = force_fit_type_double (type, temp, -1,
- (temp.is_negative ()
- && (TYPE_UNSIGNED (type)
- < TYPE_UNSIGNED (TREE_TYPE (arg1))))
- | TREE_OVERFLOW (arg1));
-
+ t = force_fit_type (type, temp, -1,
+ (temp.is_negative ()
+ && (TYPE_UNSIGNED (type)
+ < TYPE_UNSIGNED (TREE_TYPE (arg1))))
+ | TREE_OVERFLOW (arg1));
+
return t;
}
FIXED_VALUE_TYPE value;
tree t;
bool overflow_p;
+ double_int di;
+
+ gcc_assert (TREE_INT_CST_NUNITS (arg1) <= 2);
+
+ di.low = TREE_INT_CST_ELT (arg1, 0);
+ if (TREE_INT_CST_NUNITS (arg1) == 1)
+ di.high = (HOST_WIDE_INT)di.low < 0 ? (HOST_WIDE_INT)-1 : 0;
+ else
+ di.high = TREE_INT_CST_ELT (arg1, 1);
overflow_p = fixed_convert_from_int (&value, TYPE_MODE (type),
- TREE_INT_CST (arg1),
+ di,
TYPE_UNSIGNED (TREE_TYPE (arg1)),
TYPE_SATURATING (type));
t = build_fixed (type, value);
}
if (TREE_CODE (arg0) != TREE_CODE (arg1)
- /* This is needed for conversions and for COMPONENT_REF.
- Might as well play it safe and always test this. */
- || TREE_CODE (TREE_TYPE (arg0)) == ERROR_MARK
+ /* NOP_EXPR and CONVERT_EXPR are considered equal. */
+ && !(CONVERT_EXPR_P (arg0) && CONVERT_EXPR_P (arg1)))
+ return 0;
+
+ /* This is needed for conversions and for COMPONENT_REF.
+ Might as well play it safe and always test this. */
+ if (TREE_CODE (TREE_TYPE (arg0)) == ERROR_MARK
|| TREE_CODE (TREE_TYPE (arg1)) == ERROR_MARK
|| TYPE_MODE (TREE_TYPE (arg0)) != TYPE_MODE (TREE_TYPE (arg1)))
return 0;
&& operand_equal_p (TYPE_SIZE (TREE_TYPE (arg0)),
TYPE_SIZE (TREE_TYPE (arg1)), flags)))
&& types_compatible_p (TREE_TYPE (arg0), TREE_TYPE (arg1))
- && (TYPE_MAIN_VARIANT (TREE_TYPE (TREE_OPERAND (arg0, 1)))
- == TYPE_MAIN_VARIANT (TREE_TYPE (TREE_OPERAND (arg1, 1))))
+ && alias_ptr_types_compatible_p
+ (TREE_TYPE (TREE_OPERAND (arg0, 1)),
+ TREE_TYPE (TREE_OPERAND (arg1, 1)))
&& OP_SAME (0) && OP_SAME (1));
case ARRAY_REF:
case COMPONENT_REF:
/* Handle operand 2 the same as for ARRAY_REF. Operand 0
may be NULL when we're called to compare MEM_EXPRs. */
- if (!OP_SAME_WITH_NULL (0))
+ if (!OP_SAME_WITH_NULL (0)
+ || !OP_SAME (1))
return 0;
flags &= ~OEP_CONSTANT_ADDRESS_OF;
- return OP_SAME (1) && OP_SAME_WITH_NULL (2);
+ return OP_SAME_WITH_NULL (2);
case BIT_FIELD_REF:
if (!OP_SAME (0))
tree size = TYPE_SIZE (TREE_TYPE (inner));
if ((INTEGRAL_TYPE_P (TREE_TYPE (inner))
|| POINTER_TYPE_P (TREE_TYPE (inner)))
- && host_integerp (size, 0)
- && tree_low_cst (size, 0) == bitsize)
+ && tree_fits_shwi_p (size)
+ && tree_to_shwi (size) == bitsize)
return fold_convert_loc (loc, type, inner);
}
tree mask;
tree offset;
- /* In the strict volatile bitfields case, doing code changes here may prevent
- other optimizations, in particular in a SLOW_BYTE_ACCESS setting. */
- if (flag_strict_volatile_bitfields > 0)
- return 0;
-
/* Get all the information about the extractions being done. If the bit size
is the same as the size of the underlying object, we aren't doing an
extraction at all and so can do nothing. We also don't want to
linner = get_inner_reference (lhs, &lbitsize, &lbitpos, &offset, &lmode,
&lunsignedp, &lvolatilep, false);
if (linner == lhs || lbitsize == GET_MODE_BITSIZE (lmode) || lbitsize < 0
- || offset != 0 || TREE_CODE (linner) == PLACEHOLDER_EXPR)
+ || offset != 0 || TREE_CODE (linner) == PLACEHOLDER_EXPR || lvolatilep)
return 0;
if (!const_p)
if (rinner == rhs || lbitpos != rbitpos || lbitsize != rbitsize
|| lunsignedp != runsignedp || offset != 0
- || TREE_CODE (rinner) == PLACEHOLDER_EXPR)
+ || TREE_CODE (rinner) == PLACEHOLDER_EXPR || rvolatilep)
return 0;
}
/* See if we can find a mode to refer to this field. We should be able to,
but fail if we can't. */
- if (lvolatilep
- && GET_MODE_BITSIZE (lmode) > 0
- && flag_strict_volatile_bitfields > 0)
- nmode = lmode;
- else
- nmode = get_best_mode (lbitsize, lbitpos, 0, 0,
- const_p ? TYPE_ALIGN (TREE_TYPE (linner))
- : MIN (TYPE_ALIGN (TREE_TYPE (linner)),
- TYPE_ALIGN (TREE_TYPE (rinner))),
- word_mode, lvolatilep || rvolatilep);
+ nmode = get_best_mode (lbitsize, lbitpos, 0, 0,
+ const_p ? TYPE_ALIGN (TREE_TYPE (linner))
+ : MIN (TYPE_ALIGN (TREE_TYPE (linner)),
+ TYPE_ALIGN (TREE_TYPE (rinner))),
+ word_mode, false);
if (nmode == VOIDmode)
return 0;
appropriate number of bits and mask it with the computed mask
(in case this was a signed field). If we changed it, make a new one. */
lhs = make_bit_field_ref (loc, linner, unsigned_type, nbitsize, nbitpos, 1);
- if (lvolatilep)
- {
- TREE_SIDE_EFFECTS (lhs) = 1;
- TREE_THIS_VOLATILE (lhs) = 1;
- }
rhs = const_binop (BIT_AND_EXPR,
const_binop (LSHIFT_EXPR,
}
/* Return nonzero if MASK represents a mask of SIZE ones in the low-order
- bit positions. */
+ bit positions and the type of MASK is signed. */
static int
-all_ones_mask_p (const_tree mask, int size)
+all_ones_mask_p (const_tree mask, unsigned int size)
{
tree type = TREE_TYPE (mask);
unsigned int precision = TYPE_PRECISION (type);
- tree tmask;
- tmask = build_int_cst_type (signed_type_for (type), -1);
+ /* If this function returns true when the type of the mask is
+ UNSIGNED, then there will be errors. In particular see
+ gcc.c-torture/execute/990326-1.c. There does not appear to be
+ any documentation paper trail as to why this is so.  But the
+ pre-wide-int code worked with that restriction and it has been preserved
+ here. */
+ if (size > precision || TYPE_SIGN (type) == UNSIGNED)
+ return false;
- return
- tree_int_cst_equal (mask,
- const_binop (RSHIFT_EXPR,
- const_binop (LSHIFT_EXPR, tmask,
- size_int (precision - size)),
- size_int (precision - size)));
+ return wi::mask (size, false, precision) == mask;
}
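wi::mask (size, false, precision) is the wide-int spelling of "SIZE low-order one bits", replacing the old shift-left-then-right construction. A fixed-width sketch of the test:

#include <stdint.h>
#include <stdbool.h>

/* Illustrative sketch only: does MASK consist of exactly SIZE
   low-order ones?  A 64-bit width stands in for the type's
   precision.  */
static bool
all_ones_mask_sketch (uint64_t mask, unsigned int size)
{
  uint64_t ones = size >= 64 ? ~(uint64_t) 0
                             : (((uint64_t) 1 << size) - 1);
  return mask == ones;
}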
/* Subroutine for fold: determine if VAL is the INTEGER_CONST that
static tree
sign_bit_p (tree exp, const_tree val)
{
- unsigned HOST_WIDE_INT mask_lo, lo;
- HOST_WIDE_INT mask_hi, hi;
int width;
tree t;
return NULL_TREE;
width = TYPE_PRECISION (t);
- if (width > HOST_BITS_PER_WIDE_INT)
- {
- hi = (unsigned HOST_WIDE_INT) 1 << (width - HOST_BITS_PER_WIDE_INT - 1);
- lo = 0;
-
- mask_hi = ((unsigned HOST_WIDE_INT) -1
- >> (HOST_BITS_PER_DOUBLE_INT - width));
- mask_lo = -1;
- }
- else
- {
- hi = 0;
- lo = (unsigned HOST_WIDE_INT) 1 << (width - 1);
-
- mask_hi = 0;
- mask_lo = ((unsigned HOST_WIDE_INT) -1
- >> (HOST_BITS_PER_WIDE_INT - width));
- }
-
- /* We mask off those bits beyond TREE_TYPE (exp) so that we can
- treat VAL as if it were unsigned. */
- if ((TREE_INT_CST_HIGH (val) & mask_hi) == hi
- && (TREE_INT_CST_LOW (val) & mask_lo) == lo)
+ if (wi::only_sign_bit_p (val, width))
return exp;
/* Handle extension from a narrower type. */
#pragma weak, etc). */
&& ! TREE_PUBLIC (exp)
&& ! DECL_EXTERNAL (exp)
+ /* Weakrefs are not safe to be read, since they can be NULL.
+ They are !TREE_PUBLIC && !DECL_EXTERNAL but still
+ have DECL_WEAK flag set. */
+ && (! VAR_OR_FUNCTION_DECL_P (exp) || ! DECL_WEAK (exp))
/* Loading a static variable is unduly expensive, but global
registers aren't expensive. */
&& (! TREE_STATIC (exp) || DECL_REGISTER (exp))));
{
in_p = ! in_p;
high = range_binop (MINUS_EXPR, NULL_TREE, low, 0,
- integer_one_node, 0);
+ build_int_cst (TREE_TYPE (low), 1), 0);
low = build_int_cst (arg0_type, 0);
}
}
if (n_low && n_high && tree_int_cst_lt (n_high, n_low))
{
low = range_binop (PLUS_EXPR, arg0_type, n_high, 0,
- integer_one_node, 0);
+ build_int_cst (TREE_TYPE (n_high), 1), 0);
high = range_binop (MINUS_EXPR, arg0_type, n_low, 0,
- integer_one_node, 0);
+ build_int_cst (TREE_TYPE (n_low), 1), 0);
/* If the range is of the form +/- [ x+1, x ], we won't
be able to normalize it. But then, it represents the
}
if (low == 0 && high == 0)
- return build_int_cst (type, 1);
+ return omit_one_operand_loc (loc, type, build_int_cst (type, 1), exp);
if (low == 0)
return fold_build2_loc (loc, LE_EXPR, type, exp,
/* Optimize (c>=1) && (c<=127) into (signed char)c > 0. */
if (integer_onep (low) && TREE_CODE (high) == INTEGER_CST)
{
- unsigned HOST_WIDE_INT lo;
- HOST_WIDE_INT hi;
- int prec;
-
- prec = TYPE_PRECISION (etype);
- if (prec <= HOST_BITS_PER_WIDE_INT)
- {
- hi = 0;
- lo = ((unsigned HOST_WIDE_INT) 1 << (prec - 1)) - 1;
- }
- else
- {
- hi = ((HOST_WIDE_INT) 1 << (prec - HOST_BITS_PER_WIDE_INT - 1)) - 1;
- lo = (unsigned HOST_WIDE_INT) -1;
- }
+ int prec = TYPE_PRECISION (etype);
+ wide_int osb = wi::set_bit_in_zero (prec - 1, prec) - 1;
- if (TREE_INT_CST_HIGH (high) == hi && TREE_INT_CST_LOW (high) == lo)
+ if (osb == high)
{
if (TYPE_UNSIGNED (etype))
{
utype = unsigned_type_for (etype);
maxv = fold_convert_loc (loc, utype, TYPE_MAX_VALUE (etype));
maxv = range_binop (PLUS_EXPR, NULL_TREE, maxv, 1,
- integer_one_node, 1);
+ build_int_cst (TREE_TYPE (maxv), 1), 1);
minv = fold_convert_loc (loc, utype, TYPE_MIN_VALUE (etype));
if (integer_zerop (range_binop (NE_EXPR, integer_type_node,
&& operand_equal_p (val, TYPE_MIN_VALUE (type), 0))
return 0;
else
- return range_binop (MINUS_EXPR, NULL_TREE, val, 0, integer_one_node, 0);
+ return range_binop (MINUS_EXPR, NULL_TREE, val, 0,
+ build_int_cst (TREE_TYPE (val), 1), 0);
}
/* Return the successor of VAL in its type, handling the infinite case. */
&& operand_equal_p (val, TYPE_MAX_VALUE (type), 0))
return 0;
else
- return range_binop (PLUS_EXPR, NULL_TREE, val, 0, integer_one_node, 0);
+ return range_binop (PLUS_EXPR, NULL_TREE, val, 0,
+ build_int_cst (TREE_TYPE (val), 1), 0);
}
/* Given two ranges, see if we can merge them into one. Return 1 if we
if (TYPE_UNSIGNED (TREE_TYPE (high1))
&& integer_zerop (range_binop (PLUS_EXPR, NULL_TREE,
high1, 1,
- integer_one_node, 1)))
+ build_int_cst (TREE_TYPE (high1), 1),
+ 1)))
high1 = 0;
break;
default:
int in0_p, in1_p, in_p;
tree low0, low1, low, high0, high1, high;
bool strict_overflow_p = false;
- tree lhs = make_range (op0, &in0_p, &low0, &high0, &strict_overflow_p);
- tree rhs = make_range (op1, &in1_p, &low1, &high1, &strict_overflow_p);
- tree tem;
+ tree tem, lhs, rhs;
const char * const warnmsg = G_("assuming signed overflow does not occur "
"when simplifying range test");
+ if (!INTEGRAL_TYPE_P (type))
+ return 0;
+
+ lhs = make_range (op0, &in0_p, &low0, &high0, &strict_overflow_p);
+ rhs = make_range (op1, &in1_p, &low1, &high1, &strict_overflow_p);
+
/* If this is an OR operation, invert both sides; we will invert
again at the end. */
if (or_op)
/* We work by getting just the sign bit into the low-order bit, then
into the high-order bit, then sign-extend. We then XOR that value
with C. */
- temp = const_binop (RSHIFT_EXPR, c, size_int (p - 1));
- temp = const_binop (BIT_AND_EXPR, temp, size_int (1));
+ temp = build_int_cst (TREE_TYPE (c), wi::extract_uhwi (c, p - 1, 1));
/* We must use a signed type in order to get an arithmetic right shift.
However, we must also avoid introducing accidental overflows, so that
&& (tcode == RSHIFT_EXPR || TYPE_UNSIGNED (TREE_TYPE (op0)))
/* const_binop may not detect overflow correctly,
so check for it explicitly here. */
- && TYPE_PRECISION (TREE_TYPE (size_one_node)) > TREE_INT_CST_LOW (op1)
- && TREE_INT_CST_HIGH (op1) == 0
+ && wi::gtu_p (TYPE_PRECISION (TREE_TYPE (size_one_node)), op1)
&& 0 != (t1 = fold_convert (ctype,
const_binop (LSHIFT_EXPR,
size_one_node,
assuming no overflow. */
if (tcode == code)
{
- double_int mul;
bool overflow_p;
- unsigned prec = TYPE_PRECISION (ctype);
- bool uns = TYPE_UNSIGNED (ctype);
- double_int diop1 = tree_to_double_int (op1).ext (prec, uns);
- double_int dic = tree_to_double_int (c).ext (prec, uns);
- mul = diop1.mul_with_sign (dic, false, &overflow_p);
- overflow_p = ((!uns && overflow_p)
- | TREE_OVERFLOW (c) | TREE_OVERFLOW (op1));
- if (!double_int_fits_to_tree_p (ctype, mul)
- && ((uns && tcode != MULT_EXPR) || !uns))
- overflow_p = 1;
+ signop sign = TYPE_SIGN (ctype);
+ wide_int mul = wi::mul_full (op1, c, sign);
+ overflow_p = TREE_OVERFLOW (c) | TREE_OVERFLOW (op1);
+ if (!wi::fits_to_tree_p (mul, ctype)
+ && ((sign == UNSIGNED && tcode != MULT_EXPR) || sign == SIGNED))
+ overflow_p = true;
if (!overflow_p)
return fold_build2 (tcode, ctype, fold_convert (ctype, op0),
- double_int_to_tree (ctype, mul));
+ wide_int_to_tree (ctype, mul));
}
/* If these operations "cancel" each other, we have the main
if (!HONOR_SIGNED_ZEROS (TYPE_MODE (type)))
return true;
+ /* In a vector or complex, we would need to check the sign of all zeros. */
+ if (TREE_CODE (addend) != REAL_CST)
+ return false;
+
/* Treat x + -0 as x - 0 and x - -0 as x + 0. */
- if (TREE_CODE (addend) == REAL_CST
- && REAL_VALUE_MINUS_ZERO (TREE_REAL_CST (addend)))
+ if (REAL_VALUE_MINUS_ZERO (TREE_REAL_CST (addend)))
negate = !negate;
/* The mode has signed zeros, and we have to honor their sign.
tree prod, tmp, hi, lo;
tree arg00 = TREE_OPERAND (arg0, 0);
tree arg01 = TREE_OPERAND (arg0, 1);
- double_int val;
- bool unsigned_p = TYPE_UNSIGNED (TREE_TYPE (arg0));
- bool neg_overflow;
+ signop sign = TYPE_SIGN (TREE_TYPE (arg0));
+ bool neg_overflow = false;
bool overflow;
/* We have to do this the hard way to detect unsigned overflow.
prod = int_const_binop (MULT_EXPR, arg01, arg1); */
- val = TREE_INT_CST (arg01)
- .mul_with_sign (TREE_INT_CST (arg1), unsigned_p, &overflow);
- prod = force_fit_type_double (TREE_TYPE (arg00), val, -1, overflow);
+ wide_int val = wi::mul (arg01, arg1, sign, &overflow);
+ prod = force_fit_type (TREE_TYPE (arg00), val, -1, overflow);
neg_overflow = false;
- if (unsigned_p)
+ if (sign == UNSIGNED)
{
tmp = int_const_binop (MINUS_EXPR, arg01,
build_int_cst (TREE_TYPE (arg01), 1));
lo = prod;
/* Likewise hi = int_const_binop (PLUS_EXPR, prod, tmp). */
- val = TREE_INT_CST (prod)
- .add_with_sign (TREE_INT_CST (tmp), unsigned_p, &overflow);
- hi = force_fit_type_double (TREE_TYPE (arg00), val,
- -1, overflow | TREE_OVERFLOW (prod));
+ val = wi::add (prod, tmp, sign, &overflow);
+ hi = force_fit_type (TREE_TYPE (arg00), val,
+ -1, overflow | TREE_OVERFLOW (prod));
}
else if (tree_int_cst_sgn (arg01) >= 0)
{
not overflow, adjust BITNUM and INNER. */
if (TREE_CODE (inner) == RSHIFT_EXPR
&& TREE_CODE (TREE_OPERAND (inner, 1)) == INTEGER_CST
- && TREE_INT_CST_HIGH (TREE_OPERAND (inner, 1)) == 0
- && bitnum < TYPE_PRECISION (type)
- && 0 > compare_tree_int (TREE_OPERAND (inner, 1),
- bitnum - TYPE_PRECISION (type)))
+ && wi::ltu_p (wi::to_widest (TREE_OPERAND (inner, 1)) + bitnum,
+ TYPE_PRECISION (type)))
{
- bitnum += TREE_INT_CST_LOW (TREE_OPERAND (inner, 1));
+ bitnum += tree_to_hwi (TREE_OPERAND (inner, 1));
inner = TREE_OPERAND (inner, 0);
}
return NULL_TREE;
if (TREE_CODE (arg1) == INTEGER_CST)
- arg1 = force_fit_type_double (inner_type, tree_to_double_int (arg1),
- 0, TREE_OVERFLOW (arg1));
+ arg1 = force_fit_type (inner_type, wi::to_widest (arg1), 0,
+ TREE_OVERFLOW (arg1));
else
arg1 = fold_convert_loc (loc, inner_type, arg1);
else
{
/* Try if delta is a multiple of step. */
- tree tmp = div_if_zero_remainder (EXACT_DIV_EXPR, op1, step);
+ tree tmp = div_if_zero_remainder (op1, step);
if (! tmp)
goto cont;
delta = tmp;
else
{
/* Try if delta is a multiple of step. */
- tree tmp = div_if_zero_remainder (EXACT_DIV_EXPR, op1, step);
+ tree tmp = div_if_zero_remainder (op1, step);
if (! tmp)
continue;
delta = tmp;
arg10 = build_one_cst (type);
/* As we canonicalize A - 2 to A + -2 get rid of that sign for
the purpose of this canonicalization. */
- if (TREE_INT_CST_HIGH (arg1) == -1
+ if (TYPE_SIGN (TREE_TYPE (arg1)) == SIGNED
+ && wi::neg_p (arg1)
&& negate_expr_p (arg1)
&& code == PLUS_EXPR)
{
/* No identical multiplicands; see if we can find a common
power-of-two factor in non-power-of-two multiplies. This
can help in multi-dimensional array access. */
- else if (host_integerp (arg01, 0)
- && host_integerp (arg11, 0))
+ else if (tree_fits_shwi_p (arg01)
+ && tree_fits_shwi_p (arg11))
{
HOST_WIDE_INT int01, int11, tmp;
bool swap = false;
tree maybe_same;
- int01 = TREE_INT_CST_LOW (arg01);
- int11 = TREE_INT_CST_LOW (arg11);
+ int01 = tree_to_shwi (arg01);
+ int11 = tree_to_shwi (arg11);
/* Move min of absolute values to int11. */
if (absu_hwi (int01) < absu_hwi (int11))
for (byte = 0; byte < total_bytes; byte++)
{
int bitpos = byte * BITS_PER_UNIT;
- if (bitpos < HOST_BITS_PER_WIDE_INT)
- value = (unsigned char) (TREE_INT_CST_LOW (expr) >> bitpos);
- else
- value = (unsigned char) (TREE_INT_CST_HIGH (expr)
- >> (bitpos - HOST_BITS_PER_WIDE_INT));
+ /* Extend EXPR according to TYPE_SIGN if the precision isn't a whole
+ number of bytes. */
+ value = wi::extract_uhwi (wi::to_widest (expr), bitpos, BITS_PER_UNIT);
if (total_bytes > UNITS_PER_WORD)
{
if (TREE_CODE (type) != ARRAY_TYPE
|| TREE_CODE (TREE_TYPE (type)) != INTEGER_TYPE
|| GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (type))) != BITS_PER_UNIT
- || !host_integerp (TYPE_SIZE_UNIT (type), 0))
+ || !tree_fits_shwi_p (TYPE_SIZE_UNIT (type)))
return 0;
- total_bytes = tree_low_cst (TYPE_SIZE_UNIT (type), 0);
+ total_bytes = tree_to_shwi (TYPE_SIZE_UNIT (type));
if (total_bytes > len)
return 0;
if (TREE_STRING_LENGTH (expr) < total_bytes)
native_interpret_int (tree type, const unsigned char *ptr, int len)
{
int total_bytes = GET_MODE_SIZE (TYPE_MODE (type));
- double_int result;
if (total_bytes > len
|| total_bytes * BITS_PER_UNIT > HOST_BITS_PER_DOUBLE_INT)
return NULL_TREE;
- result = double_int::from_buffer (ptr, total_bytes);
+ wide_int result = wi::from_buffer (ptr, total_bytes);
- return double_int_to_tree (type, result);
+ return wide_int_to_tree (type, result);
}
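wi::from_buffer reassembles the value from target-order bytes; a hedged analogue for a single 64-bit word, assuming little-endian byte order (big-endian targets are not modeled here):

#include <stdint.h>

/* Illustrative sketch only: little-endian byte buffer to integer,
   a one-word analogue of wi::from_buffer.  */
static uint64_t
from_buffer_sketch (const unsigned char *ptr, int total_bytes)
{
  uint64_t result = 0;
  for (int i = 0; i < total_bytes && i < 8; i++)
    result |= (uint64_t) ptr[i] << (8 * i);
  return result;
}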
change = 1;
else if (TYPE_PRECISION (TREE_TYPE (and1))
<= HOST_BITS_PER_WIDE_INT
- && host_integerp (and1, 1))
+ && tree_fits_uhwi_p (and1))
{
unsigned HOST_WIDE_INT cst;
- cst = tree_low_cst (and1, 1);
- cst &= (HOST_WIDE_INT) -1
+ cst = tree_to_uhwi (and1);
+ cst &= HOST_WIDE_INT_M1U
<< (TYPE_PRECISION (TREE_TYPE (and1)) - 1);
change = (cst == 0);
#ifdef LOAD_EXTEND_OP
}
if (change)
{
- tem = force_fit_type_double (type, tree_to_double_int (and1),
- 0, TREE_OVERFLOW (and1));
+ tem = force_fit_type (type, wi::to_widest (and1), 0,
+ TREE_OVERFLOW (and1));
return fold_build2_loc (loc, BIT_AND_EXPR, type,
- fold_convert_loc (loc, type, and0), tem);
+ fold_convert_loc (loc, type, and0), tem);
}
}
static bool
pointer_may_wrap_p (tree base, tree offset, HOST_WIDE_INT bitpos)
{
- double_int di_offset, total;
-
if (!POINTER_TYPE_P (TREE_TYPE (base)))
return true;
if (bitpos < 0)
return true;
+ wide_int wi_offset;
+ int precision = TYPE_PRECISION (TREE_TYPE (base));
if (offset == NULL_TREE)
- di_offset = double_int_zero;
+ wi_offset = wi::zero (precision);
else if (TREE_CODE (offset) != INTEGER_CST || TREE_OVERFLOW (offset))
return true;
else
- di_offset = TREE_INT_CST (offset);
+ wi_offset = offset;
bool overflow;
- double_int units = double_int::from_uhwi (bitpos / BITS_PER_UNIT);
- total = di_offset.add_with_sign (units, true, &overflow);
+ wide_int units = wi::shwi (bitpos / BITS_PER_UNIT, precision);
+ wide_int total = wi::add (wi_offset, units, UNSIGNED, &overflow);
if (overflow)
return true;
- if (total.high != 0)
+ if (!wi::fits_uhwi_p (total))
return true;
HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (TREE_TYPE (base)));
size = base_size;
}
- return total.low > (unsigned HOST_WIDE_INT) size;
+ return total.to_uhwi () > (unsigned HOST_WIDE_INT) size;
}
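The heart of the rewritten check is an unsigned addition with explicit overflow detection followed by a size comparison; a hedged plain-C analogue:

#include <stdint.h>
#include <stdbool.h>

/* Illustrative sketch only: report a possible wrap if OFFSET + UNITS
   overflows unsigned arithmetic or lands beyond SIZE bytes.  */
static bool
pointer_may_wrap_sketch (uint64_t offset, uint64_t units, uint64_t size)
{
  uint64_t total = offset + units;
  if (total < offset)             /* unsigned overflow */
    return true;
  return total > size;
}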
/* Subroutine of fold_binary. This routine performs all of the
indirect_base0 = true;
}
offset0 = TREE_OPERAND (arg0, 1);
- if (host_integerp (offset0, 0))
+ if (tree_fits_shwi_p (offset0))
{
HOST_WIDE_INT off = size_low_cst (offset0);
if ((HOST_WIDE_INT) (((unsigned HOST_WIDE_INT) off)
indirect_base1 = true;
}
offset1 = TREE_OPERAND (arg1, 1);
- if (host_integerp (offset1, 0))
+ if (tree_fits_shwi_p (offset1))
{
HOST_WIDE_INT off = size_low_cst (offset1);
if ((HOST_WIDE_INT) (((unsigned HOST_WIDE_INT) off)
inner_code = TREE_CODE (op1);
if (inner_code == INTEGER_CST)
{
- *residue += TREE_INT_CST_LOW (op1);
+ *residue += tree_to_hwi (op1);
return modulus;
}
else if (inner_code == MULT_EXPR)
unsigned HOST_WIDE_INT align;
/* Compute the greatest power-of-2 divisor of op1. */
- align = TREE_INT_CST_LOW (op1);
+ align = tree_to_hwi (op1);
align &= -align;
/* If align is non-zero and less than *modulus, replace
}
}
+/* Mask out the tz least significant bits of X of type TYPE where
+ tz is the number of trailing zeroes in Y. */
+static wide_int
+mask_with_tz (tree type, wide_int x, wide_int y)
+{
+ int tz = wi::ctz (y);
+ if (tz > 0)
+ return wi::mask (tz, true, TYPE_PRECISION (type)) & x;
+ return x;
+}
+
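Usage sketch: a multiple of Y has at least as many trailing zeros as Y, so when X is AND-ed against such a product its low CTZ(Y) bits contribute nothing. A hedged 32-bit illustration:

#include <stdint.h>

/* Illustrative sketch only: clear the CTZ(Y) least significant bits
   of X, a fixed-width analogue of mask_with_tz.  Y must be nonzero
   before calling __builtin_ctz.  */
static uint32_t
mask_with_tz_sketch (uint32_t x, uint32_t y)
{
  if (y == 0)
    return x;
  int tz = __builtin_ctz (y);
  return x & ~(((uint32_t) 1 << tz) - 1);
}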
/* Fold a binary expression of code CODE and type TYPE with operands
OP0 and OP1. LOC is the location of the resulting expression.
Return the folded expression if folding is successful. Otherwise,
fold_convert_loc (loc, type,
TREE_OPERAND (arg0, 0)));
- if (INTEGRAL_TYPE_P (type))
+ if (INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type))
{
/* Convert ~A + 1 to -A. */
if (TREE_CODE (arg0) == BIT_NOT_EXPR
STRIP_NOPS (tem);
if (operand_equal_p (tem, arg1, 0))
{
- t1 = build_minus_one_cst (type);
+ t1 = build_all_ones_cst (type);
return omit_one_operand_loc (loc, type, t1, arg1);
}
}
STRIP_NOPS (tem);
if (operand_equal_p (arg0, tem, 0))
{
- t1 = build_minus_one_cst (type);
+ t1 = build_all_ones_cst (type);
return omit_one_operand_loc (loc, type, t1, arg0);
}
}
code11 = TREE_CODE (tree11);
if (code01 == INTEGER_CST
&& code11 == INTEGER_CST
- && TREE_INT_CST_HIGH (tree01) == 0
- && TREE_INT_CST_HIGH (tree11) == 0
- && ((TREE_INT_CST_LOW (tree01) + TREE_INT_CST_LOW (tree11))
+ && (wi::to_widest (tree01) + wi::to_widest (tree11)
== element_precision (TREE_TYPE (TREE_OPERAND (arg0, 0)))))
{
tem = build2_loc (loc, LROTATE_EXPR,
TREE_OPERAND (arg1, 0)));
/* (-A) - B -> (-B) - A where B is easily negated and we can swap. */
if (TREE_CODE (arg0) == NEGATE_EXPR
- && (FLOAT_TYPE_P (type)
- || INTEGRAL_TYPE_P (type))
&& negate_expr_p (arg1)
&& reorder_operands_p (arg0, arg1))
return fold_build2_loc (loc, MINUS_EXPR, type,
fold_convert_loc (loc, type,
TREE_OPERAND (arg0, 0)));
/* Convert -A - 1 to ~A. */
- if (INTEGRAL_TYPE_P (type)
+ if (TREE_CODE (type) != COMPLEX_TYPE
&& TREE_CODE (arg0) == NEGATE_EXPR
&& integer_onep (arg1)
&& !TYPE_OVERFLOW_TRAPS (type))
TREE_OPERAND (arg0, 0)));
/* Convert -1 - A to ~A. */
- if (INTEGRAL_TYPE_P (type)
+ if (TREE_CODE (type) != COMPLEX_TYPE
&& integer_all_onesp (arg0))
return fold_build1_loc (loc, BIT_NOT_EXPR, type, op1);
- /* X - (X / CST) * CST is X % CST. */
- if (INTEGRAL_TYPE_P (type)
+ /* X - (X / Y) * Y is X % Y. */
+ if ((INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type))
&& TREE_CODE (arg1) == MULT_EXPR
&& TREE_CODE (TREE_OPERAND (arg1, 0)) == TRUNC_DIV_EXPR
&& operand_equal_p (arg0,
fold_build2_loc (loc, MULT_EXPR, type,
build_int_cst (type, 2) , arg1));
+ /* ((T) (X /[ex] C)) * C cancels out if the conversion is
+ sign-changing only. */
+ if (TREE_CODE (arg1) == INTEGER_CST
+ && TREE_CODE (arg0) == EXACT_DIV_EXPR
+ && operand_equal_p (arg1, TREE_OPERAND (arg0, 1), 0))
+ return fold_convert_loc (loc, type, TREE_OPERAND (arg0, 0));
+
strict_overflow_p = false;
if (TREE_CODE (arg1) == INTEGER_CST
&& 0 != (tem = extract_muldiv (op0, arg1, code, NULL_TREE,
&& TREE_CODE (arg1) == INTEGER_CST
&& TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST)
{
- double_int c1, c2, c3, msk;
int width = TYPE_PRECISION (type), w;
- c1 = tree_to_double_int (TREE_OPERAND (arg0, 1));
- c2 = tree_to_double_int (arg1);
+ bool try_simplify = true;
+ wide_int c1 = TREE_OPERAND (arg0, 1);
+ wide_int c2 = arg1;
/* If (C1&C2) == C1, then (X&C1)|C2 becomes (X,C2). */
if ((c1 & c2) == c1)
return omit_one_operand_loc (loc, type, arg1,
TREE_OPERAND (arg0, 0));
- msk = double_int::mask (width);
+ wide_int msk = wi::mask (width, false,
+ TYPE_PRECISION (TREE_TYPE (arg1)));
/* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
- if (msk.and_not (c1 | c2).is_zero ())
+ if (msk.and_not (c1 | c2) == 0)
return fold_build2_loc (loc, BIT_IOR_EXPR, type,
TREE_OPERAND (arg0, 0), arg1);
mode which allows further optimizations. */
c1 &= msk;
c2 &= msk;
- c3 = c1.and_not (c2);
- for (w = BITS_PER_UNIT;
- w <= width && w <= HOST_BITS_PER_WIDE_INT;
- w <<= 1)
+ wide_int c3 = c1.and_not (c2);
+ for (w = BITS_PER_UNIT; w <= width; w <<= 1)
{
- unsigned HOST_WIDE_INT mask
- = (unsigned HOST_WIDE_INT) -1 >> (HOST_BITS_PER_WIDE_INT - w);
- if (((c1.low | c2.low) & mask) == mask
- && (c1.low & ~mask) == 0 && c1.high == 0)
+ wide_int mask = wi::mask (w, false,
+ TYPE_PRECISION (type));
+ if (((c1 | c2) & mask) == mask && c1.and_not (mask) == 0)
{
- c3 = double_int::from_uhwi (mask);
+ c3 = mask;
break;
}
}
- if (c3 != c1)
+
+ /* If X is a tree of the form (Y * K1) & K2, this transformation
+ might conflict with the corresponding folding done below for
+ BIT_AND_EXPR and could end up in an infinite recursion. */
+ if (TREE_CODE (TREE_OPERAND (arg0, 0)) == MULT_EXPR
+ && TREE_CODE (TREE_OPERAND (TREE_OPERAND (arg0, 0), 1))
+ == INTEGER_CST)
+ {
+ tree t = TREE_OPERAND (TREE_OPERAND (arg0, 0), 1);
+ wide_int masked = mask_with_tz (type, c3, t);
+
+ try_simplify = (masked != c1);
+ }
+
+ if (try_simplify && c3 != c1)
return fold_build2_loc (loc, BIT_IOR_EXPR, type,
fold_build2_loc (loc, BIT_AND_EXPR, type,
TREE_OPERAND (arg0, 0),
- double_int_to_tree (type,
- c3)),
+ wide_int_to_tree (type,
+ c3)),
arg1);
}
multiple of 1 << CST. */
if (TREE_CODE (arg1) == INTEGER_CST)
{
- double_int cst1 = tree_to_double_int (arg1);
- double_int ncst1 = (-cst1).ext(TYPE_PRECISION (TREE_TYPE (arg1)),
- TYPE_UNSIGNED (TREE_TYPE (arg1)));
+ wide_int cst1 = arg1;
+ wide_int ncst1 = -cst1;
if ((cst1 & ncst1) == ncst1
&& multiple_of_p (type, arg0,
- double_int_to_tree (TREE_TYPE (arg1), ncst1)))
+ wide_int_to_tree (TREE_TYPE (arg1), ncst1)))
return fold_convert_loc (loc, type, arg0);
}
&& TREE_CODE (arg0) == MULT_EXPR
&& TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST)
{
- int arg1tz
- = tree_to_double_int (TREE_OPERAND (arg0, 1)).trailing_zeros ();
- if (arg1tz > 0)
- {
- double_int arg1mask, masked;
- arg1mask = ~double_int::mask (arg1tz);
- arg1mask = arg1mask.ext (TYPE_PRECISION (type),
- TYPE_UNSIGNED (type));
- masked = arg1mask & tree_to_double_int (arg1);
- if (masked.is_zero ())
- return omit_two_operands_loc (loc, type, build_zero_cst (type),
- arg0, arg1);
- else if (masked != tree_to_double_int (arg1))
- return fold_build2_loc (loc, code, type, op0,
- double_int_to_tree (type, masked));
- }
+ wide_int masked = mask_with_tz (type, arg1, TREE_OPERAND (arg0, 1));
+
+ if (masked == 0)
+ return omit_two_operands_loc (loc, type, build_zero_cst (type),
+ arg0, arg1);
+ else if (masked != arg1)
+ return fold_build2_loc (loc, code, type, op0,
+ wide_int_to_tree (type, masked));
}
/* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
and for - instead of + (or unary - instead of +)
and/or ^ instead of |.
If B is constant and (B & M) == 0, fold into A & M. */
- if (host_integerp (arg1, 1))
+ if (TREE_CODE (arg1) == INTEGER_CST)
{
- unsigned HOST_WIDE_INT cst1 = tree_low_cst (arg1, 1);
- if (~cst1 && (cst1 & (cst1 + 1)) == 0
+ wide_int cst1 = arg1;
+ if ((~cst1 != 0) && (cst1 & (cst1 + 1)) == 0
&& INTEGRAL_TYPE_P (TREE_TYPE (arg0))
&& (TREE_CODE (arg0) == PLUS_EXPR
|| TREE_CODE (arg0) == MINUS_EXPR
{
tree pmop[2];
int which = 0;
- unsigned HOST_WIDE_INT cst0;
+ wide_int cst0;
/* Now we know that arg0 is (C + D) or (C - D) or
-C and arg1 (M) is == (1LL << cst) - 1.
which = 1;
}
- if (!host_integerp (TYPE_MAX_VALUE (TREE_TYPE (arg0)), 1)
- || (tree_low_cst (TYPE_MAX_VALUE (TREE_TYPE (arg0)), 1)
- & cst1) != cst1)
+ if ((wi::max_value (TREE_TYPE (arg0)) & cst1) != cst1)
which = -1;
for (; which >= 0; which--)
if (TREE_CODE (TREE_OPERAND (pmop[which], 1))
!= INTEGER_CST)
break;
- /* tree_low_cst not used, because we don't care about
- the upper bits. */
- cst0 = TREE_INT_CST_LOW (TREE_OPERAND (pmop[which], 1));
+ cst0 = TREE_OPERAND (pmop[which], 1);
cst0 &= cst1;
if (TREE_CODE (pmop[which]) == BIT_AND_EXPR)
{
omitted (assumed 0). */
if ((TREE_CODE (arg0) == PLUS_EXPR
|| (TREE_CODE (arg0) == MINUS_EXPR && which == 0))
- && (TREE_INT_CST_LOW (pmop[which]) & cst1) == 0)
+ && (cst1 & pmop[which]) == 0)
pmop[which] = NULL;
break;
default:
}
}
}
-
+
t1 = distribute_bit_expr (loc, code, type, arg0, arg1);
if (t1 != NULL_TREE)
return t1;
{
prec = TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg0, 0)));
- if (prec < BITS_PER_WORD && prec < HOST_BITS_PER_WIDE_INT
- && (~TREE_INT_CST_LOW (arg1)
- & (((HOST_WIDE_INT) 1 << prec) - 1)) == 0)
+ wide_int mask = wide_int::from (arg1, prec, UNSIGNED);
+ if (mask == -1)
return
fold_convert_loc (loc, type, TREE_OPERAND (arg0, 0));
}
/* If arg0 is derived from the address of an object or function, we may
be able to fold this expression using the object or function's
alignment. */
- if (POINTER_TYPE_P (TREE_TYPE (arg0)) && host_integerp (arg1, 1))
+ if (POINTER_TYPE_P (TREE_TYPE (arg0)) && tree_fits_uhwi_p (arg1))
{
unsigned HOST_WIDE_INT modulus, residue;
- unsigned HOST_WIDE_INT low = TREE_INT_CST_LOW (arg1);
+ unsigned HOST_WIDE_INT low = tree_to_uhwi (arg1);
modulus = get_pointer_modulus_and_residue (arg0, &residue,
integer_onep (arg1));
if the new mask might be further optimized. */
if ((TREE_CODE (arg0) == LSHIFT_EXPR
|| TREE_CODE (arg0) == RSHIFT_EXPR)
- && host_integerp (TREE_OPERAND (arg0, 1), 1)
- && host_integerp (arg1, TYPE_UNSIGNED (TREE_TYPE (arg1)))
- && tree_low_cst (TREE_OPERAND (arg0, 1), 1)
+ && tree_fits_uhwi_p (TREE_OPERAND (arg0, 1))
+ && tree_fits_hwi_p (arg1)
+ && tree_to_uhwi (TREE_OPERAND (arg0, 1))
< TYPE_PRECISION (TREE_TYPE (arg0))
&& TYPE_PRECISION (TREE_TYPE (arg0)) <= HOST_BITS_PER_WIDE_INT
- && tree_low_cst (TREE_OPERAND (arg0, 1), 1) > 0)
+ && tree_to_uhwi (TREE_OPERAND (arg0, 1)) > 0)
{
- unsigned int shiftc = tree_low_cst (TREE_OPERAND (arg0, 1), 1);
+ unsigned int shiftc = tree_to_uhwi (TREE_OPERAND (arg0, 1));
unsigned HOST_WIDE_INT mask
- = tree_low_cst (arg1, TYPE_UNSIGNED (TREE_TYPE (arg1)));
+ = tree_to_hwi (arg1, TYPE_SIGN (TREE_TYPE (arg1)));
unsigned HOST_WIDE_INT newmask, zerobits = 0;
tree shift_type = TREE_TYPE (arg0);
tree sum = fold_binary_loc (loc, PLUS_EXPR, TREE_TYPE (arg1),
arg1, TREE_OPERAND (arg0, 1));
if (sum && integer_zerop (sum)) {
- unsigned long pow2;
-
- if (TREE_INT_CST_LOW (arg1))
- pow2 = exact_log2 (TREE_INT_CST_LOW (arg1));
- else
- pow2 = exact_log2 (TREE_INT_CST_HIGH (arg1))
- + HOST_BITS_PER_WIDE_INT;
-
+ tree pow2 = build_int_cst (integer_type_node,
+ wi::exact_log2 (arg1));
return fold_build2_loc (loc, RSHIFT_EXPR, type,
- TREE_OPERAND (arg0, 0),
- build_int_cst (integer_type_node, pow2));
+ TREE_OPERAND (arg0, 0), pow2);
}
}
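The open-coded exact_log2 over the low and high words collapses into one wi::exact_log2 call; the identity being exploited, as a hedged plain-C sketch:

#include <stdint.h>

/* Illustrative sketch only: unsigned division by a power of two is a
   right shift by its log2.  POW2 must really be a power of two.  */
static uint32_t
div_pow2_sketch (uint32_t x, uint32_t pow2)
{
  return x >> __builtin_ctz (pow2);
}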
if (integer_pow2p (sval) && tree_int_cst_sgn (sval) > 0)
{
tree sh_cnt = TREE_OPERAND (arg1, 1);
- unsigned long pow2;
-
- if (TREE_INT_CST_LOW (sval))
- pow2 = exact_log2 (TREE_INT_CST_LOW (sval));
- else
- pow2 = exact_log2 (TREE_INT_CST_HIGH (sval))
- + HOST_BITS_PER_WIDE_INT;
+ tree pow2 = build_int_cst (TREE_TYPE (sh_cnt),
+ wi::exact_log2 (sval));
if (strict_overflow_p)
fold_overflow_warning (("assuming signed overflow does not "
WARN_STRICT_OVERFLOW_MISC);
sh_cnt = fold_build2_loc (loc, PLUS_EXPR, TREE_TYPE (sh_cnt),
- sh_cnt,
- build_int_cst (TREE_TYPE (sh_cnt),
- pow2));
+ sh_cnt, pow2);
return fold_build2_loc (loc, RSHIFT_EXPR, type,
- fold_convert_loc (loc, type, arg0), sh_cnt);
+ fold_convert_loc (loc, type, arg0), sh_cnt);
}
}
/* X / -1 is -X. */
if (!TYPE_UNSIGNED (type)
&& TREE_CODE (arg1) == INTEGER_CST
- && TREE_INT_CST_LOW (arg1) == (unsigned HOST_WIDE_INT) -1
- && TREE_INT_CST_HIGH (arg1) == -1)
+ && wi::eq_p (arg1, -1))
return fold_convert_loc (loc, type, negate_expr (arg0));
/* Convert -A / -B to A / B when the type is signed and overflow is
/* X % -1 is zero. */
if (!TYPE_UNSIGNED (type)
&& TREE_CODE (arg1) == INTEGER_CST
- && TREE_INT_CST_LOW (arg1) == (unsigned HOST_WIDE_INT) -1
- && TREE_INT_CST_HIGH (arg1) == -1)
+ && wi::eq_p (arg1, -1))
return omit_one_operand_loc (loc, type, integer_zero_node, arg0);
/* X % -C is the same as X % C. */
if (code == TRUNC_MOD_EXPR
- && !TYPE_UNSIGNED (type)
+ && TYPE_SIGN (type) == SIGNED
&& TREE_CODE (arg1) == INTEGER_CST
&& !TREE_OVERFLOW (arg1)
- && TREE_INT_CST_HIGH (arg1) < 0
+ && wi::neg_p (arg1)
&& !TYPE_OVERFLOW_TRAPS (type)
/* Avoid this transformation if C is INT_MIN, i.e. C == -C. */
&& !sign_bit_p (arg1, arg1))
prec = element_precision (type);
/* Turn (a OP c1) OP c2 into a OP (c1+c2). */
- if (TREE_CODE (op0) == code && host_integerp (arg1, true)
- && TREE_INT_CST_LOW (arg1) < prec
- && host_integerp (TREE_OPERAND (arg0, 1), true)
- && TREE_INT_CST_LOW (TREE_OPERAND (arg0, 1)) < prec)
+ if (TREE_CODE (op0) == code && tree_fits_uhwi_p (arg1)
+ && tree_to_uhwi (arg1) < prec
+ && tree_fits_uhwi_p (TREE_OPERAND (arg0, 1))
+ && tree_to_uhwi (TREE_OPERAND (arg0, 1)) < prec)
{
- unsigned int low = (TREE_INT_CST_LOW (TREE_OPERAND (arg0, 1))
- + TREE_INT_CST_LOW (arg1));
+ HOST_WIDE_INT low = (tree_to_shwi (TREE_OPERAND (arg0, 1))
+ + tree_to_shwi (arg1));
/* Deal with a OP (c1 + c2) being undefined but (a OP c1) OP c2
being well defined. */
if (((code == LSHIFT_EXPR && TREE_CODE (arg0) == RSHIFT_EXPR)
|| (TYPE_UNSIGNED (type)
&& code == RSHIFT_EXPR && TREE_CODE (arg0) == LSHIFT_EXPR))
- && host_integerp (arg1, false)
- && TREE_INT_CST_LOW (arg1) < prec
- && host_integerp (TREE_OPERAND (arg0, 1), false)
- && TREE_INT_CST_LOW (TREE_OPERAND (arg0, 1)) < prec)
+ && tree_fits_shwi_p (arg1)
+ && tree_to_shwi (arg1) < prec
+ && tree_fits_shwi_p (TREE_OPERAND (arg0, 1))
+ && tree_to_shwi (TREE_OPERAND (arg0, 1)) < prec)
{
- HOST_WIDE_INT low0 = TREE_INT_CST_LOW (TREE_OPERAND (arg0, 1));
- HOST_WIDE_INT low1 = TREE_INT_CST_LOW (arg1);
+ HOST_WIDE_INT low0 = tree_to_shwi (TREE_OPERAND (arg0, 1));
+ HOST_WIDE_INT low1 = tree_to_shwi (arg1);
tree lshift;
tree arg00;
fold_build2_loc (loc, code, type,
TREE_OPERAND (arg0, 1), arg1));
- /* Two consecutive rotates adding up to the precision of the
- type can be ignored. */
+ /* Two consecutive rotates adding up to some integer
+ multiple of the precision of the type can be ignored. */
if (code == RROTATE_EXPR && TREE_CODE (arg1) == INTEGER_CST
&& TREE_CODE (arg0) == RROTATE_EXPR
&& TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST
- && TREE_INT_CST_HIGH (arg1) == 0
- && TREE_INT_CST_HIGH (TREE_OPERAND (arg0, 1)) == 0
- && ((TREE_INT_CST_LOW (arg1)
- + TREE_INT_CST_LOW (TREE_OPERAND (arg0, 1)))
- == prec))
+ && wi::umod_trunc (wi::add (arg1, TREE_OPERAND (arg0, 1)),
+ prec) == 0)
return TREE_OPERAND (arg0, 0);
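The relaxed condition accepts any two rotate counts whose sum is a multiple of the precision, not only the precision itself. A hedged 32-bit demonstration of why the pair is an identity:

#include <stdint.h>

/* Illustrative sketch only: rotate right by N modulo the width.  */
static uint32_t
rotr32 (uint32_t x, unsigned int n)
{
  n %= 32;
  return n == 0 ? x : (x >> n) | (x << (32 - n));
}

/* rotr32 (rotr32 (x, 13), 19) == x, since (13 + 19) % 32 == 0, and
   likewise for any counts summing to a multiple of 32.  */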
/* Fold (X & C2) << C1 into (X << C1) & (C2 << C1)
&& operand_equal_p (tree_strip_nop_conversions (TREE_OPERAND (arg0,
1)),
arg1, 0)
- && (TREE_INT_CST_LOW (TREE_OPERAND (arg0, 0)) & 1) == 1)
+ && wi::bit_and (TREE_OPERAND (arg0, 0), 1) == 1)
{
return omit_two_operands_loc (loc, type,
code == NE_EXPR
prec = TYPE_PRECISION (itype);
/* Check for a valid shift count. */
- if (TREE_INT_CST_HIGH (arg001) == 0
- && TREE_INT_CST_LOW (arg001) < prec)
+ if (wi::ltu_p (arg001, prec))
{
tree arg01 = TREE_OPERAND (arg0, 1);
tree arg000 = TREE_OPERAND (TREE_OPERAND (arg0, 0), 0);
unsigned HOST_WIDE_INT log2 = tree_log2 (arg01);
/* If (C2 << C1) doesn't overflow, then ((X >> C1) & C2) != 0
can be rewritten as (X & (C2 << C1)) != 0. */
- if ((log2 + TREE_INT_CST_LOW (arg001)) < prec)
+ if ((log2 + tree_to_uhwi (arg001)) < prec)
{
tem = fold_build2_loc (loc, LSHIFT_EXPR, itype, arg01, arg001);
tem = fold_build2_loc (loc, BIT_AND_EXPR, itype, arg000, tem);
tree arg00 = TREE_OPERAND (arg0, 0);
tree arg01 = TREE_OPERAND (arg0, 1);
tree itype = TREE_TYPE (arg00);
- if (TREE_INT_CST_HIGH (arg01) == 0
- && TREE_INT_CST_LOW (arg01)
- == (unsigned HOST_WIDE_INT) (TYPE_PRECISION (itype) - 1))
+ if (wi::eq_p (arg01, TYPE_PRECISION (itype) - 1))
{
if (TYPE_UNSIGNED (itype))
{
the specified precision will have known values. */
{
tree arg1_type = TREE_TYPE (arg1);
- unsigned int width = TYPE_PRECISION (arg1_type);
+ unsigned int prec = TYPE_PRECISION (arg1_type);
if (TREE_CODE (arg1) == INTEGER_CST
- && width <= HOST_BITS_PER_DOUBLE_INT
&& (INTEGRAL_TYPE_P (arg1_type) || POINTER_TYPE_P (arg1_type)))
{
- HOST_WIDE_INT signed_max_hi;
- unsigned HOST_WIDE_INT signed_max_lo;
- unsigned HOST_WIDE_INT max_hi, max_lo, min_hi, min_lo;
-
- if (width <= HOST_BITS_PER_WIDE_INT)
- {
- signed_max_lo = ((unsigned HOST_WIDE_INT) 1 << (width - 1))
- - 1;
- signed_max_hi = 0;
- max_hi = 0;
-
- if (TYPE_UNSIGNED (arg1_type))
- {
- max_lo = ((unsigned HOST_WIDE_INT) 2 << (width - 1)) - 1;
- min_lo = 0;
- min_hi = 0;
- }
- else
- {
- max_lo = signed_max_lo;
- min_lo = ((unsigned HOST_WIDE_INT) -1 << (width - 1));
- min_hi = -1;
- }
- }
- else
- {
- width -= HOST_BITS_PER_WIDE_INT;
- signed_max_lo = -1;
- signed_max_hi = ((unsigned HOST_WIDE_INT) 1 << (width - 1))
- - 1;
- max_lo = -1;
- min_lo = 0;
-
- if (TYPE_UNSIGNED (arg1_type))
- {
- max_hi = ((unsigned HOST_WIDE_INT) 2 << (width - 1)) - 1;
- min_hi = 0;
- }
- else
- {
- max_hi = signed_max_hi;
- min_hi = ((unsigned HOST_WIDE_INT) -1 << (width - 1));
- }
- }
+ wide_int max = wi::max_value (arg1_type);
+ wide_int signed_max = wi::max_value (prec, SIGNED);
+ wide_int min = wi::min_value (arg1_type);
- if ((unsigned HOST_WIDE_INT) TREE_INT_CST_HIGH (arg1) == max_hi
- && TREE_INT_CST_LOW (arg1) == max_lo)
+ if (wi::eq_p (arg1, max))
switch (code)
{
case GT_EXPR:
default:
break;
}
- else if ((unsigned HOST_WIDE_INT) TREE_INT_CST_HIGH (arg1)
- == max_hi
- && TREE_INT_CST_LOW (arg1) == max_lo - 1)
+ else if (wi::eq_p (arg1, max - 1))
switch (code)
{
case GT_EXPR:
default:
break;
}
- else if ((unsigned HOST_WIDE_INT) TREE_INT_CST_HIGH (arg1)
- == min_hi
- && TREE_INT_CST_LOW (arg1) == min_lo)
+ else if (wi::eq_p (arg1, min))
switch (code)
{
case LT_EXPR:
default:
break;
}
- else if ((unsigned HOST_WIDE_INT) TREE_INT_CST_HIGH (arg1)
- == min_hi
- && TREE_INT_CST_LOW (arg1) == min_lo + 1)
+ else if (wi::eq_p (arg1, min + 1))
switch (code)
{
case GE_EXPR:
- arg1 = const_binop (MINUS_EXPR, arg1, integer_one_node);
+ arg1 = const_binop (MINUS_EXPR, arg1,
+ build_int_cst (TREE_TYPE (arg1), 1));
return fold_build2_loc (loc, NE_EXPR, type,
fold_convert_loc (loc,
TREE_TYPE (arg1), arg0),
arg1);
case LT_EXPR:
- arg1 = const_binop (MINUS_EXPR, arg1, integer_one_node);
+ arg1 = const_binop (MINUS_EXPR, arg1,
+ build_int_cst (TREE_TYPE (arg1), 1));
return fold_build2_loc (loc, EQ_EXPR, type,
fold_convert_loc (loc, TREE_TYPE (arg1),
arg0),
break;
}
- else if (TREE_INT_CST_HIGH (arg1) == signed_max_hi
- && TREE_INT_CST_LOW (arg1) == signed_max_lo
+ else if (wi::eq_p (arg1, signed_max)
&& TYPE_UNSIGNED (arg1_type)
+ /* ??? It is questionable whether the bitsize is the right
+ thing to check here; arguably the precision should be
+ checked instead. */
+
/* We will flip the signedness of the comparison operator
associated with the mode of arg1, so the sign bit is
specified by this mode. Check that arg1 is the signed
max associated with this sign bit. */
- && width == GET_MODE_BITSIZE (TYPE_MODE (arg1_type))
+ && prec == GET_MODE_BITSIZE (TYPE_MODE (arg1_type))
/* signed_type does not work on pointer types. */
&& INTEGRAL_TYPE_P (arg1_type))
{
&& integer_zerop (op2)
&& (tem = sign_bit_p (TREE_OPERAND (arg0, 0), arg1)))
{
+ /* sign_bit_p looks through both zero and sign extensions,
+ but for this optimization only sign extensions are
+ usable. */
+ tree tem2 = TREE_OPERAND (arg0, 0);
+ while (tem != tem2)
+ {
+ if (TREE_CODE (tem2) != NOP_EXPR
+ || TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (tem2, 0))))
+ {
+ tem = NULL_TREE;
+ break;
+ }
+ tem2 = TREE_OPERAND (tem2, 0);
+ }
/* sign_bit_p only checks ARG1 bits within A's precision.
If <sign bit of A> has wider type than A, bits outside
of A's precision in <sign bit of A> need to be checked.
If they are all 0, this optimization needs to be done
in unsigned A's type, if they are all 1 in signed A's type,
otherwise this can't be done. */
- if (TYPE_PRECISION (TREE_TYPE (tem))
- < TYPE_PRECISION (TREE_TYPE (arg1))
+ if (tem
+ && TYPE_PRECISION (TREE_TYPE (tem))
+ < TYPE_PRECISION (TREE_TYPE (arg1))
&& TYPE_PRECISION (TREE_TYPE (tem))
< TYPE_PRECISION (type))
{
- unsigned HOST_WIDE_INT mask_lo;
- HOST_WIDE_INT mask_hi;
int inner_width, outer_width;
tree tem_type;
if (outer_width > TYPE_PRECISION (type))
outer_width = TYPE_PRECISION (type);
- if (outer_width > HOST_BITS_PER_WIDE_INT)
- {
- mask_hi = ((unsigned HOST_WIDE_INT) -1
- >> (HOST_BITS_PER_DOUBLE_INT - outer_width));
- mask_lo = -1;
- }
- else
- {
- mask_hi = 0;
- mask_lo = ((unsigned HOST_WIDE_INT) -1
- >> (HOST_BITS_PER_WIDE_INT - outer_width));
- }
- if (inner_width > HOST_BITS_PER_WIDE_INT)
- {
- mask_hi &= ~((unsigned HOST_WIDE_INT) -1
- >> (HOST_BITS_PER_WIDE_INT - inner_width));
- mask_lo = 0;
- }
- else
- mask_lo &= ~((unsigned HOST_WIDE_INT) -1
- >> (HOST_BITS_PER_WIDE_INT - inner_width));
+ wide_int mask = wi::shifted_mask
+ (inner_width, outer_width - inner_width, false,
+ TYPE_PRECISION (TREE_TYPE (arg1)));
- if ((TREE_INT_CST_HIGH (arg1) & mask_hi) == mask_hi
- && (TREE_INT_CST_LOW (arg1) & mask_lo) == mask_lo)
+ wide_int common = mask & arg1;
+ if (common == mask)
{
tem_type = signed_type_for (TREE_TYPE (tem));
tem = fold_convert_loc (loc, tem_type, tem);
}
- else if ((TREE_INT_CST_HIGH (arg1) & mask_hi) == 0
- && (TREE_INT_CST_LOW (arg1) & mask_lo) == 0)
+ else if (common == 0)
{
tem_type = unsigned_type_for (TREE_TYPE (tem));
tem = fold_convert_loc (loc, tem_type, tem);
tree tem = TREE_OPERAND (arg0, 0);
STRIP_NOPS (tem);
if (TREE_CODE (tem) == RSHIFT_EXPR
- && TREE_CODE (TREE_OPERAND (tem, 1)) == INTEGER_CST
+ && tree_fits_uhwi_p (TREE_OPERAND (tem, 1))
&& (unsigned HOST_WIDE_INT) tree_log2 (arg1) ==
- TREE_INT_CST_LOW (TREE_OPERAND (tem, 1)))
+ tree_to_uhwi (TREE_OPERAND (tem, 1)))
return fold_build2_loc (loc, BIT_AND_EXPR, type,
TREE_OPERAND (tem, 0), arg1);
}
&& TREE_TYPE (type) == TREE_TYPE (TREE_TYPE (arg0)))))
{
tree eltype = TREE_TYPE (TREE_TYPE (arg0));
- unsigned HOST_WIDE_INT width = tree_low_cst (TYPE_SIZE (eltype), 1);
- unsigned HOST_WIDE_INT n = tree_low_cst (arg1, 1);
- unsigned HOST_WIDE_INT idx = tree_low_cst (op2, 1);
+ unsigned HOST_WIDE_INT width = tree_to_uhwi (TYPE_SIZE (eltype));
+ unsigned HOST_WIDE_INT n = tree_to_uhwi (arg1);
+ unsigned HOST_WIDE_INT idx = tree_to_uhwi (op2);
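+	  /* WIDTH is the element size in bits, N the number of bits
+	     referenced and IDX the first referenced bit; e.g. a
+	     BIT_FIELD_REF of 64 bits at bit 64 of a vector of 32-bit
+	     elements covers elements 2 and 3.  */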
if (n != 0
&& (idx % width) == 0
/* A bit-field-ref that referenced the full argument can be stripped. */
if (INTEGRAL_TYPE_P (TREE_TYPE (arg0))
- && TYPE_PRECISION (TREE_TYPE (arg0)) == tree_low_cst (arg1, 1)
+ && TYPE_PRECISION (TREE_TYPE (arg0)) == tree_to_uhwi (arg1)
&& integer_zerop (op2))
return fold_convert_loc (loc, type, arg0);
fold (nearly) all BIT_FIELD_REFs. */
if (CONSTANT_CLASS_P (arg0)
&& can_native_interpret_type_p (type)
- && host_integerp (TYPE_SIZE_UNIT (TREE_TYPE (arg0)), 1)
+ && tree_fits_uhwi_p (TYPE_SIZE_UNIT (TREE_TYPE (arg0)))
	  /* This limitation should not be necessary; we just need
	     to round this up to mode size.  */
- && tree_low_cst (op1, 1) % BITS_PER_UNIT == 0
+ && tree_to_uhwi (op1) % BITS_PER_UNIT == 0
/* Need bit-shifting of the buffer to relax the following. */
- && tree_low_cst (op2, 1) % BITS_PER_UNIT == 0)
+ && tree_to_uhwi (op2) % BITS_PER_UNIT == 0)
{
- unsigned HOST_WIDE_INT bitpos = tree_low_cst (op2, 1);
- unsigned HOST_WIDE_INT bitsize = tree_low_cst (op1, 1);
+ unsigned HOST_WIDE_INT bitpos = tree_to_uhwi (op2);
+ unsigned HOST_WIDE_INT bitsize = tree_to_uhwi (op1);
unsigned HOST_WIDE_INT clen;
- clen = tree_low_cst (TYPE_SIZE_UNIT (TREE_TYPE (arg0)), 1);
+ clen = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (arg0)));
/* ??? We cannot tell native_encode_expr to start at
some random byte only. So limit us to a reasonable amount
of work. */
{
unsigned int nelts = TYPE_VECTOR_SUBPARTS (type), i, mask;
unsigned char *sel = XALLOCAVEC (unsigned char, nelts);
- tree t;
bool need_mask_canon = false;
bool all_in_vec0 = true;
bool all_in_vec1 = true;
if (TREE_CODE (val) != INTEGER_CST)
return NULL_TREE;
- sel[i] = TREE_INT_CST_LOW (val) & mask;
- if (TREE_INT_CST_HIGH (val)
- || ((unsigned HOST_WIDE_INT)
- TREE_INT_CST_LOW (val) != sel[i]))
- need_mask_canon = true;
+ /* Make sure that the perm value is in an acceptable
+ range. */
+ wide_int t = val;
+ if (wi::gtu_p (t, mask))
+ {
+ need_mask_canon = true;
+ sel[i] = t.to_uhwi () & mask;
+ }
+ else
+ sel[i] = t.to_uhwi ();
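+		  /* MASK is 2 * nelts - 1, so selector elements are
+		     reduced modulo twice the element count; e.g. for a
+		     four-element vector a selector value of 9 becomes
+		     9 & 7 == 1.  */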
if (sel[i] < nelts)
all_in_vec1 = false;
&& (TREE_CODE (op1) == VECTOR_CST
|| TREE_CODE (op1) == CONSTRUCTOR))
{
- t = fold_vec_perm (type, op0, op1, sel);
+ tree t = fold_vec_perm (type, op0, op1, sel);
if (t != NULL_TREE)
return t;
}
op1 = TREE_OPERAND (top, 1);
/* const_binop may not detect overflow correctly,
so check for it explicitly here. */
- if (TYPE_PRECISION (TREE_TYPE (size_one_node))
- > TREE_INT_CST_LOW (op1)
- && TREE_INT_CST_HIGH (op1) == 0
+ if (wi::gtu_p (TYPE_PRECISION (TREE_TYPE (size_one_node)), op1)
&& 0 != (t1 = fold_convert (type,
const_binop (LSHIFT_EXPR,
size_one_node,
if (TREE_CODE (inner_type) == REAL_TYPE)
return tree_expr_nonnegative_warnv_p (op0,
strict_overflow_p);
- if (TREE_CODE (inner_type) == INTEGER_TYPE)
+ if (INTEGRAL_TYPE_P (inner_type))
{
if (TYPE_UNSIGNED (inner_type))
return true;
strict_overflow_p);
}
}
- else if (TREE_CODE (outer_type) == INTEGER_TYPE)
+ else if (INTEGRAL_TYPE_P (outer_type))
{
if (TREE_CODE (inner_type) == REAL_TYPE)
return tree_expr_nonnegative_warnv_p (op0,
strict_overflow_p);
- if (TREE_CODE (inner_type) == INTEGER_TYPE)
+ if (INTEGRAL_TYPE_P (inner_type))
return TYPE_PRECISION (inner_type) < TYPE_PRECISION (outer_type)
&& TYPE_UNSIGNED (inner_type);
}
&& TREE_CODE (inner1) == INTEGER_TYPE && unsigned1)
{
unsigned int precision0 = (TREE_CODE (op0) == INTEGER_CST)
- ? tree_int_cst_min_precision (op0, /*unsignedp=*/true)
+ ? tree_int_cst_min_precision (op0, UNSIGNED)
: TYPE_PRECISION (inner0);
unsigned int precision1 = (TREE_CODE (op1) == INTEGER_CST)
- ? tree_int_cst_min_precision (op1, /*unsignedp=*/true)
+ ? tree_int_cst_min_precision (op1, UNSIGNED)
: TYPE_PRECISION (inner1);
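+      /* E.g. two factors that each fit in 8 bits multiply into at
+	 most 16 bits and therefore cannot reach the sign bit of a
+	 32-bit type.  */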
return precision0 + precision1 < TYPE_PRECISION (type);
CASE_INT_FN (BUILT_IN_FFS):
CASE_INT_FN (BUILT_IN_PARITY):
CASE_INT_FN (BUILT_IN_POPCOUNT):
+ CASE_INT_FN (BUILT_IN_CLZ):
+ CASE_INT_FN (BUILT_IN_CLRSB):
case BUILT_IN_BSWAP32:
case BUILT_IN_BSWAP64:
/* Always true. */
/* True if the 1st argument is nonnegative or the second
argument is an even integer. */
if (TREE_CODE (arg1) == INTEGER_CST
- && (TREE_INT_CST_LOW (arg1) & 1) == 0)
+ && (tree_to_hwi (arg1) & 1) == 0)
return true;
return tree_expr_nonnegative_warnv_p (arg0,
strict_overflow_p);
if ((n & 1) == 0)
{
REAL_VALUE_TYPE cint;
- real_from_integer (&cint, VOIDmode, n,
- n < 0 ? -1 : 0, 0);
+ real_from_integer (&cint, VOIDmode, n, SIGNED);
if (real_identical (&c, &cint))
return true;
}
strict_overflow_p);
case CALL_EXPR:
- return alloca_call_p (t);
+ {
+ tree fndecl = get_callee_fndecl (t);
+	if (!fndecl)
+	  return false;
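+	/* A call to a replaceable operator new that is allowed to
+	   throw never returns a null pointer; -fcheck-new disables
+	   that assumption.  */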
+ if (flag_delete_null_pointer_checks && !flag_check_new
+ && DECL_IS_OPERATOR_NEW (fndecl)
+ && !TREE_NOTHROW (fndecl))
+ return true;
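+	/* Likewise a function whose type carries the returns_nonnull
+	   attribute guarantees a nonzero result.  */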
+ if (flag_delete_null_pointer_checks
+ && lookup_attribute ("returns_nonnull",
+ TYPE_ATTRIBUTES (TREE_TYPE (fndecl))))
+ return true;
+ return alloca_call_p (t);
+ }
default:
break;
&& (GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (TREE_TYPE (string)))) == 1))
return build_int_cst_type (TREE_TYPE (exp),
(TREE_STRING_POINTER (string)
- [TREE_INT_CST_LOW (index)]));
+ [tree_to_uhwi (index)]));
}
return NULL;
}
{
case INTEGER_CST:
{
- double_int val = tree_to_double_int (arg0);
bool overflow;
- val = val.neg_with_overflow (&overflow);
- t = force_fit_type_double (type, val, 1,
- (overflow | TREE_OVERFLOW (arg0))
- && !TYPE_UNSIGNED (type));
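+	/* Negating the most negative value of a signed type has no
+	   representable result; wi::neg reports that case through
+	   OVERFLOW so the flag can be propagated.  */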
+ wide_int val = wi::neg (arg0, &overflow);
+ t = force_fit_type (type, val, 1,
+ (overflow | TREE_OVERFLOW (arg0))
+ && !TYPE_UNSIGNED (type));
break;
}
{
case INTEGER_CST:
{
- double_int val = tree_to_double_int (arg0);
-
/* If the value is unsigned or non-negative, then the absolute value
is the same as the ordinary value. */
- if (TYPE_UNSIGNED (type)
- || !val.is_negative ())
+ if (!wi::neg_p (arg0, TYPE_SIGN (type)))
t = arg0;
/* If the value is negative, then the absolute value is
else
{
bool overflow;
- val = val.neg_with_overflow (&overflow);
- t = force_fit_type_double (type, val, -1,
- overflow | TREE_OVERFLOW (arg0));
+ wide_int val = wi::neg (arg0, &overflow);
+ t = force_fit_type (type, val, -1,
+ overflow | TREE_OVERFLOW (arg0));
}
}
break;
static tree
fold_not_const (const_tree arg0, tree type)
{
- double_int val;
-
gcc_assert (TREE_CODE (arg0) == INTEGER_CST);
- val = ~tree_to_double_int (arg0);
- return force_fit_type_double (type, val, 0, TREE_OVERFLOW (arg0));
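+  /* The result is ~ARG0 taken at TYPE's precision; e.g. ~0x0f is
+     0xf0 for an 8-bit type.  */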
+ return force_fit_type (type, wi::bit_not (arg0), 0, TREE_OVERFLOW (arg0));
}
/* Given CODE, a relational operator, the target type, TYPE and two
{
if (code == EQ_EXPR)
result = tree_int_cst_equal (op0, op1);
- else if (TYPE_UNSIGNED (TREE_TYPE (op0)))
- result = INT_CST_LT_UNSIGNED (op0, op1);
else
result = INT_CST_LT (op0, op1);
}
if (TREE_CODE (op00type) == VECTOR_TYPE
&& type == TREE_TYPE (op00type))
{
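+	    /* OP01 is a byte offset into the vector, while BIT_FIELD_REF
+	       positions are in bits; e.g. a byte offset of 8 into a
+	       vector of 32-bit elements addresses the element starting
+	       at bit 64.  */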
- HOST_WIDE_INT offset = tree_low_cst (op01, 0);
+ HOST_WIDE_INT offset = tree_to_shwi (op01);
tree part_width = TYPE_SIZE (type);
- unsigned HOST_WIDE_INT part_widthi = tree_low_cst (part_width, 0)/BITS_PER_UNIT;
+ unsigned HOST_WIDE_INT part_widthi
+ = tree_to_shwi (part_width) / BITS_PER_UNIT;
unsigned HOST_WIDE_INT indexi = offset * BITS_PER_UNIT;
tree index = bitsize_int (indexi);
}
}
-/* Return the value of VALUE, rounded up to a multiple of DIVISOR.
- This can only be applied to objects of a sizetype. */
+/* Return the value of VALUE, rounded up to a multiple of DIVISOR. */
tree
round_up_loc (location_t loc, tree value, int divisor)
{
if (TREE_CODE (value) == INTEGER_CST)
{
- double_int val = tree_to_double_int (value);
+ wide_int val = value;
bool overflow_p;
- if ((val.low & (divisor - 1)) == 0)
+ if ((val & (divisor - 1)) == 0)
return value;
overflow_p = TREE_OVERFLOW (value);
- val.low &= ~(divisor - 1);
- val.low += divisor;
- if (val.low == 0)
- {
- val.high++;
- if (val.high == 0)
- overflow_p = true;
- }
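+      /* The mask arithmetic below relies on DIVISOR being a power of
+	 two; e.g. rounding 13 up to a multiple of 8 computes
+	 (13 & ~7) + 8 == 16.  A sum of zero means the addition
+	 wrapped around, i.e. the rounded value overflows the type.  */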
+ val &= ~(divisor - 1);
+ val += divisor;
+ if (val == 0)
+ overflow_p = true;
- return force_fit_type_double (TREE_TYPE (value), val,
- -1, overflow_p);
+ return force_fit_type (TREE_TYPE (value), val, -1, overflow_p);
}
else
{
toffset2 = fold_convert (type, toffset2);
tdiff = fold_build2 (MINUS_EXPR, type, toffset1, toffset2);
- if (!cst_and_fits_in_hwi (tdiff))
+ if (!cst_fits_shwi_p (tdiff))
return false;
*diff = int_cst_value (tdiff);