/* Conditional constant propagation pass for the GNU compiler.
- Copyright (C) 2000-2014 Free Software Foundation, Inc.
+ Copyright (C) 2000-2017 Free Software Foundation, Inc.
Adapted from original RTL SSA-CCP by Daniel Berlin <dberlin@dberlin.org>
Adapted to GIMPLE trees by Diego Novillo <dnovillo@redhat.com>
array CONST_VAL[i].VALUE. That is fed into substitute_and_fold for
final substitution and folding.
+ This algorithm uses wide-ints at the max precision of the target.
+ This means that, with one uninteresting exception, variables with
+ UNSIGNED types never go to VARYING because the bits above the
+ precision of the type of the variable are always zero. The
+ uninteresting case is a variable of UNSIGNED type that has the
+ maximum precision of the target. Such variables can go to VARYING,
+ but this causes no loss of information since these variables will
+ never be extended.
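+
+ For example, an 8-bit unsigned variable can only have unknown bits in
+ positions 0-7 of its mask; the bits above its precision stay zero and
+ therefore known, so its lattice value can remain CONSTANT with a
+ partial mask instead of dropping to VARYING.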
+
References:
Constant propagation with conditional branches,
#include "config.h"
#include "system.h"
#include "coretypes.h"
-#include "tm.h"
+#include "backend.h"
+#include "target.h"
#include "tree.h"
-#include "stor-layout.h"
-#include "flags.h"
-#include "tm_p.h"
-#include "basic-block.h"
-#include "function.h"
+#include "gimple.h"
+#include "tree-pass.h"
+#include "ssa.h"
#include "gimple-pretty-print.h"
-#include "hash-table.h"
-#include "tree-ssa-alias.h"
-#include "internal-fn.h"
+#include "fold-const.h"
#include "gimple-fold.h"
#include "tree-eh.h"
-#include "gimple-expr.h"
-#include "is-a.h"
-#include "gimple.h"
#include "gimplify.h"
#include "gimple-iterator.h"
-#include "gimple-ssa.h"
#include "tree-cfg.h"
-#include "tree-phinodes.h"
-#include "ssa-iterators.h"
-#include "stringpool.h"
-#include "tree-ssanames.h"
-#include "tree-pass.h"
#include "tree-ssa-propagate.h"
-#include "value-prof.h"
-#include "langhooks.h"
-#include "target.h"
-#include "diagnostic-core.h"
#include "dbgcnt.h"
#include "params.h"
-
+#include "builtins.h"
+#include "tree-chkp.h"
+#include "cfgloop.h"
+#include "stor-layout.h"
+#include "optabs-query.h"
+#include "tree-ssa-ccp.h"
+#include "tree-dfa.h"
+#include "diagnostic-core.h"
/* Possible lattice values. */
typedef enum
VARYING
} ccp_lattice_t;
-struct prop_value_d {
+struct ccp_prop_value_t {
/* Lattice value. */
ccp_lattice_t lattice_val;
/* Propagated value. */
tree value;
- /* Mask that applies to the propagated value during CCP. For
- X with a CONSTANT lattice value X & ~mask == value & ~mask. */
- double_int mask;
+ /* Mask that applies to the propagated value during CCP. For X
+ with a CONSTANT lattice value X & ~mask == value & ~mask. The
+ zero bits in the mask cover constant values. The ones mean no
+ information. */
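+ /* For example, value == 0x10 with mask == 0x0f describes 0x1?: bits
+ 0-3 are unknown, bit 4 is known to be one and all higher bits are
+ known to be zero. */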
+ widest_int mask;
};
-typedef struct prop_value_d prop_value_t;
-
/* Array of propagated constant values. After propagation,
CONST_VAL[I].VALUE holds the constant value for SSA_NAME(I). If
the constant is held in an SSA name representing a memory store
(i.e., a VDEF), CONST_VAL[I].MEM_REF will contain the actual
memory reference used to store (i.e., the LHS of the assignment
doing the store). */
-static prop_value_t *const_val;
+static ccp_prop_value_t *const_val;
static unsigned n_const_val;
-static void canonicalize_value (prop_value_t *);
+static void canonicalize_value (ccp_prop_value_t *);
static bool ccp_fold_stmt (gimple_stmt_iterator *);
+static void ccp_lattice_meet (ccp_prop_value_t *, ccp_prop_value_t *);
/* Dump constant propagation value VAL to file OUTF prefixed by PREFIX. */
static void
-dump_lattice_value (FILE *outf, const char *prefix, prop_value_t val)
+dump_lattice_value (FILE *outf, const char *prefix, ccp_prop_value_t val)
{
switch (val.lattice_val)
{
break;
case CONSTANT:
if (TREE_CODE (val.value) != INTEGER_CST
- || val.mask.is_zero ())
+ || val.mask == 0)
{
fprintf (outf, "%sCONSTANT ", prefix);
print_generic_expr (outf, val.value, dump_flags);
}
else
{
- double_int cval = tree_to_double_int (val.value).and_not (val.mask);
- fprintf (outf, "%sCONSTANT " HOST_WIDE_INT_PRINT_DOUBLE_HEX,
- prefix, cval.high, cval.low);
- fprintf (outf, " (" HOST_WIDE_INT_PRINT_DOUBLE_HEX ")",
- val.mask.high, val.mask.low);
+ widest_int cval = wi::bit_and_not (wi::to_widest (val.value),
+ val.mask);
+ fprintf (outf, "%sCONSTANT ", prefix);
+ print_hex (cval, outf);
+ fprintf (outf, " (");
+ print_hex (val.mask, outf);
+ fprintf (outf, ")");
}
break;
default:
/* Print lattice value VAL to stderr. */
-void debug_lattice_value (prop_value_t val);
+void debug_lattice_value (ccp_prop_value_t val);
DEBUG_FUNCTION void
-debug_lattice_value (prop_value_t val)
+debug_lattice_value (ccp_prop_value_t val)
{
dump_lattice_value (stderr, "", val);
fprintf (stderr, "\n");
}
+/* Extend NONZERO_BITS to a full mask, based on SGN. */
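+/* For example, for a 16-bit unsigned variable with NONZERO_BITS == 0x00ff
+ the returned mask is 0x00ff: bits 0-7 are unknown while bit 8 and above
+ are known to be zero, since UNSIGNED values are zero-extended. */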
+
+static widest_int
+extend_mask (const wide_int &nonzero_bits, signop sgn)
+{
+ return widest_int::from (nonzero_bits, sgn);
+}
/* Compute a default value for variable VAR and store it in the
CONST_VAL array. The following rules are used to get default
4- Initial values of variables that are not GIMPLE registers are
considered VARYING. */
-static prop_value_t
+static ccp_prop_value_t
get_default_value (tree var)
{
- prop_value_t val = { UNINITIALIZED, NULL_TREE, { 0, 0 } };
- gimple stmt;
+ ccp_prop_value_t val = { UNINITIALIZED, NULL_TREE, 0 };
+ gimple *stmt;
stmt = SSA_NAME_DEF_STMT (var);
can assume initially that it is UNDEFINED, otherwise we must
consider it VARYING. */
if (!virtual_operand_p (var)
+ && SSA_NAME_VAR (var)
&& TREE_CODE (SSA_NAME_VAR (var)) == VAR_DECL)
val.lattice_val = UNDEFINED;
else
{
val.lattice_val = VARYING;
- val.mask = double_int_minus_one;
+ val.mask = -1;
if (flag_tree_bit_ccp)
{
- double_int nonzero_bits = get_nonzero_bits (var);
- double_int mask
- = double_int::mask (TYPE_PRECISION (TREE_TYPE (var)));
- if (nonzero_bits != double_int_minus_one && nonzero_bits != mask)
+ wide_int nonzero_bits = get_nonzero_bits (var);
+ if (nonzero_bits != -1)
{
val.lattice_val = CONSTANT;
val.value = build_zero_cst (TREE_TYPE (var));
- /* CCP wants the bits above precision set. */
- val.mask = nonzero_bits | ~mask;
+ val.mask = extend_mask (nonzero_bits, TYPE_SIGN (TREE_TYPE (var)));
}
}
}
{
/* Otherwise, VAR will never take on a constant value. */
val.lattice_val = VARYING;
- val.mask = double_int_minus_one;
+ val.mask = -1;
}
return val;
/* Get the constant value associated with variable VAR. */
-static inline prop_value_t *
+static inline ccp_prop_value_t *
get_value (tree var)
{
- prop_value_t *val;
+ ccp_prop_value_t *val;
if (const_val == NULL
|| SSA_NAME_VERSION (var) >= n_const_val)
static inline tree
get_constant_value (tree var)
{
- prop_value_t *val;
+ ccp_prop_value_t *val;
if (TREE_CODE (var) != SSA_NAME)
{
if (is_gimple_min_invariant (var))
if (val
&& val->lattice_val == CONSTANT
&& (TREE_CODE (val->value) != INTEGER_CST
- || val->mask.is_zero ()))
+ || val->mask == 0))
return val->value;
return NULL_TREE;
}
static inline void
set_value_varying (tree var)
{
- prop_value_t *val = &const_val[SSA_NAME_VERSION (var)];
+ ccp_prop_value_t *val = &const_val[SSA_NAME_VERSION (var)];
val->lattice_val = VARYING;
val->value = NULL_TREE;
- val->mask = double_int_minus_one;
+ val->mask = -1;
}
-/* For float types, modify the value of VAL to make ccp work correctly
- for non-standard values (-0, NaN):
-
- If HONOR_SIGNED_ZEROS is false, and VAL = -0, we canonicalize it to 0.
- If HONOR_NANS is false, and VAL is NaN, we canonicalize it to UNDEFINED.
- This is to fix the following problem (see PR 29921): Suppose we have
-
- x = 0.0 * y
-
- and we set value of y to NaN. This causes value of x to be set to NaN.
- When we later determine that y is in fact VARYING, fold uses the fact
- that HONOR_NANS is false, and we try to change the value of x to 0,
- causing an ICE. With HONOR_NANS being false, the real appearance of
- NaN would cause undefined behavior, though, so claiming that y (and x)
- are UNDEFINED initially is correct.
-
- For other constants, make sure to drop TREE_OVERFLOW. */
+/* For integer constants, make sure to drop TREE_OVERFLOW. */
static void
-canonicalize_value (prop_value_t *val)
+canonicalize_value (ccp_prop_value_t *val)
{
- enum machine_mode mode;
- tree type;
- REAL_VALUE_TYPE d;
-
if (val->lattice_val != CONSTANT)
return;
if (TREE_OVERFLOW_P (val->value))
val->value = drop_tree_overflow (val->value);
-
- if (TREE_CODE (val->value) != REAL_CST)
- return;
-
- d = TREE_REAL_CST (val->value);
- type = TREE_TYPE (val->value);
- mode = TYPE_MODE (type);
-
- if (!HONOR_SIGNED_ZEROS (mode)
- && REAL_VALUE_MINUS_ZERO (d))
- {
- val->value = build_real (type, dconst0);
- return;
- }
-
- if (!HONOR_NANS (mode)
- && REAL_VALUE_ISNAN (d))
- {
- val->lattice_val = UNDEFINED;
- val->value = NULL;
- return;
- }
}
/* Return whether the lattice transition is valid. */
static bool
-valid_lattice_transition (prop_value_t old_val, prop_value_t new_val)
+valid_lattice_transition (ccp_prop_value_t old_val, ccp_prop_value_t new_val)
{
/* Lattice transitions must always be monotonically increasing in
value. */
/* Now both lattice values are CONSTANT. */
+ /* Allow arbitrary copy changes as we might look through PHI <a_1, ...>
+ when only a single copy edge is executable. */
+ if (TREE_CODE (old_val.value) == SSA_NAME
+ && TREE_CODE (new_val.value) == SSA_NAME)
+ return true;
+
+ /* Allow transitioning from a constant to a copy. */
+ if (is_gimple_min_invariant (old_val.value)
+ && TREE_CODE (new_val.value) == SSA_NAME)
+ return true;
+
/* Allow transitioning from PHI <&x, not executable> == &x
to PHI <&x, &y> == common alignment. */
if (TREE_CODE (old_val.value) != INTEGER_CST
/* Bit-lattices have to agree in the still valid bits. */
if (TREE_CODE (old_val.value) == INTEGER_CST
&& TREE_CODE (new_val.value) == INTEGER_CST)
- return tree_to_double_int (old_val.value).and_not (new_val.mask)
- == tree_to_double_int (new_val.value).and_not (new_val.mask);
+ return (wi::bit_and_not (wi::to_widest (old_val.value), new_val.mask)
+ == wi::bit_and_not (wi::to_widest (new_val.value), new_val.mask));
/* Otherwise constant values have to agree. */
- return operand_equal_p (old_val.value, new_val.value, 0);
+ if (operand_equal_p (old_val.value, new_val.value, 0))
+ return true;
+
+ /* At least the kinds and types should agree now. */
+ if (TREE_CODE (old_val.value) != TREE_CODE (new_val.value)
+ || !types_compatible_p (TREE_TYPE (old_val.value),
+ TREE_TYPE (new_val.value)))
+ return false;
+
+ /* For floats and !HONOR_NANS allow transitions from (partial) NaN
+ to non-NaN. */
+ tree type = TREE_TYPE (new_val.value);
+ if (SCALAR_FLOAT_TYPE_P (type)
+ && !HONOR_NANS (type))
+ {
+ if (REAL_VALUE_ISNAN (TREE_REAL_CST (old_val.value)))
+ return true;
+ }
+ else if (VECTOR_FLOAT_TYPE_P (type)
+ && !HONOR_NANS (type))
+ {
+ for (unsigned i = 0; i < VECTOR_CST_NELTS (old_val.value); ++i)
+ if (!REAL_VALUE_ISNAN
+ (TREE_REAL_CST (VECTOR_CST_ELT (old_val.value, i)))
+ && !operand_equal_p (VECTOR_CST_ELT (old_val.value, i),
+ VECTOR_CST_ELT (new_val.value, i), 0))
+ return false;
+ return true;
+ }
+ else if (COMPLEX_FLOAT_TYPE_P (type)
+ && !HONOR_NANS (type))
+ {
+ if (!REAL_VALUE_ISNAN (TREE_REAL_CST (TREE_REALPART (old_val.value)))
+ && !operand_equal_p (TREE_REALPART (old_val.value),
+ TREE_REALPART (new_val.value), 0))
+ return false;
+ if (!REAL_VALUE_ISNAN (TREE_REAL_CST (TREE_IMAGPART (old_val.value)))
+ && !operand_equal_p (TREE_IMAGPART (old_val.value),
+ TREE_IMAGPART (new_val.value), 0))
+ return false;
+ return true;
+ }
+ return false;
}
/* Set the value for variable VAR to NEW_VAL. Return true if the new
value is different from VAR's previous value. */
static bool
-set_lattice_value (tree var, prop_value_t new_val)
+set_lattice_value (tree var, ccp_prop_value_t *new_val)
{
/* We can deal with old UNINITIALIZED values just fine here. */
- prop_value_t *old_val = &const_val[SSA_NAME_VERSION (var)];
+ ccp_prop_value_t *old_val = &const_val[SSA_NAME_VERSION (var)];
- canonicalize_value (&new_val);
+ canonicalize_value (new_val);
/* We have to be careful to not go up the bitwise lattice
- represented by the mask.
- ??? This doesn't seem to be the best place to enforce this. */
- if (new_val.lattice_val == CONSTANT
+ represented by the mask. Instead of dropping to VARYING
+ use the meet operator to retain a conservative value.
+ Missed optimizations like PR65851 make this necessary.
+ It also ensures we converge to a stable lattice solution. */
+ if (new_val->lattice_val == CONSTANT
&& old_val->lattice_val == CONSTANT
- && TREE_CODE (new_val.value) == INTEGER_CST
- && TREE_CODE (old_val->value) == INTEGER_CST)
- {
- double_int diff;
- diff = tree_to_double_int (new_val.value)
- ^ tree_to_double_int (old_val->value);
- new_val.mask = new_val.mask | old_val->mask | diff;
- }
+ && TREE_CODE (new_val->value) != SSA_NAME)
+ ccp_lattice_meet (new_val, old_val);
- gcc_assert (valid_lattice_transition (*old_val, new_val));
+ gcc_checking_assert (valid_lattice_transition (*old_val, *new_val));
/* If *OLD_VAL and NEW_VAL are the same, return false to inform the
caller that this was a non-transition. */
- if (old_val->lattice_val != new_val.lattice_val
- || (new_val.lattice_val == CONSTANT
- && TREE_CODE (new_val.value) == INTEGER_CST
- && (TREE_CODE (old_val->value) != INTEGER_CST
- || new_val.mask != old_val->mask)))
+ if (old_val->lattice_val != new_val->lattice_val
+ || (new_val->lattice_val == CONSTANT
+ && (TREE_CODE (new_val->value) != TREE_CODE (old_val->value)
+ || (TREE_CODE (new_val->value) == INTEGER_CST
+ && (new_val->mask != old_val->mask
+ || (wi::bit_and_not (wi::to_widest (old_val->value),
+ new_val->mask)
+ != wi::bit_and_not (wi::to_widest (new_val->value),
+ new_val->mask))))
+ || (TREE_CODE (new_val->value) != INTEGER_CST
+ && !operand_equal_p (new_val->value, old_val->value, 0)))))
{
/* ??? We would like to delay creation of INTEGER_CSTs from
partially constant values here. */
if (dump_file && (dump_flags & TDF_DETAILS))
{
- dump_lattice_value (dump_file, "Lattice value changed to ", new_val);
+ dump_lattice_value (dump_file, "Lattice value changed to ", *new_val);
fprintf (dump_file, ". Adding SSA edges to worklist.\n");
}
- *old_val = new_val;
+ *old_val = *new_val;
- gcc_assert (new_val.lattice_val != UNINITIALIZED);
+ gcc_assert (new_val->lattice_val != UNINITIALIZED);
return true;
}
return false;
}
-static prop_value_t get_value_for_expr (tree, bool);
-static prop_value_t bit_value_binop (enum tree_code, tree, tree, tree);
-static void bit_value_binop_1 (enum tree_code, tree, double_int *, double_int *,
- tree, double_int, double_int,
- tree, double_int, double_int);
+static ccp_prop_value_t get_value_for_expr (tree, bool);
+static ccp_prop_value_t bit_value_binop (enum tree_code, tree, tree, tree);
+void bit_value_binop (enum tree_code, signop, int, widest_int *, widest_int *,
+ signop, int, const widest_int &, const widest_int &,
+ signop, int, const widest_int &, const widest_int &);
-/* Return a double_int that can be used for bitwise simplifications
+/* Return a widest_int that can be used for bitwise simplifications
from VAL. */
-static double_int
-value_to_double_int (prop_value_t val)
+static widest_int
+value_to_wide_int (ccp_prop_value_t val)
{
if (val.value
&& TREE_CODE (val.value) == INTEGER_CST)
- return tree_to_double_int (val.value);
- else
- return double_int_zero;
+ return wi::to_widest (val.value);
+
+ return 0;
}
/* Return the value for the address expression EXPR based on alignment
information. */
-static prop_value_t
+static ccp_prop_value_t
get_value_from_alignment (tree expr)
{
tree type = TREE_TYPE (expr);
- prop_value_t val;
+ ccp_prop_value_t val;
unsigned HOST_WIDE_INT bitpos;
unsigned int align;
get_pointer_alignment_1 (expr, &align, &bitpos);
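/* For example, ALIGN == 128 bits and BITPOS == 32 bits mean the pointer
is known to be 4 mod 16: the low four bits of the mask end up zero
(known) and the value becomes 4. */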
val.mask = (POINTER_TYPE_P (type) || TYPE_UNSIGNED (type)
- ? double_int::mask (TYPE_PRECISION (type))
- : double_int_minus_one)
- .and_not (double_int::from_uhwi (align / BITS_PER_UNIT - 1));
- val.lattice_val = val.mask.is_minus_one () ? VARYING : CONSTANT;
+ ? wi::mask <widest_int> (TYPE_PRECISION (type), false)
+ : -1).and_not (align / BITS_PER_UNIT - 1);
+ val.lattice_val
+ = wi::sext (val.mask, TYPE_PRECISION (type)) == -1 ? VARYING : CONSTANT;
if (val.lattice_val == CONSTANT)
- val.value
- = double_int_to_tree (type,
- double_int::from_uhwi (bitpos / BITS_PER_UNIT));
+ val.value = build_int_cstu (type, bitpos / BITS_PER_UNIT);
else
val.value = NULL_TREE;
return constant bits extracted from alignment information for
invariant addresses. */
-static prop_value_t
+static ccp_prop_value_t
get_value_for_expr (tree expr, bool for_bits_p)
{
- prop_value_t val;
+ ccp_prop_value_t val;
if (TREE_CODE (expr) == SSA_NAME)
{
- val = *get_value (expr);
+ ccp_prop_value_t *val_ = get_value (expr);
+ if (val_)
+ val = *val_;
+ else
+ {
+ val.lattice_val = VARYING;
+ val.value = NULL_TREE;
+ val.mask = -1;
+ }
if (for_bits_p
&& val.lattice_val == CONSTANT
&& TREE_CODE (val.value) == ADDR_EXPR)
val = get_value_from_alignment (val.value);
+ /* Fall back to a copy value. */
+ if (!for_bits_p
+ && val.lattice_val == VARYING
+ && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (expr))
+ {
+ val.lattice_val = CONSTANT;
+ val.value = expr;
+ val.mask = -1;
+ }
}
else if (is_gimple_min_invariant (expr)
&& (!for_bits_p || TREE_CODE (expr) != ADDR_EXPR))
{
val.lattice_val = CONSTANT;
val.value = expr;
- val.mask = double_int_zero;
+ val.mask = 0;
canonicalize_value (&val);
}
else if (TREE_CODE (expr) == ADDR_EXPR)
else
{
val.lattice_val = VARYING;
- val.mask = double_int_minus_one;
+ val.mask = -1;
val.value = NULL_TREE;
}
+
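+ /* Even a VARYING value of unsigned type has known zero bits above its
+ precision (see the comment at the top of the file), so clear those
+ bits in the mask. */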
+ if (val.lattice_val == VARYING
+ && TYPE_UNSIGNED (TREE_TYPE (expr)))
+ val.mask = wi::zext (val.mask, TYPE_PRECISION (TREE_TYPE (expr)));
+
return val;
}
Else return VARYING. */
static ccp_lattice_t
-likely_value (gimple stmt)
+likely_value (gimple *stmt)
{
bool has_constant_operand, has_undefined_operand, all_undefined_operands;
+ bool has_nsa_operand;
tree use;
ssa_op_iter iter;
unsigned i;
has_constant_operand = false;
has_undefined_operand = false;
all_undefined_operands = true;
+ has_nsa_operand = false;
FOR_EACH_SSA_TREE_OPERAND (use, stmt, iter, SSA_OP_USE)
{
- prop_value_t *val = get_value (use);
+ ccp_prop_value_t *val = get_value (use);
- if (val->lattice_val == UNDEFINED)
+ if (val && val->lattice_val == UNDEFINED)
has_undefined_operand = true;
else
all_undefined_operands = false;
- if (val->lattice_val == CONSTANT)
+ if (val && val->lattice_val == CONSTANT)
has_constant_operand = true;
+
+ if (SSA_NAME_IS_DEFAULT_DEF (use)
+ || !prop_simulate_again_p (SSA_NAME_DEF_STMT (use)))
+ has_nsa_operand = true;
}
/* There may be constants in regular rhs operands. For calls we
case PLUS_EXPR:
case MINUS_EXPR:
case POINTER_PLUS_EXPR:
+ case BIT_XOR_EXPR:
/* Not MIN_EXPR, MAX_EXPR. One VARYING operand may be selected.
Not bitwise operators, one VARYING operand may specify the
- result completely. Not logical operators for the same reason.
+ result completely.
+ Not logical operators for the same reason, apart from XOR.
Not COMPLEX_EXPR as one VARYING operand makes the result partly
not UNDEFINED. Not *DIV_EXPR, comparisons and shifts because
the undefined operand may be promoted. */
/* We do not consider virtual operands here -- load from read-only
memory may have only VARYING virtual operands, but still be
- constant. */
+ constant. Also we can combine the stmt with definitions from
+ operands whose definitions are not simulated again. */
if (has_constant_operand
+ || has_nsa_operand
|| gimple_references_memory_p (stmt))
return CONSTANT;
/* Returns true if STMT cannot be constant. */
static bool
-surely_varying_stmt_p (gimple stmt)
+surely_varying_stmt_p (gimple *stmt)
{
/* If the statement has operands that we cannot handle, it cannot be
constant. */
basic_block bb;
n_const_val = num_ssa_names;
- const_val = XCNEWVEC (prop_value_t, n_const_val);
+ const_val = XCNEWVEC (ccp_prop_value_t, n_const_val);
/* Initialize simulation flags for PHI nodes and statements. */
FOR_EACH_BB_FN (bb, cfun)
for (i = gsi_start_bb (bb); !gsi_end_p (i); gsi_next (&i))
{
- gimple stmt = gsi_stmt (i);
+ gimple *stmt = gsi_stmt (i);
bool is_varying;
/* If the statement is a control insn, then we do not
except for phi nodes for virtual operands when we do not do store ccp. */
FOR_EACH_BB_FN (bb, cfun)
{
- gimple_stmt_iterator i;
+ gphi_iterator i;
for (i = gsi_start_phis (bb); !gsi_end_p (i); gsi_next (&i))
{
- gimple phi = gsi_stmt (i);
+ gphi *phi = i.phi ();
if (virtual_operand_p (gimple_phi_result (phi)))
prop_set_simulate_again (phi, false);
if (!dbg_cnt (ccp))
{
const_val[i].lattice_val = VARYING;
- const_val[i].mask = double_int_minus_one;
+ const_val[i].mask = -1;
const_val[i].value = NULL_TREE;
}
}
/* Do final substitution of propagated values, cleanup the flowgraph and
- free allocated storage.
+ free allocated storage. If NONZERO_P, record nonzero bits.
Return TRUE when something was optimized. */
static bool
-ccp_finalize (void)
+ccp_finalize (bool nonzero_p)
{
bool something_changed;
unsigned i;
+ tree name;
do_dbg_cnt ();
/* Derive alignment and misalignment information from partially
constant pointers in the lattice or nonzero bits from partially
constant integers. */
- for (i = 1; i < num_ssa_names; ++i)
+ FOR_EACH_SSA_NAME (i, name, cfun)
{
- tree name = ssa_name (i);
- prop_value_t *val;
+ ccp_prop_value_t *val;
unsigned int tem, align;
- if (!name
- || (!POINTER_TYPE_P (TREE_TYPE (name))
- && (!INTEGRAL_TYPE_P (TREE_TYPE (name))
- /* Don't record nonzero bits before IPA to avoid
- using too much memory. */
- || first_pass_instance)))
+ if (!POINTER_TYPE_P (TREE_TYPE (name))
+ && (!INTEGRAL_TYPE_P (TREE_TYPE (name))
+ /* Don't record nonzero bits before IPA to avoid
+ using too much memory. */
+ || !nonzero_p))
continue;
val = get_value (name);
if (val->lattice_val != CONSTANT
- || TREE_CODE (val->value) != INTEGER_CST)
+ || TREE_CODE (val->value) != INTEGER_CST
+ || val->mask == 0)
continue;
if (POINTER_TYPE_P (TREE_TYPE (name)))
{
/* Trailing mask bits specify the alignment, trailing value
bits the misalignment. */
- tem = val->mask.low;
- align = (tem & -tem);
+ tem = val->mask.to_uhwi ();
+ align = least_bit_hwi (tem);
if (align > 1)
set_ptr_info_alignment (get_ptr_info (name), align,
(TREE_INT_CST_LOW (val->value)
}
else
{
- double_int nonzero_bits = val->mask;
- nonzero_bits = nonzero_bits | tree_to_double_int (val->value);
+ unsigned int precision = TYPE_PRECISION (TREE_TYPE (val->value));
+ wide_int nonzero_bits = wide_int::from (val->mask, precision,
+ UNSIGNED) | val->value;
nonzero_bits &= get_nonzero_bits (name);
set_nonzero_bits (name, nonzero_bits);
}
}
/* Perform substitutions based on the known constant values. */
- something_changed = substitute_and_fold (get_constant_value,
- ccp_fold_stmt, true);
+ something_changed = substitute_and_fold (get_constant_value, ccp_fold_stmt);
free (const_val);
const_val = NULL;
*/
static void
-ccp_lattice_meet (prop_value_t *val1, prop_value_t *val2)
+ccp_lattice_meet (ccp_prop_value_t *val1, ccp_prop_value_t *val2)
{
- if (val1->lattice_val == UNDEFINED)
+ if (val1->lattice_val == UNDEFINED
+ /* For UNDEFINED M SSA we can't always use the SSA name because its
+ definition may not dominate the PHI node. Doing optimistic copy propagation
+ also causes a lot of gcc.dg/uninit-pred*.c FAILs. */
+ && (val2->lattice_val != CONSTANT
+ || TREE_CODE (val2->value) != SSA_NAME))
{
/* UNDEFINED M any = any */
*val1 = *val2;
}
- else if (val2->lattice_val == UNDEFINED)
+ else if (val2->lattice_val == UNDEFINED
+ /* See above. */
+ && (val1->lattice_val != CONSTANT
+ || TREE_CODE (val1->value) != SSA_NAME))
{
/* any M UNDEFINED = any
Nothing to do. VAL1 already contains the value we want. */
{
/* any M VARYING = VARYING. */
val1->lattice_val = VARYING;
- val1->mask = double_int_minus_one;
+ val1->mask = -1;
val1->value = NULL_TREE;
}
else if (val1->lattice_val == CONSTANT
For INTEGER_CSTs mask unequal bits. If no equal bits remain,
drop to varying. */
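/* E.g. the meet of 5 (0b101) and 7 (0b111) adds bit 1 to the mask,
leaving 0b1?1: bits 0 and 2 stay known, bit 1 becomes unknown. */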
- val1->mask = val1->mask | val2->mask
- | (tree_to_double_int (val1->value)
- ^ tree_to_double_int (val2->value));
- if (val1->mask.is_minus_one ())
+ val1->mask = (val1->mask | val2->mask
+ | (wi::to_widest (val1->value)
+ ^ wi::to_widest (val2->value)));
+ if (wi::sext (val1->mask, TYPE_PRECISION (TREE_TYPE (val1->value))) == -1)
{
val1->lattice_val = VARYING;
val1->value = NULL_TREE;
}
else if (val1->lattice_val == CONSTANT
&& val2->lattice_val == CONSTANT
- && simple_cst_equal (val1->value, val2->value) == 1)
+ && operand_equal_p (val1->value, val2->value, 0))
{
/* Ci M Cj = Ci if (i == j)
Ci M Cj = VARYING if (i != j)
{
/* When not equal addresses are involved try meeting for
alignment. */
- prop_value_t tem = *val2;
+ ccp_prop_value_t tem = *val2;
if (TREE_CODE (val1->value) == ADDR_EXPR)
*val1 = get_value_for_expr (val1->value, true);
if (TREE_CODE (val2->value) == ADDR_EXPR)
{
/* Any other combination is VARYING. */
val1->lattice_val = VARYING;
- val1->mask = double_int_minus_one;
+ val1->mask = -1;
val1->value = NULL_TREE;
}
}
of the PHI node that are incoming via executable edges. */
static enum ssa_prop_result
-ccp_visit_phi_node (gimple phi)
+ccp_visit_phi_node (gphi *phi)
{
unsigned i;
- prop_value_t *old_val, new_val;
+ ccp_prop_value_t new_val;
if (dump_file && (dump_flags & TDF_DETAILS))
{
print_gimple_stmt (dump_file, phi, 0, dump_flags);
}
- old_val = get_value (gimple_phi_result (phi));
- switch (old_val->lattice_val)
- {
- case VARYING:
- return SSA_PROP_VARYING;
-
- case CONSTANT:
- new_val = *old_val;
- break;
-
- case UNDEFINED:
- new_val.lattice_val = UNDEFINED;
- new_val.value = NULL_TREE;
- break;
-
- default:
- gcc_unreachable ();
- }
+ new_val.lattice_val = UNDEFINED;
+ new_val.value = NULL_TREE;
+ new_val.mask = 0;
+ bool first = true;
+ bool non_exec_edge = false;
for (i = 0; i < gimple_phi_num_args (phi); i++)
{
/* Compute the meet operator over all the PHI arguments flowing
if (e->flags & EDGE_EXECUTABLE)
{
tree arg = gimple_phi_arg (phi, i)->def;
- prop_value_t arg_val = get_value_for_expr (arg, false);
+ ccp_prop_value_t arg_val = get_value_for_expr (arg, false);
- ccp_lattice_meet (&new_val, &arg_val);
+ if (first)
+ {
+ new_val = arg_val;
+ first = false;
+ }
+ else
+ ccp_lattice_meet (&new_val, &arg_val);
if (dump_file && (dump_flags & TDF_DETAILS))
{
if (new_val.lattice_val == VARYING)
break;
}
+ else
+ non_exec_edge = true;
+ }
+
+ /* In case there were non-executable edges and the value is a copy
+ make sure its definition dominates the PHI node. */
+ if (non_exec_edge
+ && new_val.lattice_val == CONSTANT
+ && TREE_CODE (new_val.value) == SSA_NAME
+ && ! SSA_NAME_IS_DEFAULT_DEF (new_val.value)
+ && ! dominated_by_p (CDI_DOMINATORS, gimple_bb (phi),
+ gimple_bb (SSA_NAME_DEF_STMT (new_val.value))))
+ {
+ new_val.lattice_val = VARYING;
+ new_val.value = NULL_TREE;
+ new_val.mask = -1;
}
if (dump_file && (dump_flags & TDF_DETAILS))
}
/* Make the transition to the new value. */
- if (set_lattice_value (gimple_phi_result (phi), new_val))
+ if (set_lattice_value (gimple_phi_result (phi), &new_val))
{
if (new_val.lattice_val == VARYING)
return SSA_PROP_VARYING;
return op;
}
+/* Return the constant value for OP, but signal to not follow SSA
+ edges if the definition may be simulated again. */
+
+static tree
+valueize_op_1 (tree op)
+{
+ if (TREE_CODE (op) == SSA_NAME)
+ {
+ /* If the definition may be simulated again we cannot follow
+ this SSA edge as the SSA propagator does not necessarily
+ re-visit the use. */
+ gimple *def_stmt = SSA_NAME_DEF_STMT (op);
+ if (!gimple_nop_p (def_stmt)
+ && prop_simulate_again_p (def_stmt))
+ return NULL_TREE;
+ tree tem = get_constant_value (op);
+ if (tem)
+ return tem;
+ }
+ return op;
+}
+
/* CCP specific front-end to the non-destructive constant folding
routines.
otherwise return the original RHS or NULL_TREE. */
static tree
-ccp_fold (gimple stmt)
+ccp_fold (gimple *stmt)
{
location_t loc = gimple_location (stmt);
switch (gimple_code (stmt))
case GIMPLE_SWITCH:
{
/* Return the constant switch index. */
- return valueize_op (gimple_switch_index (stmt));
+ return valueize_op (gimple_switch_index (as_a <gswitch *> (stmt)));
}
case GIMPLE_ASSIGN:
case GIMPLE_CALL:
- return gimple_fold_stmt_to_constant_1 (stmt, valueize_op);
+ return gimple_fold_stmt_to_constant_1 (stmt,
+ valueize_op, valueize_op_1);
default:
gcc_unreachable ();
RVAL and RMASK representing a value of type RTYPE and set
the value, mask pair *VAL and *MASK to the result. */
-static void
-bit_value_unop_1 (enum tree_code code, tree type,
- double_int *val, double_int *mask,
- tree rtype, double_int rval, double_int rmask)
+void
+bit_value_unop (enum tree_code code, signop type_sgn, int type_precision,
+ widest_int *val, widest_int *mask,
+ signop rtype_sgn, int rtype_precision,
+ const widest_int &rval, const widest_int &rmask)
{
switch (code)
{
case NEGATE_EXPR:
{
- double_int temv, temm;
+ widest_int temv, temm;
/* Return ~rval + 1. */
- bit_value_unop_1 (BIT_NOT_EXPR, type, &temv, &temm, type, rval, rmask);
- bit_value_binop_1 (PLUS_EXPR, type, val, mask,
- type, temv, temm,
- type, double_int_one, double_int_zero);
+ bit_value_unop (BIT_NOT_EXPR, type_sgn, type_precision, &temv, &temm,
+ type_sgn, type_precision, rval, rmask);
+ bit_value_binop (PLUS_EXPR, type_sgn, type_precision, val, mask,
+ type_sgn, type_precision, temv, temm,
+ type_sgn, type_precision, 1, 0);
break;
}
CASE_CONVERT:
{
- bool uns;
-
/* First extend mask and value according to the original type. */
- uns = TYPE_UNSIGNED (rtype);
- *mask = rmask.ext (TYPE_PRECISION (rtype), uns);
- *val = rval.ext (TYPE_PRECISION (rtype), uns);
+ *mask = wi::ext (rmask, rtype_precision, rtype_sgn);
+ *val = wi::ext (rval, rtype_precision, rtype_sgn);
/* Then extend mask and value according to the target type. */
- uns = TYPE_UNSIGNED (type);
- *mask = (*mask).ext (TYPE_PRECISION (type), uns);
- *val = (*val).ext (TYPE_PRECISION (type), uns);
+ *mask = wi::ext (*mask, type_precision, type_sgn);
+ *val = wi::ext (*val, type_precision, type_sgn);
break;
}
default:
- *mask = double_int_minus_one;
+ *mask = -1;
break;
}
}
R1VAL, R1MASK and R2VAL, R2MASK representing a values of type R1TYPE
and R2TYPE and set the value, mask pair *VAL and *MASK to the result. */
-static void
-bit_value_binop_1 (enum tree_code code, tree type,
- double_int *val, double_int *mask,
- tree r1type, double_int r1val, double_int r1mask,
- tree r2type, double_int r2val, double_int r2mask)
+void
+bit_value_binop (enum tree_code code, signop sgn, int width,
+ widest_int *val, widest_int *mask,
+ signop r1type_sgn, int r1type_precision,
+ const widest_int &r1val, const widest_int &r1mask,
+ signop r2type_sgn, int r2type_precision,
+ const widest_int &r2val, const widest_int &r2mask)
{
- bool uns = TYPE_UNSIGNED (type);
- /* Assume we'll get a constant result. Use an initial varying value,
- we fall back to varying in the end if necessary. */
- *mask = double_int_minus_one;
+ bool swap_p = false;
+
+ /* Assume we'll get a constant result. Use an initial non varying
+ value, we fall back to varying in the end if necessary. */
+ *mask = -1;
+
switch (code)
{
case BIT_AND_EXPR:
case LROTATE_EXPR:
case RROTATE_EXPR:
- if (r2mask.is_zero ())
+ if (r2mask == 0)
{
- HOST_WIDE_INT shift = r2val.low;
- if (code == RROTATE_EXPR)
- shift = -shift;
- *mask = r1mask.lrotate (shift, TYPE_PRECISION (type));
- *val = r1val.lrotate (shift, TYPE_PRECISION (type));
+ widest_int shift = r2val;
+ if (shift == 0)
+ {
+ *mask = r1mask;
+ *val = r1val;
+ }
+ else
+ {
+ if (wi::neg_p (shift))
+ {
+ shift = -shift;
+ if (code == RROTATE_EXPR)
+ code = LROTATE_EXPR;
+ else
+ code = RROTATE_EXPR;
+ }
+ if (code == RROTATE_EXPR)
+ {
+ *mask = wi::rrotate (r1mask, shift, width);
+ *val = wi::rrotate (r1val, shift, width);
+ }
+ else
+ {
+ *mask = wi::lrotate (r1mask, shift, width);
+ *val = wi::lrotate (r1val, shift, width);
+ }
+ }
}
break;
/* ??? We can handle partially known shift counts if we know
their sign. That way we can tell that (x << (y | 8)) & 255
is zero. */
- if (r2mask.is_zero ())
+ if (r2mask == 0)
{
- HOST_WIDE_INT shift = r2val.low;
- if (code == RSHIFT_EXPR)
- shift = -shift;
- /* We need to know if we are doing a left or a right shift
- to properly shift in zeros for left shift and unsigned
- right shifts and the sign bit for signed right shifts.
- For signed right shifts we shift in varying in case
- the sign bit was varying. */
- if (shift > 0)
- {
- *mask = r1mask.llshift (shift, TYPE_PRECISION (type));
- *val = r1val.llshift (shift, TYPE_PRECISION (type));
- }
- else if (shift < 0)
+ widest_int shift = r2val;
+ if (shift == 0)
{
- shift = -shift;
- *mask = r1mask.rshift (shift, TYPE_PRECISION (type), !uns);
- *val = r1val.rshift (shift, TYPE_PRECISION (type), !uns);
+ *mask = r1mask;
+ *val = r1val;
}
else
{
- *mask = r1mask;
- *val = r1val;
+ if (wi::neg_p (shift))
+ {
+ shift = -shift;
+ if (code == RSHIFT_EXPR)
+ code = LSHIFT_EXPR;
+ else
+ code = RSHIFT_EXPR;
+ }
+ if (code == RSHIFT_EXPR)
+ {
+ *mask = wi::rshift (wi::ext (r1mask, width, sgn), shift, sgn);
+ *val = wi::rshift (wi::ext (r1val, width, sgn), shift, sgn);
+ }
+ else
+ {
+ *mask = wi::ext (r1mask << shift, width, sgn);
+ *val = wi::ext (r1val << shift, width, sgn);
+ }
}
}
break;
case PLUS_EXPR:
case POINTER_PLUS_EXPR:
{
- double_int lo, hi;
/* Do the addition with unknown bits set to zero, to give carry-ins of
zero wherever possible. */
- lo = r1val.and_not (r1mask) + r2val.and_not (r2mask);
- lo = lo.ext (TYPE_PRECISION (type), uns);
+ widest_int lo = r1val.and_not (r1mask) + r2val.and_not (r2mask);
+ lo = wi::ext (lo, width, sgn);
/* Do the addition with unknown bits set to one, to give carry-ins of
one wherever possible. */
- hi = (r1val | r1mask) + (r2val | r2mask);
- hi = hi.ext (TYPE_PRECISION (type), uns);
+ widest_int hi = (r1val | r1mask) + (r2val | r2mask);
+ hi = wi::ext (hi, width, sgn);
/* Each bit in the result is known if (a) the corresponding bits in
both inputs are known, and (b) the carry-in to that bit position
is known. We can check condition (b) by seeing if we got the same
result with minimised carries as with maximised carries. */
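/* E.g. adding 0b??01 (value 0b0001, mask 0b1100) to a fully known
0b0010 gives lo == 0b0011 and hi == 0b1111: the low two result bits
are known to be one, the upper two bits remain unknown. */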
*mask = r1mask | r2mask | (lo ^ hi);
- *mask = (*mask).ext (TYPE_PRECISION (type), uns);
+ *mask = wi::ext (*mask, width, sgn);
/* It shouldn't matter whether we choose lo or hi here. */
*val = lo;
break;
case MINUS_EXPR:
{
- double_int temv, temm;
- bit_value_unop_1 (NEGATE_EXPR, r2type, &temv, &temm,
- r2type, r2val, r2mask);
- bit_value_binop_1 (PLUS_EXPR, type, val, mask,
- r1type, r1val, r1mask,
- r2type, temv, temm);
+ widest_int temv, temm;
+ bit_value_unop (NEGATE_EXPR, r2type_sgn, r2type_precision, &temv, &temm,
+ r2type_sgn, r2type_precision, r2val, r2mask);
+ bit_value_binop (PLUS_EXPR, sgn, width, val, mask,
+ r1type_sgn, r1type_precision, r1val, r1mask,
+ r2type_sgn, r2type_precision, temv, temm);
break;
}
{
/* Just track trailing zeros in both operands and transfer
them to the other. */
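/* E.g. a multiple of 4 times a multiple of 8 is a multiple of 32, so
at least the low five bits of the result are known to be zero. */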
- int r1tz = (r1val | r1mask).trailing_zeros ();
- int r2tz = (r2val | r2mask).trailing_zeros ();
- if (r1tz + r2tz >= HOST_BITS_PER_DOUBLE_INT)
+ int r1tz = wi::ctz (r1val | r1mask);
+ int r2tz = wi::ctz (r2val | r2mask);
+ if (r1tz + r2tz >= width)
{
- *mask = double_int_zero;
- *val = double_int_zero;
+ *mask = 0;
+ *val = 0;
}
else if (r1tz + r2tz > 0)
{
- *mask = ~double_int::mask (r1tz + r2tz);
- *mask = (*mask).ext (TYPE_PRECISION (type), uns);
- *val = double_int_zero;
+ *mask = wi::ext (wi::mask <widest_int> (r1tz + r2tz, true),
+ width, sgn);
+ *val = 0;
}
break;
}
case EQ_EXPR:
case NE_EXPR:
{
- double_int m = r1mask | r2mask;
+ widest_int m = r1mask | r2mask;
if (r1val.and_not (m) != r2val.and_not (m))
{
- *mask = double_int_zero;
- *val = ((code == EQ_EXPR) ? double_int_zero : double_int_one);
+ *mask = 0;
+ *val = ((code == EQ_EXPR) ? 0 : 1);
}
else
{
/* We know the result of a comparison is always one or zero. */
- *mask = double_int_one;
- *val = double_int_zero;
+ *mask = 1;
+ *val = 0;
}
break;
}
case GE_EXPR:
case GT_EXPR:
- {
- double_int tem = r1val;
- r1val = r2val;
- r2val = tem;
- tem = r1mask;
- r1mask = r2mask;
- r2mask = tem;
- code = swap_tree_comparison (code);
- }
- /* Fallthru. */
+ swap_p = true;
+ code = swap_tree_comparison (code);
+ /* Fall through. */
case LT_EXPR:
case LE_EXPR:
{
int minmax, maxmin;
+
+ const widest_int &o1val = swap_p ? r2val : r1val;
+ const widest_int &o1mask = swap_p ? r2mask : r1mask;
+ const widest_int &o2val = swap_p ? r1val : r2val;
+ const widest_int &o2mask = swap_p ? r1mask : r2mask;
+
/* If the most significant bits are not known we know nothing. */
- if (r1mask.is_negative () || r2mask.is_negative ())
+ if (wi::neg_p (o1mask) || wi::neg_p (o2mask))
break;
/* For comparisons the signedness is in the comparison operands. */
- uns = TYPE_UNSIGNED (r1type);
+ sgn = r1type_sgn;
/* If we know the most significant bits we can derive value ranges
by treating the varying bits as zero or one. Do a cross
comparison of the max/min pairs. */
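/* E.g. for o1 == 0b0??? (range [0, 7]) and a fully known o2 == 0b1000,
maxmin is negative, so the comparison is known to be true. */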
- maxmin = (r1val | r1mask).cmp (r2val.and_not (r2mask), uns);
- minmax = r1val.and_not (r1mask).cmp (r2val | r2mask, uns);
- if (maxmin < 0) /* r1 is less than r2. */
+ maxmin = wi::cmp (o1val | o1mask, o2val.and_not (o2mask), sgn);
+ minmax = wi::cmp (o1val.and_not (o1mask), o2val | o2mask, sgn);
+ if (maxmin < 0) /* o1 is less than o2. */
{
- *mask = double_int_zero;
- *val = double_int_one;
+ *mask = 0;
+ *val = 1;
}
- else if (minmax > 0) /* r1 is not less or equal to r2. */
+ else if (minmax > 0) /* o1 is not less or equal to o2. */
{
- *mask = double_int_zero;
- *val = double_int_zero;
+ *mask = 0;
+ *val = 0;
}
- else if (maxmin == minmax) /* r1 and r2 are equal. */
+ else if (maxmin == minmax) /* o1 and o2 are equal. */
{
/* This probably should never happen as we'd have
folded the thing during fully constant value folding. */
- *mask = double_int_zero;
- *val = (code == LE_EXPR ? double_int_one : double_int_zero);
+ *mask = 0;
+ *val = (code == LE_EXPR ? 1 : 0);
}
else
{
/* We know the result of a comparison is always one or zero. */
- *mask = double_int_one;
- *val = double_int_zero;
+ *mask = 1;
+ *val = 0;
}
break;
}
/* Return the propagation value when applying the operation CODE to
the value RHS yielding type TYPE. */
-static prop_value_t
+static ccp_prop_value_t
bit_value_unop (enum tree_code code, tree type, tree rhs)
{
- prop_value_t rval = get_value_for_expr (rhs, true);
- double_int value, mask;
- prop_value_t val;
+ ccp_prop_value_t rval = get_value_for_expr (rhs, true);
+ widest_int value, mask;
+ ccp_prop_value_t val;
if (rval.lattice_val == UNDEFINED)
return rval;
gcc_assert ((rval.lattice_val == CONSTANT
&& TREE_CODE (rval.value) == INTEGER_CST)
- || rval.mask.is_minus_one ());
- bit_value_unop_1 (code, type, &value, &mask,
- TREE_TYPE (rhs), value_to_double_int (rval), rval.mask);
- if (!mask.is_minus_one ())
+ || wi::sext (rval.mask, TYPE_PRECISION (TREE_TYPE (rhs))) == -1);
+ bit_value_unop (code, TYPE_SIGN (type), TYPE_PRECISION (type), &value, &mask,
+ TYPE_SIGN (TREE_TYPE (rhs)), TYPE_PRECISION (TREE_TYPE (rhs)),
+ value_to_wide_int (rval), rval.mask);
+ if (wi::sext (mask, TYPE_PRECISION (type)) != -1)
{
val.lattice_val = CONSTANT;
val.mask = mask;
/* ??? Delay building trees here. */
- val.value = double_int_to_tree (type, value);
+ val.value = wide_int_to_tree (type, value);
}
else
{
val.lattice_val = VARYING;
val.value = NULL_TREE;
- val.mask = double_int_minus_one;
+ val.mask = -1;
}
return val;
}
/* Return the propagation value when applying the operation CODE to
the values RHS1 and RHS2 yielding type TYPE. */
-static prop_value_t
+static ccp_prop_value_t
bit_value_binop (enum tree_code code, tree type, tree rhs1, tree rhs2)
{
- prop_value_t r1val = get_value_for_expr (rhs1, true);
- prop_value_t r2val = get_value_for_expr (rhs2, true);
- double_int value, mask;
- prop_value_t val;
+ ccp_prop_value_t r1val = get_value_for_expr (rhs1, true);
+ ccp_prop_value_t r2val = get_value_for_expr (rhs2, true);
+ widest_int value, mask;
+ ccp_prop_value_t val;
if (r1val.lattice_val == UNDEFINED
|| r2val.lattice_val == UNDEFINED)
{
val.lattice_val = VARYING;
val.value = NULL_TREE;
- val.mask = double_int_minus_one;
+ val.mask = -1;
return val;
}
gcc_assert ((r1val.lattice_val == CONSTANT
&& TREE_CODE (r1val.value) == INTEGER_CST)
- || r1val.mask.is_minus_one ());
+ || wi::sext (r1val.mask,
+ TYPE_PRECISION (TREE_TYPE (rhs1))) == -1);
gcc_assert ((r2val.lattice_val == CONSTANT
&& TREE_CODE (r2val.value) == INTEGER_CST)
- || r2val.mask.is_minus_one ());
- bit_value_binop_1 (code, type, &value, &mask,
- TREE_TYPE (rhs1), value_to_double_int (r1val), r1val.mask,
- TREE_TYPE (rhs2), value_to_double_int (r2val), r2val.mask);
- if (!mask.is_minus_one ())
+ || wi::sext (r2val.mask,
+ TYPE_PRECISION (TREE_TYPE (rhs2))) == -1);
+ bit_value_binop (code, TYPE_SIGN (type), TYPE_PRECISION (type), &value, &mask,
+ TYPE_SIGN (TREE_TYPE (rhs1)), TYPE_PRECISION (TREE_TYPE (rhs1)),
+ value_to_wide_int (r1val), r1val.mask,
+ TYPE_SIGN (TREE_TYPE (rhs2)), TYPE_PRECISION (TREE_TYPE (rhs2)),
+ value_to_wide_int (r2val), r2val.mask);
+
+ if (wi::sext (mask, TYPE_PRECISION (type)) != -1)
{
val.lattice_val = CONSTANT;
val.mask = mask;
/* ??? Delay building trees here. */
- val.value = double_int_to_tree (type, value);
+ val.value = wide_int_to_tree (type, value);
}
else
{
val.lattice_val = VARYING;
val.value = NULL_TREE;
- val.mask = double_int_minus_one;
+ val.mask = -1;
}
return val;
}
is false, for alloc_aligned attribute ATTR is non-NULL and
ALLOC_ALIGNED is true. */
-static prop_value_t
-bit_value_assume_aligned (gimple stmt, tree attr, prop_value_t ptrval,
+static ccp_prop_value_t
+bit_value_assume_aligned (gimple *stmt, tree attr, ccp_prop_value_t ptrval,
bool alloc_aligned)
{
tree align, misalign = NULL_TREE, type;
unsigned HOST_WIDE_INT aligni, misaligni = 0;
- prop_value_t alignval;
- double_int value, mask;
- prop_value_t val;
+ ccp_prop_value_t alignval;
+ widest_int value, mask;
+ ccp_prop_value_t val;
if (attr == NULL_TREE)
{
return ptrval;
gcc_assert ((ptrval.lattice_val == CONSTANT
&& TREE_CODE (ptrval.value) == INTEGER_CST)
- || ptrval.mask.is_minus_one ());
+ || wi::sext (ptrval.mask, TYPE_PRECISION (type)) == -1);
if (attr == NULL_TREE)
{
/* Get aligni and misaligni from __builtin_assume_aligned. */
align = build_int_cst_type (type, -aligni);
alignval = get_value_for_expr (align, true);
- bit_value_binop_1 (BIT_AND_EXPR, type, &value, &mask,
- type, value_to_double_int (ptrval), ptrval.mask,
- type, value_to_double_int (alignval), alignval.mask);
- if (!mask.is_minus_one ())
+ bit_value_binop (BIT_AND_EXPR, TYPE_SIGN (type), TYPE_PRECISION (type), &value, &mask,
+ TYPE_SIGN (type), TYPE_PRECISION (type), value_to_wide_int (ptrval), ptrval.mask,
+ TYPE_SIGN (type), TYPE_PRECISION (type), value_to_wide_int (alignval), alignval.mask);
+
+ if (wi::sext (mask, TYPE_PRECISION (type)) != -1)
{
val.lattice_val = CONSTANT;
val.mask = mask;
- gcc_assert ((mask.low & (aligni - 1)) == 0);
- gcc_assert ((value.low & (aligni - 1)) == 0);
- value.low |= misaligni;
+ gcc_assert ((mask.to_uhwi () & (aligni - 1)) == 0);
+ gcc_assert ((value.to_uhwi () & (aligni - 1)) == 0);
+ value |= misaligni;
/* ??? Delay building trees here. */
- val.value = double_int_to_tree (type, value);
+ val.value = wide_int_to_tree (type, value);
}
else
{
val.lattice_val = VARYING;
val.value = NULL_TREE;
- val.mask = double_int_minus_one;
+ val.mask = -1;
}
return val;
}
/* Evaluate statement STMT.
Valid only for assignments, calls, conditionals, and switches. */
-static prop_value_t
-evaluate_stmt (gimple stmt)
+static ccp_prop_value_t
+evaluate_stmt (gimple *stmt)
{
- prop_value_t val;
+ ccp_prop_value_t val;
tree simplified = NULL_TREE;
ccp_lattice_t likelyvalue = likely_value (stmt);
bool is_constant = false;
{
fold_defer_overflow_warnings ();
simplified = ccp_fold (stmt);
+ if (simplified
+ && TREE_CODE (simplified) == SSA_NAME)
+ {
+ /* We may not use values of something that may be simulated again,
+ see valueize_op_1. */
+ if (SSA_NAME_IS_DEFAULT_DEF (simplified)
+ || ! prop_simulate_again_p (SSA_NAME_DEF_STMT (simplified)))
+ {
+ ccp_prop_value_t *val = get_value (simplified);
+ if (val && val->lattice_val != VARYING)
+ {
+ fold_undefer_overflow_warnings (true, stmt, 0);
+ return *val;
+ }
+ }
+ else
+ /* We may also not place a non-valueized copy in the lattice
+ as that might become stale if we never re-visit this stmt. */
+ simplified = NULL_TREE;
+ }
is_constant = simplified && is_gimple_min_invariant (simplified);
fold_undefer_overflow_warnings (is_constant, stmt, 0);
if (is_constant)
/* The statement produced a constant value. */
val.lattice_val = CONSTANT;
val.value = simplified;
- val.mask = double_int_zero;
+ val.mask = 0;
+ return val;
}
}
/* If the statement is likely to have a VARYING result, then do not
simplified = gimple_assign_rhs1 (stmt);
}
else if (code == GIMPLE_SWITCH)
- simplified = gimple_switch_index (stmt);
+ simplified = gimple_switch_index (as_a <gswitch *> (stmt));
else
/* These cannot satisfy is_gimple_min_invariant without folding. */
gcc_assert (code == GIMPLE_CALL || code == GIMPLE_COND);
/* The statement produced a constant value. */
val.lattice_val = CONSTANT;
val.value = simplified;
- val.mask = double_int_zero;
+ val.mask = 0;
}
}
+ /* If the statement result is likely UNDEFINED, make it so. */
+ else if (likelyvalue == UNDEFINED)
+ {
+ val.lattice_val = UNDEFINED;
+ val.value = NULL_TREE;
+ val.mask = 0;
+ return val;
+ }
/* Resort to simplification for bitwise tracking. */
if (flag_tree_bit_ccp
- && (likelyvalue == CONSTANT || is_gimple_call (stmt))
+ && (likelyvalue == CONSTANT || is_gimple_call (stmt)
+ || (gimple_assign_single_p (stmt)
+ && gimple_assign_rhs_code (stmt) == ADDR_EXPR))
&& !is_constant)
{
enum gimple_code code = gimple_code (stmt);
val.lattice_val = VARYING;
val.value = NULL_TREE;
- val.mask = double_int_minus_one;
+ val.mask = -1;
if (code == GIMPLE_ASSIGN)
{
enum tree_code subcode = gimple_assign_rhs_code (stmt);
tree rhs1 = gimple_assign_rhs1 (stmt);
- switch (get_gimple_rhs_class (subcode))
- {
- case GIMPLE_SINGLE_RHS:
- if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
- || POINTER_TYPE_P (TREE_TYPE (rhs1)))
- val = get_value_for_expr (rhs1, true);
- break;
+ tree lhs = gimple_assign_lhs (stmt);
+ if ((INTEGRAL_TYPE_P (TREE_TYPE (lhs))
+ || POINTER_TYPE_P (TREE_TYPE (lhs)))
+ && (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
+ || POINTER_TYPE_P (TREE_TYPE (rhs1))))
+ switch (get_gimple_rhs_class (subcode))
+ {
+ case GIMPLE_SINGLE_RHS:
+ val = get_value_for_expr (rhs1, true);
+ break;
- case GIMPLE_UNARY_RHS:
- if ((INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
- || POINTER_TYPE_P (TREE_TYPE (rhs1)))
- && (INTEGRAL_TYPE_P (gimple_expr_type (stmt))
- || POINTER_TYPE_P (gimple_expr_type (stmt))))
- val = bit_value_unop (subcode, gimple_expr_type (stmt), rhs1);
- break;
+ case GIMPLE_UNARY_RHS:
+ val = bit_value_unop (subcode, TREE_TYPE (lhs), rhs1);
+ break;
- case GIMPLE_BINARY_RHS:
- if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
- || POINTER_TYPE_P (TREE_TYPE (rhs1)))
- {
- tree lhs = gimple_assign_lhs (stmt);
- tree rhs2 = gimple_assign_rhs2 (stmt);
- val = bit_value_binop (subcode,
- TREE_TYPE (lhs), rhs1, rhs2);
- }
- break;
+ case GIMPLE_BINARY_RHS:
+ val = bit_value_binop (subcode, TREE_TYPE (lhs), rhs1,
+ gimple_assign_rhs2 (stmt));
+ break;
- default:;
- }
+ default:;
+ }
}
else if (code == GIMPLE_COND)
{
case BUILT_IN_STRNDUP:
val.lattice_val = CONSTANT;
val.value = build_int_cst (TREE_TYPE (gimple_get_lhs (stmt)), 0);
- val.mask = double_int::from_shwi
- (~(((HOST_WIDE_INT) MALLOC_ABI_ALIGNMENT)
- / BITS_PER_UNIT - 1));
+ val.mask = ~((HOST_WIDE_INT) MALLOC_ABI_ALIGNMENT
+ / BITS_PER_UNIT - 1);
break;
case BUILT_IN_ALLOCA:
: BIGGEST_ALIGNMENT);
val.lattice_val = CONSTANT;
val.value = build_int_cst (TREE_TYPE (gimple_get_lhs (stmt)), 0);
- val.mask = double_int::from_shwi (~(((HOST_WIDE_INT) align)
- / BITS_PER_UNIT - 1));
+ val.mask = ~((HOST_WIDE_INT) align / BITS_PER_UNIT - 1);
break;
/* These builtins return their first argument, unmodified. */
val = bit_value_assume_aligned (stmt, NULL_TREE, val, false);
break;
+ case BUILT_IN_ALIGNED_ALLOC:
+ {
+ tree align = get_constant_value (gimple_call_arg (stmt, 0));
+ if (align
+ && tree_fits_uhwi_p (align))
+ {
+ unsigned HOST_WIDE_INT aligni = tree_to_uhwi (align);
+ if (aligni > 1
+ /* align must be power-of-two */
+ && (aligni & (aligni - 1)) == 0)
+ {
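+ /* E.g. aligni == 16 yields mask == -16: the low four bits of the
+ returned pointer are known to be zero. */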
+ val.lattice_val = CONSTANT;
+ val.value = build_int_cst (ptr_type_node, 0);
+ val.mask = -aligni;
+ }
+ }
+ break;
+ }
+
default:;
}
}
if (flag_tree_bit_ccp
&& ((is_constant && TREE_CODE (val.value) == INTEGER_CST)
- || (!is_constant && likelyvalue != UNDEFINED))
+ || !is_constant)
&& gimple_get_lhs (stmt)
&& TREE_CODE (gimple_get_lhs (stmt)) == SSA_NAME)
{
tree lhs = gimple_get_lhs (stmt);
- double_int nonzero_bits = get_nonzero_bits (lhs);
- double_int mask = double_int::mask (TYPE_PRECISION (TREE_TYPE (lhs)));
- if (nonzero_bits != double_int_minus_one && nonzero_bits != mask)
+ wide_int nonzero_bits = get_nonzero_bits (lhs);
+ if (nonzero_bits != -1)
{
if (!is_constant)
{
val.lattice_val = CONSTANT;
val.value = build_zero_cst (TREE_TYPE (lhs));
- /* CCP wants the bits above precision set. */
- val.mask = nonzero_bits | ~mask;
+ val.mask = extend_mask (nonzero_bits, TYPE_SIGN (TREE_TYPE (lhs)));
is_constant = true;
}
else
{
- double_int valv = tree_to_double_int (val.value);
- if (!(valv & ~nonzero_bits & mask).is_zero ())
- val.value = double_int_to_tree (TREE_TYPE (lhs),
- valv & nonzero_bits);
- if (nonzero_bits.is_zero ())
- val.mask = double_int_zero;
+ if (wi::bit_and_not (val.value, nonzero_bits) != 0)
+ val.value = wide_int_to_tree (TREE_TYPE (lhs),
+ nonzero_bits & val.value);
+ if (nonzero_bits == 0)
+ val.mask = 0;
else
- val.mask = val.mask & (nonzero_bits | ~mask);
+ val.mask = val.mask & extend_mask (nonzero_bits,
+ TYPE_SIGN (TREE_TYPE (lhs)));
}
}
}
+ /* The statement produced a nonconstant value. */
if (!is_constant)
{
- /* The statement produced a nonconstant value. If the statement
- had UNDEFINED operands, then the result of the statement
- should be UNDEFINED. Otherwise, the statement is VARYING. */
- if (likelyvalue == UNDEFINED)
+ /* The statement produced a copy. */
+ if (simplified && TREE_CODE (simplified) == SSA_NAME
+ && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (simplified))
{
- val.lattice_val = likelyvalue;
- val.mask = double_int_zero;
+ val.lattice_val = CONSTANT;
+ val.value = simplified;
+ val.mask = -1;
}
+ /* The statement is VARYING. */
else
{
val.lattice_val = VARYING;
- val.mask = double_int_minus_one;
+ val.value = NULL_TREE;
+ val.mask = -1;
}
-
- val.value = NULL_TREE;
}
return val;
}
-typedef hash_table <pointer_hash <gimple_statement_base> > gimple_htab;
+typedef hash_table<nofree_ptr_hash<gimple> > gimple_htab;
/* Given a BUILT_IN_STACK_SAVE value SAVED_VAL, insert a clobber of VAR before
each matching BUILT_IN_STACK_RESTORE. Mark visited phis in VISITED. */
static void
insert_clobber_before_stack_restore (tree saved_val, tree var,
- gimple_htab *visited)
+ gimple_htab **visited)
{
- gimple stmt, clobber_stmt;
+ gimple *stmt;
+ gassign *clobber_stmt;
tree clobber;
imm_use_iterator iter;
gimple_stmt_iterator i;
- gimple *slot;
+ gimple **slot;
FOR_EACH_IMM_USE_STMT (stmt, iter, saved_val)
if (gimple_call_builtin_p (stmt, BUILT_IN_STACK_RESTORE))
}
else if (gimple_code (stmt) == GIMPLE_PHI)
{
- if (!visited->is_created ())
- visited->create (10);
+ if (!*visited)
+ *visited = new gimple_htab (10);
- slot = visited->find_slot (stmt, INSERT);
+ slot = (*visited)->find_slot (stmt, INSERT);
if (*slot != NULL)
continue;
else if (gimple_assign_ssa_name_copy_p (stmt))
insert_clobber_before_stack_restore (gimple_assign_lhs (stmt), var,
visited);
+ else if (chkp_gimple_call_builtin_p (stmt, BUILT_IN_CHKP_BNDRET))
+ continue;
else
gcc_assert (is_gimple_debug (stmt));
}
static void
insert_clobbers_for_var (gimple_stmt_iterator i, tree var)
{
- gimple stmt;
+ gimple *stmt;
tree saved_val;
- gimple_htab visited;
+ gimple_htab *visited = NULL;
for (; !gsi_end_p (i); gsi_prev_dom_bb_nondebug (&i))
{
break;
}
- if (visited.is_created ())
- visited.dispose ();
+ delete visited;
}
/* Detects a __builtin_alloca_with_align with constant size argument. Declares
NULL_TREE. */
static tree
-fold_builtin_alloca_with_align (gimple stmt)
+fold_builtin_alloca_with_align (gimple *stmt)
{
unsigned HOST_WIDE_INT size, threshold, n_elem;
tree lhs, arg, block, var, elem_type, array_type;
as a declared array, so we allow a larger size. */
block = gimple_block (stmt);
if (!(cfun->after_inlining
+ && block
&& TREE_CODE (BLOCK_SUPERCONTEXT (block)) == FUNCTION_DECL))
threshold /= 10;
if (size > threshold)
elem_type = build_nonstandard_integer_type (BITS_PER_UNIT, 1);
n_elem = size * 8 / BITS_PER_UNIT;
array_type = build_array_type_nelts (elem_type, n_elem);
- var = create_tmp_var (array_type, NULL);
- DECL_ALIGN (var) = TREE_INT_CST_LOW (gimple_call_arg (stmt, 1));
+ var = create_tmp_var (array_type);
+ SET_DECL_ALIGN (var, TREE_INT_CST_LOW (gimple_call_arg (stmt, 1)));
{
struct ptr_info_def *pi = SSA_NAME_PTR_INFO (lhs);
if (pi != NULL && !pi->pt.anything)
{
bool singleton_p;
unsigned uid;
- singleton_p = pt_solution_singleton_p (&pi->pt, &uid);
+ singleton_p = pt_solution_singleton_or_null_p (&pi->pt, &uid);
gcc_assert (singleton_p);
SET_DECL_PT_UID (var, uid);
}
static bool
ccp_fold_stmt (gimple_stmt_iterator *gsi)
{
- gimple stmt = gsi_stmt (*gsi);
+ gimple *stmt = gsi_stmt (*gsi);
switch (gimple_code (stmt))
{
case GIMPLE_COND:
{
- prop_value_t val;
+ gcond *cond_stmt = as_a <gcond *> (stmt);
+ ccp_prop_value_t val;
/* Statement evaluation will handle type mismatches in constants
more gracefully than the final propagation. This allows us to
fold more conditionals here. */
val = evaluate_stmt (stmt);
if (val.lattice_val != CONSTANT
- || !val.mask.is_zero ())
+ || val.mask != 0)
return false;
if (dump_file)
}
if (integer_zerop (val.value))
- gimple_cond_make_false (stmt);
+ gimple_cond_make_false (cond_stmt);
else
- gimple_cond_make_true (stmt);
+ gimple_cond_make_true (cond_stmt);
return true;
}
are handled here. */
static enum ssa_prop_result
-visit_assignment (gimple stmt, tree *output_p)
+visit_assignment (gimple *stmt, tree *output_p)
{
- prop_value_t val;
- enum ssa_prop_result retval;
+ ccp_prop_value_t val;
+ enum ssa_prop_result retval = SSA_PROP_NOT_INTERESTING;
tree lhs = gimple_get_lhs (stmt);
-
- gcc_assert (gimple_code (stmt) != GIMPLE_CALL
- || gimple_call_lhs (stmt) != NULL_TREE);
-
- if (gimple_assign_single_p (stmt)
- && gimple_assign_rhs_code (stmt) == SSA_NAME)
- /* For a simple copy operation, we copy the lattice values. */
- val = *get_value (gimple_assign_rhs1 (stmt));
- else
- /* Evaluate the statement, which could be
- either a GIMPLE_ASSIGN or a GIMPLE_CALL. */
- val = evaluate_stmt (stmt);
-
- retval = SSA_PROP_NOT_INTERESTING;
-
- /* Set the lattice value of the statement's output. */
if (TREE_CODE (lhs) == SSA_NAME)
{
+ /* Evaluate the statement, which could be
+ either a GIMPLE_ASSIGN or a GIMPLE_CALL. */
+ val = evaluate_stmt (stmt);
+
/* If STMT is an assignment to an SSA_NAME, we only have one
value to set. */
- if (set_lattice_value (lhs, val))
+ if (set_lattice_value (lhs, &val))
{
*output_p = lhs;
if (val.lattice_val == VARYING)
SSA_PROP_VARYING. */
static enum ssa_prop_result
-visit_cond_stmt (gimple stmt, edge *taken_edge_p)
+visit_cond_stmt (gimple *stmt, edge *taken_edge_p)
{
- prop_value_t val;
+ ccp_prop_value_t val;
basic_block block;
block = gimple_bb (stmt);
val = evaluate_stmt (stmt);
if (val.lattice_val != CONSTANT
- || !val.mask.is_zero ())
+ || val.mask != 0)
return SSA_PROP_VARYING;
/* Find which edge out of the conditional block will be taken and add it
value, return SSA_PROP_VARYING. */
static enum ssa_prop_result
-ccp_visit_stmt (gimple stmt, edge *taken_edge_p, tree *output_p)
+ccp_visit_stmt (gimple *stmt, edge *taken_edge_p, tree *output_p)
{
tree def;
ssa_op_iter iter;
SSA_NAMEs represent unknown modifications to their outputs.
Mark them VARYING. */
FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_ALL_DEFS)
- {
- prop_value_t v = { VARYING, NULL_TREE, { -1, (HOST_WIDE_INT) -1 } };
- set_lattice_value (def, v);
- }
+ set_value_varying (def);
return SSA_PROP_VARYING;
}
-/* Main entry point for SSA Conditional Constant Propagation. */
+/* Main entry point for SSA Conditional Constant Propagation. If NONZERO_P,
+ record nonzero bits. */
static unsigned int
-do_ssa_ccp (void)
+do_ssa_ccp (bool nonzero_p)
{
unsigned int todo = 0;
calculate_dominance_info (CDI_DOMINATORS);
+
ccp_initialize ();
ssa_propagate (ccp_visit_stmt, ccp_visit_phi_node);
- if (ccp_finalize ())
- todo = (TODO_cleanup_cfg | TODO_update_ssa);
- free_dominance_info (CDI_DOMINATORS);
- return todo;
-}
+ if (ccp_finalize (nonzero_p || flag_ipa_bit_cp))
+ {
+ todo = (TODO_cleanup_cfg | TODO_update_ssa);
+ /* ccp_finalize does not preserve loop-closed ssa. */
+ loops_state_clear (LOOP_CLOSED_SSA);
+ }
-static bool
-gate_ccp (void)
-{
- return flag_tree_ccp != 0;
+ free_dominance_info (CDI_DOMINATORS);
+ return todo;
}
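+
+/* A hedged note on the NONZERO_P path: when it is set (or -fipa-bit-cp is
+ in effect), ccp_finalize above also records the per-bit knowledge CCP
+ gathered (which bits of each SSA name are known to be zero) into the
+ names' nonzero-bits/range info, presumably via set_nonzero_bits, so that
+ later passes can reuse it.  */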
GIMPLE_PASS, /* type */
"ccp", /* name */
OPTGROUP_NONE, /* optinfo_flags */
- true, /* has_execute */
TV_TREE_CCP, /* tv_id */
( PROP_cfg | PROP_ssa ), /* properties_required */
0, /* properties_provided */
0, /* properties_destroyed */
0, /* todo_flags_start */
- ( TODO_verify_ssa | TODO_update_address_taken
- | TODO_verify_stmts ), /* todo_flags_finish */
+ TODO_update_address_taken, /* todo_flags_finish */
};
class pass_ccp : public gimple_opt_pass
{
public:
pass_ccp (gcc::context *ctxt)
- : gimple_opt_pass (pass_data_ccp, ctxt)
+ : gimple_opt_pass (pass_data_ccp, ctxt), nonzero_p (false)
{}
/* opt_pass methods: */
opt_pass * clone () { return new pass_ccp (m_ctxt); }
- bool gate () { return gate_ccp (); }
- unsigned int execute () { return do_ssa_ccp (); }
+ void set_pass_param (unsigned int n, bool param)
+ {
+ gcc_assert (n == 0);
+ nonzero_p = param;
+ }
+ virtual bool gate (function *) { return flag_tree_ccp != 0; }
+ virtual unsigned int execute (function *) { return do_ssa_ccp (nonzero_p); }
+ private:
+ /* Determines whether the pass instance records nonzero bits. */
+ bool nonzero_p;
}; // class pass_ccp
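+
+/* A hedged usage sketch, not part of this file: the NONZERO_P parameter is
+ supplied by the pass manager through set_pass_param, typically from
+ passes.def entries along the lines of
+     NEXT_PASS (pass_ccp, false);
+     ...
+     NEXT_PASS (pass_ccp, true);
+ so that only a late CCP instance spends the extra work of recording
+ nonzero bits.  */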
} // anon namespace
optimize_stack_restore (gimple_stmt_iterator i)
{
tree callee;
- gimple stmt;
+ gimple *stmt;
basic_block bb = gsi_bb (i);
- gimple call = gsi_stmt (i);
+ gimple *call = gsi_stmt (i);
if (gimple_code (call) != GIMPLE_CALL
|| gimple_call_num_args (call) != 1
or not is irrelevant to removing the call to __builtin_stack_restore. */
if (has_single_use (gimple_call_arg (call, 0)))
{
- gimple stack_save = SSA_NAME_DEF_STMT (gimple_call_arg (call, 0));
+ gimple *stack_save = SSA_NAME_DEF_STMT (gimple_call_arg (call, 0));
if (is_gimple_call (stack_save))
{
callee = gimple_call_fndecl (stack_save);
pointer assignment. */
static tree
-optimize_stdarg_builtin (gimple call)
+optimize_stdarg_builtin (gimple *call)
{
tree callee, lhs, rhs, cfun_va_list;
bool va_list_simple_ptr;
{
basic_block bb = gsi_bb (i);
gimple_stmt_iterator gsi;
- gimple stmt;
+ gimple *stmt;
edge_iterator ei;
edge e;
bool ret;
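+ /* A hedged rationale for the check below: with -fsanitize=unreachable the
+ __builtin_unreachable call is expected to be turned into a sanitizer
+ diagnostic later on, so the paths reaching it must be kept rather than
+ optimized away here.  */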
+ if (flag_sanitize & SANITIZE_UNREACHABLE)
+ return false;
+
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
{
stmt = gsi_stmt (gsi);
if (is_gimple_debug (stmt))
continue;
- if (gimple_code (stmt) == GIMPLE_LABEL)
+ if (glabel *label_stmt = dyn_cast <glabel *> (stmt))
{
/* Verify we do not need to preserve the label. */
- if (FORCED_LABEL (gimple_label_label (stmt)))
+ if (FORCED_LABEL (gimple_label_label (label_stmt)))
return false;
continue;
continue;
stmt = gsi_stmt (gsi);
- if (gimple_code (stmt) == GIMPLE_COND)
+ if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
{
if (e->flags & EDGE_TRUE_VALUE)
- gimple_cond_make_false (stmt);
+ gimple_cond_make_false (cond_stmt);
else if (e->flags & EDGE_FALSE_VALUE)
- gimple_cond_make_true (stmt);
+ gimple_cond_make_true (cond_stmt);
else
gcc_unreachable ();
- update_stmt (stmt);
+ update_stmt (cond_stmt);
}
else
{
- /* Todo: handle other cases, f.i. switch statement. */
+ /* Todo: handle other cases. Note that unreachable switch case
+ statements have already been removed. */
continue;
}
return ret;
}
+/* Optimize
+ mask_2 = 1 << cnt_1;
+ _4 = __atomic_fetch_or_* (ptr_6, mask_2, _3);
+ _5 = _4 & mask_2;
+ to
+ _4 = ATOMIC_BIT_TEST_AND_SET (ptr_6, cnt_1, 0, _3);
+ _5 = _4;
+ If _5 is only used in _5 != 0 or _5 == 0 comparisons, 1
+ is passed instead of 0, and the builtin just returns a zero
+ or 1 value instead of the actual bit.
+ Similarly for __sync_fetch_and_or_* (without the ", _3" part
+ in there), and/or if mask_2 is a power of 2 constant.
+ Similarly for xor instead of or, use ATOMIC_BIT_TEST_AND_COMPLEMENT
+ in that case. And similarly for and instead of or, except that
+ the second argument to the builtin needs to be one's complement
+ of the mask instead of mask. */
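+
+/* A hedged source-level illustration (hypothetical user code, not taken
+ from this change): something like
+   unsigned long w; int n;
+   if (__atomic_fetch_or (&w, 1ul << n, __ATOMIC_SEQ_CST) & (1ul << n))
+     ...
+ matches the shape above: the mask is 1 shifted by a count, and the result
+ of the AND is only compared against zero, so the fetch_or can become an
+ ATOMIC_BIT_TEST_AND_SET internal call when the target supports it.  */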
+
+static void
+optimize_atomic_bit_test_and (gimple_stmt_iterator *gsip,
+ enum internal_fn fn, bool has_model_arg,
+ bool after)
+{
+ gimple *call = gsi_stmt (*gsip);
+ tree lhs = gimple_call_lhs (call);
+ use_operand_p use_p;
+ gimple *use_stmt;
+ tree mask, bit;
+ optab optab;
+
+ if (!flag_inline_atomics
+ || optimize_debug
+ || !gimple_call_builtin_p (call, BUILT_IN_NORMAL)
+ || !lhs
+ || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs)
+ || !single_imm_use (lhs, &use_p, &use_stmt)
+ || !is_gimple_assign (use_stmt)
+ || gimple_assign_rhs_code (use_stmt) != BIT_AND_EXPR
+ || !gimple_vdef (call))
+ return;
+
+ switch (fn)
+ {
+ case IFN_ATOMIC_BIT_TEST_AND_SET:
+ optab = atomic_bit_test_and_set_optab;
+ break;
+ case IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT:
+ optab = atomic_bit_test_and_complement_optab;
+ break;
+ case IFN_ATOMIC_BIT_TEST_AND_RESET:
+ optab = atomic_bit_test_and_reset_optab;
+ break;
+ default:
+ return;
+ }
+
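+ /* The rewrite is only attempted when the target provides an insn pattern
+ for the chosen atomic bit-test-and-* operation; otherwise the original
+ fetch-and-op form is kept.  */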
+ if (optab_handler (optab, TYPE_MODE (TREE_TYPE (lhs))) == CODE_FOR_nothing)
+ return;
+
+ mask = gimple_call_arg (call, 1);
+ tree use_lhs = gimple_assign_lhs (use_stmt);
+ if (!use_lhs)
+ return;
+
+ if (TREE_CODE (mask) == INTEGER_CST)
+ {
+ if (fn == IFN_ATOMIC_BIT_TEST_AND_RESET)
+ mask = const_unop (BIT_NOT_EXPR, TREE_TYPE (mask), mask);
+ mask = fold_convert (TREE_TYPE (lhs), mask);
+ int ibit = tree_log2 (mask);
+ if (ibit < 0)
+ return;
+ bit = build_int_cst (TREE_TYPE (lhs), ibit);
+ }
+ else if (TREE_CODE (mask) == SSA_NAME)
+ {
+ gimple *g = SSA_NAME_DEF_STMT (mask);
+ if (fn == IFN_ATOMIC_BIT_TEST_AND_RESET)
+ {
+ if (!is_gimple_assign (g)
+ || gimple_assign_rhs_code (g) != BIT_NOT_EXPR)
+ return;
+ mask = gimple_assign_rhs1 (g);
+ if (TREE_CODE (mask) != SSA_NAME)
+ return;
+ g = SSA_NAME_DEF_STMT (mask);
+ }
+ if (!is_gimple_assign (g)
+ || gimple_assign_rhs_code (g) != LSHIFT_EXPR
+ || !integer_onep (gimple_assign_rhs1 (g)))
+ return;
+ bit = gimple_assign_rhs2 (g);
+ }
+ else
+ return;
+
+ if (gimple_assign_rhs1 (use_stmt) == lhs)
+ {
+ if (!operand_equal_p (gimple_assign_rhs2 (use_stmt), mask, 0))
+ return;
+ }
+ else if (gimple_assign_rhs2 (use_stmt) != lhs
+ || !operand_equal_p (gimple_assign_rhs1 (use_stmt), mask, 0))
+ return;
+
+ bool use_bool = true;
+ bool has_debug_uses = false;
+ imm_use_iterator iter;
+ gimple *g;
+
+ if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (use_lhs))
+ use_bool = false;
+ FOR_EACH_IMM_USE_STMT (g, iter, use_lhs)
+ {
+ enum tree_code code = ERROR_MARK;
+ tree op0 = NULL_TREE, op1 = NULL_TREE;
+ if (is_gimple_debug (g))
+ {
+ has_debug_uses = true;
+ continue;
+ }
+ else if (is_gimple_assign (g))
+ switch (gimple_assign_rhs_code (g))
+ {
+ case COND_EXPR:
+ op1 = gimple_assign_rhs1 (g);
+ code = TREE_CODE (op1);
+ op0 = TREE_OPERAND (op1, 0);
+ op1 = TREE_OPERAND (op1, 1);
+ break;
+ case EQ_EXPR:
+ case NE_EXPR:
+ code = gimple_assign_rhs_code (g);
+ op0 = gimple_assign_rhs1 (g);
+ op1 = gimple_assign_rhs2 (g);
+ break;
+ default:
+ break;
+ }
+ else if (gimple_code (g) == GIMPLE_COND)
+ {
+ code = gimple_cond_code (g);
+ op0 = gimple_cond_lhs (g);
+ op1 = gimple_cond_rhs (g);
+ }
+
+ if ((code == EQ_EXPR || code == NE_EXPR)
+ && op0 == use_lhs
+ && integer_zerop (op1))
+ {
+ use_operand_p use_p;
+ int n = 0;
+ FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
+ n++;
+ if (n == 1)
+ continue;
+ }
+
+ use_bool = false;
+ BREAK_FROM_IMM_USE_STMT (iter);
+ }
+
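+ /* At this point USE_BOOL is true only if every non-debug use of the
+ BIT_AND result is an equality comparison against zero; in that case the
+ internal call built below is passed a flag of 1 and returns a plain 0/1
+ value instead of the bit at its original position.  */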
+ tree new_lhs = make_ssa_name (TREE_TYPE (lhs));
+ tree flag = build_int_cst (TREE_TYPE (lhs), use_bool);
+ if (has_model_arg)
+ g = gimple_build_call_internal (fn, 4, gimple_call_arg (call, 0),
+ bit, flag, gimple_call_arg (call, 2));
+ else
+ g = gimple_build_call_internal (fn, 3, gimple_call_arg (call, 0),
+ bit, flag);
+ gimple_call_set_lhs (g, new_lhs);
+ gimple_set_location (g, gimple_location (call));
+ gimple_set_vuse (g, gimple_vuse (call));
+ gimple_set_vdef (g, gimple_vdef (call));
+ bool throws = stmt_can_throw_internal (call);
+ gimple_call_set_nothrow (as_a <gcall *> (g),
+ gimple_call_nothrow_p (as_a <gcall *> (call)));
+ SSA_NAME_DEF_STMT (gimple_vdef (call)) = g;
+ gimple_stmt_iterator gsi = *gsip;
+ gsi_insert_after (&gsi, g, GSI_NEW_STMT);
+ edge e = NULL;
+ if (throws)
+ {
+ maybe_clean_or_replace_eh_stmt (call, g);
+ if (after || (use_bool && has_debug_uses))
+ e = find_fallthru_edge (gsi_bb (gsi)->succs);
+ }
+ if (after)
+ {
+ /* The internal function returns the value of the specified bit
+ before the atomic operation. If we are interested in the value
+ of the specified bit after the atomic operation (this only makes
+ sense for xor; otherwise the bit content is known at compile time),
+ we need to invert the bit. */
+ g = gimple_build_assign (make_ssa_name (TREE_TYPE (lhs)),
+ BIT_XOR_EXPR, new_lhs,
+ use_bool ? build_int_cst (TREE_TYPE (lhs), 1)
+ : mask);
+ new_lhs = gimple_assign_lhs (g);
+ if (throws)
+ {
+ gsi_insert_on_edge_immediate (e, g);
+ gsi = gsi_for_stmt (g);
+ }
+ else
+ gsi_insert_after (&gsi, g, GSI_NEW_STMT);
+ }
+ if (use_bool && has_debug_uses)
+ {
+ tree temp = NULL_TREE;
+ if (!throws || after || single_pred_p (e->dest))
+ {
+ temp = make_node (DEBUG_EXPR_DECL);
+ DECL_ARTIFICIAL (temp) = 1;
+ TREE_TYPE (temp) = TREE_TYPE (lhs);
+ SET_DECL_MODE (temp, TYPE_MODE (TREE_TYPE (lhs)));
+ tree t = build2 (LSHIFT_EXPR, TREE_TYPE (lhs), new_lhs, bit);
+ g = gimple_build_debug_bind (temp, t, g);
+ if (throws && !after)
+ {
+ gsi = gsi_after_labels (e->dest);
+ gsi_insert_before (&gsi, g, GSI_SAME_STMT);
+ }
+ else
+ gsi_insert_after (&gsi, g, GSI_NEW_STMT);
+ }
+ FOR_EACH_IMM_USE_STMT (g, iter, use_lhs)
+ if (is_gimple_debug (g))
+ {
+ use_operand_p use_p;
+ if (temp == NULL_TREE)
+ gimple_debug_bind_reset_value (g);
+ else
+ FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
+ SET_USE (use_p, temp);
+ update_stmt (g);
+ }
+ }
+ SSA_NAME_OCCURS_IN_ABNORMAL_PHI (new_lhs)
+ = SSA_NAME_OCCURS_IN_ABNORMAL_PHI (use_lhs);
+ replace_uses_by (use_lhs, new_lhs);
+ gsi = gsi_for_stmt (use_stmt);
+ gsi_remove (&gsi, true);
+ release_defs (use_stmt);
+ gsi_remove (gsip, true);
+ release_ssa_name (lhs);
+}
+
+/* Optimize
+ a = {};
+ b = a;
+ into
+ a = {};
+ b = {};
+ Similarly for memset (&a, ..., sizeof (a)); instead of a = {};
+ and/or memcpy (&b, &a, sizeof (a)); instead of b = a; */
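+
+/* A source-level illustration with hypothetical code: given
+     struct S a, b;
+     memset (&a, 0, sizeof (a));
+     b = a;
+   every byte the copy reads is known to be zero, so the assignment can be
+   rewritten as an empty-CONSTRUCTOR store, effectively b = {};.  */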
+
+static void
+optimize_memcpy (gimple_stmt_iterator *gsip, tree dest, tree src, tree len)
+{
+ gimple *stmt = gsi_stmt (*gsip);
+ if (gimple_has_volatile_ops (stmt))
+ return;
+
+ tree vuse = gimple_vuse (stmt);
+ if (vuse == NULL)
+ return;
+
+ gimple *defstmt = SSA_NAME_DEF_STMT (vuse);
+ tree src2 = NULL_TREE, len2 = NULL_TREE;
+ HOST_WIDE_INT offset, offset2;
+ tree val = integer_zero_node;
+ if (gimple_store_p (defstmt)
+ && gimple_assign_single_p (defstmt)
+ && TREE_CODE (gimple_assign_rhs1 (defstmt)) == CONSTRUCTOR
+ && !gimple_clobber_p (defstmt))
+ src2 = gimple_assign_lhs (defstmt);
+ else if (gimple_call_builtin_p (defstmt, BUILT_IN_MEMSET)
+ && TREE_CODE (gimple_call_arg (defstmt, 0)) == ADDR_EXPR
+ && TREE_CODE (gimple_call_arg (defstmt, 1)) == INTEGER_CST)
+ {
+ src2 = TREE_OPERAND (gimple_call_arg (defstmt, 0), 0);
+ len2 = gimple_call_arg (defstmt, 2);
+ val = gimple_call_arg (defstmt, 1);
+ /* For non-0 val, we'd have to transform stmt from assignment
+ into memset (only if dest is addressable). */
+ if (!integer_zerop (val) && is_gimple_assign (stmt))
+ src2 = NULL_TREE;
+ }
+
+ if (src2 == NULL_TREE)
+ return;
+
+ if (len == NULL_TREE)
+ len = (TREE_CODE (src) == COMPONENT_REF
+ ? DECL_SIZE_UNIT (TREE_OPERAND (src, 1))
+ : TYPE_SIZE_UNIT (TREE_TYPE (src)));
+ if (len2 == NULL_TREE)
+ len2 = (TREE_CODE (src2) == COMPONENT_REF
+ ? DECL_SIZE_UNIT (TREE_OPERAND (src2, 1))
+ : TYPE_SIZE_UNIT (TREE_TYPE (src2)));
+ if (len == NULL_TREE
+ || TREE_CODE (len) != INTEGER_CST
+ || len2 == NULL_TREE
+ || TREE_CODE (len2) != INTEGER_CST)
+ return;
+
+ src = get_addr_base_and_unit_offset (src, &offset);
+ src2 = get_addr_base_and_unit_offset (src2, &offset2);
+ if (src == NULL_TREE
+ || src2 == NULL_TREE
+ || offset < offset2)
+ return;
+
+ if (!operand_equal_p (src, src2, 0))
+ return;
+
+ /* [ src + offset2, src + offset2 + len2 - 1 ] is set to val.
+ Make sure that
+ [ src + offset, src + offset + len - 1 ] is a subset of that. */
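+ /* A worked instance with hypothetical numbers: offset2 == 0, len2 == 16,
+ offset == 4 and len == 8 give 8 + (4 - 0) == 12 <= 16, so every byte
+ read by STMT lies within the region DEFSTMT set to VAL.  */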
+ if (wi::to_offset (len) + (offset - offset2) > wi::to_offset (len2))
+ return;
+
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ {
+ fprintf (dump_file, "Simplified\n ");
+ print_gimple_stmt (dump_file, stmt, 0, dump_flags);
+ fprintf (dump_file, "after previous\n ");
+ print_gimple_stmt (dump_file, defstmt, 0, dump_flags);
+ }
+
+ /* For simplicity, don't change the kind of the stmt: turn dest = src;
+ into dest = {}; and memcpy (&dest, &src, len); into
+ memset (&dest, val, len).  In theory we could change dest = src into
+ a memset if dest is addressable (maybe beneficial if val is not 0),
+ or memcpy (&dest, &src, len) into dest = {} if len is the size of
+ dest and dest isn't volatile.  */
+ if (is_gimple_assign (stmt))
+ {
+ tree ctor = build_constructor (TREE_TYPE (dest), NULL);
+ gimple_assign_set_rhs_from_tree (gsip, ctor);
+ update_stmt (stmt);
+ }
+ else /* If stmt is memcpy, transform it into memset. */
+ {
+ gcall *call = as_a <gcall *> (stmt);
+ tree fndecl = builtin_decl_implicit (BUILT_IN_MEMSET);
+ gimple_call_set_fndecl (call, fndecl);
+ gimple_call_set_fntype (call, TREE_TYPE (fndecl));
+ gimple_call_set_arg (call, 1, val);
+ update_stmt (stmt);
+ }
+
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ {
+ fprintf (dump_file, "into\n ");
+ print_gimple_stmt (dump_file, stmt, 0, dump_flags);
+ }
+}
+
/* A simple pass that attempts to fold all builtin functions. This pass
is run after we've propagated as many constants as we can. */
-static unsigned int
-execute_fold_all_builtins (void)
+namespace {
+
+const pass_data pass_data_fold_builtins =
+{
+ GIMPLE_PASS, /* type */
+ "fab", /* name */
+ OPTGROUP_NONE, /* optinfo_flags */
+ TV_NONE, /* tv_id */
+ ( PROP_cfg | PROP_ssa ), /* properties_required */
+ 0, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ TODO_update_ssa, /* todo_flags_finish */
+};
+
+class pass_fold_builtins : public gimple_opt_pass
+{
+public:
+ pass_fold_builtins (gcc::context *ctxt)
+ : gimple_opt_pass (pass_data_fold_builtins, ctxt)
+ {}
+
+ /* opt_pass methods: */
+ opt_pass * clone () { return new pass_fold_builtins (m_ctxt); }
+ virtual unsigned int execute (function *);
+
+}; // class pass_fold_builtins
+
+unsigned int
+pass_fold_builtins::execute (function *fun)
{
bool cfg_changed = false;
basic_block bb;
unsigned int todoflags = 0;
- FOR_EACH_BB_FN (bb, cfun)
+ FOR_EACH_BB_FN (bb, fun)
{
gimple_stmt_iterator i;
for (i = gsi_start_bb (bb); !gsi_end_p (i); )
{
- gimple stmt, old_stmt;
- tree callee, result;
+ gimple *stmt, *old_stmt;
+ tree callee;
enum built_in_function fcode;
stmt = gsi_stmt (i);
continue;
}
}
+ else if (gimple_assign_load_p (stmt) && gimple_store_p (stmt))
+ optimize_memcpy (&i, gimple_assign_lhs (stmt),
+ gimple_assign_rhs1 (stmt), NULL_TREE);
gsi_next (&i);
continue;
}
+
callee = gimple_call_fndecl (stmt);
if (!callee || DECL_BUILT_IN_CLASS (callee) != BUILT_IN_NORMAL)
{
gsi_next (&i);
continue;
}
+
fcode = DECL_FUNCTION_CODE (callee);
+ if (fold_stmt (&i))
+ ;
+ else
+ {
+ tree result = NULL_TREE;
+ switch (DECL_FUNCTION_CODE (callee))
+ {
+ case BUILT_IN_CONSTANT_P:
+ /* Resolve __builtin_constant_p. If it hasn't been
+ folded to integer_one_node by now, it's fairly
+ certain that the value simply isn't constant. */
+ result = integer_zero_node;
+ break;
- result = gimple_fold_builtin (stmt);
+ case BUILT_IN_ASSUME_ALIGNED:
+ /* Remove __builtin_assume_aligned. */
+ result = gimple_call_arg (stmt, 0);
+ break;
- if (result)
- gimple_remove_stmt_histograms (cfun, stmt);
+ case BUILT_IN_STACK_RESTORE:
+ result = optimize_stack_restore (i);
+ if (result)
+ break;
+ gsi_next (&i);
+ continue;
- if (!result)
- switch (DECL_FUNCTION_CODE (callee))
- {
- case BUILT_IN_CONSTANT_P:
- /* Resolve __builtin_constant_p. If it hasn't been
- folded to integer_one_node by now, it's fairly
- certain that the value simply isn't constant. */
- result = integer_zero_node;
- break;
+ case BUILT_IN_UNREACHABLE:
+ if (optimize_unreachable (i))
+ cfg_changed = true;
+ break;
- case BUILT_IN_ASSUME_ALIGNED:
- /* Remove __builtin_assume_aligned. */
- result = gimple_call_arg (stmt, 0);
- break;
+ case BUILT_IN_ATOMIC_FETCH_OR_1:
+ case BUILT_IN_ATOMIC_FETCH_OR_2:
+ case BUILT_IN_ATOMIC_FETCH_OR_4:
+ case BUILT_IN_ATOMIC_FETCH_OR_8:
+ case BUILT_IN_ATOMIC_FETCH_OR_16:
+ optimize_atomic_bit_test_and (&i,
+ IFN_ATOMIC_BIT_TEST_AND_SET,
+ true, false);
+ break;
+ case BUILT_IN_SYNC_FETCH_AND_OR_1:
+ case BUILT_IN_SYNC_FETCH_AND_OR_2:
+ case BUILT_IN_SYNC_FETCH_AND_OR_4:
+ case BUILT_IN_SYNC_FETCH_AND_OR_8:
+ case BUILT_IN_SYNC_FETCH_AND_OR_16:
+ optimize_atomic_bit_test_and (&i,
+ IFN_ATOMIC_BIT_TEST_AND_SET,
+ false, false);
+ break;
- case BUILT_IN_STACK_RESTORE:
- result = optimize_stack_restore (i);
- if (result)
+ case BUILT_IN_ATOMIC_FETCH_XOR_1:
+ case BUILT_IN_ATOMIC_FETCH_XOR_2:
+ case BUILT_IN_ATOMIC_FETCH_XOR_4:
+ case BUILT_IN_ATOMIC_FETCH_XOR_8:
+ case BUILT_IN_ATOMIC_FETCH_XOR_16:
+ optimize_atomic_bit_test_and
+ (&i, IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT, true, false);
+ break;
+ case BUILT_IN_SYNC_FETCH_AND_XOR_1:
+ case BUILT_IN_SYNC_FETCH_AND_XOR_2:
+ case BUILT_IN_SYNC_FETCH_AND_XOR_4:
+ case BUILT_IN_SYNC_FETCH_AND_XOR_8:
+ case BUILT_IN_SYNC_FETCH_AND_XOR_16:
+ optimize_atomic_bit_test_and
+ (&i, IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT, false, false);
break;
- gsi_next (&i);
- continue;
- case BUILT_IN_UNREACHABLE:
- if (optimize_unreachable (i))
- cfg_changed = true;
- break;
+ case BUILT_IN_ATOMIC_XOR_FETCH_1:
+ case BUILT_IN_ATOMIC_XOR_FETCH_2:
+ case BUILT_IN_ATOMIC_XOR_FETCH_4:
+ case BUILT_IN_ATOMIC_XOR_FETCH_8:
+ case BUILT_IN_ATOMIC_XOR_FETCH_16:
+ optimize_atomic_bit_test_and
+ (&i, IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT, true, true);
+ break;
+ case BUILT_IN_SYNC_XOR_AND_FETCH_1:
+ case BUILT_IN_SYNC_XOR_AND_FETCH_2:
+ case BUILT_IN_SYNC_XOR_AND_FETCH_4:
+ case BUILT_IN_SYNC_XOR_AND_FETCH_8:
+ case BUILT_IN_SYNC_XOR_AND_FETCH_16:
+ optimize_atomic_bit_test_and
+ (&i, IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT, false, true);
+ break;
- case BUILT_IN_VA_START:
- case BUILT_IN_VA_END:
- case BUILT_IN_VA_COPY:
- /* These shouldn't be folded before pass_stdarg. */
- result = optimize_stdarg_builtin (stmt);
- if (result)
+ case BUILT_IN_ATOMIC_FETCH_AND_1:
+ case BUILT_IN_ATOMIC_FETCH_AND_2:
+ case BUILT_IN_ATOMIC_FETCH_AND_4:
+ case BUILT_IN_ATOMIC_FETCH_AND_8:
+ case BUILT_IN_ATOMIC_FETCH_AND_16:
+ optimize_atomic_bit_test_and (&i,
+ IFN_ATOMIC_BIT_TEST_AND_RESET,
+ true, false);
+ break;
+ case BUILT_IN_SYNC_FETCH_AND_AND_1:
+ case BUILT_IN_SYNC_FETCH_AND_AND_2:
+ case BUILT_IN_SYNC_FETCH_AND_AND_4:
+ case BUILT_IN_SYNC_FETCH_AND_AND_8:
+ case BUILT_IN_SYNC_FETCH_AND_AND_16:
+ optimize_atomic_bit_test_and (&i,
+ IFN_ATOMIC_BIT_TEST_AND_RESET,
+ false, false);
break;
- /* FALLTHRU */
- default:
- gsi_next (&i);
- continue;
- }
+ case BUILT_IN_MEMCPY:
+ if (gimple_call_builtin_p (stmt, BUILT_IN_NORMAL)
+ && TREE_CODE (gimple_call_arg (stmt, 0)) == ADDR_EXPR
+ && TREE_CODE (gimple_call_arg (stmt, 1)) == ADDR_EXPR
+ && TREE_CODE (gimple_call_arg (stmt, 2)) == INTEGER_CST)
+ {
+ tree dest = TREE_OPERAND (gimple_call_arg (stmt, 0), 0);
+ tree src = TREE_OPERAND (gimple_call_arg (stmt, 1), 0);
+ tree len = gimple_call_arg (stmt, 2);
+ optimize_memcpy (&i, dest, src, len);
+ }
+ break;
- if (result == NULL_TREE)
- break;
+ case BUILT_IN_VA_START:
+ case BUILT_IN_VA_END:
+ case BUILT_IN_VA_COPY:
+ /* These shouldn't be folded before pass_stdarg. */
+ result = optimize_stdarg_builtin (stmt);
+ break;
+
+ default:;
+ }
+
+ if (!result)
+ {
+ gsi_next (&i);
+ continue;
+ }
+
+ if (!update_call_from_tree (&i, result))
+ gimplify_and_update_call_from_tree (&i, result);
+ }
+
+ todoflags |= TODO_update_address_taken;
if (dump_file && (dump_flags & TDF_DETAILS))
{
}
old_stmt = stmt;
- if (!update_call_from_tree (&i, result))
- {
- gimplify_and_update_call_from_tree (&i, result);
- todoflags |= TODO_update_address_taken;
- }
-
stmt = gsi_stmt (i);
update_stmt (stmt);
return todoflags;
}
+} // anon namespace
+
+gimple_opt_pass *
+make_pass_fold_builtins (gcc::context *ctxt)
+{
+ return new pass_fold_builtins (ctxt);
+}
+
+/* A simple pass that emits some warnings post IPA. */
namespace {
-const pass_data pass_data_fold_builtins =
+const pass_data pass_data_post_ipa_warn =
{
GIMPLE_PASS, /* type */
- "fab", /* name */
+ "post_ipa_warn", /* name */
OPTGROUP_NONE, /* optinfo_flags */
- true, /* has_execute */
TV_NONE, /* tv_id */
( PROP_cfg | PROP_ssa ), /* properties_required */
0, /* properties_provided */
0, /* properties_destroyed */
0, /* todo_flags_start */
- ( TODO_verify_ssa | TODO_update_ssa ), /* todo_flags_finish */
+ 0, /* todo_flags_finish */
};
-class pass_fold_builtins : public gimple_opt_pass
+class pass_post_ipa_warn : public gimple_opt_pass
{
public:
- pass_fold_builtins (gcc::context *ctxt)
- : gimple_opt_pass (pass_data_fold_builtins, ctxt)
+ pass_post_ipa_warn (gcc::context *ctxt)
+ : gimple_opt_pass (pass_data_post_ipa_warn, ctxt)
{}
/* opt_pass methods: */
- opt_pass * clone () { return new pass_fold_builtins (m_ctxt); }
- unsigned int execute () { return execute_fold_all_builtins (); }
+ opt_pass * clone () { return new pass_post_ipa_warn (m_ctxt); }
+ virtual bool gate (function *) { return warn_nonnull != 0; }
+ virtual unsigned int execute (function *);
-}; // class pass_fold_builtins
+}; // class pass_post_ipa_warn
+unsigned int
+pass_post_ipa_warn::execute (function *fun)
+{
+ basic_block bb;
+
+ FOR_EACH_BB_FN (bb, fun)
+ {
+ gimple_stmt_iterator gsi;
+ for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
+ {
+ gimple *stmt = gsi_stmt (gsi);
+ if (!is_gimple_call (stmt) || gimple_no_warning_p (stmt))
+ continue;
+
+ if (warn_nonnull)
+ {
+ bitmap nonnullargs
+ = get_nonnull_args (gimple_call_fntype (stmt));
+ if (nonnullargs)
+ {
+ for (unsigned i = 0; i < gimple_call_num_args (stmt); i++)
+ {
+ tree arg = gimple_call_arg (stmt, i);
+ if (TREE_CODE (TREE_TYPE (arg)) != POINTER_TYPE)
+ continue;
+ if (!integer_zerop (arg))
+ continue;
+ if (!bitmap_empty_p (nonnullargs)
+ && !bitmap_bit_p (nonnullargs, i))
+ continue;
+
+ location_t loc = gimple_location (stmt);
+ if (warning_at (loc, OPT_Wnonnull,
+ "argument %u null where non-null "
+ "expected", i + 1))
+ {
+ tree fndecl = gimple_call_fndecl (stmt);
+ if (fndecl && DECL_IS_BUILTIN (fndecl))
+ inform (loc, "in a call to built-in function %qD",
+ fndecl);
+ else if (fndecl)
+ inform (DECL_SOURCE_LOCATION (fndecl),
+ "in a call to function %qD declared here",
+ fndecl);
+
+ }
+ }
+ BITMAP_FREE (nonnullargs);
+ }
+ }
+ }
+ }
+ return 0;
+}
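+
+/* A hedged illustration (hypothetical user code): a call such as
+     extern void f (void *p) __attribute__ ((nonnull));
+     void g (void) { f (NULL); }
+   reaches the loop above with a literal null argument in a nonnull
+   position and triggers "argument 1 null where non-null expected" at the
+   call site, followed by a note pointing at the declaration of f.  */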
+
} // anon namespace
gimple_opt_pass *
-make_pass_fold_builtins (gcc::context *ctxt)
+make_pass_post_ipa_warn (gcc::context *ctxt)
{
- return new pass_fold_builtins (ctxt);
+ return new pass_post_ipa_warn (ctxt);
}