X-Git-Url: http://git.ipfire.org/?a=blobdiff_plain;f=gcc%2Ftree-ssa-ccp.c;h=a501d2cf03fd7fb8783620d7782818f3fb6d04a2;hb=818ab71a415cd234be092111a0aa5e812ec56434;hp=7f0c407caa8a99f486a2bb414650f299dc6d8f83;hpb=c38cdd3f2235f07770a46708c86246986a3c12ae;p=thirdparty%2Fgcc.git diff --git a/gcc/tree-ssa-ccp.c b/gcc/tree-ssa-ccp.c index 7f0c407caa8a..a501d2cf03fd 100644 --- a/gcc/tree-ssa-ccp.c +++ b/gcc/tree-ssa-ccp.c @@ -1,6 +1,5 @@ /* Conditional constant propagation pass for the GNU compiler. - Copyright (C) 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, - 2010, 2011, 2012 Free Software Foundation, Inc. + Copyright (C) 2000-2016 Free Software Foundation, Inc. Adapted from original RTL SSA-CCP by Daniel Berlin Adapted to GIMPLE trees by Diego Novillo @@ -99,6 +98,15 @@ along with GCC; see the file COPYING3. If not see array CONST_VAL[i].VALUE. That is fed into substitute_and_fold for final substitution and folding. + This algorithm uses wide-ints at the max precision of the target. + This means that, with one uninteresting exception, variables with + UNSIGNED types never go to VARYING because the bits above the + precision of the type of the variable are always zero. The + uninteresting case is a variable of UNSIGNED type that has the + maximum precision of the target. Such variables can go to VARYING, + but this causes no loss of infomation since these variables will + never be extended. + References: Constant propagation with conditional branches, @@ -113,24 +121,25 @@ along with GCC; see the file COPYING3. If not see #include "config.h" #include "system.h" #include "coretypes.h" -#include "tm.h" +#include "backend.h" +#include "target.h" #include "tree.h" -#include "flags.h" -#include "tm_p.h" -#include "basic-block.h" -#include "function.h" -#include "gimple-pretty-print.h" -#include "tree-flow.h" +#include "gimple.h" #include "tree-pass.h" +#include "ssa.h" +#include "gimple-pretty-print.h" +#include "fold-const.h" +#include "gimple-fold.h" +#include "tree-eh.h" +#include "gimplify.h" +#include "gimple-iterator.h" +#include "tree-cfg.h" #include "tree-ssa-propagate.h" -#include "value-prof.h" -#include "langhooks.h" -#include "target.h" -#include "diagnostic-core.h" #include "dbgcnt.h" -#include "gimple-fold.h" #include "params.h" -#include "hash-table.h" +#include "builtins.h" +#include "tree-chkp.h" +#include "cfgloop.h" /* Possible lattice values. */ @@ -142,35 +151,37 @@ typedef enum VARYING } ccp_lattice_t; -struct prop_value_d { +struct ccp_prop_value_t { /* Lattice value. */ ccp_lattice_t lattice_val; /* Propagated value. */ tree value; - /* Mask that applies to the propagated value during CCP. For - X with a CONSTANT lattice value X & ~mask == value & ~mask. */ - double_int mask; + /* Mask that applies to the propagated value during CCP. For X + with a CONSTANT lattice value X & ~mask == value & ~mask. The + zero bits in the mask cover constant values. The ones mean no + information. */ + widest_int mask; }; -typedef struct prop_value_d prop_value_t; - /* Array of propagated constant values. After propagation, CONST_VAL[I].VALUE holds the constant value for SSA_NAME(I). If the constant is held in an SSA name representing a memory store (i.e., a VDEF), CONST_VAL[I].MEM_REF will contain the actual memory reference used to store (i.e., the LHS of the assignment doing the store). 
*/ -static prop_value_t *const_val; +static ccp_prop_value_t *const_val; +static unsigned n_const_val; -static void canonicalize_float_value (prop_value_t *); +static void canonicalize_value (ccp_prop_value_t *); static bool ccp_fold_stmt (gimple_stmt_iterator *); +static void ccp_lattice_meet (ccp_prop_value_t *, ccp_prop_value_t *); /* Dump constant propagation value VAL to file OUTF prefixed by PREFIX. */ static void -dump_lattice_value (FILE *outf, const char *prefix, prop_value_t val) +dump_lattice_value (FILE *outf, const char *prefix, ccp_prop_value_t val) { switch (val.lattice_val) { @@ -185,18 +196,20 @@ dump_lattice_value (FILE *outf, const char *prefix, prop_value_t val) break; case CONSTANT: if (TREE_CODE (val.value) != INTEGER_CST - || val.mask.is_zero ()) + || val.mask == 0) { fprintf (outf, "%sCONSTANT ", prefix); print_generic_expr (outf, val.value, dump_flags); } else { - double_int cval = tree_to_double_int (val.value).and_not (val.mask); - fprintf (outf, "%sCONSTANT " HOST_WIDE_INT_PRINT_DOUBLE_HEX, - prefix, cval.high, cval.low); - fprintf (outf, " (" HOST_WIDE_INT_PRINT_DOUBLE_HEX ")", - val.mask.high, val.mask.low); + widest_int cval = wi::bit_and_not (wi::to_widest (val.value), + val.mask); + fprintf (outf, "%sCONSTANT ", prefix); + print_hex (cval, outf); + fprintf (outf, " ("); + print_hex (val.mask, outf); + fprintf (outf, ")"); } break; default: @@ -207,15 +220,23 @@ dump_lattice_value (FILE *outf, const char *prefix, prop_value_t val) /* Print lattice value VAL to stderr. */ -void debug_lattice_value (prop_value_t val); +void debug_lattice_value (ccp_prop_value_t val); DEBUG_FUNCTION void -debug_lattice_value (prop_value_t val) +debug_lattice_value (ccp_prop_value_t val) { dump_lattice_value (stderr, "", val); fprintf (stderr, "\n"); } +/* Extend NONZERO_BITS to a full mask, with the upper bits being set. */ + +static widest_int +extend_mask (const wide_int &nonzero_bits) +{ + return (wi::mask (wi::get_precision (nonzero_bits), true) + | widest_int::from (nonzero_bits, UNSIGNED)); +} /* Compute a default value for variable VAR and store it in the CONST_VAL array. The following rules are used to get default @@ -235,11 +256,11 @@ debug_lattice_value (prop_value_t val) 4- Initial values of variables that are not GIMPLE registers are considered VARYING. */ -static prop_value_t +static ccp_prop_value_t get_default_value (tree var) { - prop_value_t val = { UNINITIALIZED, NULL_TREE, { 0, 0 } }; - gimple stmt; + ccp_prop_value_t val = { UNINITIALIZED, NULL_TREE, 0 }; + gimple *stmt; stmt = SSA_NAME_DEF_STMT (var); @@ -255,15 +276,20 @@ get_default_value (tree var) else { val.lattice_val = VARYING; - val.mask = double_int_minus_one; + val.mask = -1; + if (flag_tree_bit_ccp) + { + wide_int nonzero_bits = get_nonzero_bits (var); + if (nonzero_bits != -1) + { + val.lattice_val = CONSTANT; + val.value = build_zero_cst (TREE_TYPE (var)); + val.mask = extend_mask (nonzero_bits); + } + } } } - else if (is_gimple_assign (stmt) - /* Value-returning GIMPLE_CALL statements assign to - a variable, and are treated similarly to GIMPLE_ASSIGN. */ - || (is_gimple_call (stmt) - && gimple_call_lhs (stmt) != NULL_TREE) - || gimple_code (stmt) == GIMPLE_PHI) + else if (is_gimple_assign (stmt)) { tree cst; if (gimple_assign_single_p (stmt) @@ -274,15 +300,25 @@ get_default_value (tree var) val.value = cst; } else - /* Any other variable defined by an assignment or a PHI node - is considered UNDEFINED. 
*/ - val.lattice_val = UNDEFINED; + { + /* Any other variable defined by an assignment is considered + UNDEFINED. */ + val.lattice_val = UNDEFINED; + } + } + else if ((is_gimple_call (stmt) + && gimple_call_lhs (stmt) != NULL_TREE) + || gimple_code (stmt) == GIMPLE_PHI) + { + /* A variable defined by a call or a PHI node is considered + UNDEFINED. */ + val.lattice_val = UNDEFINED; } else { /* Otherwise, VAR will never take on a constant value. */ val.lattice_val = VARYING; - val.mask = double_int_minus_one; + val.mask = -1; } return val; @@ -291,19 +327,20 @@ get_default_value (tree var) /* Get the constant value associated with variable VAR. */ -static inline prop_value_t * +static inline ccp_prop_value_t * get_value (tree var) { - prop_value_t *val; + ccp_prop_value_t *val; - if (const_val == NULL) + if (const_val == NULL + || SSA_NAME_VERSION (var) >= n_const_val) return NULL; val = &const_val[SSA_NAME_VERSION (var)]; if (val->lattice_val == UNINITIALIZED) *val = get_default_value (var); - canonicalize_float_value (val); + canonicalize_value (val); return val; } @@ -313,7 +350,7 @@ get_value (tree var) static inline tree get_constant_value (tree var) { - prop_value_t *val; + ccp_prop_value_t *val; if (TREE_CODE (var) != SSA_NAME) { if (is_gimple_min_invariant (var)) @@ -324,7 +361,7 @@ get_constant_value (tree var) if (val && val->lattice_val == CONSTANT && (TREE_CODE (val->value) != INTEGER_CST - || val->mask.is_zero ())) + || val->mask == 0)) return val->value; return NULL_TREE; } @@ -334,64 +371,29 @@ get_constant_value (tree var) static inline void set_value_varying (tree var) { - prop_value_t *val = &const_val[SSA_NAME_VERSION (var)]; + ccp_prop_value_t *val = &const_val[SSA_NAME_VERSION (var)]; val->lattice_val = VARYING; val->value = NULL_TREE; - val->mask = double_int_minus_one; + val->mask = -1; } -/* For float types, modify the value of VAL to make ccp work correctly - for non-standard values (-0, NaN): - - If HONOR_SIGNED_ZEROS is false, and VAL = -0, we canonicalize it to 0. - If HONOR_NANS is false, and VAL is NaN, we canonicalize it to UNDEFINED. - This is to fix the following problem (see PR 29921): Suppose we have - - x = 0.0 * y - - and we set value of y to NaN. This causes value of x to be set to NaN. - When we later determine that y is in fact VARYING, fold uses the fact - that HONOR_NANS is false, and we try to change the value of x to 0, - causing an ICE. With HONOR_NANS being false, the real appearance of - NaN would cause undefined behavior, though, so claiming that y (and x) - are UNDEFINED initially is correct. */ +/* For integer constants, make sure to drop TREE_OVERFLOW. */ static void -canonicalize_float_value (prop_value_t *val) +canonicalize_value (ccp_prop_value_t *val) { - enum machine_mode mode; - tree type; - REAL_VALUE_TYPE d; - - if (val->lattice_val != CONSTANT - || TREE_CODE (val->value) != REAL_CST) + if (val->lattice_val != CONSTANT) return; - d = TREE_REAL_CST (val->value); - type = TREE_TYPE (val->value); - mode = TYPE_MODE (type); - - if (!HONOR_SIGNED_ZEROS (mode) - && REAL_VALUE_MINUS_ZERO (d)) - { - val->value = build_real (type, dconst0); - return; - } - - if (!HONOR_NANS (mode) - && REAL_VALUE_ISNAN (d)) - { - val->lattice_val = UNDEFINED; - val->value = NULL; - return; - } + if (TREE_OVERFLOW_P (val->value)) + val->value = drop_tree_overflow (val->value); } /* Return whether the lattice transition is valid. 
*/ static bool -valid_lattice_transition (prop_value_t old_val, prop_value_t new_val) +valid_lattice_transition (ccp_prop_value_t old_val, ccp_prop_value_t new_val) { /* Lattice transitions must always be monotonically increasing in value. */ @@ -406,6 +408,17 @@ valid_lattice_transition (prop_value_t old_val, prop_value_t new_val) /* Now both lattice values are CONSTANT. */ + /* Allow arbitrary copy changes as we might look through PHI + when only a single copy edge is executable. */ + if (TREE_CODE (old_val.value) == SSA_NAME + && TREE_CODE (new_val.value) == SSA_NAME) + return true; + + /* Allow transitioning from a constant to a copy. */ + if (is_gimple_min_invariant (old_val.value) + && TREE_CODE (new_val.value) == SSA_NAME) + return true; + /* Allow transitioning from PHI <&x, not executable> == &x to PHI <&x, &y> == common alignment. */ if (TREE_CODE (old_val.value) != INTEGER_CST @@ -415,93 +428,137 @@ valid_lattice_transition (prop_value_t old_val, prop_value_t new_val) /* Bit-lattices have to agree in the still valid bits. */ if (TREE_CODE (old_val.value) == INTEGER_CST && TREE_CODE (new_val.value) == INTEGER_CST) - return tree_to_double_int (old_val.value).and_not (new_val.mask) - == tree_to_double_int (new_val.value).and_not (new_val.mask); + return (wi::bit_and_not (wi::to_widest (old_val.value), new_val.mask) + == wi::bit_and_not (wi::to_widest (new_val.value), new_val.mask)); /* Otherwise constant values have to agree. */ - return operand_equal_p (old_val.value, new_val.value, 0); + if (operand_equal_p (old_val.value, new_val.value, 0)) + return true; + + /* At least the kinds and types should agree now. */ + if (TREE_CODE (old_val.value) != TREE_CODE (new_val.value) + || !types_compatible_p (TREE_TYPE (old_val.value), + TREE_TYPE (new_val.value))) + return false; + + /* For floats and !HONOR_NANS allow transitions from (partial) NaN + to non-NaN. */ + tree type = TREE_TYPE (new_val.value); + if (SCALAR_FLOAT_TYPE_P (type) + && !HONOR_NANS (type)) + { + if (REAL_VALUE_ISNAN (TREE_REAL_CST (old_val.value))) + return true; + } + else if (VECTOR_FLOAT_TYPE_P (type) + && !HONOR_NANS (type)) + { + for (unsigned i = 0; i < VECTOR_CST_NELTS (old_val.value); ++i) + if (!REAL_VALUE_ISNAN + (TREE_REAL_CST (VECTOR_CST_ELT (old_val.value, i))) + && !operand_equal_p (VECTOR_CST_ELT (old_val.value, i), + VECTOR_CST_ELT (new_val.value, i), 0)) + return false; + return true; + } + else if (COMPLEX_FLOAT_TYPE_P (type) + && !HONOR_NANS (type)) + { + if (!REAL_VALUE_ISNAN (TREE_REAL_CST (TREE_REALPART (old_val.value))) + && !operand_equal_p (TREE_REALPART (old_val.value), + TREE_REALPART (new_val.value), 0)) + return false; + if (!REAL_VALUE_ISNAN (TREE_REAL_CST (TREE_IMAGPART (old_val.value))) + && !operand_equal_p (TREE_IMAGPART (old_val.value), + TREE_IMAGPART (new_val.value), 0)) + return false; + return true; + } + return false; } /* Set the value for variable VAR to NEW_VAL. Return true if the new value is different from VAR's previous value. */ static bool -set_lattice_value (tree var, prop_value_t new_val) +set_lattice_value (tree var, ccp_prop_value_t *new_val) { /* We can deal with old UNINITIALIZED values just fine here. */ - prop_value_t *old_val = &const_val[SSA_NAME_VERSION (var)]; + ccp_prop_value_t *old_val = &const_val[SSA_NAME_VERSION (var)]; - canonicalize_float_value (&new_val); + canonicalize_value (new_val); /* We have to be careful to not go up the bitwise lattice - represented by the mask. - ??? This doesn't seem to be the best place to enforce this. 
*/ - if (new_val.lattice_val == CONSTANT + represented by the mask. Instead of dropping to VARYING + use the meet operator to retain a conservative value. + Missed optimizations like PR65851 makes this necessary. + It also ensures we converge to a stable lattice solution. */ + if (new_val->lattice_val == CONSTANT && old_val->lattice_val == CONSTANT - && TREE_CODE (new_val.value) == INTEGER_CST - && TREE_CODE (old_val->value) == INTEGER_CST) - { - double_int diff; - diff = tree_to_double_int (new_val.value) - ^ tree_to_double_int (old_val->value); - new_val.mask = new_val.mask | old_val->mask | diff; - } + && TREE_CODE (new_val->value) != SSA_NAME) + ccp_lattice_meet (new_val, old_val); - gcc_assert (valid_lattice_transition (*old_val, new_val)); + gcc_checking_assert (valid_lattice_transition (*old_val, *new_val)); /* If *OLD_VAL and NEW_VAL are the same, return false to inform the caller that this was a non-transition. */ - if (old_val->lattice_val != new_val.lattice_val - || (new_val.lattice_val == CONSTANT - && TREE_CODE (new_val.value) == INTEGER_CST - && (TREE_CODE (old_val->value) != INTEGER_CST - || new_val.mask != old_val->mask))) + if (old_val->lattice_val != new_val->lattice_val + || (new_val->lattice_val == CONSTANT + && (TREE_CODE (new_val->value) != TREE_CODE (old_val->value) + || (TREE_CODE (new_val->value) == INTEGER_CST + && (new_val->mask != old_val->mask + || (wi::bit_and_not (wi::to_widest (old_val->value), + new_val->mask) + != wi::bit_and_not (wi::to_widest (new_val->value), + new_val->mask)))) + || (TREE_CODE (new_val->value) != INTEGER_CST + && !operand_equal_p (new_val->value, old_val->value, 0))))) { /* ??? We would like to delay creation of INTEGER_CSTs from partially constants here. */ if (dump_file && (dump_flags & TDF_DETAILS)) { - dump_lattice_value (dump_file, "Lattice value changed to ", new_val); + dump_lattice_value (dump_file, "Lattice value changed to ", *new_val); fprintf (dump_file, ". Adding SSA edges to worklist.\n"); } - *old_val = new_val; + *old_val = *new_val; - gcc_assert (new_val.lattice_val != UNINITIALIZED); + gcc_assert (new_val->lattice_val != UNINITIALIZED); return true; } return false; } -static prop_value_t get_value_for_expr (tree, bool); -static prop_value_t bit_value_binop (enum tree_code, tree, tree, tree); -static void bit_value_binop_1 (enum tree_code, tree, double_int *, double_int *, - tree, double_int, double_int, - tree, double_int, double_int); +static ccp_prop_value_t get_value_for_expr (tree, bool); +static ccp_prop_value_t bit_value_binop (enum tree_code, tree, tree, tree); +static void bit_value_binop_1 (enum tree_code, tree, widest_int *, widest_int *, + tree, const widest_int &, const widest_int &, + tree, const widest_int &, const widest_int &); -/* Return a double_int that can be used for bitwise simplifications +/* Return a widest_int that can be used for bitwise simplifications from VAL. */ -static double_int -value_to_double_int (prop_value_t val) +static widest_int +value_to_wide_int (ccp_prop_value_t val) { if (val.value && TREE_CODE (val.value) == INTEGER_CST) - return tree_to_double_int (val.value); - else - return double_int_zero; + return wi::to_widest (val.value); + + return 0; } /* Return the value for the address expression EXPR based on alignment information. 
*/ -static prop_value_t +static ccp_prop_value_t get_value_from_alignment (tree expr) { tree type = TREE_TYPE (expr); - prop_value_t val; + ccp_prop_value_t val; unsigned HOST_WIDE_INT bitpos; unsigned int align; @@ -509,14 +566,12 @@ get_value_from_alignment (tree expr) get_pointer_alignment_1 (expr, &align, &bitpos); val.mask = (POINTER_TYPE_P (type) || TYPE_UNSIGNED (type) - ? double_int::mask (TYPE_PRECISION (type)) - : double_int_minus_one) - .and_not (double_int::from_uhwi (align / BITS_PER_UNIT - 1)); - val.lattice_val = val.mask.is_minus_one () ? VARYING : CONSTANT; + ? wi::mask (TYPE_PRECISION (type), false) + : -1).and_not (align / BITS_PER_UNIT - 1); + val.lattice_val + = wi::sext (val.mask, TYPE_PRECISION (type)) == -1 ? VARYING : CONSTANT; if (val.lattice_val == CONSTANT) - val.value - = double_int_to_tree (type, - double_int::from_uhwi (bitpos / BITS_PER_UNIT)); + val.value = build_int_cstu (type, bitpos / BITS_PER_UNIT); else val.value = NULL_TREE; @@ -527,10 +582,10 @@ get_value_from_alignment (tree expr) return constant bits extracted from alignment information for invariant addresses. */ -static prop_value_t +static ccp_prop_value_t get_value_for_expr (tree expr, bool for_bits_p) { - prop_value_t val; + ccp_prop_value_t val; if (TREE_CODE (expr) == SSA_NAME) { @@ -539,23 +594,37 @@ get_value_for_expr (tree expr, bool for_bits_p) && val.lattice_val == CONSTANT && TREE_CODE (val.value) == ADDR_EXPR) val = get_value_from_alignment (val.value); + /* Fall back to a copy value. */ + if (!for_bits_p + && val.lattice_val == VARYING + && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (expr)) + { + val.lattice_val = CONSTANT; + val.value = expr; + val.mask = -1; + } } else if (is_gimple_min_invariant (expr) && (!for_bits_p || TREE_CODE (expr) != ADDR_EXPR)) { val.lattice_val = CONSTANT; val.value = expr; - val.mask = double_int_zero; - canonicalize_float_value (&val); + val.mask = 0; + canonicalize_value (&val); } else if (TREE_CODE (expr) == ADDR_EXPR) val = get_value_from_alignment (expr); else { val.lattice_val = VARYING; - val.mask = double_int_minus_one; + val.mask = -1; val.value = NULL_TREE; } + + if (val.lattice_val == VARYING + && TYPE_UNSIGNED (TREE_TYPE (expr))) + val.mask = wi::zext (val.mask, TYPE_PRECISION (TREE_TYPE (expr))); + return val; } @@ -571,9 +640,10 @@ get_value_for_expr (tree expr, bool for_bits_p) Else return VARYING. */ static ccp_lattice_t -likely_value (gimple stmt) +likely_value (gimple *stmt) { bool has_constant_operand, has_undefined_operand, all_undefined_operands; + bool has_nsa_operand; tree use; ssa_op_iter iter; unsigned i; @@ -596,9 +666,10 @@ likely_value (gimple stmt) has_constant_operand = false; has_undefined_operand = false; all_undefined_operands = true; + has_nsa_operand = false; FOR_EACH_SSA_TREE_OPERAND (use, stmt, iter, SSA_OP_USE) { - prop_value_t *val = get_value (use); + ccp_prop_value_t *val = get_value (use); if (val->lattice_val == UNDEFINED) has_undefined_operand = true; @@ -607,6 +678,10 @@ likely_value (gimple stmt) if (val->lattice_val == CONSTANT) has_constant_operand = true; + + if (SSA_NAME_IS_DEFAULT_DEF (use) + || !prop_simulate_again_p (SSA_NAME_DEF_STMT (use))) + has_nsa_operand = true; } /* There may be constants in regular rhs operands. 
For calls we @@ -625,6 +700,22 @@ likely_value (gimple stmt) if (has_constant_operand) all_undefined_operands = false; + if (has_undefined_operand + && code == GIMPLE_CALL + && gimple_call_internal_p (stmt)) + switch (gimple_call_internal_fn (stmt)) + { + /* These 3 builtins use the first argument just as a magic + way how to find out a decl uid. */ + case IFN_GOMP_SIMD_LANE: + case IFN_GOMP_SIMD_VF: + case IFN_GOMP_SIMD_LAST_LANE: + has_undefined_operand = false; + break; + default: + break; + } + /* If the operation combines operands like COMPLEX_EXPR make sure to not mark the result UNDEFINED if only one part of the result is undefined. */ @@ -663,8 +754,10 @@ likely_value (gimple stmt) /* We do not consider virtual operands here -- load from read-only memory may have only VARYING virtual operands, but still be - constant. */ + constant. Also we can combine the stmt with definitions from + operands whose definitions are not simulated again. */ if (has_constant_operand + || has_nsa_operand || gimple_references_memory_p (stmt)) return CONSTANT; @@ -674,7 +767,7 @@ likely_value (gimple stmt) /* Returns true if STMT cannot be constant. */ static bool -surely_varying_stmt_p (gimple stmt) +surely_varying_stmt_p (gimple *stmt) { /* If the statement has operands that we cannot handle, it cannot be constant. */ @@ -682,13 +775,18 @@ surely_varying_stmt_p (gimple stmt) return true; /* If it is a call and does not return a value or is not a - builtin and not an indirect call, it is varying. */ + builtin and not an indirect call or a call to function with + assume_aligned/alloc_align attribute, it is varying. */ if (is_gimple_call (stmt)) { - tree fndecl; + tree fndecl, fntype = gimple_call_fntype (stmt); if (!gimple_call_lhs (stmt) || ((fndecl = gimple_call_fndecl (stmt)) != NULL_TREE - && !DECL_BUILT_IN (fndecl))) + && !DECL_BUILT_IN (fndecl) + && !lookup_attribute ("assume_aligned", + TYPE_ATTRIBUTES (fntype)) + && !lookup_attribute ("alloc_align", + TYPE_ATTRIBUTES (fntype)))) return true; } @@ -714,16 +812,17 @@ ccp_initialize (void) { basic_block bb; - const_val = XCNEWVEC (prop_value_t, num_ssa_names); + n_const_val = num_ssa_names; + const_val = XCNEWVEC (ccp_prop_value_t, n_const_val); /* Initialize simulation flags for PHI nodes and statements. */ - FOR_EACH_BB (bb) + FOR_EACH_BB_FN (bb, cfun) { gimple_stmt_iterator i; for (i = gsi_start_bb (bb); !gsi_end_p (i); gsi_next (&i)) { - gimple stmt = gsi_stmt (i); + gimple *stmt = gsi_stmt (i); bool is_varying; /* If the statement is a control insn, then we do not @@ -751,13 +850,13 @@ ccp_initialize (void) /* Now process PHI nodes. We never clear the simulate_again flag on phi nodes, since we do not know which edges are executable yet, except for phi nodes for virtual operands when we do not do store ccp. */ - FOR_EACH_BB (bb) + FOR_EACH_BB_FN (bb, cfun) { - gimple_stmt_iterator i; + gphi_iterator i; for (i = gsi_start_phis (bb); !gsi_end_p (i); gsi_next (&i)) { - gimple phi = gsi_stmt (i); + gphi *phi = i.phi (); if (virtual_operand_p (gimple_phi_result (phi))) prop_set_simulate_again (phi, false); @@ -780,7 +879,7 @@ do_dbg_cnt (void) if (!dbg_cnt (ccp)) { const_val[i].lattice_val = VARYING; - const_val[i].mask = double_int_minus_one; + const_val[i].mask = -1; const_val[i].value = NULL_TREE; } } @@ -788,12 +887,12 @@ do_dbg_cnt (void) /* Do final substitution of propagated values, cleanup the flowgraph and - free allocated storage. + free allocated storage. If NONZERO_P, record nonzero bits. Return TRUE when something was optimized. 
*/ static bool -ccp_finalize (void) +ccp_finalize (bool nonzero_p) { bool something_changed; unsigned i; @@ -801,15 +900,20 @@ ccp_finalize (void) do_dbg_cnt (); /* Derive alignment and misalignment information from partially - constant pointers in the lattice. */ + constant pointers in the lattice or nonzero bits from partially + constant integers. */ for (i = 1; i < num_ssa_names; ++i) { tree name = ssa_name (i); - prop_value_t *val; + ccp_prop_value_t *val; unsigned int tem, align; if (!name - || !POINTER_TYPE_P (TREE_TYPE (name))) + || (!POINTER_TYPE_P (TREE_TYPE (name)) + && (!INTEGRAL_TYPE_P (TREE_TYPE (name)) + /* Don't record nonzero bits before IPA to avoid + using too much memory. */ + || !nonzero_p))) continue; val = get_value (name); @@ -817,13 +921,25 @@ ccp_finalize (void) || TREE_CODE (val->value) != INTEGER_CST) continue; - /* Trailing constant bits specify the alignment, trailing value - bits the misalignment. */ - tem = val->mask.low; - align = (tem & -tem); - if (align > 1) - set_ptr_info_alignment (get_ptr_info (name), align, - TREE_INT_CST_LOW (val->value) & (align - 1)); + if (POINTER_TYPE_P (TREE_TYPE (name))) + { + /* Trailing mask bits specify the alignment, trailing value + bits the misalignment. */ + tem = val->mask.to_uhwi (); + align = (tem & -tem); + if (align > 1) + set_ptr_info_alignment (get_ptr_info (name), align, + (TREE_INT_CST_LOW (val->value) + & (align - 1))); + } + else + { + unsigned int precision = TYPE_PRECISION (TREE_TYPE (val->value)); + wide_int nonzero_bits = wide_int::from (val->mask, precision, + UNSIGNED) | val->value; + nonzero_bits &= get_nonzero_bits (name); + set_nonzero_bits (name, nonzero_bits); + } } /* Perform substitutions based on the known constant values. */ @@ -846,14 +962,22 @@ ccp_finalize (void) */ static void -ccp_lattice_meet (prop_value_t *val1, prop_value_t *val2) +ccp_lattice_meet (ccp_prop_value_t *val1, ccp_prop_value_t *val2) { - if (val1->lattice_val == UNDEFINED) + if (val1->lattice_val == UNDEFINED + /* For UNDEFINED M SSA we can't always SSA because its definition + may not dominate the PHI node. Doing optimistic copy propagation + also causes a lot of gcc.dg/uninit-pred*.c FAILs. */ + && (val2->lattice_val != CONSTANT + || TREE_CODE (val2->value) != SSA_NAME)) { /* UNDEFINED M any = any */ *val1 = *val2; } - else if (val2->lattice_val == UNDEFINED) + else if (val2->lattice_val == UNDEFINED + /* See above. */ + && (val1->lattice_val != CONSTANT + || TREE_CODE (val1->value) != SSA_NAME)) { /* any M UNDEFINED = any Nothing to do. VAL1 already contains the value we want. */ @@ -864,7 +988,7 @@ ccp_lattice_meet (prop_value_t *val1, prop_value_t *val2) { /* any M VARYING = VARYING. */ val1->lattice_val = VARYING; - val1->mask = double_int_minus_one; + val1->mask = -1; val1->value = NULL_TREE; } else if (val1->lattice_val == CONSTANT @@ -877,10 +1001,10 @@ ccp_lattice_meet (prop_value_t *val1, prop_value_t *val2) For INTEGER_CSTs mask unequal bits. If no equal bits remain, drop to varying. 
*/ - val1->mask = val1->mask | val2->mask - | (tree_to_double_int (val1->value) - ^ tree_to_double_int (val2->value)); - if (val1->mask.is_minus_one ()) + val1->mask = (val1->mask | val2->mask + | (wi::to_widest (val1->value) + ^ wi::to_widest (val2->value))); + if (wi::sext (val1->mask, TYPE_PRECISION (TREE_TYPE (val1->value))) == -1) { val1->lattice_val = VARYING; val1->value = NULL_TREE; @@ -888,7 +1012,7 @@ ccp_lattice_meet (prop_value_t *val1, prop_value_t *val2) } else if (val1->lattice_val == CONSTANT && val2->lattice_val == CONSTANT - && simple_cst_equal (val1->value, val2->value) == 1) + && operand_equal_p (val1->value, val2->value, 0)) { /* Ci M Cj = Ci if (i == j) Ci M Cj = VARYING if (i != j) @@ -902,7 +1026,7 @@ ccp_lattice_meet (prop_value_t *val1, prop_value_t *val2) { /* When not equal addresses are involved try meeting for alignment. */ - prop_value_t tem = *val2; + ccp_prop_value_t tem = *val2; if (TREE_CODE (val1->value) == ADDR_EXPR) *val1 = get_value_for_expr (val1->value, true); if (TREE_CODE (val2->value) == ADDR_EXPR) @@ -913,7 +1037,7 @@ ccp_lattice_meet (prop_value_t *val1, prop_value_t *val2) { /* Any other combination is VARYING. */ val1->lattice_val = VARYING; - val1->mask = double_int_minus_one; + val1->mask = -1; val1->value = NULL_TREE; } } @@ -925,10 +1049,10 @@ ccp_lattice_meet (prop_value_t *val1, prop_value_t *val2) of the PHI node that are incoming via executable edges. */ static enum ssa_prop_result -ccp_visit_phi_node (gimple phi) +ccp_visit_phi_node (gphi *phi) { unsigned i; - prop_value_t *old_val, new_val; + ccp_prop_value_t new_val; if (dump_file && (dump_flags & TDF_DETAILS)) { @@ -936,25 +1060,12 @@ ccp_visit_phi_node (gimple phi) print_gimple_stmt (dump_file, phi, 0, dump_flags); } - old_val = get_value (gimple_phi_result (phi)); - switch (old_val->lattice_val) - { - case VARYING: - return SSA_PROP_VARYING; - - case CONSTANT: - new_val = *old_val; - break; - - case UNDEFINED: - new_val.lattice_val = UNDEFINED; - new_val.value = NULL_TREE; - break; - - default: - gcc_unreachable (); - } + new_val.lattice_val = UNDEFINED; + new_val.value = NULL_TREE; + new_val.mask = 0; + bool first = true; + bool non_exec_edge = false; for (i = 0; i < gimple_phi_num_args (phi); i++) { /* Compute the meet operator over all the PHI arguments flowing @@ -974,9 +1085,15 @@ ccp_visit_phi_node (gimple phi) if (e->flags & EDGE_EXECUTABLE) { tree arg = gimple_phi_arg (phi, i)->def; - prop_value_t arg_val = get_value_for_expr (arg, false); + ccp_prop_value_t arg_val = get_value_for_expr (arg, false); - ccp_lattice_meet (&new_val, &arg_val); + if (first) + { + new_val = arg_val; + first = false; + } + else + ccp_lattice_meet (&new_val, &arg_val); if (dump_file && (dump_flags & TDF_DETAILS)) { @@ -989,6 +1106,22 @@ ccp_visit_phi_node (gimple phi) if (new_val.lattice_val == VARYING) break; } + else + non_exec_edge = true; + } + + /* In case there were non-executable edges and the value is a copy + make sure its definition dominates the PHI node. */ + if (non_exec_edge + && new_val.lattice_val == CONSTANT + && TREE_CODE (new_val.value) == SSA_NAME + && ! SSA_NAME_IS_DEFAULT_DEF (new_val.value) + && ! dominated_by_p (CDI_DOMINATORS, gimple_bb (phi), + gimple_bb (SSA_NAME_DEF_STMT (new_val.value)))) + { + new_val.lattice_val = VARYING; + new_val.value = NULL_TREE; + new_val.mask = -1; } if (dump_file && (dump_flags & TDF_DETAILS)) @@ -998,7 +1131,7 @@ ccp_visit_phi_node (gimple phi) } /* Make the transition to the new value. 
*/ - if (set_lattice_value (gimple_phi_result (phi), new_val)) + if (set_lattice_value (gimple_phi_result (phi), &new_val)) { if (new_val.lattice_val == VARYING) return SSA_PROP_VARYING; @@ -1023,6 +1156,28 @@ valueize_op (tree op) return op; } +/* Return the constant value for OP, but signal to not follow SSA + edges if the definition may be simulated again. */ + +static tree +valueize_op_1 (tree op) +{ + if (TREE_CODE (op) == SSA_NAME) + { + /* If the definition may be simulated again we cannot follow + this SSA edge as the SSA propagator does not necessarily + re-visit the use. */ + gimple *def_stmt = SSA_NAME_DEF_STMT (op); + if (!gimple_nop_p (def_stmt) + && prop_simulate_again_p (def_stmt)) + return NULL_TREE; + tree tem = get_constant_value (op); + if (tem) + return tem; + } + return op; +} + /* CCP specific front-end to the non-destructive constant folding routines. @@ -1033,7 +1188,7 @@ valueize_op (tree op) otherwise return the original RHS or NULL_TREE. */ static tree -ccp_fold (gimple stmt) +ccp_fold (gimple *stmt) { location_t loc = gimple_location (stmt); switch (gimple_code (stmt)) @@ -1050,12 +1205,13 @@ ccp_fold (gimple stmt) case GIMPLE_SWITCH: { /* Return the constant switch index. */ - return valueize_op (gimple_switch_index (stmt)); + return valueize_op (gimple_switch_index (as_a (stmt))); } case GIMPLE_ASSIGN: case GIMPLE_CALL: - return gimple_fold_stmt_to_constant_1 (stmt, valueize_op); + return gimple_fold_stmt_to_constant_1 (stmt, + valueize_op, valueize_op_1); default: gcc_unreachable (); @@ -1068,8 +1224,8 @@ ccp_fold (gimple stmt) static void bit_value_unop_1 (enum tree_code code, tree type, - double_int *val, double_int *mask, - tree rtype, double_int rval, double_int rmask) + widest_int *val, widest_int *mask, + tree rtype, const widest_int &rval, const widest_int &rmask) { switch (code) { @@ -1080,33 +1236,32 @@ bit_value_unop_1 (enum tree_code code, tree type, case NEGATE_EXPR: { - double_int temv, temm; + widest_int temv, temm; /* Return ~rval + 1. */ bit_value_unop_1 (BIT_NOT_EXPR, type, &temv, &temm, type, rval, rmask); bit_value_binop_1 (PLUS_EXPR, type, val, mask, - type, temv, temm, - type, double_int_one, double_int_zero); + type, temv, temm, type, 1, 0); break; } CASE_CONVERT: { - bool uns; + signop sgn; /* First extend mask and value according to the original type. */ - uns = TYPE_UNSIGNED (rtype); - *mask = rmask.ext (TYPE_PRECISION (rtype), uns); - *val = rval.ext (TYPE_PRECISION (rtype), uns); + sgn = TYPE_SIGN (rtype); + *mask = wi::ext (rmask, TYPE_PRECISION (rtype), sgn); + *val = wi::ext (rval, TYPE_PRECISION (rtype), sgn); /* Then extend mask and value according to the target type. */ - uns = TYPE_UNSIGNED (type); - *mask = (*mask).ext (TYPE_PRECISION (type), uns); - *val = (*val).ext (TYPE_PRECISION (type), uns); + sgn = TYPE_SIGN (type); + *mask = wi::ext (*mask, TYPE_PRECISION (type), sgn); + *val = wi::ext (*val, TYPE_PRECISION (type), sgn); break; } default: - *mask = double_int_minus_one; + *mask = -1; break; } } @@ -1117,14 +1272,19 @@ bit_value_unop_1 (enum tree_code code, tree type, static void bit_value_binop_1 (enum tree_code code, tree type, - double_int *val, double_int *mask, - tree r1type, double_int r1val, double_int r1mask, - tree r2type, double_int r2val, double_int r2mask) + widest_int *val, widest_int *mask, + tree r1type, const widest_int &r1val, + const widest_int &r1mask, tree r2type, + const widest_int &r2val, const widest_int &r2mask) { - bool uns = TYPE_UNSIGNED (type); - /* Assume we'll get a constant result. 
Use an initial varying value, - we fall back to varying in the end if necessary. */ - *mask = double_int_minus_one; + signop sgn = TYPE_SIGN (type); + int width = TYPE_PRECISION (type); + bool swap_p = false; + + /* Assume we'll get a constant result. Use an initial non varying + value, we fall back to varying in the end if necessary. */ + *mask = -1; + switch (code) { case BIT_AND_EXPR: @@ -1150,13 +1310,35 @@ bit_value_binop_1 (enum tree_code code, tree type, case LROTATE_EXPR: case RROTATE_EXPR: - if (r2mask.is_zero ()) + if (r2mask == 0) { - HOST_WIDE_INT shift = r2val.low; - if (code == RROTATE_EXPR) - shift = -shift; - *mask = r1mask.lrotate (shift, TYPE_PRECISION (type)); - *val = r1val.lrotate (shift, TYPE_PRECISION (type)); + widest_int shift = r2val; + if (shift == 0) + { + *mask = r1mask; + *val = r1val; + } + else + { + if (wi::neg_p (shift)) + { + shift = -shift; + if (code == RROTATE_EXPR) + code = LROTATE_EXPR; + else + code = RROTATE_EXPR; + } + if (code == RROTATE_EXPR) + { + *mask = wi::rrotate (r1mask, shift, width); + *val = wi::rrotate (r1val, shift, width); + } + else + { + *mask = wi::lrotate (r1mask, shift, width); + *val = wi::lrotate (r1val, shift, width); + } + } } break; @@ -1165,31 +1347,34 @@ bit_value_binop_1 (enum tree_code code, tree type, /* ??? We can handle partially known shift counts if we know its sign. That way we can tell that (x << (y | 8)) & 255 is zero. */ - if (r2mask.is_zero ()) + if (r2mask == 0) { - HOST_WIDE_INT shift = r2val.low; - if (code == RSHIFT_EXPR) - shift = -shift; - /* We need to know if we are doing a left or a right shift - to properly shift in zeros for left shift and unsigned - right shifts and the sign bit for signed right shifts. - For signed right shifts we shift in varying in case - the sign bit was varying. */ - if (shift > 0) - { - *mask = r1mask.llshift (shift, TYPE_PRECISION (type)); - *val = r1val.llshift (shift, TYPE_PRECISION (type)); - } - else if (shift < 0) + widest_int shift = r2val; + if (shift == 0) { - shift = -shift; - *mask = r1mask.rshift (shift, TYPE_PRECISION (type), !uns); - *val = r1val.rshift (shift, TYPE_PRECISION (type), !uns); + *mask = r1mask; + *val = r1val; } else { - *mask = r1mask; - *val = r1val; + if (wi::neg_p (shift)) + { + shift = -shift; + if (code == RSHIFT_EXPR) + code = LSHIFT_EXPR; + else + code = RSHIFT_EXPR; + } + if (code == RSHIFT_EXPR) + { + *mask = wi::rshift (wi::ext (r1mask, width, sgn), shift, sgn); + *val = wi::rshift (wi::ext (r1val, width, sgn), shift, sgn); + } + else + { + *mask = wi::ext (wi::lshift (r1mask, shift), width, sgn); + *val = wi::ext (wi::lshift (r1val, shift), width, sgn); + } } } break; @@ -1197,21 +1382,20 @@ bit_value_binop_1 (enum tree_code code, tree type, case PLUS_EXPR: case POINTER_PLUS_EXPR: { - double_int lo, hi; /* Do the addition with unknown bits set to zero, to give carry-ins of zero wherever possible. */ - lo = r1val.and_not (r1mask) + r2val.and_not (r2mask); - lo = lo.ext (TYPE_PRECISION (type), uns); + widest_int lo = r1val.and_not (r1mask) + r2val.and_not (r2mask); + lo = wi::ext (lo, width, sgn); /* Do the addition with unknown bits set to one, to give carry-ins of one wherever possible. */ - hi = (r1val | r1mask) + (r2val | r2mask); - hi = hi.ext (TYPE_PRECISION (type), uns); + widest_int hi = (r1val | r1mask) + (r2val | r2mask); + hi = wi::ext (hi, width, sgn); /* Each bit in the result is known if (a) the corresponding bits in both inputs are known, and (b) the carry-in to that bit position is known. 
We can check condition (b) by seeing if we got the same result with minimised carries as with maximised carries. */ *mask = r1mask | r2mask | (lo ^ hi); - *mask = (*mask).ext (TYPE_PRECISION (type), uns); + *mask = wi::ext (*mask, width, sgn); /* It shouldn't matter whether we choose lo or hi here. */ *val = lo; break; @@ -1219,7 +1403,7 @@ bit_value_binop_1 (enum tree_code code, tree type, case MINUS_EXPR: { - double_int temv, temm; + widest_int temv, temm; bit_value_unop_1 (NEGATE_EXPR, r2type, &temv, &temm, r2type, r2val, r2mask); bit_value_binop_1 (PLUS_EXPR, type, val, mask, @@ -1232,18 +1416,18 @@ bit_value_binop_1 (enum tree_code code, tree type, { /* Just track trailing zeros in both operands and transfer them to the other. */ - int r1tz = (r1val | r1mask).trailing_zeros (); - int r2tz = (r2val | r2mask).trailing_zeros (); - if (r1tz + r2tz >= HOST_BITS_PER_DOUBLE_INT) + int r1tz = wi::ctz (r1val | r1mask); + int r2tz = wi::ctz (r2val | r2mask); + if (r1tz + r2tz >= width) { - *mask = double_int_zero; - *val = double_int_zero; + *mask = 0; + *val = 0; } else if (r1tz + r2tz > 0) { - *mask = ~double_int::mask (r1tz + r2tz); - *mask = (*mask).ext (TYPE_PRECISION (type), uns); - *val = double_int_zero; + *mask = wi::ext (wi::mask (r1tz + r2tz, true), + width, sgn); + *val = 0; } break; } @@ -1251,71 +1435,70 @@ bit_value_binop_1 (enum tree_code code, tree type, case EQ_EXPR: case NE_EXPR: { - double_int m = r1mask | r2mask; + widest_int m = r1mask | r2mask; if (r1val.and_not (m) != r2val.and_not (m)) { - *mask = double_int_zero; - *val = ((code == EQ_EXPR) ? double_int_zero : double_int_one); + *mask = 0; + *val = ((code == EQ_EXPR) ? 0 : 1); } else { /* We know the result of a comparison is always one or zero. */ - *mask = double_int_one; - *val = double_int_zero; + *mask = 1; + *val = 0; } break; } case GE_EXPR: case GT_EXPR: - { - double_int tem = r1val; - r1val = r2val; - r2val = tem; - tem = r1mask; - r1mask = r2mask; - r2mask = tem; - code = swap_tree_comparison (code); - } - /* Fallthru. */ + swap_p = true; + code = swap_tree_comparison (code); + /* Fall through. */ case LT_EXPR: case LE_EXPR: { int minmax, maxmin; + + const widest_int &o1val = swap_p ? r2val : r1val; + const widest_int &o1mask = swap_p ? r2mask : r1mask; + const widest_int &o2val = swap_p ? r1val : r2val; + const widest_int &o2mask = swap_p ? r1mask : r2mask; + /* If the most significant bits are not known we know nothing. */ - if (r1mask.is_negative () || r2mask.is_negative ()) + if (wi::neg_p (o1mask) || wi::neg_p (o2mask)) break; /* For comparisons the signedness is in the comparison operands. */ - uns = TYPE_UNSIGNED (r1type); + sgn = TYPE_SIGN (r1type); /* If we know the most significant bits we know the values value ranges by means of treating varying bits as zero or one. Do a cross comparison of the max/min pairs. */ - maxmin = (r1val | r1mask).cmp (r2val.and_not (r2mask), uns); - minmax = r1val.and_not (r1mask).cmp (r2val | r2mask, uns); - if (maxmin < 0) /* r1 is less than r2. */ + maxmin = wi::cmp (o1val | o1mask, o2val.and_not (o2mask), sgn); + minmax = wi::cmp (o1val.and_not (o1mask), o2val | o2mask, sgn); + if (maxmin < 0) /* o1 is less than o2. */ { - *mask = double_int_zero; - *val = double_int_one; + *mask = 0; + *val = 1; } - else if (minmax > 0) /* r1 is not less or equal to r2. */ + else if (minmax > 0) /* o1 is not less or equal to o2. */ { - *mask = double_int_zero; - *val = double_int_zero; + *mask = 0; + *val = 0; } - else if (maxmin == minmax) /* r1 and r2 are equal. 
*/ + else if (maxmin == minmax) /* o1 and o2 are equal. */ { /* This probably should never happen as we'd have folded the thing during fully constant value folding. */ - *mask = double_int_zero; - *val = (code == LE_EXPR ? double_int_one : double_int_zero); + *mask = 0; + *val = (code == LE_EXPR ? 1 : 0); } else { /* We know the result of a comparison is always one or zero. */ - *mask = double_int_one; - *val = double_int_zero; + *mask = 1; + *val = 0; } break; } @@ -1327,33 +1510,33 @@ bit_value_binop_1 (enum tree_code code, tree type, /* Return the propagation value when applying the operation CODE to the value RHS yielding type TYPE. */ -static prop_value_t +static ccp_prop_value_t bit_value_unop (enum tree_code code, tree type, tree rhs) { - prop_value_t rval = get_value_for_expr (rhs, true); - double_int value, mask; - prop_value_t val; + ccp_prop_value_t rval = get_value_for_expr (rhs, true); + widest_int value, mask; + ccp_prop_value_t val; if (rval.lattice_val == UNDEFINED) return rval; gcc_assert ((rval.lattice_val == CONSTANT && TREE_CODE (rval.value) == INTEGER_CST) - || rval.mask.is_minus_one ()); + || wi::sext (rval.mask, TYPE_PRECISION (TREE_TYPE (rhs))) == -1); bit_value_unop_1 (code, type, &value, &mask, - TREE_TYPE (rhs), value_to_double_int (rval), rval.mask); - if (!mask.is_minus_one ()) + TREE_TYPE (rhs), value_to_wide_int (rval), rval.mask); + if (wi::sext (mask, TYPE_PRECISION (type)) != -1) { val.lattice_val = CONSTANT; val.mask = mask; /* ??? Delay building trees here. */ - val.value = double_int_to_tree (type, value); + val.value = wide_int_to_tree (type, value); } else { val.lattice_val = VARYING; val.value = NULL_TREE; - val.mask = double_int_minus_one; + val.mask = -1; } return val; } @@ -1361,102 +1544,150 @@ bit_value_unop (enum tree_code code, tree type, tree rhs) /* Return the propagation value when applying the operation CODE to the values RHS1 and RHS2 yielding type TYPE. */ -static prop_value_t +static ccp_prop_value_t bit_value_binop (enum tree_code code, tree type, tree rhs1, tree rhs2) { - prop_value_t r1val = get_value_for_expr (rhs1, true); - prop_value_t r2val = get_value_for_expr (rhs2, true); - double_int value, mask; - prop_value_t val; + ccp_prop_value_t r1val = get_value_for_expr (rhs1, true); + ccp_prop_value_t r2val = get_value_for_expr (rhs2, true); + widest_int value, mask; + ccp_prop_value_t val; if (r1val.lattice_val == UNDEFINED || r2val.lattice_val == UNDEFINED) { val.lattice_val = VARYING; val.value = NULL_TREE; - val.mask = double_int_minus_one; + val.mask = -1; return val; } gcc_assert ((r1val.lattice_val == CONSTANT && TREE_CODE (r1val.value) == INTEGER_CST) - || r1val.mask.is_minus_one ()); + || wi::sext (r1val.mask, + TYPE_PRECISION (TREE_TYPE (rhs1))) == -1); gcc_assert ((r2val.lattice_val == CONSTANT && TREE_CODE (r2val.value) == INTEGER_CST) - || r2val.mask.is_minus_one ()); + || wi::sext (r2val.mask, + TYPE_PRECISION (TREE_TYPE (rhs2))) == -1); bit_value_binop_1 (code, type, &value, &mask, - TREE_TYPE (rhs1), value_to_double_int (r1val), r1val.mask, - TREE_TYPE (rhs2), value_to_double_int (r2val), r2val.mask); - if (!mask.is_minus_one ()) + TREE_TYPE (rhs1), value_to_wide_int (r1val), r1val.mask, + TREE_TYPE (rhs2), value_to_wide_int (r2val), r2val.mask); + if (wi::sext (mask, TYPE_PRECISION (type)) != -1) { val.lattice_val = CONSTANT; val.mask = mask; /* ??? Delay building trees here. 
*/ - val.value = double_int_to_tree (type, value); + val.value = wide_int_to_tree (type, value); } else { val.lattice_val = VARYING; val.value = NULL_TREE; - val.mask = double_int_minus_one; + val.mask = -1; } return val; } -/* Return the propagation value when applying __builtin_assume_aligned to - its arguments. */ +/* Return the propagation value for __builtin_assume_aligned + and functions with assume_aligned or alloc_aligned attribute. + For __builtin_assume_aligned, ATTR is NULL_TREE, + for assume_aligned attribute ATTR is non-NULL and ALLOC_ALIGNED + is false, for alloc_aligned attribute ATTR is non-NULL and + ALLOC_ALIGNED is true. */ -static prop_value_t -bit_value_assume_aligned (gimple stmt) +static ccp_prop_value_t +bit_value_assume_aligned (gimple *stmt, tree attr, ccp_prop_value_t ptrval, + bool alloc_aligned) { - tree ptr = gimple_call_arg (stmt, 0), align, misalign = NULL_TREE; - tree type = TREE_TYPE (ptr); + tree align, misalign = NULL_TREE, type; unsigned HOST_WIDE_INT aligni, misaligni = 0; - prop_value_t ptrval = get_value_for_expr (ptr, true); - prop_value_t alignval; - double_int value, mask; - prop_value_t val; + ccp_prop_value_t alignval; + widest_int value, mask; + ccp_prop_value_t val; + + if (attr == NULL_TREE) + { + tree ptr = gimple_call_arg (stmt, 0); + type = TREE_TYPE (ptr); + ptrval = get_value_for_expr (ptr, true); + } + else + { + tree lhs = gimple_call_lhs (stmt); + type = TREE_TYPE (lhs); + } + if (ptrval.lattice_val == UNDEFINED) return ptrval; gcc_assert ((ptrval.lattice_val == CONSTANT && TREE_CODE (ptrval.value) == INTEGER_CST) - || ptrval.mask.is_minus_one ()); - align = gimple_call_arg (stmt, 1); - if (!host_integerp (align, 1)) - return ptrval; - aligni = tree_low_cst (align, 1); - if (aligni <= 1 - || (aligni & (aligni - 1)) != 0) - return ptrval; - if (gimple_call_num_args (stmt) > 2) + || wi::sext (ptrval.mask, TYPE_PRECISION (type)) == -1); + if (attr == NULL_TREE) { - misalign = gimple_call_arg (stmt, 2); - if (!host_integerp (misalign, 1)) + /* Get aligni and misaligni from __builtin_assume_aligned. */ + align = gimple_call_arg (stmt, 1); + if (!tree_fits_uhwi_p (align)) return ptrval; - misaligni = tree_low_cst (misalign, 1); - if (misaligni >= aligni) + aligni = tree_to_uhwi (align); + if (gimple_call_num_args (stmt) > 2) + { + misalign = gimple_call_arg (stmt, 2); + if (!tree_fits_uhwi_p (misalign)) + return ptrval; + misaligni = tree_to_uhwi (misalign); + } + } + else + { + /* Get aligni and misaligni from assume_aligned or + alloc_align attributes. 
*/ + if (TREE_VALUE (attr) == NULL_TREE) + return ptrval; + attr = TREE_VALUE (attr); + align = TREE_VALUE (attr); + if (!tree_fits_uhwi_p (align)) return ptrval; + aligni = tree_to_uhwi (align); + if (alloc_aligned) + { + if (aligni == 0 || aligni > gimple_call_num_args (stmt)) + return ptrval; + align = gimple_call_arg (stmt, aligni - 1); + if (!tree_fits_uhwi_p (align)) + return ptrval; + aligni = tree_to_uhwi (align); + } + else if (TREE_CHAIN (attr) && TREE_VALUE (TREE_CHAIN (attr))) + { + misalign = TREE_VALUE (TREE_CHAIN (attr)); + if (!tree_fits_uhwi_p (misalign)) + return ptrval; + misaligni = tree_to_uhwi (misalign); + } } + if (aligni <= 1 || (aligni & (aligni - 1)) != 0 || misaligni >= aligni) + return ptrval; + align = build_int_cst_type (type, -aligni); alignval = get_value_for_expr (align, true); bit_value_binop_1 (BIT_AND_EXPR, type, &value, &mask, - type, value_to_double_int (ptrval), ptrval.mask, - type, value_to_double_int (alignval), alignval.mask); - if (!mask.is_minus_one ()) + type, value_to_wide_int (ptrval), ptrval.mask, + type, value_to_wide_int (alignval), alignval.mask); + if (wi::sext (mask, TYPE_PRECISION (type)) != -1) { val.lattice_val = CONSTANT; val.mask = mask; - gcc_assert ((mask.low & (aligni - 1)) == 0); - gcc_assert ((value.low & (aligni - 1)) == 0); - value.low |= misaligni; + gcc_assert ((mask.to_uhwi () & (aligni - 1)) == 0); + gcc_assert ((value.to_uhwi () & (aligni - 1)) == 0); + value |= misaligni; /* ??? Delay building trees here. */ - val.value = double_int_to_tree (type, value); + val.value = wide_int_to_tree (type, value); } else { val.lattice_val = VARYING; val.value = NULL_TREE; - val.mask = double_int_minus_one; + val.mask = -1; } return val; } @@ -1464,10 +1695,10 @@ bit_value_assume_aligned (gimple stmt) /* Evaluate statement STMT. Valid only for assignments, calls, conditionals, and switches. */ -static prop_value_t -evaluate_stmt (gimple stmt) +static ccp_prop_value_t +evaluate_stmt (gimple *stmt) { - prop_value_t val; + ccp_prop_value_t val; tree simplified = NULL_TREE; ccp_lattice_t likelyvalue = likely_value (stmt); bool is_constant = false; @@ -1501,6 +1732,15 @@ evaluate_stmt (gimple stmt) { fold_defer_overflow_warnings (); simplified = ccp_fold (stmt); + if (simplified && TREE_CODE (simplified) == SSA_NAME) + { + val = *get_value (simplified); + if (val.lattice_val != VARYING) + { + fold_undefer_overflow_warnings (true, stmt, 0); + return val; + } + } is_constant = simplified && is_gimple_min_invariant (simplified); fold_undefer_overflow_warnings (is_constant, stmt, 0); if (is_constant) @@ -1508,7 +1748,8 @@ evaluate_stmt (gimple stmt) /* The statement produced a constant value. */ val.lattice_val = CONSTANT; val.value = simplified; - val.mask = double_int_zero; + val.mask = 0; + return val; } } /* If the statement is likely to have a VARYING result, then do not @@ -1526,7 +1767,7 @@ evaluate_stmt (gimple stmt) simplified = gimple_assign_rhs1 (stmt); } else if (code == GIMPLE_SWITCH) - simplified = gimple_switch_index (stmt); + simplified = gimple_switch_index (as_a (stmt)); else /* These cannot satisfy is_gimple_min_invariant without folding. */ gcc_assert (code == GIMPLE_CALL || code == GIMPLE_COND); @@ -1536,53 +1777,55 @@ evaluate_stmt (gimple stmt) /* The statement produced a constant value. */ val.lattice_val = CONSTANT; val.value = simplified; - val.mask = double_int_zero; + val.mask = 0; } } + /* If the statement result is likely UNDEFINED, make it so. 
*/ + else if (likelyvalue == UNDEFINED) + { + val.lattice_val = UNDEFINED; + val.value = NULL_TREE; + val.mask = 0; + return val; + } /* Resort to simplification for bitwise tracking. */ if (flag_tree_bit_ccp - && (likelyvalue == CONSTANT || is_gimple_call (stmt)) + && (likelyvalue == CONSTANT || is_gimple_call (stmt) + || (gimple_assign_single_p (stmt) + && gimple_assign_rhs_code (stmt) == ADDR_EXPR)) && !is_constant) { enum gimple_code code = gimple_code (stmt); - tree fndecl; val.lattice_val = VARYING; val.value = NULL_TREE; - val.mask = double_int_minus_one; + val.mask = -1; if (code == GIMPLE_ASSIGN) { enum tree_code subcode = gimple_assign_rhs_code (stmt); tree rhs1 = gimple_assign_rhs1 (stmt); - switch (get_gimple_rhs_class (subcode)) - { - case GIMPLE_SINGLE_RHS: - if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1)) - || POINTER_TYPE_P (TREE_TYPE (rhs1))) - val = get_value_for_expr (rhs1, true); - break; + tree lhs = gimple_assign_lhs (stmt); + if ((INTEGRAL_TYPE_P (TREE_TYPE (lhs)) + || POINTER_TYPE_P (TREE_TYPE (lhs))) + && (INTEGRAL_TYPE_P (TREE_TYPE (rhs1)) + || POINTER_TYPE_P (TREE_TYPE (rhs1)))) + switch (get_gimple_rhs_class (subcode)) + { + case GIMPLE_SINGLE_RHS: + val = get_value_for_expr (rhs1, true); + break; - case GIMPLE_UNARY_RHS: - if ((INTEGRAL_TYPE_P (TREE_TYPE (rhs1)) - || POINTER_TYPE_P (TREE_TYPE (rhs1))) - && (INTEGRAL_TYPE_P (gimple_expr_type (stmt)) - || POINTER_TYPE_P (gimple_expr_type (stmt)))) - val = bit_value_unop (subcode, gimple_expr_type (stmt), rhs1); - break; + case GIMPLE_UNARY_RHS: + val = bit_value_unop (subcode, TREE_TYPE (lhs), rhs1); + break; - case GIMPLE_BINARY_RHS: - if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1)) - || POINTER_TYPE_P (TREE_TYPE (rhs1))) - { - tree lhs = gimple_assign_lhs (stmt); - tree rhs2 = gimple_assign_rhs2 (stmt); - val = bit_value_binop (subcode, - TREE_TYPE (lhs), rhs1, rhs2); - } - break; + case GIMPLE_BINARY_RHS: + val = bit_value_binop (subcode, TREE_TYPE (lhs), rhs1, + gimple_assign_rhs2 (stmt)); + break; - default:; - } + default:; + } } else if (code == GIMPLE_COND) { @@ -1593,10 +1836,9 @@ evaluate_stmt (gimple stmt) || POINTER_TYPE_P (TREE_TYPE (rhs1))) val = bit_value_binop (code, TREE_TYPE (rhs1), rhs1, rhs2); } - else if (code == GIMPLE_CALL - && (fndecl = gimple_call_fndecl (stmt)) - && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL) + else if (gimple_call_builtin_p (stmt, BUILT_IN_NORMAL)) { + tree fndecl = gimple_call_fndecl (stmt); switch (DECL_FUNCTION_CODE (fndecl)) { case BUILT_IN_MALLOC: @@ -1606,9 +1848,8 @@ evaluate_stmt (gimple stmt) case BUILT_IN_STRNDUP: val.lattice_val = CONSTANT; val.value = build_int_cst (TREE_TYPE (gimple_get_lhs (stmt)), 0); - val.mask = double_int::from_shwi - (~(((HOST_WIDE_INT) MALLOC_ABI_ALIGNMENT) - / BITS_PER_UNIT - 1)); + val.mask = ~((HOST_WIDE_INT) MALLOC_ABI_ALIGNMENT + / BITS_PER_UNIT - 1); break; case BUILT_IN_ALLOCA: @@ -1618,8 +1859,7 @@ evaluate_stmt (gimple stmt) : BIGGEST_ALIGNMENT); val.lattice_val = CONSTANT; val.value = build_int_cst (TREE_TYPE (gimple_get_lhs (stmt)), 0); - val.mask = double_int::from_shwi (~(((HOST_WIDE_INT) align) - / BITS_PER_UNIT - 1)); + val.mask = ~((HOST_WIDE_INT) align / BITS_PER_UNIT - 1); break; /* These builtins return their first argument, unmodified. 
*/ @@ -1637,56 +1877,123 @@ evaluate_stmt (gimple stmt) break; case BUILT_IN_ASSUME_ALIGNED: - val = bit_value_assume_aligned (stmt); + val = bit_value_assume_aligned (stmt, NULL_TREE, val, false); break; + case BUILT_IN_ALIGNED_ALLOC: + { + tree align = get_constant_value (gimple_call_arg (stmt, 0)); + if (align + && tree_fits_uhwi_p (align)) + { + unsigned HOST_WIDE_INT aligni = tree_to_uhwi (align); + if (aligni > 1 + /* align must be power-of-two */ + && (aligni & (aligni - 1)) == 0) + { + val.lattice_val = CONSTANT; + val.value = build_int_cst (ptr_type_node, 0); + val.mask = -aligni; + } + } + break; + } + default:; } } + if (is_gimple_call (stmt) && gimple_call_lhs (stmt)) + { + tree fntype = gimple_call_fntype (stmt); + if (fntype) + { + tree attrs = lookup_attribute ("assume_aligned", + TYPE_ATTRIBUTES (fntype)); + if (attrs) + val = bit_value_assume_aligned (stmt, attrs, val, false); + attrs = lookup_attribute ("alloc_align", + TYPE_ATTRIBUTES (fntype)); + if (attrs) + val = bit_value_assume_aligned (stmt, attrs, val, true); + } + } is_constant = (val.lattice_val == CONSTANT); } + if (flag_tree_bit_ccp + && ((is_constant && TREE_CODE (val.value) == INTEGER_CST) + || !is_constant) + && gimple_get_lhs (stmt) + && TREE_CODE (gimple_get_lhs (stmt)) == SSA_NAME) + { + tree lhs = gimple_get_lhs (stmt); + wide_int nonzero_bits = get_nonzero_bits (lhs); + if (nonzero_bits != -1) + { + if (!is_constant) + { + val.lattice_val = CONSTANT; + val.value = build_zero_cst (TREE_TYPE (lhs)); + val.mask = extend_mask (nonzero_bits); + is_constant = true; + } + else + { + if (wi::bit_and_not (val.value, nonzero_bits) != 0) + val.value = wide_int_to_tree (TREE_TYPE (lhs), + nonzero_bits & val.value); + if (nonzero_bits == 0) + val.mask = 0; + else + val.mask = val.mask & extend_mask (nonzero_bits); + } + } + } + + /* The statement produced a nonconstant value. */ if (!is_constant) { - /* The statement produced a nonconstant value. If the statement - had UNDEFINED operands, then the result of the statement - should be UNDEFINED. Otherwise, the statement is VARYING. */ - if (likelyvalue == UNDEFINED) + /* The statement produced a copy. */ + if (simplified && TREE_CODE (simplified) == SSA_NAME + && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (simplified)) { - val.lattice_val = likelyvalue; - val.mask = double_int_zero; + val.lattice_val = CONSTANT; + val.value = simplified; + val.mask = -1; } + /* The statement is VARYING. */ else { val.lattice_val = VARYING; - val.mask = double_int_minus_one; + val.value = NULL_TREE; + val.mask = -1; } - - val.value = NULL_TREE; } return val; } -typedef hash_table > gimple_htab; +typedef hash_table > gimple_htab; /* Given a BUILT_IN_STACK_SAVE value SAVED_VAL, insert a clobber of VAR before each matching BUILT_IN_STACK_RESTORE. Mark visited phis in VISITED. 
*/ static void insert_clobber_before_stack_restore (tree saved_val, tree var, - gimple_htab *visited) + gimple_htab **visited) { - gimple stmt, clobber_stmt; + gimple *stmt; + gassign *clobber_stmt; tree clobber; imm_use_iterator iter; gimple_stmt_iterator i; - gimple *slot; + gimple **slot; FOR_EACH_IMM_USE_STMT (stmt, iter, saved_val) if (gimple_call_builtin_p (stmt, BUILT_IN_STACK_RESTORE)) { - clobber = build_constructor (TREE_TYPE (var), NULL); + clobber = build_constructor (TREE_TYPE (var), + NULL); TREE_THIS_VOLATILE (clobber) = 1; clobber_stmt = gimple_build_assign (var, clobber); @@ -1695,10 +2002,10 @@ insert_clobber_before_stack_restore (tree saved_val, tree var, } else if (gimple_code (stmt) == GIMPLE_PHI) { - if (!visited->is_created ()) - visited->create (10); + if (!*visited) + *visited = new gimple_htab (10); - slot = visited->find_slot (stmt, INSERT); + slot = (*visited)->find_slot (stmt, INSERT); if (*slot != NULL) continue; @@ -1706,6 +2013,11 @@ insert_clobber_before_stack_restore (tree saved_val, tree var, insert_clobber_before_stack_restore (gimple_phi_result (stmt), var, visited); } + else if (gimple_assign_ssa_name_copy_p (stmt)) + insert_clobber_before_stack_restore (gimple_assign_lhs (stmt), var, + visited); + else if (chkp_gimple_call_builtin_p (stmt, BUILT_IN_CHKP_BNDRET)) + continue; else gcc_assert (is_gimple_debug (stmt)); } @@ -1722,7 +2034,7 @@ gsi_prev_dom_bb_nondebug (gimple_stmt_iterator *i) while (gsi_end_p (*i)) { dom = get_immediate_dominator (CDI_DOMINATORS, i->bb); - if (dom == NULL || dom == ENTRY_BLOCK_PTR) + if (dom == NULL || dom == ENTRY_BLOCK_PTR_FOR_FN (cfun)) return; *i = gsi_last_bb (dom); @@ -1739,9 +2051,9 @@ gsi_prev_dom_bb_nondebug (gimple_stmt_iterator *i) static void insert_clobbers_for_var (gimple_stmt_iterator i, tree var) { - gimple stmt; + gimple *stmt; tree saved_val; - gimple_htab visited; + gimple_htab *visited = NULL; for (; !gsi_end_p (i); gsi_prev_dom_bb_nondebug (&i)) { @@ -1758,8 +2070,7 @@ insert_clobbers_for_var (gimple_stmt_iterator i, tree var) break; } - if (visited.is_created ()) - visited.dispose (); + delete visited; } /* Detects a __builtin_alloca_with_align with constant size argument. Declares @@ -1767,7 +2078,7 @@ insert_clobbers_for_var (gimple_stmt_iterator i, tree var) NULL_TREE. */ static tree -fold_builtin_alloca_with_align (gimple stmt) +fold_builtin_alloca_with_align (gimple *stmt) { unsigned HOST_WIDE_INT size, threshold, n_elem; tree lhs, arg, block, var, elem_type, array_type; @@ -1781,10 +2092,10 @@ fold_builtin_alloca_with_align (gimple stmt) arg = get_constant_value (gimple_call_arg (stmt, 0)); if (arg == NULL_TREE || TREE_CODE (arg) != INTEGER_CST - || !host_integerp (arg, 1)) + || !tree_fits_uhwi_p (arg)) return NULL_TREE; - size = TREE_INT_CST_LOW (arg); + size = tree_to_uhwi (arg); /* Heuristic: don't fold large allocas. */ threshold = (unsigned HOST_WIDE_INT)PARAM_VALUE (PARAM_LARGE_STACK_FRAME); @@ -1792,6 +2103,7 @@ fold_builtin_alloca_with_align (gimple stmt) as a declared array, so we allow a larger size. 
*/ block = gimple_block (stmt); if (!(cfun->after_inlining + && block && TREE_CODE (BLOCK_SUPERCONTEXT (block)) == FUNCTION_DECL)) threshold /= 10; if (size > threshold) @@ -1801,7 +2113,7 @@ fold_builtin_alloca_with_align (gimple stmt) elem_type = build_nonstandard_integer_type (BITS_PER_UNIT, 1); n_elem = size * 8 / BITS_PER_UNIT; array_type = build_array_type_nelts (elem_type, n_elem); - var = create_tmp_var (array_type, NULL); + var = create_tmp_var (array_type); DECL_ALIGN (var) = TREE_INT_CST_LOW (gimple_call_arg (stmt, 1)); { struct ptr_info_def *pi = SSA_NAME_PTR_INFO (lhs); @@ -1825,19 +2137,20 @@ fold_builtin_alloca_with_align (gimple stmt) static bool ccp_fold_stmt (gimple_stmt_iterator *gsi) { - gimple stmt = gsi_stmt (*gsi); + gimple *stmt = gsi_stmt (*gsi); switch (gimple_code (stmt)) { case GIMPLE_COND: { - prop_value_t val; + gcond *cond_stmt = as_a (stmt); + ccp_prop_value_t val; /* Statement evaluation will handle type mismatches in constants more gracefully than the final propagation. This allows us to fold more conditionals here. */ val = evaluate_stmt (stmt); if (val.lattice_val != CONSTANT - || !val.mask.is_zero ()) + || val.mask != 0) return false; if (dump_file) @@ -1850,9 +2163,9 @@ ccp_fold_stmt (gimple_stmt_iterator *gsi) } if (integer_zerop (val.value)) - gimple_cond_make_false (stmt); + gimple_cond_make_false (cond_stmt); else - gimple_cond_make_true (stmt); + gimple_cond_make_true (cond_stmt); return true; } @@ -1964,33 +2277,21 @@ ccp_fold_stmt (gimple_stmt_iterator *gsi) are handled here. */ static enum ssa_prop_result -visit_assignment (gimple stmt, tree *output_p) +visit_assignment (gimple *stmt, tree *output_p) { - prop_value_t val; - enum ssa_prop_result retval; + ccp_prop_value_t val; + enum ssa_prop_result retval = SSA_PROP_NOT_INTERESTING; tree lhs = gimple_get_lhs (stmt); - - gcc_assert (gimple_code (stmt) != GIMPLE_CALL - || gimple_call_lhs (stmt) != NULL_TREE); - - if (gimple_assign_single_p (stmt) - && gimple_assign_rhs_code (stmt) == SSA_NAME) - /* For a simple copy operation, we copy the lattice values. */ - val = *get_value (gimple_assign_rhs1 (stmt)); - else - /* Evaluate the statement, which could be - either a GIMPLE_ASSIGN or a GIMPLE_CALL. */ - val = evaluate_stmt (stmt); - - retval = SSA_PROP_NOT_INTERESTING; - - /* Set the lattice value of the statement's output. */ if (TREE_CODE (lhs) == SSA_NAME) { + /* Evaluate the statement, which could be + either a GIMPLE_ASSIGN or a GIMPLE_CALL. */ + val = evaluate_stmt (stmt); + /* If STMT is an assignment to an SSA_NAME, we only have one value to set. */ - if (set_lattice_value (lhs, val)) + if (set_lattice_value (lhs, &val)) { *output_p = lhs; if (val.lattice_val == VARYING) @@ -2009,15 +2310,15 @@ visit_assignment (gimple stmt, tree *output_p) SSA_PROP_VARYING. */ static enum ssa_prop_result -visit_cond_stmt (gimple stmt, edge *taken_edge_p) +visit_cond_stmt (gimple *stmt, edge *taken_edge_p) { - prop_value_t val; + ccp_prop_value_t val; basic_block block; block = gimple_bb (stmt); val = evaluate_stmt (stmt); if (val.lattice_val != CONSTANT - || !val.mask.is_zero ()) + || val.mask != 0) return SSA_PROP_VARYING; /* Find which edge out of the conditional block will be taken and add it @@ -2042,7 +2343,7 @@ visit_cond_stmt (gimple stmt, edge *taken_edge_p) value, return SSA_PROP_VARYING. 
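   */

/* Illustrative aside, not part of the patch: what the "CONSTANT with
   mask == 0" test above means in practice.  A branch is only folded when
   every bit of the controlling value is known; partially known values
   (nonzero mask) are left alone.  Function name made up.  */

int
fold_or_keep (unsigned x)
{
  unsigned shifted = x << 2;	/* bit-CCP: the low two bits are known zero.  */

  if (shifted & 3)		/* CONSTANT 0 with mask 0: foldable.  */
    return -1;

  if (shifted & 4)		/* bit 2 is unknown, mask != 0: kept.  */
    return 1;

  return 0;
}

/* The statement visitor described above: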
*/ static enum ssa_prop_result -ccp_visit_stmt (gimple stmt, edge *taken_edge_p, tree *output_p) +ccp_visit_stmt (gimple *stmt, edge *taken_edge_p, tree *output_p) { tree def; ssa_op_iter iter; @@ -2088,59 +2389,80 @@ ccp_visit_stmt (gimple stmt, edge *taken_edge_p, tree *output_p) SSA_NAMEs represent unknown modifications to their outputs. Mark them VARYING. */ FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_ALL_DEFS) - { - prop_value_t v = { VARYING, NULL_TREE, { -1, (HOST_WIDE_INT) -1 } }; - set_lattice_value (def, v); - } + set_value_varying (def); return SSA_PROP_VARYING; } -/* Main entry point for SSA Conditional Constant Propagation. */ +/* Main entry point for SSA Conditional Constant Propagation. If NONZERO_P, + record nonzero bits. */ static unsigned int -do_ssa_ccp (void) +do_ssa_ccp (bool nonzero_p) { unsigned int todo = 0; calculate_dominance_info (CDI_DOMINATORS); + ccp_initialize (); ssa_propagate (ccp_visit_stmt, ccp_visit_phi_node); - if (ccp_finalize ()) - todo = (TODO_cleanup_cfg | TODO_update_ssa | TODO_remove_unused_locals); + if (ccp_finalize (nonzero_p)) + { + todo = (TODO_cleanup_cfg | TODO_update_ssa); + + /* ccp_finalize does not preserve loop-closed ssa. */ + loops_state_clear (LOOP_CLOSED_SSA); + } + free_dominance_info (CDI_DOMINATORS); return todo; } -static bool -gate_ccp (void) +namespace { + +const pass_data pass_data_ccp = { - return flag_tree_ccp != 0; -} + GIMPLE_PASS, /* type */ + "ccp", /* name */ + OPTGROUP_NONE, /* optinfo_flags */ + TV_TREE_CCP, /* tv_id */ + ( PROP_cfg | PROP_ssa ), /* properties_required */ + 0, /* properties_provided */ + 0, /* properties_destroyed */ + 0, /* todo_flags_start */ + TODO_update_address_taken, /* todo_flags_finish */ +}; +class pass_ccp : public gimple_opt_pass +{ +public: + pass_ccp (gcc::context *ctxt) + : gimple_opt_pass (pass_data_ccp, ctxt), nonzero_p (false) + {} + + /* opt_pass methods: */ + opt_pass * clone () { return new pass_ccp (m_ctxt); } + void set_pass_param (unsigned int n, bool param) + { + gcc_assert (n == 0); + nonzero_p = param; + } + virtual bool gate (function *) { return flag_tree_ccp != 0; } + virtual unsigned int execute (function *) { return do_ssa_ccp (nonzero_p); } + + private: + /* Determines whether the pass instance records nonzero bits. 
*/ + bool nonzero_p; +}; // class pass_ccp -struct gimple_opt_pass pass_ccp = +} // anon namespace + +gimple_opt_pass * +make_pass_ccp (gcc::context *ctxt) { - { - GIMPLE_PASS, - "ccp", /* name */ - OPTGROUP_NONE, /* optinfo_flags */ - gate_ccp, /* gate */ - do_ssa_ccp, /* execute */ - NULL, /* sub */ - NULL, /* next */ - 0, /* static_pass_number */ - TV_TREE_CCP, /* tv_id */ - PROP_cfg | PROP_ssa, /* properties_required */ - 0, /* properties_provided */ - 0, /* properties_destroyed */ - 0, /* todo_flags_start */ - TODO_verify_ssa - | TODO_update_address_taken - | TODO_verify_stmts | TODO_ggc_collect/* todo_flags_finish */ - } -}; + return new pass_ccp (ctxt); +} @@ -2154,10 +2476,10 @@ static tree optimize_stack_restore (gimple_stmt_iterator i) { tree callee; - gimple stmt; + gimple *stmt; basic_block bb = gsi_bb (i); - gimple call = gsi_stmt (i); + gimple *call = gsi_stmt (i); if (gimple_code (call) != GIMPLE_CALL || gimple_call_num_args (call) != 1 @@ -2194,7 +2516,7 @@ optimize_stack_restore (gimple_stmt_iterator i) case 0: break; case 1: - if (single_succ_edge (bb)->dest != EXIT_BLOCK_PTR) + if (single_succ_edge (bb)->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)) return NULL_TREE; break; default: @@ -2208,7 +2530,7 @@ optimize_stack_restore (gimple_stmt_iterator i) or not is irrelevant to removing the call to __builtin_stack_restore. */ if (has_single_use (gimple_call_arg (call, 0))) { - gimple stack_save = SSA_NAME_DEF_STMT (gimple_call_arg (call, 0)); + gimple *stack_save = SSA_NAME_DEF_STMT (gimple_call_arg (call, 0)); if (is_gimple_call (stack_save)) { callee = gimple_call_fndecl (stack_save); @@ -2236,7 +2558,7 @@ optimize_stack_restore (gimple_stmt_iterator i) pointer assignment. */ static tree -optimize_stdarg_builtin (gimple call) +optimize_stdarg_builtin (gimple *call) { tree callee, lhs, rhs, cfun_va_list; bool va_list_simple_ptr; @@ -2314,11 +2636,14 @@ optimize_unreachable (gimple_stmt_iterator i) { basic_block bb = gsi_bb (i); gimple_stmt_iterator gsi; - gimple stmt; + gimple *stmt; edge_iterator ei; edge e; bool ret; + if (flag_sanitize & SANITIZE_UNREACHABLE) + return false; + for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) { stmt = gsi_stmt (gsi); @@ -2326,10 +2651,10 @@ optimize_unreachable (gimple_stmt_iterator i) if (is_gimple_debug (stmt)) continue; - if (gimple_code (stmt) == GIMPLE_LABEL) + if (glabel *label_stmt = dyn_cast (stmt)) { /* Verify we do not need to preserve the label. */ - if (FORCED_LABEL (gimple_label_label (stmt))) + if (FORCED_LABEL (gimple_label_label (label_stmt))) return false; continue; @@ -2350,15 +2675,15 @@ optimize_unreachable (gimple_stmt_iterator i) continue; stmt = gsi_stmt (gsi); - if (gimple_code (stmt) == GIMPLE_COND) + if (gcond *cond_stmt = dyn_cast (stmt)) { if (e->flags & EDGE_TRUE_VALUE) - gimple_cond_make_false (stmt); + gimple_cond_make_false (cond_stmt); else if (e->flags & EDGE_FALSE_VALUE) - gimple_cond_make_true (stmt); + gimple_cond_make_true (cond_stmt); else gcc_unreachable (); - update_stmt (stmt); + update_stmt (cond_stmt); } else { @@ -2375,85 +2700,135 @@ optimize_unreachable (gimple_stmt_iterator i) /* A simple pass that attempts to fold all builtin functions. This pass is run after we've propagated as many constants as we can. 
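   */

/* Illustrative aside, not part of the patch: two of the builtins the
   execute hook below cleans up.  A __builtin_constant_p call that has not
   been folded to 1 by now is resolved to 0, and __builtin_assume_aligned
   is replaced by its first argument once the alignment hint has been
   consumed.  Function name made up.  */

int
leftover_builtins (int *p, int x)
{
  if (__builtin_constant_p (x))			/* resolved to 0 here */
    return 0;
  int *q = __builtin_assume_aligned (p, 64);	/* becomes q = p */
  return *q;
}

/* The pass boilerplate and its execute hook: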
*/ -static unsigned int -execute_fold_all_builtins (void) +namespace { + +const pass_data pass_data_fold_builtins = +{ + GIMPLE_PASS, /* type */ + "fab", /* name */ + OPTGROUP_NONE, /* optinfo_flags */ + TV_NONE, /* tv_id */ + ( PROP_cfg | PROP_ssa ), /* properties_required */ + 0, /* properties_provided */ + 0, /* properties_destroyed */ + 0, /* todo_flags_start */ + TODO_update_ssa, /* todo_flags_finish */ +}; + +class pass_fold_builtins : public gimple_opt_pass +{ +public: + pass_fold_builtins (gcc::context *ctxt) + : gimple_opt_pass (pass_data_fold_builtins, ctxt) + {} + + /* opt_pass methods: */ + opt_pass * clone () { return new pass_fold_builtins (m_ctxt); } + virtual unsigned int execute (function *); + +}; // class pass_fold_builtins + +unsigned int +pass_fold_builtins::execute (function *fun) { bool cfg_changed = false; basic_block bb; unsigned int todoflags = 0; - FOR_EACH_BB (bb) + FOR_EACH_BB_FN (bb, fun) { gimple_stmt_iterator i; for (i = gsi_start_bb (bb); !gsi_end_p (i); ) { - gimple stmt, old_stmt; - tree callee, result; + gimple *stmt, *old_stmt; + tree callee; enum built_in_function fcode; stmt = gsi_stmt (i); if (gimple_code (stmt) != GIMPLE_CALL) { + /* Remove all *ssaname_N ={v} {CLOBBER}; stmts, + after the last GIMPLE DSE they aren't needed and might + unnecessarily keep the SSA_NAMEs live. */ + if (gimple_clobber_p (stmt)) + { + tree lhs = gimple_assign_lhs (stmt); + if (TREE_CODE (lhs) == MEM_REF + && TREE_CODE (TREE_OPERAND (lhs, 0)) == SSA_NAME) + { + unlink_stmt_vdef (stmt); + gsi_remove (&i, true); + release_defs (stmt); + continue; + } + } gsi_next (&i); continue; } + callee = gimple_call_fndecl (stmt); if (!callee || DECL_BUILT_IN_CLASS (callee) != BUILT_IN_NORMAL) { gsi_next (&i); continue; } - fcode = DECL_FUNCTION_CODE (callee); - - result = gimple_fold_builtin (stmt); - if (result) - gimple_remove_stmt_histograms (cfun, stmt); + fcode = DECL_FUNCTION_CODE (callee); + if (fold_stmt (&i)) + ; + else + { + tree result = NULL_TREE; + switch (DECL_FUNCTION_CODE (callee)) + { + case BUILT_IN_CONSTANT_P: + /* Resolve __builtin_constant_p. If it hasn't been + folded to integer_one_node by now, it's fairly + certain that the value simply isn't constant. */ + result = integer_zero_node; + break; - if (!result) - switch (DECL_FUNCTION_CODE (callee)) - { - case BUILT_IN_CONSTANT_P: - /* Resolve __builtin_constant_p. If it hasn't been - folded to integer_one_node by now, it's fairly - certain that the value simply isn't constant. */ - result = integer_zero_node; - break; + case BUILT_IN_ASSUME_ALIGNED: + /* Remove __builtin_assume_aligned. */ + result = gimple_call_arg (stmt, 0); + break; - case BUILT_IN_ASSUME_ALIGNED: - /* Remove __builtin_assume_aligned. */ - result = gimple_call_arg (stmt, 0); - break; + case BUILT_IN_STACK_RESTORE: + result = optimize_stack_restore (i); + if (result) + break; + gsi_next (&i); + continue; - case BUILT_IN_STACK_RESTORE: - result = optimize_stack_restore (i); - if (result) + case BUILT_IN_UNREACHABLE: + if (optimize_unreachable (i)) + cfg_changed = true; break; - gsi_next (&i); - continue; - case BUILT_IN_UNREACHABLE: - if (optimize_unreachable (i)) - cfg_changed = true; - break; + case BUILT_IN_VA_START: + case BUILT_IN_VA_END: + case BUILT_IN_VA_COPY: + /* These shouldn't be folded before pass_stdarg. */ + result = optimize_stdarg_builtin (stmt); + if (result) + break; + /* FALLTHRU */ - case BUILT_IN_VA_START: - case BUILT_IN_VA_END: - case BUILT_IN_VA_COPY: - /* These shouldn't be folded before pass_stdarg. 
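   */

/* Illustrative aside, not part of the patch: the stdarg builtins handled
   just below.  On targets whose va_list is a plain pointer,
   optimize_stdarg_builtin can lower va_start and va_copy to simple pointer
   assignments.  Function name made up.  */

#include <stdarg.h>

int
sum_args_twice (int n, ...)
{
  va_list ap, aq;
  int i, s = 0;

  va_start (ap, n);
  va_copy (aq, ap);		/* may become aq = ap */
  for (i = 0; i < n; i++)
    s += va_arg (ap, int);
  for (i = 0; i < n; i++)
    s += va_arg (aq, int);
  va_end (aq);
  va_end (ap);
  return s;
}

/* They are handled here via optimize_stdarg_builtin: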
*/ - result = optimize_stdarg_builtin (stmt); - if (result) - break; - /* FALLTHRU */ + default:; + } - default: - gsi_next (&i); - continue; - } + if (!result) + { + gsi_next (&i); + continue; + } - if (result == NULL_TREE) - break; + if (!update_call_from_tree (&i, result)) + gimplify_and_update_call_from_tree (&i, result); + } + + todoflags |= TODO_update_address_taken; if (dump_file && (dump_flags & TDF_DETAILS)) { @@ -2462,12 +2837,6 @@ execute_fold_all_builtins (void) } old_stmt = stmt; - if (!update_call_from_tree (&i, result)) - { - gimplify_and_update_call_from_tree (&i, result); - todoflags |= TODO_update_address_taken; - } - stmt = gsi_stmt (i); update_stmt (stmt); @@ -2504,24 +2873,10 @@ execute_fold_all_builtins (void) return todoflags; } +} // anon namespace -struct gimple_opt_pass pass_fold_builtins = +gimple_opt_pass * +make_pass_fold_builtins (gcc::context *ctxt) { - { - GIMPLE_PASS, - "fab", /* name */ - OPTGROUP_NONE, /* optinfo_flags */ - NULL, /* gate */ - execute_fold_all_builtins, /* execute */ - NULL, /* sub */ - NULL, /* next */ - 0, /* static_pass_number */ - TV_NONE, /* tv_id */ - PROP_cfg | PROP_ssa, /* properties_required */ - 0, /* properties_provided */ - 0, /* properties_destroyed */ - 0, /* todo_flags_start */ - TODO_verify_ssa - | TODO_update_ssa /* todo_flags_finish */ - } -}; + return new pass_fold_builtins (ctxt); +}
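
/* Illustrative aside, not part of the patch: the BUILT_IN_UNREACHABLE case
   handled by the pass above.  optimize_unreachable can rewrite the condition
   guarding a block that ends in __builtin_unreachable () so the edge into it
   is never taken; the new flag_sanitize check leaves the call alone under
   -fsanitize=unreachable so the sanitizer can still report it.  Function
   name made up.  */

int
clamp_index (int i)
{
  if (i < 0 || i >= 16)
    __builtin_unreachable ();	/* promise to the optimizers: 0 <= i < 16 */
  return i & 15;
}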