/* Forward propagation of expressions for single use variables.
- Copyright (C) 2004-2015 Free Software Foundation, Inc.
+ Copyright (C) 2004-2020 Free Software Foundation, Inc.
This file is part of GCC.
#include "config.h"
#include "system.h"
#include "coretypes.h"
-#include "tm.h"
-#include "alias.h"
-#include "symtab.h"
+#include "backend.h"
+#include "rtl.h"
#include "tree.h"
+#include "gimple.h"
+#include "cfghooks.h"
+#include "tree-pass.h"
+#include "ssa.h"
+#include "expmed.h"
+#include "optabs-query.h"
+#include "gimple-pretty-print.h"
#include "fold-const.h"
#include "stor-layout.h"
-#include "tm_p.h"
-#include "predict.h"
-#include "hard-reg-set.h"
-#include "function.h"
-#include "dominance.h"
-#include "cfg.h"
-#include "basic-block.h"
-#include "gimple-pretty-print.h"
-#include "tree-ssa-alias.h"
-#include "internal-fn.h"
#include "gimple-fold.h"
#include "tree-eh.h"
-#include "gimple-expr.h"
-#include "gimple.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
-#include "gimple-ssa.h"
#include "tree-cfg.h"
-#include "tree-phinodes.h"
-#include "ssa-iterators.h"
-#include "stringpool.h"
-#include "tree-ssanames.h"
-#include "rtl.h"
-#include "flags.h"
-#include "insn-config.h"
-#include "expmed.h"
-#include "dojump.h"
-#include "explow.h"
-#include "calls.h"
-#include "emit-rtl.h"
-#include "varasm.h"
-#include "stmt.h"
#include "expr.h"
#include "tree-dfa.h"
-#include "tree-pass.h"
-#include "langhooks.h"
-#include "diagnostic.h"
-#include "cfgloop.h"
-#include "insn-codes.h"
-#include "optabs.h"
#include "tree-ssa-propagate.h"
#include "tree-ssa-dom.h"
#include "builtins.h"
#include "tree-cfgcleanup.h"
-#include "tree-into-ssa.h"
#include "cfganal.h"
+#include "optabs-tree.h"
+#include "tree-vector-builder.h"
+#include "vec-perm-indices.h"
+#include "internal-fn.h"
+#include "cgraph.h"
+#include "tree-ssa.h"
/* This pass propagates the RHS of assignment statements into use
sites of the LHS of the assignment. It's basically a specialized
/* Set to true if we delete dead edges during the optimization. */
static bool cfg_changed;
-static tree rhs_to_tree (tree type, gimple stmt);
+static tree rhs_to_tree (tree type, gimple *stmt);
static bitmap to_purge;
it is set to whether the chain to NAME is a single use chain
or not. SINGLE_USE_P is not written to if SINGLE_USE_ONLY is set. */
-static gimple
+static gimple *
get_prop_source_stmt (tree name, bool single_use_only, bool *single_use_p)
{
bool single_use = true;
do {
- gimple def_stmt = SSA_NAME_DEF_STMT (name);
+ gimple *def_stmt = SSA_NAME_DEF_STMT (name);
if (!has_single_use (name))
{
propagation source. Returns true if so, otherwise false. */
static bool
-can_propagate_from (gimple def_stmt)
+can_propagate_from (gimple *def_stmt)
{
gcc_assert (is_gimple_assign (def_stmt));
return false;
/* If the definition is a conversion of a pointer to a function type,
- then we can not apply optimizations as some targets require
+ then we cannot apply optimizations as some targets require
function pointers to be canonicalized and in this case this
optimization could eliminate a necessary canonicalization. */
if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt)))
remove_prop_source_from_use (tree name)
{
gimple_stmt_iterator gsi;
- gimple stmt;
+ gimple *stmt;
bool cfg_changed = false;
do {
routines that deal with gimple exclusively. */
static tree
-rhs_to_tree (tree type, gimple stmt)
+rhs_to_tree (tree type, gimple *stmt)
{
location_t loc = gimple_location (stmt);
enum tree_code code = gimple_assign_rhs_code (stmt);
- if (get_gimple_rhs_class (code) == GIMPLE_TERNARY_RHS)
- return fold_build3_loc (loc, code, type, gimple_assign_rhs1 (stmt),
- gimple_assign_rhs2 (stmt),
- gimple_assign_rhs3 (stmt));
- else if (get_gimple_rhs_class (code) == GIMPLE_BINARY_RHS)
- return fold_build2_loc (loc, code, type, gimple_assign_rhs1 (stmt),
- gimple_assign_rhs2 (stmt));
- else if (get_gimple_rhs_class (code) == GIMPLE_UNARY_RHS)
- return build1 (code, type, gimple_assign_rhs1 (stmt));
- else if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS)
- return gimple_assign_rhs1 (stmt);
- else
- gcc_unreachable ();
+ switch (get_gimple_rhs_class (code))
+ {
+ case GIMPLE_TERNARY_RHS:
+ return fold_build3_loc (loc, code, type, gimple_assign_rhs1 (stmt),
+ gimple_assign_rhs2 (stmt),
+ gimple_assign_rhs3 (stmt));
+ case GIMPLE_BINARY_RHS:
+ return fold_build2_loc (loc, code, type, gimple_assign_rhs1 (stmt),
+ gimple_assign_rhs2 (stmt));
+ case GIMPLE_UNARY_RHS:
+ return build1 (code, type, gimple_assign_rhs1 (stmt));
+ case GIMPLE_SINGLE_RHS:
+ return gimple_assign_rhs1 (stmt);
+ default:
+ gcc_unreachable ();
+ }
}
/* Combine OP0 CODE OP1 in the context of a COND_EXPR. Returns
considered simplified. */
static tree
-combine_cond_expr_cond (gimple stmt, enum tree_code code, tree type,
+combine_cond_expr_cond (gimple *stmt, enum tree_code code, tree type,
tree op0, tree op1, bool invariant_only)
{
tree t;
were no simplifying combines. */
static tree
-forward_propagate_into_comparison_1 (gimple stmt,
+forward_propagate_into_comparison_1 (gimple *stmt,
enum tree_code code, tree type,
tree op0, tree op1)
{
simplify comparisons against constants. */
if (TREE_CODE (op0) == SSA_NAME)
{
- gimple def_stmt = get_prop_source_stmt (op0, false, &single_use0_p);
+ gimple *def_stmt = get_prop_source_stmt (op0, false, &single_use0_p);
if (def_stmt && can_propagate_from (def_stmt))
{
enum tree_code def_code = gimple_assign_rhs_code (def_stmt);
/* If that wasn't successful, try the second operand. */
if (TREE_CODE (op1) == SSA_NAME)
{
- gimple def_stmt = get_prop_source_stmt (op1, false, &single_use1_p);
+ gimple *def_stmt = get_prop_source_stmt (op1, false, &single_use1_p);
if (def_stmt && can_propagate_from (def_stmt))
{
rhs1 = rhs_to_tree (TREE_TYPE (op0), def_stmt);
static int
forward_propagate_into_comparison (gimple_stmt_iterator *gsi)
{
- gimple stmt = gsi_stmt (*gsi);
+ gimple *stmt = gsi_stmt (*gsi);
tree tmp;
bool cfg_changed = false;
tree type = TREE_TYPE (gimple_assign_lhs (stmt));
tmp = forward_propagate_into_comparison_1 (stmt, code,
boolean_type_node,
rhs1, rhs2);
- if (tmp)
+ if (tmp
+ && is_gimple_condexpr_for_cond (tmp))
{
- if (dump_file && tmp)
+ if (dump_file)
{
fprintf (dump_file, " Replaced '");
- print_gimple_expr (dump_file, stmt, 0, 0);
+ print_gimple_expr (dump_file, stmt, 0);
fprintf (dump_file, "' with '");
- print_generic_expr (dump_file, tmp, 0);
+ print_generic_expr (dump_file, tmp);
fprintf (dump_file, "'\n");
}
static bool
forward_propagate_into_cond (gimple_stmt_iterator *gsi_p)
{
- gimple stmt = gsi_stmt (*gsi_p);
+ gimple *stmt = gsi_stmt (*gsi_p);
tree tmp = NULL_TREE;
tree cond = gimple_assign_rhs1 (stmt);
enum tree_code code = gimple_assign_rhs_code (stmt);
{
enum tree_code def_code;
tree name = cond;
- gimple def_stmt = get_prop_source_stmt (name, true, NULL);
+ gimple *def_stmt = get_prop_source_stmt (name, true, NULL);
if (!def_stmt || !can_propagate_from (def_stmt))
return 0;
if (tmp
&& is_gimple_condexpr (tmp))
{
- if (dump_file && tmp)
+ if (dump_file)
{
fprintf (dump_file, " Replaced '");
- print_generic_expr (dump_file, cond, 0);
+ print_generic_expr (dump_file, cond);
fprintf (dump_file, "' with '");
- print_generic_expr (dump_file, tmp, 0);
+ print_generic_expr (dump_file, tmp);
fprintf (dump_file, "'\n");
}
relevant data structures to match. */
static void
-tidy_after_forward_propagate_addr (gimple stmt)
+tidy_after_forward_propagate_addr (gimple *stmt)
{
/* We may have turned a trapping insn into a non-trapping insn. */
if (maybe_clean_or_replace_eh_stmt (stmt, stmt))
bool single_use_p)
{
tree lhs, rhs, rhs2, array_ref;
- gimple use_stmt = gsi_stmt (*use_stmt_gsi);
+ gimple *use_stmt = gsi_stmt (*use_stmt_gsi);
enum tree_code rhs_code;
bool res = true;
if (TREE_CODE (new_def_rhs) == MEM_REF
&& !is_gimple_mem_ref_addr (TREE_OPERAND (new_def_rhs, 0)))
return false;
- new_def_rhs = build_fold_addr_expr_with_type (new_def_rhs,
- TREE_TYPE (rhs));
+ new_def_rhs = build1 (ADDR_EXPR, TREE_TYPE (rhs), new_def_rhs);
/* Recurse. If we could propagate into all uses of lhs do not
bother to replace into the current use but just pretend we did. */
- if (TREE_CODE (new_def_rhs) == ADDR_EXPR
- && forward_propagate_addr_expr (lhs, new_def_rhs, single_use_p))
+ if (forward_propagate_addr_expr (lhs, new_def_rhs, single_use_p))
return true;
- if (useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (new_def_rhs)))
+ if (useless_type_conversion_p (TREE_TYPE (lhs),
+ TREE_TYPE (new_def_rhs)))
gimple_assign_set_rhs_with_ops (use_stmt_gsi, TREE_CODE (new_def_rhs),
new_def_rhs);
else if (is_gimple_min_invariant (new_def_rhs))
&& TREE_OPERAND (lhs, 0) == name)
{
tree def_rhs_base;
- HOST_WIDE_INT def_rhs_offset;
+ poly_int64 def_rhs_offset;
/* If the address is invariant we can always fold it. */
if ((def_rhs_base = get_addr_base_and_unit_offset (TREE_OPERAND (def_rhs, 0),
&def_rhs_offset)))
{
- offset_int off = mem_ref_offset (lhs);
+ poly_offset_int off = mem_ref_offset (lhs);
tree new_ptr;
off += def_rhs_offset;
if (TREE_CODE (def_rhs_base) == MEM_REF)
&& TREE_OPERAND (rhs, 0) == name)
{
tree def_rhs_base;
- HOST_WIDE_INT def_rhs_offset;
+ poly_int64 def_rhs_offset;
if ((def_rhs_base = get_addr_base_and_unit_offset (TREE_OPERAND (def_rhs, 0),
&def_rhs_offset)))
{
- offset_int off = mem_ref_offset (rhs);
+ poly_offset_int off = mem_ref_offset (rhs);
tree new_ptr;
off += def_rhs_offset;
if (TREE_CODE (def_rhs_base) == MEM_REF)
forward_propagate_addr_expr (tree name, tree rhs, bool parent_single_use_p)
{
imm_use_iterator iter;
- gimple use_stmt;
+ gimple *use_stmt;
bool all = true;
bool single_use_p = parent_single_use_p && has_single_use (name);
for (i = 0; i < gimple_switch_num_labels (stmt); i++)
{
tree elt = gimple_switch_label (stmt, i);
- basic_block target = label_to_block (CASE_LABEL (elt));
+ basic_block target = label_to_block (cfun, CASE_LABEL (elt));
bitmap_set_bit (target_blocks, target->index);
}
for (ei = ei_start (gimple_bb (stmt)->succs); (e = ei_safe_edge (ei)); )
tree cond = gimple_switch_index (stmt);
if (TREE_CODE (cond) == SSA_NAME)
{
- gimple def_stmt = SSA_NAME_DEF_STMT (cond);
+ gimple *def_stmt = SSA_NAME_DEF_STMT (cond);
if (gimple_assign_cast_p (def_stmt))
{
tree def = gimple_assign_rhs1 (def_stmt);
{
tree p = i ? p1 : p2;
tree off = size_zero_node;
- gimple stmt;
+ gimple *stmt;
enum tree_code code;
/* For each of p1 and p2 we need to iterate at least
if (TREE_CODE (p) == ADDR_EXPR)
{
tree q = TREE_OPERAND (p, 0);
- HOST_WIDE_INT offset;
+ poly_int64 offset;
tree base = get_addr_base_and_unit_offset (q, &offset);
if (base)
{
q = base;
- if (offset)
+ if (maybe_ne (offset, 0))
off = size_binop (PLUS_EXPR, off, size_int (offset));
}
if (TREE_CODE (q) == MEM_REF
static bool
simplify_builtin_call (gimple_stmt_iterator *gsi_p, tree callee2)
{
- gimple stmt1, stmt2 = gsi_stmt (*gsi_p);
+ gimple *stmt1, *stmt2 = gsi_stmt (*gsi_p);
tree vuse = gimple_vuse (stmt2);
if (vuse == NULL)
return false;
tree val2 = gimple_call_arg (stmt2, 1);
tree len2 = gimple_call_arg (stmt2, 2);
tree diff, vdef, new_str_cst;
- gimple use_stmt;
+ gimple *use_stmt;
unsigned int ptr1_align;
unsigned HOST_WIDE_INT src_len;
char *src_buf;
constant length. */
callee1 = gimple_call_fndecl (stmt1);
if (callee1 == NULL_TREE
- || DECL_BUILT_IN_CLASS (callee1) != BUILT_IN_NORMAL
+ || !fndecl_built_in_p (callee1, BUILT_IN_NORMAL)
|| gimple_call_num_args (stmt1) != 3)
break;
if (DECL_FUNCTION_CODE (callee1) != BUILT_IN_MEMCPY
lhs1 = gimple_call_lhs (stmt1);
if (!tree_fits_uhwi_p (len1))
break;
- str1 = string_constant (src1, &off1);
+ str1 = string_constant (src1, &off1, NULL, NULL);
if (str1 == NULL_TREE)
break;
if (!tree_fits_uhwi_p (off1)
|| !tree_fits_shwi_p (src1))
break;
ptr1 = build_fold_addr_expr (ptr1);
+ STRIP_USELESS_TYPE_CONVERSION (ptr1);
callee1 = NULL_TREE;
len1 = size_one_node;
lhs1 = NULL_TREE;
build_int_cst (TREE_TYPE (len1), src_len));
update_stmt (stmt1);
unlink_stmt_vdef (stmt2);
- gsi_remove (gsi_p, true);
+ gsi_replace (gsi_p, gimple_build_nop (), false);
fwprop_invalidate_lattice (gimple_get_lhs (stmt2));
release_defs (stmt2);
if (lhs1 && DECL_FUNCTION_CODE (callee1) == BUILT_IN_MEMPCPY)
if (!is_gimple_val (ptr1))
ptr1 = force_gimple_operand_gsi (gsi_p, ptr1, true, NULL_TREE,
true, GSI_SAME_STMT);
- gimple_call_set_fndecl (stmt2,
- builtin_decl_explicit (BUILT_IN_MEMCPY));
+ tree fndecl = builtin_decl_explicit (BUILT_IN_MEMCPY);
+ gimple_call_set_fndecl (stmt2, fndecl);
+ gimple_call_set_fntype (as_a <gcall *> (stmt2),
+ TREE_TYPE (fndecl));
gimple_call_set_arg (stmt2, 0, ptr1);
gimple_call_set_arg (stmt2, 1, new_str_cst);
gimple_call_set_arg (stmt2, 2,
static inline void
defcodefor_name (tree name, enum tree_code *code, tree *arg1, tree *arg2)
{
- gimple def;
+ gimple *def;
enum tree_code code1;
tree arg11;
tree arg21;
code1 = TREE_CODE (name);
arg11 = name;
arg21 = NULL_TREE;
+ arg31 = NULL_TREE;
grhs_class = get_gimple_rhs_class (code1);
if (code1 == SSA_NAME)
code1 = gimple_assign_rhs_code (def);
arg11 = gimple_assign_rhs1 (def);
arg21 = gimple_assign_rhs2 (def);
- arg31 = gimple_assign_rhs2 (def);
+ arg31 = gimple_assign_rhs3 (def);
}
}
- else if (grhs_class == GIMPLE_TERNARY_RHS
- || GIMPLE_BINARY_RHS
- || GIMPLE_UNARY_RHS
- || GIMPLE_SINGLE_RHS)
- extract_ops_from_tree_1 (name, &code1, &arg11, &arg21, &arg31);
+ else if (grhs_class != GIMPLE_SINGLE_RHS)
+ code1 = ERROR_MARK;
*code = code1;
*arg1 = arg11;
if (arg2)
*arg2 = arg21;
- /* Ignore arg3 currently. */
+ if (arg31)
+ *code = ERROR_MARK;
}
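For intuition, defcodefor_name reports the code and operands that define
NAME, looking through its SSA definition.  A minimal sketch of its behavior
(SSA names illustrative):

    /* Given:  b_2 = a_1 << 3;
       defcodefor_name (b_2, &code, &arg1, &arg2) yields
       code == LSHIFT_EXPR, arg1 == a_1, arg2 == 3.
       A ternary RHS now yields ERROR_MARK, and the old copy-paste
       that duplicated rhs2 into arg31 is gone.  */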
applied, otherwise return false.
We are looking for X with unsigned type T with bitsize B, OP being
- +, | or ^, some type T2 wider than T and
+ +, | or ^, some type T2 wider than T. For:
(X << CNT1) OP (X >> CNT2) iff CNT1 + CNT2 == B
((T) ((T2) X << CNT1)) OP ((T) ((T2) X >> CNT2)) iff CNT1 + CNT2 == B
+
+ transform these into:
+ X r<< CNT1
+
+ Or for:
(X << Y) OP (X >> (B - Y))
(X << (int) Y) OP (X >> (int) (B - Y))
((T) ((T2) X << Y)) OP ((T) ((T2) X >> (B - Y)))
((T) ((T2) X << Y)) | ((T) ((T2) X >> ((-Y) & (B - 1))))
((T) ((T2) X << (int) Y)) | ((T) ((T2) X >> (int) ((-Y) & (B - 1))))
- and transform these into:
- X r<< CNT1
+ transform these into:
X r<< Y
+ Or for:
+ (X << (Y & (B - 1))) | (X >> ((-Y) & (B - 1)))
+ (X << (int) (Y & (B - 1))) | (X >> (int) ((-Y) & (B - 1)))
+ ((T) ((T2) X << (Y & (B - 1)))) | ((T) ((T2) X >> ((-Y) & (B - 1))))
+ ((T) ((T2) X << (int) (Y & (B - 1)))) \
+ | ((T) ((T2) X >> (int) ((-Y) & (B - 1))))
+
+ transform these into:
+ X r<< (Y & (B - 1))
+
Note, in the patterns with T2 type, the type of OP operands
- might be even a signed type, but should have precision B. */
+ might be even a signed type, but should have precision B.
+ Expressions with & (B - 1) should be recognized only if B is
+ a power of 2. */
static bool
simplify_rotate (gimple_stmt_iterator *gsi)
{
- gimple stmt = gsi_stmt (*gsi);
+ gimple *stmt = gsi_stmt (*gsi);
tree arg[2], rtype, rotcnt = NULL_TREE;
tree def_arg1[2], def_arg2[2];
enum tree_code def_code[2];
tree lhs;
int i;
bool swapped_p = false;
- gimple g;
+ gimple *g;
arg[0] = gimple_assign_rhs1 (stmt);
arg[1] = gimple_assign_rhs2 (stmt);
/* Only create rotates in complete modes. Other cases are not
expanded properly. */
if (!INTEGRAL_TYPE_P (rtype)
- || TYPE_PRECISION (rtype) != GET_MODE_PRECISION (TYPE_MODE (rtype)))
+ || !type_has_mode_precision_p (rtype))
return false;
for (i = 0; i < 2; i++)
defcodefor_name (arg[i], &def_code[i], &def_arg1[i], &def_arg2[i]);
- /* Look through narrowing conversions. */
+ /* Look through narrowing (or same precision) conversions. */
if (CONVERT_EXPR_CODE_P (def_code[0])
&& CONVERT_EXPR_CODE_P (def_code[1])
&& INTEGRAL_TYPE_P (TREE_TYPE (def_arg1[0]))
&& INTEGRAL_TYPE_P (TREE_TYPE (def_arg1[1]))
&& TYPE_PRECISION (TREE_TYPE (def_arg1[0]))
== TYPE_PRECISION (TREE_TYPE (def_arg1[1]))
- && TYPE_PRECISION (TREE_TYPE (def_arg1[0])) > TYPE_PRECISION (rtype)
+ && TYPE_PRECISION (TREE_TYPE (def_arg1[0])) >= TYPE_PRECISION (rtype)
&& has_single_use (arg[0])
&& has_single_use (arg[1]))
{
defcodefor_name (arg[i], &def_code[i], &def_arg1[i], &def_arg2[i]);
}
}
+ else
+ {
+ /* Handle signed rotate; the RSHIFT_EXPR has to be done
+ in unsigned type but LSHIFT_EXPR could be signed. */
+ i = (def_code[0] == LSHIFT_EXPR || def_code[0] == RSHIFT_EXPR);
+ if (CONVERT_EXPR_CODE_P (def_code[i])
+ && (def_code[1 - i] == LSHIFT_EXPR || def_code[1 - i] == RSHIFT_EXPR)
+ && INTEGRAL_TYPE_P (TREE_TYPE (def_arg1[i]))
+ && TYPE_PRECISION (rtype) == TYPE_PRECISION (TREE_TYPE (def_arg1[i]))
+ && has_single_use (arg[i]))
+ {
+ arg[i] = def_arg1[i];
+ defcodefor_name (arg[i], &def_code[i], &def_arg1[i], &def_arg2[i]);
+ }
+ }
/* One operand has to be LSHIFT_EXPR and one RSHIFT_EXPR. */
for (i = 0; i < 2; i++)
def_arg1[i] = tem;
}
/* Both shifts have to use the same first operand. */
- if (TREE_CODE (def_arg1[0]) != SSA_NAME || def_arg1[0] != def_arg1[1])
- return false;
- if (!TYPE_UNSIGNED (TREE_TYPE (def_arg1[0])))
+ if (!operand_equal_for_phi_arg_p (def_arg1[0], def_arg1[1])
+ || !types_compatible_p (TREE_TYPE (def_arg1[0]),
+ TREE_TYPE (def_arg1[1])))
+ {
+ if ((TYPE_PRECISION (TREE_TYPE (def_arg1[0]))
+ != TYPE_PRECISION (TREE_TYPE (def_arg1[1])))
+ || (TYPE_UNSIGNED (TREE_TYPE (def_arg1[0]))
+ == TYPE_UNSIGNED (TREE_TYPE (def_arg1[1]))))
+ return false;
+
+ /* Handle signed rotate; the RSHIFT_EXPR has to be done
+ in unsigned type but LSHIFT_EXPR could be signed. */
+ i = def_code[0] != RSHIFT_EXPR;
+ if (!TYPE_UNSIGNED (TREE_TYPE (def_arg1[i])))
+ return false;
+
+ tree tem;
+ enum tree_code code;
+ defcodefor_name (def_arg1[i], &code, &tem, NULL);
+ if (!CONVERT_EXPR_CODE_P (code)
+ || !INTEGRAL_TYPE_P (TREE_TYPE (tem))
+ || TYPE_PRECISION (TREE_TYPE (tem)) != TYPE_PRECISION (rtype))
+ return false;
+ def_arg1[i] = tem;
+ if (!operand_equal_for_phi_arg_p (def_arg1[0], def_arg1[1])
+ || !types_compatible_p (TREE_TYPE (def_arg1[0]),
+ TREE_TYPE (def_arg1[1])))
+ return false;
+ }
+ else if (!TYPE_UNSIGNED (TREE_TYPE (def_arg1[0])))
return false;
/* CNT1 + CNT2 == B case above. */
&& INTEGRAL_TYPE_P (TREE_TYPE (cdef_arg1[i]))
&& TYPE_PRECISION (TREE_TYPE (cdef_arg1[i]))
> floor_log2 (TYPE_PRECISION (rtype))
- && TYPE_PRECISION (TREE_TYPE (cdef_arg1[i]))
- == GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (cdef_arg1[i]))))
+ && type_has_mode_precision_p (TREE_TYPE (cdef_arg1[i])))
{
def_arg2_alt[i] = cdef_arg1[i];
defcodefor_name (def_arg2_alt[i], &cdef_code[i],
&& INTEGRAL_TYPE_P (TREE_TYPE (tem))
&& TYPE_PRECISION (TREE_TYPE (tem))
> floor_log2 (TYPE_PRECISION (rtype))
- && TYPE_PRECISION (TREE_TYPE (tem))
- == GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (tem)))
+ && type_has_mode_precision_p (TREE_TYPE (tem))
&& (tem == def_arg2[1 - i]
|| tem == def_arg2_alt[1 - i]))
{
/* The above sequence isn't safe for Y being 0,
because then one of the shifts triggers undefined behavior.
This alternative is safe even for rotation count of 0.
- One shift count is Y and the other (-Y) & (B - 1). */
+ One shift count is Y and the other (-Y) & (B - 1).
+ Or one shift count is Y & (B - 1) and the other (-Y) & (B - 1). */
else if (cdef_code[i] == BIT_AND_EXPR
+ && pow2p_hwi (TYPE_PRECISION (rtype))
&& tree_fits_shwi_p (cdef_arg2[i])
&& tree_to_shwi (cdef_arg2[i])
== TYPE_PRECISION (rtype) - 1
&& INTEGRAL_TYPE_P (TREE_TYPE (tem))
&& TYPE_PRECISION (TREE_TYPE (tem))
> floor_log2 (TYPE_PRECISION (rtype))
- && TYPE_PRECISION (TREE_TYPE (tem))
- == GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (tem))))
+ && type_has_mode_precision_p (TREE_TYPE (tem)))
defcodefor_name (tem, &code, &tem, NULL);
if (code == NEGATE_EXPR)
rotcnt = tem;
break;
}
- defcodefor_name (tem, &code, &tem, NULL);
+ tree tem2;
+ defcodefor_name (tem, &code, &tem2, NULL);
if (CONVERT_EXPR_CODE_P (code)
- && INTEGRAL_TYPE_P (TREE_TYPE (tem))
- && TYPE_PRECISION (TREE_TYPE (tem))
+ && INTEGRAL_TYPE_P (TREE_TYPE (tem2))
+ && TYPE_PRECISION (TREE_TYPE (tem2))
> floor_log2 (TYPE_PRECISION (rtype))
- && TYPE_PRECISION (TREE_TYPE (tem))
- == GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (tem)))
- && (tem == def_arg2[1 - i]
- || tem == def_arg2_alt[1 - i]))
+ && type_has_mode_precision_p (TREE_TYPE (tem2)))
{
- rotcnt = tem;
- break;
+ if (tem2 == def_arg2[1 - i]
+ || tem2 == def_arg2_alt[1 - i])
+ {
+ rotcnt = tem2;
+ break;
+ }
+ }
+ else
+ tem2 = NULL_TREE;
+
+ if (cdef_code[1 - i] == BIT_AND_EXPR
+ && tree_fits_shwi_p (cdef_arg2[1 - i])
+ && tree_to_shwi (cdef_arg2[1 - i])
+ == TYPE_PRECISION (rtype) - 1
+ && TREE_CODE (cdef_arg1[1 - i]) == SSA_NAME)
+ {
+ if (tem == cdef_arg1[1 - i]
+ || tem2 == cdef_arg1[1 - i])
+ {
+ rotcnt = def_arg2[1 - i];
+ break;
+ }
+ tree tem3;
+ defcodefor_name (cdef_arg1[1 - i], &code, &tem3, NULL);
+ if (CONVERT_EXPR_CODE_P (code)
+ && INTEGRAL_TYPE_P (TREE_TYPE (tem3))
+ && TYPE_PRECISION (TREE_TYPE (tem3))
+ > floor_log2 (TYPE_PRECISION (rtype))
+ && type_has_mode_precision_p (TREE_TYPE (tem3)))
+ {
+ if (tem == tem3 || tem2 == tem3)
+ {
+ rotcnt = def_arg2[1 - i];
+ break;
+ }
+ }
}
}
}
return true;
}
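The source-level idioms this matches are the portable C rotate patterns;
a minimal sketch with B == 32 (function names are illustrative):

    unsigned int
    rotl32 (unsigned int x, unsigned int y)
    {
      /* (X << Y) | (X >> ((-Y) & (B - 1))): safe even for y == 0,
         recognized as X r<< Y.  */
      return (x << y) | (x >> (-y & 31));
    }

    unsigned int
    rotl32_masked (unsigned int x, unsigned int y)
    {
      /* The newly added (X << (Y & (B - 1))) | (X >> ((-Y) & (B - 1)))
         form, accepted only because 32 is a power of 2.  */
      return (x << (y & 31)) | (x >> (-y & 31));
    }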
+
+/* Check whether an array contains a valid ctz table. */
+static bool
+check_ctz_array (tree ctor, unsigned HOST_WIDE_INT mulc,
+ HOST_WIDE_INT &zero_val, unsigned shift, unsigned bits)
+{
+ tree elt, idx;
+ unsigned HOST_WIDE_INT i, mask;
+ unsigned matched = 0;
+
+ mask = ((HOST_WIDE_INT_1U << (bits - shift)) - 1) << shift;
+
+ zero_val = 0;
+
+ FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (ctor), i, idx, elt)
+ {
+ if (TREE_CODE (idx) != INTEGER_CST || TREE_CODE (elt) != INTEGER_CST)
+ return false;
+ if (i > bits * 2)
+ return false;
+
+ unsigned HOST_WIDE_INT index = tree_to_shwi (idx);
+ HOST_WIDE_INT val = tree_to_shwi (elt);
+
+ if (index == 0)
+ {
+ zero_val = val;
+ matched++;
+ }
+
+ if (val >= 0 && val < bits && (((mulc << val) & mask) >> shift) == index)
+ matched++;
+
+ if (matched > bits)
+ return true;
+ }
+
+ return false;
+}
+
+/* Check whether a string contains a valid ctz table. */
+static bool
+check_ctz_string (tree string, unsigned HOST_WIDE_INT mulc,
+ HOST_WIDE_INT &zero_val, unsigned shift, unsigned bits)
+{
+ unsigned HOST_WIDE_INT len = TREE_STRING_LENGTH (string);
+ unsigned HOST_WIDE_INT mask;
+ unsigned matched = 0;
+ const unsigned char *p = (const unsigned char *) TREE_STRING_POINTER (string);
+
+ if (len < bits || len > bits * 2)
+ return false;
+
+ mask = ((HOST_WIDE_INT_1U << (bits - shift)) - 1) << shift;
+
+ zero_val = p[0];
+
+ for (unsigned i = 0; i < len; i++)
+ if (p[i] < bits && (((mulc << p[i]) & mask) >> shift) == i)
+ matched++;
+
+ return matched == bits;
+}
+
+/* Recognize the count trailing zeroes idiom.
+ The canonical form is array[((x & -x) * C) >> SHIFT] where C is a magic
+ constant which, when multiplied by a power of 2, creates a unique value
+ in the top 5 or 6 bits. This is then used as an index into a table
+ which maps it to the number of trailing zeroes. Array[0] is returned
+ so the caller can emit an appropriate sequence depending on whether
+ ctz (0) is defined on the target. */
+static bool
+optimize_count_trailing_zeroes (tree array_ref, tree x, tree mulc,
+ tree tshift, HOST_WIDE_INT &zero_val)
+{
+ tree type = TREE_TYPE (array_ref);
+ tree array = TREE_OPERAND (array_ref, 0);
+
+ gcc_assert (TREE_CODE (mulc) == INTEGER_CST);
+ gcc_assert (TREE_CODE (tshift) == INTEGER_CST);
+
+ tree input_type = TREE_TYPE (x);
+ unsigned input_bits = tree_to_shwi (TYPE_SIZE (input_type));
+
+ /* Check the array element type is not wider than 32 bits and the input is
+ an unsigned 32-bit or 64-bit type. */
+ if (TYPE_PRECISION (type) > 32 || !TYPE_UNSIGNED (input_type))
+ return false;
+ if (input_bits != 32 && input_bits != 64)
+ return false;
+
+ if (!direct_internal_fn_supported_p (IFN_CTZ, input_type, OPTIMIZE_FOR_BOTH))
+ return false;
+
+ /* Check the lower bound of the array is zero. */
+ tree low = array_ref_low_bound (array_ref);
+ if (!low || !integer_zerop (low))
+ return false;
+
+ unsigned shiftval = tree_to_shwi (tshift);
+
+ /* Check the shift extracts the top 5..7 bits. */
+ if (shiftval < input_bits - 7 || shiftval > input_bits - 5)
+ return false;
+
+ tree ctor = ctor_for_folding (array);
+ if (!ctor)
+ return false;
+
+ unsigned HOST_WIDE_INT val = tree_to_uhwi (mulc);
+
+ if (TREE_CODE (ctor) == CONSTRUCTOR)
+ return check_ctz_array (ctor, val, zero_val, shiftval, input_bits);
+
+ if (TREE_CODE (ctor) == STRING_CST
+ && TYPE_PRECISION (type) == CHAR_TYPE_SIZE)
+ return check_ctz_string (ctor, val, zero_val, shiftval, input_bits);
+
+ return false;
+}
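The canonical form above is the classic de Bruijn multiply-and-lookup;
a minimal 32-bit sketch (0x077CB531 is the well-known 32-bit de Bruijn
multiplier and the table the standard one; any table that passes
check_ctz_array works):

    static const char ctz_table[32] = {
      0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8,
      31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9
    };

    int
    ctz32 (unsigned int x)
    {
      /* x & -x isolates the lowest set bit; the multiplication moves a
         unique 5-bit pattern into the top bits, so SHIFT is 27 here,
         inside the checked [input_bits - 7, input_bits - 5] window.  */
      return ctz_table[((x & -x) * 0x077CB531u) >> 27];
    }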
+
+/* Match.pd function to match the ctz expression. */
+extern bool gimple_ctz_table_index (tree, tree *, tree (*)(tree));
+
+static bool
+simplify_count_trailing_zeroes (gimple_stmt_iterator *gsi)
+{
+ gimple *stmt = gsi_stmt (*gsi);
+ tree array_ref = gimple_assign_rhs1 (stmt);
+ tree res_ops[3];
+ HOST_WIDE_INT zero_val;
+
+ gcc_checking_assert (TREE_CODE (array_ref) == ARRAY_REF);
+
+ if (!gimple_ctz_table_index (TREE_OPERAND (array_ref, 1), &res_ops[0], NULL))
+ return false;
+
+ if (optimize_count_trailing_zeroes (array_ref, res_ops[0],
+ res_ops[1], res_ops[2], zero_val))
+ {
+ tree type = TREE_TYPE (res_ops[0]);
+ HOST_WIDE_INT ctz_val = 0;
+ HOST_WIDE_INT type_size = tree_to_shwi (TYPE_SIZE (type));
+ bool zero_ok
+ = CTZ_DEFINED_VALUE_AT_ZERO (SCALAR_INT_TYPE_MODE (type), ctz_val) == 2;
+
+ /* If the input value can't be zero, don't special case ctz (0). */
+ if (tree_expr_nonzero_p (res_ops[0]))
+ {
+ zero_ok = true;
+ zero_val = 0;
+ ctz_val = 0;
+ }
+
+ /* Skip if there is no value defined at zero, or if we can't easily
+ return the correct value for zero. */
+ if (!zero_ok)
+ return false;
+ if (zero_val != ctz_val && !(zero_val == 0 && ctz_val == type_size))
+ return false;
+
+ gimple_seq seq = NULL;
+ gimple *g;
+ gcall *call = gimple_build_call_internal (IFN_CTZ, 1, res_ops[0]);
+ gimple_set_location (call, gimple_location (stmt));
+ gimple_set_lhs (call, make_ssa_name (integer_type_node));
+ gimple_seq_add_stmt (&seq, call);
+
+ tree prev_lhs = gimple_call_lhs (call);
+
+ /* Emit ctz (x) & 31 if ctz (0) is 32 but we need to return 0. */
+ if (zero_val == 0 && ctz_val == type_size)
+ {
+ g = gimple_build_assign (make_ssa_name (integer_type_node),
+ BIT_AND_EXPR, prev_lhs,
+ build_int_cst (integer_type_node,
+ type_size - 1));
+ gimple_set_location (g, gimple_location (stmt));
+ gimple_seq_add_stmt (&seq, g);
+ prev_lhs = gimple_assign_lhs (g);
+ }
+
+ g = gimple_build_assign (gimple_assign_lhs (stmt), NOP_EXPR, prev_lhs);
+ gimple_seq_add_stmt (&seq, g);
+ gsi_replace_with_seq (gsi, seq, true);
+ return true;
+ }
+
+ return false;
+}
+
+
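When the match succeeds, the table load is replaced by a call to the
internal ctz function; roughly, in GIMPLE (SSA names illustrative):

    _1 = .CTZ (x_2);
    _3 = _1 & 31;     /* Only when ctz (0) is defined as the type size
                         (32) on the target but the table maps 0 to 0.  */
    res_4 = (int) _3;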
/* Combine an element access with a shuffle. Returns true if there were
any changes made, else it returns false. */
static bool
simplify_bitfield_ref (gimple_stmt_iterator *gsi)
{
- gimple stmt = gsi_stmt (*gsi);
- gimple def_stmt;
- tree op, op0, op1, op2;
+ gimple *stmt = gsi_stmt (*gsi);
+ gimple *def_stmt;
+ tree op, op0, op1;
tree elem_type;
- unsigned idx, n, size;
+ unsigned idx, size;
enum tree_code code;
op = gimple_assign_rhs1 (stmt);
return false;
op1 = TREE_OPERAND (op, 1);
- op2 = TREE_OPERAND (op, 2);
code = gimple_assign_rhs_code (def_stmt);
-
- if (code == CONSTRUCTOR)
- {
- tree tem = fold_ternary (BIT_FIELD_REF, TREE_TYPE (op),
- gimple_assign_rhs1 (def_stmt), op1, op2);
- if (!tem || !valid_gimple_rhs_p (tem))
- return false;
- gimple_assign_set_rhs_from_tree (gsi, tem);
- update_stmt (gsi_stmt (*gsi));
- return true;
- }
-
elem_type = TREE_TYPE (TREE_TYPE (op0));
if (TREE_TYPE (op) != elem_type)
return false;
size = TREE_INT_CST_LOW (TYPE_SIZE (elem_type));
- n = TREE_INT_CST_LOW (op1) / size;
- if (n != 1)
+ if (maybe_ne (bit_field_size (op), size))
return false;
- idx = TREE_INT_CST_LOW (op2) / size;
- if (code == VEC_PERM_EXPR)
+ if (code == VEC_PERM_EXPR
+ && constant_multiple_p (bit_field_offset (op), size, &idx))
{
- tree p, m, index, tem;
- unsigned nelts;
+ tree p, m, tem;
+ unsigned HOST_WIDE_INT nelts;
m = gimple_assign_rhs3 (def_stmt);
- if (TREE_CODE (m) != VECTOR_CST)
+ if (TREE_CODE (m) != VECTOR_CST
+ || !VECTOR_CST_NELTS (m).is_constant (&nelts))
return false;
- nelts = VECTOR_CST_NELTS (m);
idx = TREE_INT_CST_LOW (VECTOR_CST_ELT (m, idx));
idx %= 2 * nelts;
if (idx < nelts)
p = gimple_assign_rhs2 (def_stmt);
idx -= nelts;
}
- index = build_int_cst (TREE_TYPE (TREE_TYPE (m)), idx * size);
tem = build3 (BIT_FIELD_REF, TREE_TYPE (op),
- unshare_expr (p), op1, index);
+ unshare_expr (p), op1, bitsize_int (idx * size));
gimple_assign_set_rhs1 (stmt, tem);
fold_stmt (gsi);
update_stmt (gsi_stmt (*gsi));
is_combined_permutation_identity (tree mask1, tree mask2)
{
tree mask;
- unsigned int nelts, i, j;
+ unsigned HOST_WIDE_INT nelts, i, j;
bool maybe_identity1 = true;
bool maybe_identity2 = true;
gcc_checking_assert (TREE_CODE (mask1) == VECTOR_CST
&& TREE_CODE (mask2) == VECTOR_CST);
mask = fold_ternary (VEC_PERM_EXPR, TREE_TYPE (mask1), mask1, mask1, mask2);
- gcc_assert (TREE_CODE (mask) == VECTOR_CST);
+ if (mask == NULL_TREE || TREE_CODE (mask) != VECTOR_CST)
+ return 0;
- nelts = VECTOR_CST_NELTS (mask);
+ if (!VECTOR_CST_NELTS (mask).is_constant (&nelts))
+ return 0;
for (i = 0; i < nelts; i++)
{
tree val = VECTOR_CST_ELT (mask, i);
static int
simplify_permutation (gimple_stmt_iterator *gsi)
{
- gimple stmt = gsi_stmt (*gsi);
- gimple def_stmt;
+ gimple *stmt = gsi_stmt (*gsi);
+ gimple *def_stmt;
tree op0, op1, op2, op3, arg0, arg1;
enum tree_code code;
bool single_use_op0 = false;
{
enum tree_code code2;
- gimple def_stmt2 = get_prop_source_stmt (op1, true, NULL);
+ gimple *def_stmt2 = get_prop_source_stmt (op1, true, NULL);
if (!def_stmt2 || !can_propagate_from (def_stmt2))
return 0;
return 0;
}
+/* Get the BIT_FIELD_REF definition of VAL, if any, looking through
+ conversions with code CONV_CODE (setting CONV_CODE if it is still
+ ERROR_MARK). Return NULL_TREE if no such matching def was found. */
+
+static tree
+get_bit_field_ref_def (tree val, enum tree_code &conv_code)
+{
+ if (TREE_CODE (val) != SSA_NAME)
+ return NULL_TREE;
+ gimple *def_stmt = get_prop_source_stmt (val, false, NULL);
+ if (!def_stmt)
+ return NULL_TREE;
+ enum tree_code code = gimple_assign_rhs_code (def_stmt);
+ if (code == FLOAT_EXPR
+ || code == FIX_TRUNC_EXPR
+ || CONVERT_EXPR_CODE_P (code))
+ {
+ tree op1 = gimple_assign_rhs1 (def_stmt);
+ if (conv_code == ERROR_MARK)
+ conv_code = code;
+ else if (conv_code != code)
+ return NULL_TREE;
+ if (TREE_CODE (op1) != SSA_NAME)
+ return NULL_TREE;
+ def_stmt = SSA_NAME_DEF_STMT (op1);
+ if (! is_gimple_assign (def_stmt))
+ return NULL_TREE;
+ code = gimple_assign_rhs_code (def_stmt);
+ }
+ if (code != BIT_FIELD_REF)
+ return NULL_TREE;
+ return gimple_assign_rhs1 (def_stmt);
+}
+
/* Recognize a VEC_PERM_EXPR. Returns true if there were any changes. */
static bool
simplify_vector_constructor (gimple_stmt_iterator *gsi)
{
- gimple stmt = gsi_stmt (*gsi);
- gimple def_stmt;
- tree op, op2, orig, type, elem_type;
- unsigned elem_size, nelts, i;
- enum tree_code code;
+ gimple *stmt = gsi_stmt (*gsi);
+ tree op, orig[2], type, elem_type;
+ unsigned elem_size, i;
+ unsigned HOST_WIDE_INT nelts;
+ unsigned HOST_WIDE_INT refnelts;
+ enum tree_code conv_code;
constructor_elt *elt;
- unsigned char *sel;
- bool maybe_ident;
-
- gcc_checking_assert (gimple_assign_rhs_code (stmt) == CONSTRUCTOR);
op = gimple_assign_rhs1 (stmt);
type = TREE_TYPE (op);
- gcc_checking_assert (TREE_CODE (type) == VECTOR_TYPE);
+ gcc_checking_assert (TREE_CODE (op) == CONSTRUCTOR
+ && TREE_CODE (type) == VECTOR_TYPE);
- nelts = TYPE_VECTOR_SUBPARTS (type);
+ if (!TYPE_VECTOR_SUBPARTS (type).is_constant (&nelts))
+ return false;
elem_type = TREE_TYPE (type);
elem_size = TREE_INT_CST_LOW (TYPE_SIZE (elem_type));
- sel = XALLOCAVEC (unsigned char, nelts);
- orig = NULL;
- maybe_ident = true;
+ orig[0] = NULL;
+ orig[1] = NULL;
+ conv_code = ERROR_MARK;
+ bool maybe_ident = true;
+ bool maybe_blend[2] = { true, true };
+ tree one_constant = NULL_TREE;
+ tree one_nonconstant = NULL_TREE;
+ auto_vec<tree> constants;
+ constants.safe_grow_cleared (nelts);
+ auto_vec<std::pair<unsigned, unsigned>, 64> elts;
FOR_EACH_VEC_SAFE_ELT (CONSTRUCTOR_ELTS (op), i, elt)
{
tree ref, op1;
+ unsigned int elem;
if (i >= nelts)
return false;
- if (TREE_CODE (elt->value) != SSA_NAME)
- return false;
- def_stmt = get_prop_source_stmt (elt->value, false, NULL);
- if (!def_stmt)
- return false;
- code = gimple_assign_rhs_code (def_stmt);
- if (code != BIT_FIELD_REF)
+ /* Look for elements extracted and possibly converted from
+ another vector. */
+ op1 = get_bit_field_ref_def (elt->value, conv_code);
+ if (op1
+ && TREE_CODE ((ref = TREE_OPERAND (op1, 0))) == SSA_NAME
+ && VECTOR_TYPE_P (TREE_TYPE (ref))
+ && useless_type_conversion_p (TREE_TYPE (op1),
+ TREE_TYPE (TREE_TYPE (ref)))
+ && constant_multiple_p (bit_field_offset (op1),
+ bit_field_size (op1), &elem)
+ && TYPE_VECTOR_SUBPARTS (TREE_TYPE (ref)).is_constant (&refnelts))
+ {
+ unsigned int j;
+ for (j = 0; j < 2; ++j)
+ {
+ if (!orig[j])
+ {
+ if (j == 0
+ || useless_type_conversion_p (TREE_TYPE (orig[0]),
+ TREE_TYPE (ref)))
+ break;
+ }
+ else if (ref == orig[j])
+ break;
+ }
+ /* Found a suitable vector element. */
+ if (j < 2)
+ {
+ orig[j] = ref;
+ if (elem != i || j != 0)
+ maybe_ident = false;
+ if (elem != i)
+ maybe_blend[j] = false;
+ elts.safe_push (std::make_pair (j, elem));
+ continue;
+ }
+ /* Else fallthru. */
+ }
+ /* Handle elements not extracted from a vector:
+ 1. constants, by permuting with a constant vector, and
+ 2. a unique non-constant element, by permuting with a splat vector. */
+ if (orig[1]
+ && orig[1] != error_mark_node)
return false;
- op1 = gimple_assign_rhs1 (def_stmt);
- ref = TREE_OPERAND (op1, 0);
- if (orig)
+ orig[1] = error_mark_node;
+ if (CONSTANT_CLASS_P (elt->value))
{
- if (ref != orig)
+ if (one_nonconstant)
return false;
+ if (!one_constant)
+ one_constant = elt->value;
+ constants[i] = elt->value;
}
else
{
- if (TREE_CODE (ref) != SSA_NAME)
+ if (one_constant)
return false;
- if (!useless_type_conversion_p (type, TREE_TYPE (ref)))
+ if (!one_nonconstant)
+ one_nonconstant = elt->value;
+ else if (!operand_equal_p (one_nonconstant, elt->value, 0))
return false;
- orig = ref;
}
- if (TREE_INT_CST_LOW (TREE_OPERAND (op1, 1)) != elem_size)
- return false;
- sel[i] = TREE_INT_CST_LOW (TREE_OPERAND (op1, 2)) / elem_size;
- if (sel[i] != i) maybe_ident = false;
+ elts.safe_push (std::make_pair (1, i));
+ maybe_ident = false;
}
if (i < nelts)
return false;
+ if (! orig[0]
+ || ! VECTOR_TYPE_P (TREE_TYPE (orig[0])))
+ return false;
+ refnelts = TYPE_VECTOR_SUBPARTS (TREE_TYPE (orig[0])).to_constant ();
+ /* We currently do not handle larger destination vectors. */
+ if (refnelts < nelts)
+ return false;
+
if (maybe_ident)
- gimple_assign_set_rhs_from_tree (gsi, orig);
+ {
+ tree conv_src_type
+ = (nelts != refnelts
+ ? (conv_code != ERROR_MARK
+ ? build_vector_type (TREE_TYPE (TREE_TYPE (orig[0])), nelts)
+ : type)
+ : TREE_TYPE (orig[0]));
+ if (conv_code != ERROR_MARK
+ && !supportable_convert_operation (conv_code, type, conv_src_type,
+ &conv_code))
+ {
+ /* Only a few targets implement direct conversion patterns, so try
+ some simple special cases via VEC_[UN]PACK[_FLOAT]_LO_EXPR. */
+ optab optab;
+ tree halfvectype, dblvectype;
+ if (CONVERT_EXPR_CODE_P (conv_code)
+ && (2 * TYPE_PRECISION (TREE_TYPE (TREE_TYPE (orig[0])))
+ == TYPE_PRECISION (TREE_TYPE (type)))
+ && mode_for_vector (as_a <scalar_mode>
+ (TYPE_MODE (TREE_TYPE (TREE_TYPE (orig[0])))),
+ nelts * 2).exists ()
+ && (dblvectype
+ = build_vector_type (TREE_TYPE (TREE_TYPE (orig[0])),
+ nelts * 2))
+ && (optab = optab_for_tree_code (FLOAT_TYPE_P (TREE_TYPE (type))
+ ? VEC_UNPACK_FLOAT_LO_EXPR
+ : VEC_UNPACK_LO_EXPR,
+ dblvectype,
+ optab_default))
+ && (optab_handler (optab, TYPE_MODE (dblvectype))
+ != CODE_FOR_nothing))
+ {
+ gimple_seq stmts = NULL;
+ tree dbl;
+ if (refnelts == nelts)
+ {
+ /* ??? Paradoxical subregs don't exist, so insert into
+ the lower half of a wider zero vector. */
+ dbl = gimple_build (&stmts, BIT_INSERT_EXPR, dblvectype,
+ build_zero_cst (dblvectype), orig[0],
+ bitsize_zero_node);
+ }
+ else if (refnelts == 2 * nelts)
+ dbl = orig[0];
+ else
+ dbl = gimple_build (&stmts, BIT_FIELD_REF, dblvectype,
+ orig[0], TYPE_SIZE (dblvectype),
+ bitsize_zero_node);
+ gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
+ gimple_assign_set_rhs_with_ops (gsi,
+ FLOAT_TYPE_P (TREE_TYPE (type))
+ ? VEC_UNPACK_FLOAT_LO_EXPR
+ : VEC_UNPACK_LO_EXPR,
+ dbl);
+ }
+ else if (CONVERT_EXPR_CODE_P (conv_code)
+ && (TYPE_PRECISION (TREE_TYPE (TREE_TYPE (orig[0])))
+ == 2 * TYPE_PRECISION (TREE_TYPE (type)))
+ && mode_for_vector (as_a <scalar_mode>
+ (TYPE_MODE
+ (TREE_TYPE (TREE_TYPE (orig[0])))),
+ nelts / 2).exists ()
+ && (halfvectype
+ = build_vector_type (TREE_TYPE (TREE_TYPE (orig[0])),
+ nelts / 2))
+ && (optab = optab_for_tree_code (VEC_PACK_TRUNC_EXPR,
+ halfvectype,
+ optab_default))
+ && (optab_handler (optab, TYPE_MODE (halfvectype))
+ != CODE_FOR_nothing))
+ {
+ gimple_seq stmts = NULL;
+ tree low = gimple_build (&stmts, BIT_FIELD_REF, halfvectype,
+ orig[0], TYPE_SIZE (halfvectype),
+ bitsize_zero_node);
+ tree hig = gimple_build (&stmts, BIT_FIELD_REF, halfvectype,
+ orig[0], TYPE_SIZE (halfvectype),
+ TYPE_SIZE (halfvectype));
+ gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
+ gimple_assign_set_rhs_with_ops (gsi, VEC_PACK_TRUNC_EXPR,
+ low, hig);
+ }
+ else
+ return false;
+ update_stmt (gsi_stmt (*gsi));
+ return true;
+ }
+ if (nelts != refnelts)
+ {
+ gassign *lowpart
+ = gimple_build_assign (make_ssa_name (conv_src_type),
+ build3 (BIT_FIELD_REF, conv_src_type,
+ orig[0], TYPE_SIZE (conv_src_type),
+ bitsize_zero_node));
+ gsi_insert_before (gsi, lowpart, GSI_SAME_STMT);
+ orig[0] = gimple_assign_lhs (lowpart);
+ }
+ if (conv_code == ERROR_MARK)
+ {
+ tree src_type = TREE_TYPE (orig[0]);
+ if (!useless_type_conversion_p (type, src_type))
+ {
+ gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (type),
+ TYPE_VECTOR_SUBPARTS (src_type))
+ && useless_type_conversion_p (TREE_TYPE (type),
+ TREE_TYPE (src_type)));
+ tree rhs = build1 (VIEW_CONVERT_EXPR, type, orig[0]);
+ orig[0] = make_ssa_name (type);
+ gassign *assign = gimple_build_assign (orig[0], rhs);
+ gsi_insert_before (gsi, assign, GSI_SAME_STMT);
+ }
+ gimple_assign_set_rhs_from_tree (gsi, orig[0]);
+ }
+ else
+ gimple_assign_set_rhs_with_ops (gsi, conv_code, orig[0],
+ NULL_TREE, NULL_TREE);
+ }
else
{
- tree mask_type, *mask_elts;
+ /* If we combine a vector with a non-vector, avoid cases where
+ we'll obviously end up with more GIMPLE stmts: those where we
+ won't later fold this to a single insert into the vector and
+ where we had only a single extract originally. See PR92819. */
+ if (nelts == 2
+ && refnelts > 2
+ && orig[1] == error_mark_node
+ && !maybe_blend[0])
+ return false;
+ tree mask_type, perm_type, conv_src_type;
+ perm_type = TREE_TYPE (orig[0]);
+ conv_src_type = (nelts == refnelts
+ ? perm_type
+ : build_vector_type (TREE_TYPE (perm_type), nelts));
+ if (conv_code != ERROR_MARK
+ && !supportable_convert_operation (conv_code, type, conv_src_type,
+ &conv_code))
+ return false;
- if (!can_vec_perm_p (TYPE_MODE (type), false, sel))
+ /* Now that we know the number of elements of the source, build the
+ permute vector.
+ ??? When the second vector has constant values we can shuffle
+ it and its source indexes to make the permutation supported.
+ For now it mimics a blend. */
+ vec_perm_builder sel (refnelts, refnelts, 1);
+ bool all_same_p = true;
+ for (i = 0; i < elts.length (); ++i)
+ {
+ sel.quick_push (elts[i].second + elts[i].first * refnelts);
+ all_same_p &= known_eq (sel[i], sel[0]);
+ }
+ /* And fill the tail with "something". The values really don't
+ matter, and ideally we'd allow VEC_PERM to have a smaller
+ destination vector. As a heuristic:
+
+ (a) if what we have so far duplicates a single element, make the
+ tail do the same
+
+ (b) otherwise preserve a uniform orig[0]. This facilitates
+ later pattern-matching of VEC_PERM_EXPR to a BIT_INSERT_EXPR. */
+ for (; i < refnelts; ++i)
+ sel.quick_push (all_same_p
+ ? sel[0]
+ : (elts[0].second == 0 && elts[0].first == 0
+ ? 0 : refnelts) + i);
+ vec_perm_indices indices (sel, orig[1] ? 2 : 1, refnelts);
+ if (!can_vec_perm_const_p (TYPE_MODE (perm_type), indices))
return false;
mask_type
= build_vector_type (build_nonstandard_integer_type (elem_size, 1),
- nelts);
+ refnelts);
if (GET_MODE_CLASS (TYPE_MODE (mask_type)) != MODE_VECTOR_INT
- || GET_MODE_SIZE (TYPE_MODE (mask_type))
- != GET_MODE_SIZE (TYPE_MODE (type)))
+ || maybe_ne (GET_MODE_SIZE (TYPE_MODE (mask_type)),
+ GET_MODE_SIZE (TYPE_MODE (perm_type))))
return false;
- mask_elts = XALLOCAVEC (tree, nelts);
- for (i = 0; i < nelts; i++)
- mask_elts[i] = build_int_cst (TREE_TYPE (mask_type), sel[i]);
- op2 = build_vector (mask_type, mask_elts);
- gimple_assign_set_rhs_with_ops (gsi, VEC_PERM_EXPR, orig, orig, op2);
+ tree op2 = vec_perm_indices_to_tree (mask_type, indices);
+ bool converted_orig1 = false;
+ gimple_seq stmts = NULL;
+ if (!orig[1])
+ orig[1] = orig[0];
+ else if (orig[1] == error_mark_node
+ && one_nonconstant)
+ {
+ /* ??? We could check whether it is safe to convert to the
+ original element type. */
+ converted_orig1 = conv_code != ERROR_MARK;
+ orig[1] = gimple_build_vector_from_val (&stmts, UNKNOWN_LOCATION,
+ converted_orig1
+ ? type : perm_type,
+ one_nonconstant);
+ }
+ else if (orig[1] == error_mark_node)
+ {
+ /* ??? See if we can convert the vector to the original type. */
+ converted_orig1 = conv_code != ERROR_MARK;
+ unsigned n = converted_orig1 ? nelts : refnelts;
+ tree_vector_builder vec (converted_orig1
+ ? type : perm_type, n, 1);
+ for (unsigned i = 0; i < n; ++i)
+ if (i < nelts && constants[i])
+ vec.quick_push (constants[i]);
+ else
+ /* ??? Push a don't-care value. */
+ vec.quick_push (one_constant);
+ orig[1] = vec.build ();
+ }
+ tree blend_op2 = NULL_TREE;
+ if (converted_orig1)
+ {
+ /* Make sure we can do a blend in the target type. */
+ vec_perm_builder sel (nelts, nelts, 1);
+ for (i = 0; i < elts.length (); ++i)
+ sel.quick_push (elts[i].first
+ ? elts[i].second + nelts : i);
+ vec_perm_indices indices (sel, 2, nelts);
+ if (!can_vec_perm_const_p (TYPE_MODE (type), indices))
+ return false;
+ mask_type
+ = build_vector_type (build_nonstandard_integer_type (elem_size, 1),
+ nelts);
+ if (GET_MODE_CLASS (TYPE_MODE (mask_type)) != MODE_VECTOR_INT
+ || maybe_ne (GET_MODE_SIZE (TYPE_MODE (mask_type)),
+ GET_MODE_SIZE (TYPE_MODE (type))))
+ return false;
+ blend_op2 = vec_perm_indices_to_tree (mask_type, indices);
+ }
+ tree orig1_for_perm
+ = converted_orig1 ? build_zero_cst (perm_type) : orig[1];
+ tree res = gimple_build (&stmts, VEC_PERM_EXPR, perm_type,
+ orig[0], orig1_for_perm, op2);
+ if (nelts != refnelts)
+ res = gimple_build (&stmts, BIT_FIELD_REF,
+ conv_code != ERROR_MARK ? conv_src_type : type,
+ res, TYPE_SIZE (type), bitsize_zero_node);
+ if (conv_code != ERROR_MARK)
+ res = gimple_build (&stmts, conv_code, type, res);
+ else if (!useless_type_conversion_p (type, TREE_TYPE (res)))
+ {
+ gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (type),
+ TYPE_VECTOR_SUBPARTS (perm_type))
+ && useless_type_conversion_p (TREE_TYPE (type),
+ TREE_TYPE (perm_type)));
+ res = gimple_build (&stmts, VIEW_CONVERT_EXPR, type, res);
+ }
+ /* Blend in the actual constant. */
+ if (converted_orig1)
+ res = gimple_build (&stmts, VEC_PERM_EXPR, type,
+ res, orig[1], blend_op2);
+ gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
+ gimple_assign_set_rhs_with_ops (gsi, SSA_NAME, res);
}
update_stmt (gsi_stmt (*gsi));
return true;
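At the source level, the constructors simplify_vector_constructor now
handles include extracts from one or two vectors, optionally converted,
plus constant or splat tails.  A minimal sketch using the generic vector
extension (type and function names are illustrative):

    typedef float v4sf __attribute__ ((vector_size (16)));

    v4sf
    swap_pairs (v4sf x)
    {
      /* A CONSTRUCTOR of BIT_FIELD_REFs of x; becomes a single
         VEC_PERM_EXPR <x, x, { 1, 0, 3, 2 }>.  */
      return (v4sf) { x[1], x[0], x[3], x[2] };
    }

    v4sf
    insert_scalar (v4sf x, float s)
    {
      /* One unique non-constant element: handled by permuting x
         against a splat of s (the orig[1] == error_mark_node path).  */
      return (v4sf) { x[0], s, x[2], x[3] };
    }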
lattice.create (num_ssa_names);
lattice.quick_grow_cleared (num_ssa_names);
int *postorder = XNEWVEC (int, n_basic_blocks_for_fn (fun));
- int postorder_num = inverted_post_order_compute (postorder);
- auto_vec<gimple, 4> to_fixup;
+ int postorder_num = pre_and_rev_post_order_compute_fn (cfun, NULL,
+ postorder, false);
+ auto_vec<gimple *, 4> to_fixup;
+ auto_vec<gimple *, 32> to_remove;
to_purge = BITMAP_ALLOC (NULL);
for (int i = 0; i < postorder_num; ++i)
{
gimple_stmt_iterator gsi;
basic_block bb = BASIC_BLOCK_FOR_FN (fun, postorder[i]);
+ /* Record degenerate PHIs in the lattice. */
+ for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
+ gsi_next (&si))
+ {
+ gphi *phi = si.phi ();
+ tree res = gimple_phi_result (phi);
+ if (virtual_operand_p (res))
+ continue;
+
+ use_operand_p use_p;
+ ssa_op_iter it;
+ tree first = NULL_TREE;
+ bool all_same = true;
+ FOR_EACH_PHI_ARG (use_p, phi, it, SSA_OP_USE)
+ {
+ tree use = USE_FROM_PTR (use_p);
+ if (! first)
+ first = use;
+ else if (! operand_equal_p (first, use, 0))
+ {
+ all_same = false;
+ break;
+ }
+ }
+ if (all_same)
+ {
+ if (may_propagate_copy (res, first))
+ to_remove.safe_push (phi);
+ fwprop_set_lattice_val (res, first);
+ }
+ }
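A degenerate PHI here is one whose arguments are all equal; a sketch of
the GIMPLE shape being recorded (SSA names illustrative):

    # x_3 = PHI <x_1 (2), x_1 (3)>
    /* All arguments equal x_1, so the lattice maps x_3 to x_1 and,
       if may_propagate_copy allows it, the PHI is queued on
       to_remove.  */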
+
/* Apply forward propagation to all stmts in the basic-block.
Note we update GSI within the loop as necessary. */
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); )
{
- gimple stmt = gsi_stmt (gsi);
+ gimple *stmt = gsi_stmt (gsi);
tree lhs, rhs;
enum tree_code code;
/* If this statement sets an SSA_NAME to an address,
try to propagate the address into the uses of the SSA_NAME. */
- if (code == ADDR_EXPR
- /* Handle pointer conversions on invariant addresses
- as well, as this is valid gimple. */
- || (CONVERT_EXPR_CODE_P (code)
- && TREE_CODE (rhs) == ADDR_EXPR
- && POINTER_TYPE_P (TREE_TYPE (lhs))))
+ if ((code == ADDR_EXPR
+ /* Handle pointer conversions on invariant addresses
+ as well, as this is valid gimple. */
+ || (CONVERT_EXPR_CODE_P (code)
+ && TREE_CODE (rhs) == ADDR_EXPR
+ && POINTER_TYPE_P (TREE_TYPE (lhs))))
+ && TREE_CODE (TREE_OPERAND (rhs, 0)) != TARGET_MEM_REF)
{
tree base = get_base_address (TREE_OPERAND (rhs, 0));
if ((!base
&& !gimple_has_volatile_ops (stmt)
&& (TREE_CODE (gimple_assign_rhs1 (stmt))
!= TARGET_MEM_REF)
- && !stmt_can_throw_internal (stmt))
+ && !stmt_can_throw_internal (cfun, stmt))
{
/* Rewrite loads used only in real/imagpart extractions to
component-wise loads. */
bool rewrite = true;
FOR_EACH_IMM_USE_FAST (use_p, iter, lhs)
{
- gimple use_stmt = USE_STMT (use_p);
+ gimple *use_stmt = USE_STMT (use_p);
if (is_gimple_debug (use_stmt))
continue;
if (!is_gimple_assign (use_stmt)
|| (gimple_assign_rhs_code (use_stmt) != REALPART_EXPR
- && gimple_assign_rhs_code (use_stmt) != IMAGPART_EXPR))
+ && gimple_assign_rhs_code (use_stmt) != IMAGPART_EXPR)
+ || TREE_OPERAND (gimple_assign_rhs1 (use_stmt), 0) != lhs)
{
rewrite = false;
break;
}
if (rewrite)
{
- gimple use_stmt;
+ gimple *use_stmt;
FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
{
if (is_gimple_debug (use_stmt))
tree new_rhs = build1 (gimple_assign_rhs_code (use_stmt),
TREE_TYPE (TREE_TYPE (rhs)),
unshare_expr (rhs));
- gimple new_stmt
+ gimple *new_stmt
+ = gimple_build_assign (gimple_assign_lhs (use_stmt),
+ new_rhs);
+
+ location_t loc = gimple_location (use_stmt);
+ gimple_set_location (new_stmt, loc);
+ gimple_stmt_iterator gsi2 = gsi_for_stmt (use_stmt);
+ unlink_stmt_vdef (use_stmt);
+ gsi_remove (&gsi2, true);
+
+ gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
+ }
+
+ release_defs (stmt);
+ gsi_remove (&gsi, true);
+ }
+ else
+ gsi_next (&gsi);
+ }
+ else if (TREE_CODE (TREE_TYPE (lhs)) == VECTOR_TYPE
+ && TYPE_MODE (TREE_TYPE (lhs)) == BLKmode
+ && gimple_assign_load_p (stmt)
+ && !gimple_has_volatile_ops (stmt)
+ && (TREE_CODE (gimple_assign_rhs1 (stmt))
+ != TARGET_MEM_REF)
+ && !stmt_can_throw_internal (cfun, stmt))
+ {
+ /* Rewrite loads used only in BIT_FIELD_REF extractions to
+ component-wise loads. */
+ use_operand_p use_p;
+ imm_use_iterator iter;
+ bool rewrite = true;
+ FOR_EACH_IMM_USE_FAST (use_p, iter, lhs)
+ {
+ gimple *use_stmt = USE_STMT (use_p);
+ if (is_gimple_debug (use_stmt))
+ continue;
+ if (!is_gimple_assign (use_stmt)
+ || gimple_assign_rhs_code (use_stmt) != BIT_FIELD_REF
+ || TREE_OPERAND (gimple_assign_rhs1 (use_stmt), 0) != lhs)
+ {
+ rewrite = false;
+ break;
+ }
+ }
+ if (rewrite)
+ {
+ gimple *use_stmt;
+ FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
+ {
+ if (is_gimple_debug (use_stmt))
+ {
+ if (gimple_debug_bind_p (use_stmt))
+ {
+ gimple_debug_bind_reset_value (use_stmt);
+ update_stmt (use_stmt);
+ }
+ continue;
+ }
+
+ tree bfr = gimple_assign_rhs1 (use_stmt);
+ tree new_rhs = fold_build3 (BIT_FIELD_REF,
+ TREE_TYPE (bfr),
+ unshare_expr (rhs),
+ TREE_OPERAND (bfr, 1),
+ TREE_OPERAND (bfr, 2));
+ gimple *new_stmt
= gimple_build_assign (gimple_assign_lhs (use_stmt),
new_rhs);
else
gsi_next (&gsi);
}
+
else if (code == COMPLEX_EXPR)
{
/* Rewrite stores of a single-use complex build expression
to component-wise stores. */
use_operand_p use_p;
- gimple use_stmt;
+ gimple *use_stmt;
if (single_imm_use (lhs, &use_p, &use_stmt)
&& gimple_store_p (use_stmt)
&& !gimple_has_volatile_ops (use_stmt)
!= TARGET_MEM_REF))
{
tree use_lhs = gimple_assign_lhs (use_stmt);
+ if (auto_var_p (use_lhs))
+ DECL_NOT_GIMPLE_REG_P (use_lhs) = 1;
tree new_lhs = build1 (REALPART_EXPR,
TREE_TYPE (TREE_TYPE (use_lhs)),
unshare_expr (use_lhs));
- gimple new_stmt = gimple_build_assign (new_lhs, rhs);
+ gimple *new_stmt = gimple_build_assign (new_lhs, rhs);
location_t loc = gimple_location (use_stmt);
gimple_set_location (new_stmt, loc);
gimple_set_vuse (new_stmt, gimple_vuse (use_stmt));
else
gsi_next (&gsi);
}
+ else if (code == CONSTRUCTOR
+ && VECTOR_TYPE_P (TREE_TYPE (rhs))
+ && TYPE_MODE (TREE_TYPE (rhs)) == BLKmode
+ && CONSTRUCTOR_NELTS (rhs) > 0
+ && (!VECTOR_TYPE_P (TREE_TYPE (CONSTRUCTOR_ELT (rhs, 0)->value))
+ || (TYPE_MODE (TREE_TYPE (CONSTRUCTOR_ELT (rhs, 0)->value))
+ != BLKmode)))
+ {
+ /* Rewrite stores of a single-use vector constructor
+ to component-wise stores if the mode isn't supported. */
+ use_operand_p use_p;
+ gimple *use_stmt;
+ if (single_imm_use (lhs, &use_p, &use_stmt)
+ && gimple_store_p (use_stmt)
+ && !gimple_has_volatile_ops (use_stmt)
+ && !stmt_can_throw_internal (cfun, use_stmt)
+ && is_gimple_assign (use_stmt)
+ && (TREE_CODE (gimple_assign_lhs (use_stmt))
+ != TARGET_MEM_REF))
+ {
+ tree elt_t = TREE_TYPE (CONSTRUCTOR_ELT (rhs, 0)->value);
+ unsigned HOST_WIDE_INT elt_w
+ = tree_to_uhwi (TYPE_SIZE (elt_t));
+ unsigned HOST_WIDE_INT n
+ = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (rhs)));
+ tree use_lhs = gimple_assign_lhs (use_stmt);
+ if (auto_var_p (use_lhs))
+ DECL_NOT_GIMPLE_REG_P (use_lhs) = 1;
+ for (unsigned HOST_WIDE_INT bi = 0; bi < n; bi += elt_w)
+ {
+ unsigned HOST_WIDE_INT ci = bi / elt_w;
+ tree new_rhs;
+ if (ci < CONSTRUCTOR_NELTS (rhs))
+ new_rhs = CONSTRUCTOR_ELT (rhs, ci)->value;
+ else
+ new_rhs = build_zero_cst (elt_t);
+ tree new_lhs = build3 (BIT_FIELD_REF,
+ elt_t,
+ unshare_expr (use_lhs),
+ bitsize_int (elt_w),
+ bitsize_int (bi));
+ gimple *new_stmt = gimple_build_assign (new_lhs, new_rhs);
+ location_t loc = gimple_location (use_stmt);
+ gimple_set_location (new_stmt, loc);
+ gimple_set_vuse (new_stmt, gimple_vuse (use_stmt));
+ gimple_set_vdef (new_stmt,
+ make_ssa_name (gimple_vop (cfun)));
+ SSA_NAME_DEF_STMT (gimple_vdef (new_stmt)) = new_stmt;
+ gimple_set_vuse (use_stmt, gimple_vdef (new_stmt));
+ gimple_stmt_iterator gsi2 = gsi_for_stmt (use_stmt);
+ gsi_insert_before (&gsi2, new_stmt, GSI_SAME_STMT);
+ }
+ gimple_stmt_iterator gsi2 = gsi_for_stmt (use_stmt);
+ unlink_stmt_vdef (use_stmt);
+ release_defs (use_stmt);
+ gsi_remove (&gsi2, true);
+ release_defs (stmt);
+ gsi_remove (&gsi, true);
+ }
+ else
+ gsi_next (&gsi);
+ }
else
gsi_next (&gsi);
}
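The new CONSTRUCTOR arm above splits stores of BLKmode vector
constructors into per-element stores.  A minimal sketch of code it
targets, using the generic vector extension (type and sizes are
illustrative, assuming the target lacks a 64-byte vector mode so the
type gets BLKmode):

    typedef char v64qi __attribute__ ((vector_size (64)));

    void
    store_it (v64qi *p, char a, char b)
    {
      /* Becomes 64 component-wise BIT_FIELD_REF stores; elements not
         present in the constructor are stored as zeros.  */
      *p = (v64qi) { a, b };
    }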
/* Combine stmts with the stmts defining their operands.
Note we update GSI within the loop as necessary. */
- for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi);)
+ for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
{
- gimple stmt = gsi_stmt (gsi);
- gimple orig_stmt = stmt;
- bool changed = false;
- bool was_noreturn = (is_gimple_call (stmt)
- && gimple_call_noreturn_p (stmt));
+ gimple *stmt = gsi_stmt (gsi);
/* Mark stmt as potentially needing revisiting. */
gimple_set_plf (stmt, GF_PLF_1, false);
- if (fold_stmt (&gsi, fwprop_ssa_val))
+ /* Substitute from our lattice. We need to do so only once. */
+ bool substituted_p = false;
+ use_operand_p usep;
+ ssa_op_iter iter;
+ FOR_EACH_SSA_USE_OPERAND (usep, stmt, iter, SSA_OP_USE)
{
- changed = true;
- stmt = gsi_stmt (gsi);
- if (maybe_clean_or_replace_eh_stmt (orig_stmt, stmt))
- bitmap_set_bit (to_purge, bb->index);
- if (!was_noreturn
- && is_gimple_call (stmt) && gimple_call_noreturn_p (stmt))
- to_fixup.safe_push (stmt);
- /* Cleanup the CFG if we simplified a condition to
- true or false. */
- if (gcond *cond = dyn_cast <gcond *> (stmt))
- if (gimple_cond_true_p (cond)
- || gimple_cond_false_p (cond))
- cfg_changed = true;
- update_stmt (stmt);
+ tree use = USE_FROM_PTR (usep);
+ tree val = fwprop_ssa_val (use);
+ if (val && val != use && may_propagate_copy (use, val))
+ {
+ propagate_value (usep, val);
+ substituted_p = true;
+ }
}
+ if (substituted_p
+ && is_gimple_assign (stmt)
+ && gimple_assign_rhs_code (stmt) == ADDR_EXPR)
+ recompute_tree_invariant_for_addr_expr (gimple_assign_rhs1 (stmt));
- switch (gimple_code (stmt))
+ bool changed;
+ do
{
- case GIMPLE_ASSIGN:
- {
- tree rhs1 = gimple_assign_rhs1 (stmt);
- enum tree_code code = gimple_assign_rhs_code (stmt);
+ gimple *orig_stmt = stmt = gsi_stmt (gsi);
+ bool was_noreturn = (is_gimple_call (stmt)
+ && gimple_call_noreturn_p (stmt));
+ changed = false;
+
+ if (fold_stmt (&gsi, fwprop_ssa_val))
+ {
+ changed = true;
+ stmt = gsi_stmt (gsi);
+ /* Cleanup the CFG if we simplified a condition to
+ true or false. */
+ if (gcond *cond = dyn_cast <gcond *> (stmt))
+ if (gimple_cond_true_p (cond)
+ || gimple_cond_false_p (cond))
+ cfg_changed = true;
+ }
+
+ if (changed || substituted_p)
+ {
+ if (maybe_clean_or_replace_eh_stmt (orig_stmt, stmt))
+ bitmap_set_bit (to_purge, bb->index);
+ if (!was_noreturn
+ && is_gimple_call (stmt) && gimple_call_noreturn_p (stmt))
+ to_fixup.safe_push (stmt);
+ update_stmt (stmt);
+ substituted_p = false;
+ }
- if (code == COND_EXPR
- || code == VEC_COND_EXPR)
+ switch (gimple_code (stmt))
+ {
+ case GIMPLE_ASSIGN:
{
- /* In this case the entire COND_EXPR is in rhs1. */
- if (forward_propagate_into_cond (&gsi))
+ tree rhs1 = gimple_assign_rhs1 (stmt);
+ enum tree_code code = gimple_assign_rhs_code (stmt);
+
+ if (code == COND_EXPR
+ || code == VEC_COND_EXPR)
{
- changed = true;
- stmt = gsi_stmt (gsi);
+ /* In this case the entire COND_EXPR is in rhs1. */
+ if (forward_propagate_into_cond (&gsi))
+ {
+ changed = true;
+ stmt = gsi_stmt (gsi);
+ }
}
+ else if (TREE_CODE_CLASS (code) == tcc_comparison)
+ {
+ int did_something;
+ did_something = forward_propagate_into_comparison (&gsi);
+ if (maybe_clean_or_replace_eh_stmt (stmt, gsi_stmt (gsi)))
+ bitmap_set_bit (to_purge, bb->index);
+ if (did_something == 2)
+ cfg_changed = true;
+ changed = did_something != 0;
+ }
+ else if ((code == PLUS_EXPR
+ || code == BIT_IOR_EXPR
+ || code == BIT_XOR_EXPR)
+ && simplify_rotate (&gsi))
+ changed = true;
+ else if (code == VEC_PERM_EXPR)
+ {
+ int did_something = simplify_permutation (&gsi);
+ if (did_something == 2)
+ cfg_changed = true;
+ changed = did_something != 0;
+ }
+ else if (code == BIT_FIELD_REF)
+ changed = simplify_bitfield_ref (&gsi);
+ else if (code == CONSTRUCTOR
+ && TREE_CODE (TREE_TYPE (rhs1)) == VECTOR_TYPE)
+ changed = simplify_vector_constructor (&gsi);
+ else if (code == ARRAY_REF)
+ changed = simplify_count_trailing_zeroes (&gsi);
+ break;
}
- else if (TREE_CODE_CLASS (code) == tcc_comparison)
+
+ case GIMPLE_SWITCH:
+ changed = simplify_gimple_switch (as_a <gswitch *> (stmt));
+ break;
+
+ case GIMPLE_COND:
{
- int did_something;
- did_something = forward_propagate_into_comparison (&gsi);
+ int did_something = forward_propagate_into_gimple_cond
+ (as_a <gcond *> (stmt));
if (did_something == 2)
cfg_changed = true;
changed = did_something != 0;
+ break;
}
- else if ((code == PLUS_EXPR
- || code == BIT_IOR_EXPR
- || code == BIT_XOR_EXPR)
- && simplify_rotate (&gsi))
- changed = true;
- else if (code == VEC_PERM_EXPR)
+
+ case GIMPLE_CALL:
{
- int did_something = simplify_permutation (&gsi);
- if (did_something == 2)
- cfg_changed = true;
- changed = did_something != 0;
+ tree callee = gimple_call_fndecl (stmt);
+ if (callee != NULL_TREE
+ && fndecl_built_in_p (callee, BUILT_IN_NORMAL))
+ changed = simplify_builtin_call (&gsi, callee);
+ break;
}
- else if (code == BIT_FIELD_REF)
- changed = simplify_bitfield_ref (&gsi);
- else if (code == CONSTRUCTOR
- && TREE_CODE (TREE_TYPE (rhs1)) == VECTOR_TYPE)
- changed = simplify_vector_constructor (&gsi);
- break;
- }
-
- case GIMPLE_SWITCH:
- changed = simplify_gimple_switch (as_a <gswitch *> (stmt));
- break;
-
- case GIMPLE_COND:
- {
- int did_something
- = forward_propagate_into_gimple_cond (as_a <gcond *> (stmt));
- if (did_something == 2)
- cfg_changed = true;
- changed = did_something != 0;
- break;
- }
- case GIMPLE_CALL:
- {
- tree callee = gimple_call_fndecl (stmt);
- if (callee != NULL_TREE
- && DECL_BUILT_IN_CLASS (callee) == BUILT_IN_NORMAL)
- changed = simplify_builtin_call (&gsi, callee);
- break;
- }
+ default:;
+ }
- default:;
+ if (changed)
+ {
+ /* If the stmt changed then re-visit it and the statements
+ inserted before it. */
+ for (; !gsi_end_p (gsi); gsi_prev (&gsi))
+ if (gimple_plf (gsi_stmt (gsi), GF_PLF_1))
+ break;
+ if (gsi_end_p (gsi))
+ gsi = gsi_start_bb (bb);
+ else
+ gsi_next (&gsi);
+ }
}
+ while (changed);
- if (changed)
- {
- /* If the stmt changed then re-visit it and the statements
- inserted before it. */
- for (; !gsi_end_p (gsi); gsi_prev (&gsi))
- if (gimple_plf (gsi_stmt (gsi), GF_PLF_1))
- break;
- if (gsi_end_p (gsi))
- gsi = gsi_start_bb (bb);
- else
- gsi_next (&gsi);
- }
- else
- {
- /* Stmt no longer needs to be revisited. */
- gimple_set_plf (stmt, GF_PLF_1, true);
+ /* Stmt no longer needs to be revisited. */
+ stmt = gsi_stmt (gsi);
+ gcc_checking_assert (!gimple_plf (stmt, GF_PLF_1));
+ gimple_set_plf (stmt, GF_PLF_1, true);
- /* Fill up the lattice. */
- if (gimple_assign_single_p (stmt))
+ /* Fill up the lattice. */
+ if (gimple_assign_single_p (stmt))
+ {
+ tree lhs = gimple_assign_lhs (stmt);
+ tree rhs = gimple_assign_rhs1 (stmt);
+ if (TREE_CODE (lhs) == SSA_NAME)
{
- tree lhs = gimple_assign_lhs (stmt);
- tree rhs = gimple_assign_rhs1 (stmt);
- if (TREE_CODE (lhs) == SSA_NAME)
- {
- tree val = lhs;
- if (TREE_CODE (rhs) == SSA_NAME)
- val = fwprop_ssa_val (rhs);
- else if (is_gimple_min_invariant (rhs))
- val = rhs;
- fwprop_set_lattice_val (lhs, val);
- }
+ tree val = lhs;
+ if (TREE_CODE (rhs) == SSA_NAME)
+ val = fwprop_ssa_val (rhs);
+ else if (is_gimple_min_invariant (rhs))
+ val = rhs;
+ /* If we can propagate the lattice value, mark the
+ stmt for removal. */
+ if (val != lhs
+ && may_propagate_copy (lhs, val))
+ to_remove.safe_push (stmt);
+ fwprop_set_lattice_val (lhs, val);
}
-
- gsi_next (&gsi);
}
+ else if (gimple_nop_p (stmt))
+ to_remove.safe_push (stmt);
}
+
+ /* Substitute in destination PHI arguments. */
+ edge_iterator ei;
+ edge e;
+ FOR_EACH_EDGE (e, ei, bb->succs)
+ for (gphi_iterator gsi = gsi_start_phis (e->dest);
+ !gsi_end_p (gsi); gsi_next (&gsi))
+ {
+ gphi *phi = gsi.phi ();
+ use_operand_p use_p = PHI_ARG_DEF_PTR_FROM_EDGE (phi, e);
+ tree arg = USE_FROM_PTR (use_p);
+ if (TREE_CODE (arg) != SSA_NAME
+ || virtual_operand_p (arg))
+ continue;
+ tree val = fwprop_ssa_val (arg);
+ if (val != arg
+ && may_propagate_copy (arg, val))
+ propagate_value (use_p, val);
+ }
}
free (postorder);
lattice.release ();
+ /* Remove stmts in reverse order to make debug stmt creation possible. */
+ while (!to_remove.is_empty())
+ {
+ gimple *stmt = to_remove.pop ();
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ {
+ fprintf (dump_file, "Removing dead stmt ");
+ print_gimple_stmt (dump_file, stmt, 0);
+ fprintf (dump_file, "\n");
+ }
+ gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
+ if (gimple_code (stmt) == GIMPLE_PHI)
+ remove_phi_node (&gsi, true);
+ else
+ {
+ unlink_stmt_vdef (stmt);
+ gsi_remove (&gsi, true);
+ release_defs (stmt);
+ }
+ }
+
/* Fix up stmts that became noreturn calls. This may require splitting
blocks and thus isn't possible during the walk. Do this
in reverse order so we don't inadvertently remove a stmt we want to
fix up by visiting a dominating now-noreturn call first. */
while (!to_fixup.is_empty ())
{
- gimple stmt = to_fixup.pop ();
+ gimple *stmt = to_fixup.pop ();
if (dump_file && dump_flags & TDF_DETAILS)
{
fprintf (dump_file, "Fixing up noreturn call ");
- print_gimple_stmt (dump_file, stmt, 0, 0);
+ print_gimple_stmt (dump_file, stmt, 0);
fprintf (dump_file, "\n");
}
cfg_changed |= fixup_noreturn_call (stmt);