/* If-conversion for vectorizer.
- Copyright (C) 2004-2015 Free Software Foundation, Inc.
+ Copyright (C) 2004-2016 Free Software Foundation, Inc.
Contributed by Devang Patel <dpatel@apple.com>
This file is part of GCC.
#include "system.h"
#include "coretypes.h"
#include "backend.h"
-#include "cfghooks.h"
+#include "rtl.h"
#include "tree.h"
#include "gimple.h"
-#include "rtl.h"
+#include "cfghooks.h"
+#include "tree-pass.h"
#include "ssa.h"
+#include "expmed.h"
+#include "optabs-query.h"
+#include "gimple-pretty-print.h"
#include "alias.h"
#include "fold-const.h"
#include "stor-layout.h"
-#include "flags.h"
-#include "gimple-pretty-print.h"
-#include "internal-fn.h"
#include "gimple-fold.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "tree-into-ssa.h"
#include "tree-ssa.h"
#include "cfgloop.h"
-#include "tree-chrec.h"
#include "tree-data-ref.h"
#include "tree-scalar-evolution.h"
#include "tree-ssa-loop-ivopts.h"
#include "tree-ssa-address.h"
-#include "tree-pass.h"
#include "dbgcnt.h"
-#include "insn-config.h"
-#include "expmed.h"
-#include "dojump.h"
-#include "explow.h"
-#include "calls.h"
-#include "emit-rtl.h"
-#include "varasm.h"
-#include "stmt.h"
-#include "expr.h"
-#include "insn-codes.h"
-#include "optabs.h"
#include "tree-hash-traits.h"
+#include "varasm.h"
+#include "builtins.h"
+#include "params.h"
/* List of basic blocks in if-conversion-suitable order. */
static basic_block *ifc_bbs;
/* Apply more aggressive (extended) if-conversion if true. */
static bool aggressive_if_conv;
+/* Hash table to store references, DR pairs. */
+static hash_map<tree_operand_hash, data_reference_p> *ref_DR_map;
+
+/* Hash table to store base reference, DR pairs. */
+static hash_map<tree_operand_hash, data_reference_p> *baseref_DR_map;
+
/* Structure used to predicate basic blocks. This is attached to the
->aux field of the BBs in the loop to be if-converted. */
-typedef struct bb_predicate_s {
+struct bb_predicate {
/* The condition under which this basic block is executed. */
tree predicate;
recorded here, in order to avoid the duplication of computations
that occur in previous conditions. See PR44483. */
gimple_seq predicate_gimplified_stmts;
-} *bb_predicate_p;
+};
/* Returns true when the basic block BB has a predicate. */
static inline tree
bb_predicate (basic_block bb)
{
- return ((bb_predicate_p) bb->aux)->predicate;
+ return ((struct bb_predicate *) bb->aux)->predicate;
}
/* Sets the gimplified predicate COND for basic block BB. */
gcc_assert ((TREE_CODE (cond) == TRUTH_NOT_EXPR
&& is_gimple_condexpr (TREE_OPERAND (cond, 0)))
|| is_gimple_condexpr (cond));
- ((bb_predicate_p) bb->aux)->predicate = cond;
+ ((struct bb_predicate *) bb->aux)->predicate = cond;
}
/* Returns the sequence of statements of the gimplification of the
static inline gimple_seq
bb_predicate_gimplified_stmts (basic_block bb)
{
- return ((bb_predicate_p) bb->aux)->predicate_gimplified_stmts;
+ return ((struct bb_predicate *) bb->aux)->predicate_gimplified_stmts;
}
/* Sets the sequence of statements STMTS of the gimplification of the
static inline void
set_bb_predicate_gimplified_stmts (basic_block bb, gimple_seq stmts)
{
- ((bb_predicate_p) bb->aux)->predicate_gimplified_stmts = stmts;
+ ((struct bb_predicate *) bb->aux)->predicate_gimplified_stmts = stmts;
}
/* Adds the sequence of statements STMTS to the sequence of statements
add_bb_predicate_gimplified_stmts (basic_block bb, gimple_seq stmts)
{
gimple_seq_add_seq
- (&(((bb_predicate_p) bb->aux)->predicate_gimplified_stmts), stmts);
+ (&(((struct bb_predicate *) bb->aux)->predicate_gimplified_stmts), stmts);
}
/* Initializes to TRUE the predicate of basic block BB. */
static inline void
init_bb_predicate (basic_block bb)
{
- bb->aux = XNEW (struct bb_predicate_s);
+ bb->aux = XNEW (struct bb_predicate);
set_bb_predicate_gimplified_stmts (bb, NULL);
set_bb_predicate (bb, boolean_true_node);
}
ifc_temp_var (tree type, tree expr, gimple_stmt_iterator *gsi)
{
tree new_name = make_temp_ssa_name (type, NULL, "_ifc_");
- gimple stmt = gimple_build_assign (new_name, expr);
+ gimple *stmt = gimple_build_assign (new_name, expr);
gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
return new_name;
}
static enum tree_code
parse_predicate (tree cond, tree *op0, tree *op1)
{
- gimple s;
+ gimple *s;
if (TREE_CODE (cond) == SSA_NAME
&& is_gimple_assign (s = SSA_NAME_DEF_STMT (cond)))
PHI is not if-convertible if:
- it has more than 2 arguments.
- When the flag_tree_loop_if_convert_stores is not set, PHI is not
+ When we didn't see if-convertible stores, PHI is not
if-convertible if:
- a virtual PHI is immediately used in another PHI node,
- there is a virtual PHI in a BB other than the loop->header.
}
}
- if (flag_tree_loop_if_convert_stores || any_mask_load_store)
+ if (any_mask_load_store)
return true;
- /* When the flag_tree_loop_if_convert_stores is not set, check
+ /* When there were no if-convertible stores, check
that there are no memory writes in the branches of the loop to be
if-converted. */
if (virtual_operand_p (gimple_phi_result (phi)))
FOR_EACH_IMM_USE_FAST (use_p, imm_iter, gimple_phi_result (phi))
{
if (gimple_code (USE_STMT (use_p)) == GIMPLE_PHI
- && USE_STMT (use_p) != (gimple) phi)
+ && USE_STMT (use_p) != phi)
{
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, "Difficult to handle this virtual phi.\n");
each DR->aux field. */
struct ifc_dr {
- /* -1 when not initialized, 0 when false, 1 when true. */
- int written_at_least_once;
+ bool rw_unconditionally;
+ bool w_unconditionally;
+ bool written_at_least_once;
- /* -1 when not initialized, 0 when false, 1 when true. */
- int rw_unconditionally;
+ tree rw_predicate;
+ tree w_predicate;
+ tree base_w_predicate;
};
#define IFC_DR(DR) ((struct ifc_dr *) (DR)->aux)
-#define DR_WRITTEN_AT_LEAST_ONCE(DR) (IFC_DR (DR)->written_at_least_once)
+#define DR_BASE_W_UNCONDITIONALLY(DR) (IFC_DR (DR)->written_at_least_once)
#define DR_RW_UNCONDITIONALLY(DR) (IFC_DR (DR)->rw_unconditionally)
-
-/* Returns true when the memory references of STMT are read or written
- unconditionally. In other words, this function returns true when
- for every data reference A in STMT there exist other accesses to
- a data reference with the same base with predicates that add up (OR-up) to
- the true predicate: this ensures that the data reference A is touched
- (read or written) on every iteration of the if-converted loop. */
-
-static bool
-memrefs_read_or_written_unconditionally (gimple stmt,
- vec<data_reference_p> drs)
-{
- int i, j;
- data_reference_p a, b;
- tree ca = bb_predicate (gimple_bb (stmt));
-
- for (i = 0; drs.iterate (i, &a); i++)
- if (DR_STMT (a) == stmt)
- {
- bool found = false;
- int x = DR_RW_UNCONDITIONALLY (a);
-
- if (x == 0)
- return false;
-
- if (x == 1)
- continue;
-
- for (j = 0; drs.iterate (j, &b); j++)
- {
- tree ref_base_a = DR_REF (a);
- tree ref_base_b = DR_REF (b);
-
- if (DR_STMT (b) == stmt)
- continue;
-
- while (TREE_CODE (ref_base_a) == COMPONENT_REF
- || TREE_CODE (ref_base_a) == IMAGPART_EXPR
- || TREE_CODE (ref_base_a) == REALPART_EXPR)
- ref_base_a = TREE_OPERAND (ref_base_a, 0);
-
- while (TREE_CODE (ref_base_b) == COMPONENT_REF
- || TREE_CODE (ref_base_b) == IMAGPART_EXPR
- || TREE_CODE (ref_base_b) == REALPART_EXPR)
- ref_base_b = TREE_OPERAND (ref_base_b, 0);
-
- if (operand_equal_p (ref_base_a, ref_base_b, 0))
- {
- tree cb = bb_predicate (gimple_bb (DR_STMT (b)));
-
- if (DR_RW_UNCONDITIONALLY (b) == 1
- || is_true_predicate (cb)
- || is_true_predicate (ca
- = fold_or_predicates (EXPR_LOCATION (cb), ca, cb)))
- {
- DR_RW_UNCONDITIONALLY (a) = 1;
- DR_RW_UNCONDITIONALLY (b) = 1;
- found = true;
- break;
- }
- }
- }
-
- if (!found)
- {
- DR_RW_UNCONDITIONALLY (a) = 0;
- return false;
- }
- }
-
- return true;
-}
-
-/* Returns true when the memory references of STMT are unconditionally
- written. In other words, this function returns true when for every
- data reference A written in STMT, there exist other writes to the
- same data reference with predicates that add up (OR-up) to the true
- predicate: this ensures that the data reference A is written on
- every iteration of the if-converted loop. */
-
-static bool
-write_memrefs_written_at_least_once (gimple stmt,
- vec<data_reference_p> drs)
+#define DR_W_UNCONDITIONALLY(DR) (IFC_DR (DR)->w_unconditionally)
+
+/* Iterates over DR's and stores refs, DR and base refs, DR pairs in
+ HASH tables. While storing them in HASH table, it checks if the
+ reference is unconditionally read or written and stores that as flag
+ information. For base reference it checks if it is written at least once
+ unconditionally and stores it as flag information along with DR.
+ In other words for every data reference A in STMT there exist other
+ accesses to a data reference with the same base with predicates that
+ add up (OR-up) to the true predicate: this ensures that the data
+ reference A is touched (read or written) on every iteration of the
+ if-converted loop. */
+static void
+hash_memrefs_baserefs_and_store_DRs_read_written_info (data_reference_p a)
{
- int i, j;
- data_reference_p a, b;
- tree ca = bb_predicate (gimple_bb (stmt));
-
- for (i = 0; drs.iterate (i, &a); i++)
- if (DR_STMT (a) == stmt
- && DR_IS_WRITE (a))
- {
- bool found = false;
- int x = DR_WRITTEN_AT_LEAST_ONCE (a);
-
- if (x == 0)
- return false;
-
- if (x == 1)
- continue;
-
- for (j = 0; drs.iterate (j, &b); j++)
- if (DR_STMT (b) != stmt
- && DR_IS_WRITE (b)
- && same_data_refs_base_objects (a, b))
- {
- tree cb = bb_predicate (gimple_bb (DR_STMT (b)));
-
- if (DR_WRITTEN_AT_LEAST_ONCE (b) == 1
- || is_true_predicate (cb)
- || is_true_predicate (ca = fold_or_predicates (EXPR_LOCATION (cb),
- ca, cb)))
- {
- DR_WRITTEN_AT_LEAST_ONCE (a) = 1;
- DR_WRITTEN_AT_LEAST_ONCE (b) = 1;
- found = true;
- break;
- }
- }
-
- if (!found)
- {
- DR_WRITTEN_AT_LEAST_ONCE (a) = 0;
- return false;
- }
- }
- return true;
+ data_reference_p *master_dr, *base_master_dr;
+ tree ref = DR_REF (a);
+ tree base_ref = DR_BASE_OBJECT (a);
+ tree ca = bb_predicate (gimple_bb (DR_STMT (a)));
+ bool exist1, exist2;
+
+ while (TREE_CODE (ref) == COMPONENT_REF
+ || TREE_CODE (ref) == IMAGPART_EXPR
+ || TREE_CODE (ref) == REALPART_EXPR)
+ ref = TREE_OPERAND (ref, 0);
+
+ master_dr = &ref_DR_map->get_or_insert (ref, &exist1);
+ if (!exist1)
+ *master_dr = a;
+
+ if (DR_IS_WRITE (a))
+ {
+ IFC_DR (*master_dr)->w_predicate
+ = fold_or_predicates (UNKNOWN_LOCATION, ca,
+ IFC_DR (*master_dr)->w_predicate);
+ if (is_true_predicate (IFC_DR (*master_dr)->w_predicate))
+ DR_W_UNCONDITIONALLY (*master_dr) = true;
+ }
+ IFC_DR (*master_dr)->rw_predicate
+ = fold_or_predicates (UNKNOWN_LOCATION, ca,
+ IFC_DR (*master_dr)->rw_predicate);
+ if (is_true_predicate (IFC_DR (*master_dr)->rw_predicate))
+ DR_RW_UNCONDITIONALLY (*master_dr) = true;
+
+ if (DR_IS_WRITE (a))
+ {
+ base_master_dr = &baseref_DR_map->get_or_insert (base_ref, &exist2);
+ if (!exist2)
+ *base_master_dr = a;
+ IFC_DR (*base_master_dr)->base_w_predicate
+ = fold_or_predicates (UNKNOWN_LOCATION, ca,
+ IFC_DR (*base_master_dr)->base_w_predicate);
+ if (is_true_predicate (IFC_DR (*base_master_dr)->base_w_predicate))
+ DR_BASE_W_UNCONDITIONALLY (*base_master_dr) = true;
+ }
}
/* Return true when the memory references of STMT won't trap in the
iteration. To check that the memory accesses are correctly formed
and that we are allowed to read and write in these locations, we
check that the memory accesses to be if-converted occur at every
- iteration unconditionally. */
-
+ iteration unconditionally.
+
+ Returns true for the memory reference in STMT, same memory reference
+ is read or written unconditionally at least once and the base memory
+ reference is written unconditionally once. This is to check the reference
+ will not fault on write. Also returns true if the memory reference is
+ unconditionally read once then we are conditionally writing to memory
+ which is defined as read and write and is bound to the definition
+ we are seeing. */
static bool
-ifcvt_memrefs_wont_trap (gimple stmt, vec<data_reference_p> refs)
+ifcvt_memrefs_wont_trap (gimple *stmt, vec<data_reference_p> drs)
{
- return write_memrefs_written_at_least_once (stmt, refs)
- && memrefs_read_or_written_unconditionally (stmt, refs);
-}
+ data_reference_p *master_dr, *base_master_dr;
+ data_reference_p a = drs[gimple_uid (stmt) - 1];
-/* Wrapper around gimple_could_trap_p refined for the needs of the
- if-conversion. Try to prove that the memory accesses of STMT could
- not trap in the innermost loop containing STMT. */
+ tree ref_base_a = DR_REF (a);
+ tree base = DR_BASE_OBJECT (a);
-static bool
-ifcvt_could_trap_p (gimple stmt, vec<data_reference_p> refs)
-{
- if (gimple_vuse (stmt)
- && !gimple_could_trap_p_1 (stmt, false, false)
- && ifcvt_memrefs_wont_trap (stmt, refs))
- return false;
+ gcc_assert (DR_STMT (a) == stmt);
+
+ while (TREE_CODE (ref_base_a) == COMPONENT_REF
+ || TREE_CODE (ref_base_a) == IMAGPART_EXPR
+ || TREE_CODE (ref_base_a) == REALPART_EXPR)
+ ref_base_a = TREE_OPERAND (ref_base_a, 0);
- return gimple_could_trap_p (stmt);
+ master_dr = ref_DR_map->get (ref_base_a);
+ base_master_dr = baseref_DR_map->get (base);
+
+ gcc_assert (master_dr != NULL);
+
+ /* If a is unconditionally written to it doesn't trap. */
+ if (DR_W_UNCONDITIONALLY (*master_dr))
+ return true;
+
+ /* If a is unconditionally accessed then ... */
+ if (DR_RW_UNCONDITIONALLY (*master_dr))
+ {
+ /* an unconditional read won't trap. */
+ if (DR_IS_READ (a))
+ return true;
+
+ /* an unconditional write won't trap if the base is written
+ to unconditionally. */
+ if (base_master_dr
+ && DR_BASE_W_UNCONDITIONALLY (*base_master_dr))
+ return PARAM_VALUE (PARAM_ALLOW_STORE_DATA_RACES);
+ else
+ {
+ /* or the base is known to be not readonly. */
+ tree base_tree = get_base_address (DR_REF (a));
+ if (DECL_P (base_tree)
+ && decl_binds_to_current_def_p (base_tree)
+ && ! TREE_READONLY (base_tree))
+ return PARAM_VALUE (PARAM_ALLOW_STORE_DATA_RACES);
+ }
+ }
+ return false;
}
/* Return true if STMT could be converted into a masked load or store
(conditional load or store based on a mask computed from bb predicate). */
static bool
-ifcvt_can_use_mask_load_store (gimple stmt)
+ifcvt_can_use_mask_load_store (gimple *stmt)
{
tree lhs, ref;
machine_mode mode;
|| VECTOR_MODE_P (mode))
return false;
- if (can_vec_mask_load_store_p (mode, is_load))
+ if (can_vec_mask_load_store_p (mode, VOIDmode, is_load))
return true;
return false;
- LHS is not var decl. */
static bool
-if_convertible_gimple_assign_stmt_p (gimple stmt,
+if_convertible_gimple_assign_stmt_p (gimple *stmt,
vec<data_reference_p> refs,
bool *any_mask_load_store)
{
tree lhs = gimple_assign_lhs (stmt);
- basic_block bb;
if (dump_file && (dump_flags & TDF_DETAILS))
{
we can perform loop versioning. */
gimple_set_plf (stmt, GF_PLF_2, false);
- if (flag_tree_loop_if_convert_stores)
- {
- if (ifcvt_could_trap_p (stmt, refs))
- {
- if (ifcvt_can_use_mask_load_store (stmt))
- {
- gimple_set_plf (stmt, GF_PLF_2, true);
- *any_mask_load_store = true;
- return true;
- }
- if (dump_file && (dump_flags & TDF_DETAILS))
- fprintf (dump_file, "tree could trap...\n");
- return false;
- }
- return true;
- }
-
- if (ifcvt_could_trap_p (stmt, refs))
+ if ((! gimple_vuse (stmt)
+ || gimple_could_trap_p_1 (stmt, false, false)
+ || ! ifcvt_memrefs_wont_trap (stmt, refs))
+ && gimple_could_trap_p (stmt))
{
if (ifcvt_can_use_mask_load_store (stmt))
{
return false;
}
- bb = gimple_bb (stmt);
-
- if (TREE_CODE (lhs) != SSA_NAME
- && bb != bb->loop_father->header
- && !bb_with_exit_edge_p (bb->loop_father, bb))
- {
- if (ifcvt_can_use_mask_load_store (stmt))
- {
- gimple_set_plf (stmt, GF_PLF_2, true);
- *any_mask_load_store = true;
- return true;
- }
- if (dump_file && (dump_flags & TDF_DETAILS))
- {
- fprintf (dump_file, "LHS is not var\n");
- print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
- }
- return false;
- }
+ /* When if-converting stores force versioning, likewise if we
+ ended up generating store data races. */
+ if (gimple_vdef (stmt))
+ *any_mask_load_store = true;
return true;
}
- it is builtins call. */
static bool
-if_convertible_stmt_p (gimple stmt, vec<data_reference_p> refs,
+if_convertible_stmt_p (gimple *stmt, vec<data_reference_p> refs,
bool *any_mask_load_store)
{
switch (gimple_code (stmt))
{
basic_block bb = ifc_bbs[i];
tree cond;
- gimple stmt;
+ gimple *stmt;
/* The loop latch and loop exit block are always executed and
have no extra conditions to be processed: skip them. */
static bool
if_convertible_loop_p_1 (struct loop *loop,
- vec<loop_p> *loop_nest,
vec<data_reference_p> *refs,
- vec<ddr_p> *ddrs, bool *any_mask_load_store)
+ bool *any_mask_load_store)
{
- bool res;
unsigned int i;
basic_block exit_bb = NULL;
- /* Don't if-convert the loop when the data dependences cannot be
- computed: the loop won't be vectorized in that case. */
- res = compute_data_dependences_for_loop (loop, true, loop_nest, refs, ddrs);
- if (!res)
+ if (find_data_references_in_loop (loop, refs) == chrec_dont_know)
return false;
calculate_dominance_info (CDI_DOMINATORS);
case GIMPLE_CALL:
case GIMPLE_DEBUG:
case GIMPLE_COND:
+ gimple_set_uid (gsi_stmt (gsi), 0);
break;
default:
return false;
data_reference_p dr;
+ ref_DR_map = new hash_map<tree_operand_hash, data_reference_p>;
+ baseref_DR_map = new hash_map<tree_operand_hash, data_reference_p>;
+
+ predicate_bbs (loop);
+
for (i = 0; refs->iterate (i, &dr); i++)
{
dr->aux = XNEW (struct ifc_dr);
- DR_WRITTEN_AT_LEAST_ONCE (dr) = -1;
- DR_RW_UNCONDITIONALLY (dr) = -1;
+ DR_BASE_W_UNCONDITIONALLY (dr) = false;
+ DR_RW_UNCONDITIONALLY (dr) = false;
+ DR_W_UNCONDITIONALLY (dr) = false;
+ IFC_DR (dr)->rw_predicate = boolean_false_node;
+ IFC_DR (dr)->w_predicate = boolean_false_node;
+ IFC_DR (dr)->base_w_predicate = boolean_false_node;
+ if (gimple_uid (DR_STMT (dr)) == 0)
+ gimple_set_uid (DR_STMT (dr), i + 1);
+ hash_memrefs_baserefs_and_store_DRs_read_written_info (dr);
}
- predicate_bbs (loop);
for (i = 0; i < loop->num_nodes; i++)
{
edge_iterator ei;
bool res = false;
vec<data_reference_p> refs;
- vec<ddr_p> ddrs;
/* Handle only innermost loop. */
if (!loop || loop->inner)
return false;
refs.create (5);
- ddrs.create (25);
- auto_vec<loop_p, 3> loop_nest;
- res = if_convertible_loop_p_1 (loop, &loop_nest, &refs, &ddrs,
- any_mask_load_store);
+ res = if_convertible_loop_p_1 (loop, &refs, any_mask_load_store);
data_reference_p dr;
unsigned int i;
free (dr->aux);
free_data_refs (refs);
- free_dependence_relations (ddrs);
+
+ delete ref_DR_map;
+ ref_DR_map = NULL;
+
+ delete baseref_DR_map;
+ baseref_DR_map = NULL;
+
return res;
}
EXTENDED is true if PHI has > 2 arguments. */
static bool
-is_cond_scalar_reduction (gimple phi, gimple *reduc, tree arg_0, tree arg_1,
+is_cond_scalar_reduction (gimple *phi, gimple **reduc, tree arg_0, tree arg_1,
tree *op0, tree *op1, bool extended)
{
tree lhs, r_op1, r_op2;
- gimple stmt;
- gimple header_phi = NULL;
+ gimple *stmt;
+ gimple *header_phi = NULL;
enum tree_code reduction_op;
basic_block bb = gimple_bb (phi);
struct loop *loop = bb->loop_father;
/* Check that R_OP1 is used in reduction stmt or in PHI only. */
FOR_EACH_IMM_USE_FAST (use_p, imm_iter, r_op1)
{
- gimple use_stmt = USE_STMT (use_p);
+ gimple *use_stmt = USE_STMT (use_p);
if (is_gimple_debug (use_stmt))
continue;
if (use_stmt == stmt)
Returns rhs of resulting PHI assignment. */
static tree
-convert_scalar_cond_reduction (gimple reduc, gimple_stmt_iterator *gsi,
+convert_scalar_cond_reduction (gimple *reduc, gimple_stmt_iterator *gsi,
tree cond, tree op0, tree op1, bool swap)
{
gimple_stmt_iterator stmt_it;
- gimple new_assign;
+ gimple *new_assign;
tree rhs;
tree rhs1 = gimple_assign_rhs1 (reduc);
tree tmp = make_temp_ssa_name (TREE_TYPE (rhs1), NULL, "_ifc_");
static void
predicate_scalar_phi (gphi *phi, gimple_stmt_iterator *gsi)
{
- gimple new_stmt = NULL, reduc;
+ gimple *new_stmt = NULL, *reduc;
tree rhs, res, arg0, arg1, op0, op1, scev;
tree cond;
unsigned int index0;
stmts = bb_predicate_gimplified_stmts (bb);
if (stmts)
{
- if (flag_tree_loop_if_convert_stores
- || any_mask_load_store)
+ if (any_mask_load_store)
{
/* Insert the predicate of the BB just after the label,
as the if-conversion of memory writes will use this
basic_block bb = ifc_bbs[i];
tree cond = bb_predicate (bb);
bool swap;
- gimple stmt;
+ gimple *stmt;
int index;
if (is_true_predicate (cond))
{
tree lhs = gimple_assign_lhs (stmt);
tree rhs = gimple_assign_rhs1 (stmt);
- tree ref, addr, ptr, masktype, mask_op0, mask_op1, mask;
- gimple new_stmt;
+ tree ref, addr, ptr, mask;
+ gimple *new_stmt;
+ gimple_seq stmts = NULL;
int bitsize = GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (lhs)));
ref = TREE_CODE (lhs) == SSA_NAME ? rhs : lhs;
mark_addressable (ref);
mask = vect_masks[index];
else
{
- masktype = build_nonstandard_integer_type (bitsize, 1);
- mask_op0 = build_int_cst (masktype, swap ? 0 : -1);
- mask_op1 = build_int_cst (masktype, swap ? -1 : 0);
- cond = force_gimple_operand_gsi_1 (&gsi, unshare_expr (cond),
- is_gimple_condexpr,
- NULL_TREE,
- true, GSI_SAME_STMT);
- mask = fold_build_cond_expr (masktype, unshare_expr (cond),
- mask_op0, mask_op1);
- mask = ifc_temp_var (masktype, mask, &gsi);
+ if (COMPARISON_CLASS_P (cond))
+ mask = gimple_build (&stmts, TREE_CODE (cond),
+ boolean_type_node,
+ TREE_OPERAND (cond, 0),
+ TREE_OPERAND (cond, 1));
+ else
+ {
+ gcc_assert (TREE_CODE (cond) == SSA_NAME);
+ mask = cond;
+ }
+
+ if (swap)
+ {
+ tree true_val
+ = constant_boolean_node (true, TREE_TYPE (mask));
+ mask = gimple_build (&stmts, BIT_XOR_EXPR,
+ TREE_TYPE (mask), mask, true_val);
+ }
+ gsi_insert_seq_before (&gsi, stmts, GSI_SAME_STMT);
+
+ mask = ifc_temp_var (TREE_TYPE (mask), mask, &gsi);
/* Save mask and its size for further use. */
vect_sizes.safe_push (bitsize);
vect_masks.safe_push (mask);
}
- ptr = build_int_cst (reference_alias_ptr_type (ref), 0);
+ ptr = build_int_cst (reference_alias_ptr_type (ref),
+ get_object_alignment (ref));
/* Copy points-to info if possible. */
if (TREE_CODE (addr) == SSA_NAME && !SSA_NAME_PTR_INFO (addr))
copy_ref_info (build2 (MEM_REF, TREE_TYPE (ref), addr, ptr),
insert_gimplified_predicates (loop, any_mask_load_store);
predicate_all_scalar_phis (loop);
- if (flag_tree_loop_if_convert_stores || any_mask_load_store)
+ if (any_mask_load_store)
predicate_mem_writes (loop);
/* Merge basic blocks: first remove all the edges in the loop,
could have derived it from. */
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
{
- gimple stmt = gsi_stmt (gsi);
+ gimple *stmt = gsi_stmt (gsi);
gimple_set_bb (stmt, merge_target_bb);
if (predicated[i])
{
basic_block cond_bb;
tree cond = make_ssa_name (boolean_type_node);
struct loop *new_loop;
- gimple g;
+ gimple *g;
gimple_stmt_iterator gsi;
g = gimple_build_call_internal (IFN_LOOP_VECTORIZED, 2,
basic_block bb;
unsigned int num = loop->num_nodes;
unsigned int i;
- gimple stmt;
+ gimple *stmt;
edge e;
edge_iterator ei;
use statement with newly created lhs. */
static void
-ifcvt_split_def_stmt (gimple def_stmt, gimple use_stmt)
+ifcvt_split_def_stmt (gimple *def_stmt, gimple *use_stmt)
{
tree var;
tree lhs;
- gimple copy_stmt;
+ gimple *copy_stmt;
gimple_stmt_iterator gsi;
use_operand_p use_p;
imm_use_iterator imm_iter;
not have single use. */
static void
-ifcvt_walk_pattern_tree (tree var, vec<gimple> *defuse_list,
- gimple use_stmt)
+ifcvt_walk_pattern_tree (tree var, vec<gimple *> *defuse_list,
+ gimple *use_stmt)
{
tree rhs1, rhs2;
enum tree_code code;
- gimple def_stmt;
+ gimple *def_stmt;
def_stmt = SSA_NAME_DEF_STMT (var);
if (gimple_code (def_stmt) != GIMPLE_ASSIGN)
by vectorizer. */
static bool
-stmt_is_root_of_bool_pattern (gimple stmt)
+stmt_is_root_of_bool_pattern (gimple *stmt)
{
enum tree_code code;
tree lhs, rhs;
ifcvt_repair_bool_pattern (basic_block bb)
{
tree rhs;
- gimple stmt;
+ gimple *stmt;
gimple_stmt_iterator gsi;
- vec<gimple> defuse_list = vNULL;
- vec<gimple> pattern_roots = vNULL;
+ vec<gimple *> defuse_list = vNULL;
+ vec<gimple *> pattern_roots = vNULL;
bool repeat = true;
int niter = 0;
unsigned int ix;
while (defuse_list.length () > 0)
{
repeat = true;
- gimple def_stmt, use_stmt;
+ gimple *def_stmt, *use_stmt;
use_stmt = defuse_list.pop ();
def_stmt = defuse_list.pop ();
ifcvt_split_def_stmt (def_stmt, use_stmt);
static void
ifcvt_local_dce (basic_block bb)
{
- gimple stmt;
- gimple stmt1;
- gimple phi;
+ gimple *stmt;
+ gimple *stmt1;
+ gimple *phi;
gimple_stmt_iterator gsi;
- vec<gimple> worklist;
+ auto_vec<gimple *> worklist;
enum gimple_code code;
use_operand_p use_p;
imm_use_iterator imm_iter;
}
todo |= TODO_cleanup_cfg;
- if (flag_tree_loop_if_convert_stores || any_mask_load_store)
+ if (any_mask_load_store)
{
mark_virtual_operands_for_renaming (cfun);
todo |= TODO_update_ssa_only_virtuals;
/* Tree if-conversion pass management. */
-static const pass_data pass_data_if_conversion =
+namespace {
+
+const pass_data pass_data_if_conversion =
{
GIMPLE_PASS, /* type */
"ifcvt", /* name */
0, /* todo_flags_finish */
};
-class pass_if_conversion GCC_FINAL : public gimple_opt_pass
+class pass_if_conversion : public gimple_opt_pass
{
public:
pass_if_conversion (gcc::context *ctxt)
&& !loop->dont_vectorize))
todo |= tree_if_conversion (loop);
-#ifdef ENABLE_CHECKING
- {
- basic_block bb;
- FOR_EACH_BB_FN (bb, fun)
- gcc_assert (!bb->aux);
- }
-#endif
+ if (flag_checking)
+ {
+ basic_block bb;
+ FOR_EACH_BB_FN (bb, fun)
+ gcc_assert (!bb->aux);
+ }
return todo;
}
+} // anon namespace
+
gimple_opt_pass *
make_pass_if_conversion (gcc::context *ctxt)
{