!gsi_end_p (si); gsi_next (&si))
{
stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (gsi_stmt (si));
- STMT_SLP_TYPE (stmt_info) = loop_vect;
+ STMT_SLP_TYPE (stmt_info) = not_vect;
if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def
|| STMT_VINFO_DEF_TYPE (stmt_info) == vect_double_reduction_def)
{
if (is_gimple_debug (gsi_stmt (si)))
continue;
stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (gsi_stmt (si));
- STMT_SLP_TYPE (stmt_info) = loop_vect;
+ STMT_SLP_TYPE (stmt_info) = not_vect;
if (STMT_VINFO_IN_PATTERN_P (stmt_info))
{
            stmt_vec_info pattern_stmt_info
              = STMT_VINFO_RELATED_STMT (stmt_info);
STMT_VINFO_IN_PATTERN_P (stmt_info) = false;
gimple *pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info);
- STMT_SLP_TYPE (pattern_stmt_info) = loop_vect;
+ STMT_SLP_TYPE (pattern_stmt_info) = not_vect;
for (gimple_stmt_iterator pi = gsi_start (pattern_def_seq);
!gsi_end_p (pi); gsi_next (&pi))
STMT_SLP_TYPE (loop_vinfo->lookup_stmt (gsi_stmt (pi)))
- = loop_vect;
+ = not_vect;
}
}
}
else
STMT_VINFO_DEF_TYPE (res) = vect_internal_def;
- STMT_SLP_TYPE (res) = loop_vect;
+ STMT_SLP_TYPE (res) = not_vect;
/* This is really "uninitialized" until vect_compute_data_ref_alignment. */
res->dr_aux.misalignment = DR_MISALIGNMENT_UNINITIALIZED;
vect_used_in_scope
};
-/* The type of vectorization that can be applied to the stmt: regular loop-based
- vectorization; pure SLP - the stmt is a part of SLP instances and does not
- have uses outside SLP instances; or hybrid SLP and loop-based - the stmt is
- a part of SLP instance and also must be loop-based vectorized, since it has
- uses outside SLP sequences.
-
- In the loop context the meanings of pure and hybrid SLP are slightly
- different. By saying that pure SLP is applied to the loop, we mean that we
- exploit only intra-iteration parallelism in the loop; i.e., the loop can be
- vectorized without doing any conceptual unrolling, cause we don't pack
- together stmts from different iterations, only within a single iteration.
- Loop hybrid SLP means that we exploit both intra-iteration and
- inter-iteration parallelism (e.g., number of elements in the vector is 4
- and the slp-group-size is 2, in which case we don't have enough parallelism
- within an iteration, so we obtain the rest of the parallelism from subsequent
- iterations by unrolling the loop by 2). */
+/* The type of vectorization: pure_slp means the stmt is covered by the
+   SLP graph, not_vect means it is not.  This is mostly used by BB
+   vectorization.  */
enum slp_vect_type {
- loop_vect = 0,
+ not_vect = 0,
pure_slp,
hybrid
};
#define STMT_VINFO_RELEVANT_P(S) ((S)->relevant != vect_unused_in_scope)
-#define HYBRID_SLP_STMT(S) ((S)->slp_type == hybrid)
#define PURE_SLP_STMT(S) ((S)->slp_type == pure_slp)
#define STMT_SLP_TYPE(S) (S)->slp_type
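For reference, the loop-context distinction drawn in the comment removed
above can be illustrated with two small C loops.  This is only a sketch of
the old terminology, not part of the patch; the assumed vector width of
four floats matches the example numbers in the removed comment.

/* Illustration only, assuming 4-lane float vectors.  */

/* "Pure" loop SLP in the old sense: the SLP group consists of four
   interleaved stores, so a single iteration already fills a vector and
   no conceptual unrolling is needed (intra-iteration parallelism only).  */
void
pure_slp_example (float *restrict a, float *restrict b, int n)
{
  for (int i = 0; i < n; i++)
    {
      a[4 * i + 0] = b[4 * i + 0] + 1.0f;
      a[4 * i + 1] = b[4 * i + 1] + 2.0f;
      a[4 * i + 2] = b[4 * i + 2] + 3.0f;
      a[4 * i + 3] = b[4 * i + 3] + 4.0f;
    }
}

/* "Hybrid" loop SLP in the old sense: the SLP group has only two stores,
   so the loop is conceptually unrolled by 2 and two iterations are packed
   into one 4-lane vector (intra- plus inter-iteration parallelism).  */
void
hybrid_slp_example (float *restrict a, float *restrict b, int n)
{
  for (int i = 0; i < n; i++)
    {
      a[2 * i + 0] = b[2 * i + 0] + 1.0f;
      a[2 * i + 1] = b[2 * i + 1] + 2.0f;
    }
}

In the new comment's terms the field only records whether a stmt is covered
by the SLP graph (pure_slp) or not (not_vect), which is also why the
HYBRID_SLP_STMT macro is removed above.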