+2012-10-24 Sharad Singhai <singhai@google.com>
+
+ * dumpfile.c (dump_enabled_p): Make it inline and move the definition
+ to dumpfile.h.
+ (dump_kind_p): Delete. Functionality replaced by dump_enabled_p.
+ (alt_dump_file): Make extern.
+ * dumpfile.h (dump_enabled_p): Move inline definition here.
+ (dump_kind_p): Delete declaration.
+ Add extern declaration of alt_dump_file.
+ * toplev.c: Move dump_file and dump_file_name to dumpfile.c.
+ * tree-vect-loop-manip.c: Replace all uses of dump_kind_p with
+ dump_enabled_p.
+ * tree-vectorizer.c: Likewise.
+ * tree-vect-loop.c: Likewise.
+ * tree-vect-data-refs.c: Likewise.
+ * tree-vect-patterns.c: Likewise.
+ * tree-vect-stmts.c: Likewise.
+ * tree-vect-slp.c: Likewise.
+
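For illustration only (not itself part of the patch): the tree-vect-* hunks below all apply the same mechanical rewrite, replacing the per-kind dump_kind_p guard with the single dump_enabled_p () check that a dump stream is open, while the MSG_* kind is still passed to the dump_printf* call. A minimal before/after sketch:

    /* Before: guard keyed on the message kind.  */
    if (dump_kind_p (MSG_NOTE))
      dump_printf_loc (MSG_NOTE, vect_location,
                       "=== vect_analyze_data_refs ===");

    /* After: one cheap check that either dump_file or alt_dump_file is open.  */
    if (dump_enabled_p ())
      dump_printf_loc (MSG_NOTE, vect_location,
                       "=== vect_analyze_data_refs ===");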
2012-10-24 Richard Sandiford <rdsandiford@googlemail.com>
* expmed.c (lowpart_bit_field_p): Add missing == 0 check.
static int pflags; /* current dump_flags */
static int alt_flags; /* current opt_info flags */
-static FILE *alt_dump_file = NULL;
static void dump_loc (int, FILE *, source_location);
static int dump_phase_enabled_p (int);
static FILE *dump_open_alternate_stream (struct dump_file_info *);
+/* These are currently used for communicating between passes.
+ However, instead of accessing them directly, the passes can use
+ dump_printf () for dumps. */
+FILE *dump_file = NULL;
+FILE *alt_dump_file = NULL;
+const char *dump_file_name;
+
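/* Editor's note (illustrative, not added by this patch): with dump_file,
   alt_dump_file and dump_file_name owned by dumpfile.c, a pass would normally
   go through the dump_printf* entry points rather than writing to the streams
   directly, e.g. (npeel being a hypothetical local):

     if (dump_enabled_p ())
       dump_printf (MSG_NOTE, "peeling by %d", npeel);

   instead of fprintf (dump_file, ...).  */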
/* Table of tree dump switches. This must be consistent with the
TREE_DUMP_INDEX enumeration in dumpfile.h. */
static struct dump_file_info dump_files[TDI_end] =
}
}
-/* Return true if any of the dumps are enabled, false otherwise. */
-
-inline bool
-dump_enabled_p (void)
-{
- return (dump_file || alt_dump_file);
-}
-
/* Returns nonzero if tree dump PHASE has been initialized. */
int
return opt_info_enable_all ((TDF_TREE | TDF_RTL | TDF_IPA), flags, filename);
}
-/* Return true if any dumps are enabled for the given MSG_TYPE, false
- otherwise. */
-
-bool
-dump_kind_p (int msg_type)
-{
- return (dump_file && (msg_type & pflags))
- || (alt_dump_file && (msg_type & alt_flags));
-}
-
/* Print basic block on the dump streams. */
void
int num; /* dump file number */
};
-
/* In dumpfile.c */
extern char *get_dump_file_name (int);
extern int dump_initialized_p (int);
extern int dump_switch_p (const char *);
extern int opt_info_switch_p (const char *);
extern const char *dump_flag_name (int);
-extern bool dump_kind_p (int);
-extern inline bool dump_enabled_p (void);
extern void dump_printf (int, const char *, ...) ATTRIBUTE_PRINTF_2;
extern void dump_printf_loc (int, source_location,
const char *, ...) ATTRIBUTE_PRINTF_3;
/* Global variables used to communicate with passes. */
extern FILE *dump_file;
+extern FILE *alt_dump_file;
extern int dump_flags;
extern const char *dump_file_name;
/* Return the dump_file_info for the given phase. */
extern struct dump_file_info *get_dump_file_info (int);
+/* Return true if any of the dumps are enabled, false otherwise. */
+
+static inline bool
+dump_enabled_p (void)
+{
+ return (dump_file || alt_dump_file);
+}
+
#endif /* GCC_DUMPFILE_H */
FILE *asm_out_file;
FILE *aux_info_file;
FILE *stack_usage_file = NULL;
-FILE *dump_file = NULL;
-const char *dump_file_name;
/* The current working directory of a translation. It's generally the
directory from which compilation was initiated, but a preprocessed
if (array_mode == BLKmode)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"no array mode for %s[" HOST_WIDE_INT_PRINT_DEC "]",
GET_MODE_NAME (mode), count);
if (convert_optab_handler (optab, array_mode, mode) == CODE_FOR_nothing)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"cannot use %s<%s><%s>", name,
GET_MODE_NAME (array_mode), GET_MODE_NAME (mode));
return false;
}
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"can use %s<%s><%s>", name, GET_MODE_NAME (array_mode),
GET_MODE_NAME (mode));
if (diff_mod_size == 0)
{
vect_update_interleaving_chain (drb, dra);
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"Detected interleaving ");
if (diff_mod_size == 0)
{
vect_update_interleaving_chain (dra, drb);
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"Detected interleaving ");
if ((unsigned) PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS) == 0)
return false;
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"mark for run-time aliasing test between ");
if (optimize_loop_nest_for_size_p (loop))
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"versioning not supported when optimizing for size.");
return false;
/* FORNOW: We don't support versioning with outer-loop vectorization. */
if (loop->inner)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"versioning not yet supported for outer-loops.");
return false;
if (TREE_CODE (DR_STEP (DDR_A (ddr))) != INTEGER_CST
|| TREE_CODE (DR_STEP (DDR_B (ddr))) != INTEGER_CST)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"versioning not yet supported for non-constant "
"step");
if (loop_vinfo)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"versioning for alias required: "
if (DR_IS_READ (dra) && DR_IS_READ (drb))
return false;
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"can't determine dependence between ");
if (dra != drb && vect_check_interleaving (dra, drb))
return false;
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"determined dependence between ");
/* Loop-based vectorization and known data dependence. */
if (DDR_NUM_DIST_VECTS (ddr) == 0)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"versioning for alias required: "
{
int dist = dist_v[loop_depth];
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"dependence distance = %d.", dist);
if (dist == 0)
{
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"dependence distance == 0 between ");
/* If DDR_REVERSED_P the order of the data-refs in DDR was
reversed (to make distance vector positive), and the actual
distance is negative. */
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"dependence distance negative.");
continue;
/* The dependence distance requires reduction of the maximal
vectorization factor. */
*max_vf = abs (dist);
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"adjusting maximal vectorization factor to %i",
*max_vf);
{
/* Dependence distance does not create dependence, as far as
vectorization is concerned, in this case. */
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"dependence distance >= VF.");
continue;
}
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized, possible dependence "
VEC (ddr_p, heap) *ddrs = NULL;
struct data_dependence_relation *ddr;
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"=== vect_analyze_dependences ===");
if (loop_vinfo)
tree misalign;
tree aligned_to, alignment;
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"vect_compute_data_ref_alignment:");
if (dr_step % GET_MODE_SIZE (TYPE_MODE (vectype)) == 0)
{
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"inner step divides the vector-size.");
misalign = STMT_VINFO_DR_INIT (stmt_info);
}
else
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"inner step doesn't divide the vector-size.");
misalign = NULL_TREE;
if (dr_step % GET_MODE_SIZE (TYPE_MODE (vectype)) != 0)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"SLP: step doesn't divide the vector-size.");
misalign = NULL_TREE;
if ((aligned_to && tree_int_cst_compare (aligned_to, alignment) < 0)
|| !misalign)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Unknown alignment for access: ");
if (!vect_can_force_dr_alignment_p (base, TYPE_ALIGN (vectype))
|| (TREE_STATIC (base) && flag_section_anchors))
{
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"can't force alignment of ref: ");
/* Force the alignment of the decl.
NOTE: This is the only change to the code we make during
the analysis phase, before deciding to vectorize the loop. */
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location, "force alignment of ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
if (!host_integerp (misalign, 1))
{
/* Negative or overflowed misalignment value. */
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"unexpected misalign value");
return false;
SET_DR_MISALIGNMENT (dr, TREE_INT_CST_LOW (misalign));
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"misalign = %d bytes of ref ", DR_MISALIGNMENT (dr));
return;
}
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "Setting misalignment to -1.");
SET_DR_MISALIGNMENT (dr, -1);
}
supportable_dr_alignment = vect_supportable_dr_alignment (dr, false);
if (!supportable_dr_alignment)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
{
if (DR_IS_READ (dr))
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
}
return false;
}
- if (supportable_dr_alignment != dr_aligned
- && dump_kind_p (MSG_NOTE))
+ if (supportable_dr_alignment != dr_aligned && dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"Vectorizing an unaligned access.");
}
{
HOST_WIDE_INT elmsize =
int_cst_value (TYPE_SIZE_UNIT (TREE_TYPE (vectype)));
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"data size =" HOST_WIDE_INT_PRINT_DEC, elmsize);
}
if (DR_MISALIGNMENT (dr) % elmsize)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"data size does not divide the misalignment.\n");
return false;
{
tree type = TREE_TYPE (DR_REF (dr));
bool is_packed = not_size_aligned (DR_REF (dr));
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Unknown misalignment, is_packed = %d",is_packed);
if (targetm.vectorize.vector_alignment_reachable (type, is_packed))
else
vect_get_store_cost (dr, ncopies, inside_cost, body_cost_vec);
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"vect_get_data_access_cost: inside_cost = %d, "
"outside_cost = %d.", *inside_cost, *outside_cost);
unsigned int nelements, mis, same_align_drs_max = 0;
stmt_vector_for_cost body_cost_vec = NULL;
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"=== vect_enhance_data_refs_alignment ===");
and so we can't generate the new base for the pointer. */
if (STMT_VINFO_STRIDE_LOAD_P (stmt_info))
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"strided load prevents peeling");
do_peeling = false;
{
if (!aligned_access_p (dr))
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"vector alignment may not be reachable");
break;
if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
npeel /= GROUP_SIZE (stmt_info);
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"Try peeling by %d", npeel);
}
else
LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo) = DR_MISALIGNMENT (dr0);
SET_DR_MISALIGNMENT (dr0, 0);
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"Alignment of access forced using peeling.");
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
dr = STMT_VINFO_DATA_REF (stmt_info);
SET_DR_MISALIGNMENT (dr, 0);
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"Alignment of access forced using versioning.");
}
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"Versioning for alignment will be applied.");
{
int dist = dist_v[loop_depth];
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"dependence distance = %d.", dist);
/* Two references with distance zero have the same alignment. */
VEC_safe_push (dr_p, heap, STMT_VINFO_SAME_ALIGN_REFS (stmtinfo_a), drb);
VEC_safe_push (dr_p, heap, STMT_VINFO_SAME_ALIGN_REFS (stmtinfo_b), dra);
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"accesses have the same alignment.");
vect_analyze_data_refs_alignment (loop_vec_info loop_vinfo,
bb_vec_info bb_vinfo)
{
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"=== vect_analyze_data_refs_alignment ===");
if (!vect_compute_data_refs_alignment (loop_vinfo, bb_vinfo))
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: can't calculate alignment "
"for data ref.");
{
GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = stmt;
GROUP_SIZE (vinfo_for_stmt (stmt)) = groupsize;
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"Detected single element interleaving ");
if (loop_vinfo)
{
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"Data access with gaps requires scalar "
"epilogue loop");
if (loop->inner)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Peeling for outer loop is not"
" supported");
return true;
}
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not consecutive access ");
{
if (DR_IS_WRITE (data_ref))
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Two store stmts share the same dr.");
return false;
if (GROUP_READ_WRITE_DEPENDENCE (vinfo_for_stmt (next))
|| GROUP_READ_WRITE_DEPENDENCE (vinfo_for_stmt (prev)))
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"READ_WRITE dependence in interleaving.");
return false;
next_step = DR_STEP (STMT_VINFO_DATA_REF (vinfo_for_stmt (next)));
if (tree_int_cst_compare (step, next_step))
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not consecutive access in interleaving");
return false;
slp_impossible = true;
if (DR_IS_WRITE (data_ref))
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"interleaved store with gaps");
return false;
greater than STEP. */
if (dr_step && dr_step < count_in_bytes + gaps * type_size)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"interleaving size is greater than step for ");
}
else
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"interleaved store with gaps");
return false;
/* Check that STEP is a multiple of type size. */
if (dr_step && (dr_step % type_size) != 0)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"step is not a multiple of type size: step ");
groupsize = count;
GROUP_SIZE (vinfo_for_stmt (stmt)) = groupsize;
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"Detected interleaving of size %d", (int)groupsize);
/* There is a gap in the end of the group. */
if (groupsize - last_accessed_element > 0 && loop_vinfo)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Data access with gaps requires scalar "
"epilogue loop");
if (loop->inner)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Peeling for outer loop is not supported");
return false;
if (loop_vinfo && !step)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"bad data-ref access in loop");
return false;
step = STMT_VINFO_DR_STEP (stmt_info);
if (integer_zerop (step))
{
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"zero step in outer loop.");
if (DR_IS_READ (dr))
if (loop && nested_in_vect_loop_p (loop, stmt))
{
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"grouped access in outer loop.");
return false;
VEC (data_reference_p, heap) *datarefs;
struct data_reference *dr;
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"=== vect_analyze_data_ref_accesses ===");
if (STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr)))
&& !vect_analyze_data_ref_access (dr))
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: complicated access pattern.");
LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo);
unsigned i, j;
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"=== vect_prune_runtime_alias_test_list ===");
if (vect_vfa_range_equal (ddr_i, ddr_j))
{
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"found equal ranges ");
if (VEC_length (ddr_p, ddrs) >
(unsigned) PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS))
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"disable versioning for alias - max number of "
tree scalar_type;
bool res, stop_bb_analysis = false;
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"=== vect_analyze_data_refs ===\n");
if (!res)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: loop contains function calls"
" or data references that cannot be analyzed");
if (!compute_all_dependences (BB_VINFO_DATAREFS (bb_vinfo),
&BB_VINFO_DDRS (bb_vinfo), NULL, true))
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: basic block contains function"
" calls or data references that cannot be"
if (!dr || !DR_REF (dr))
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: unhandled data-ref ");
return false;
if (!gather)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: data ref analysis "
if (TREE_CODE (DR_BASE_ADDRESS (dr)) == INTEGER_CST)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: base addr of dr is a "
"constant");
if (TREE_THIS_VOLATILE (DR_REF (dr)))
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: volatile type ");
if (stmt_can_throw_internal (stmt))
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: statement can throw an "
if (TREE_CODE (DR_REF (dr)) == COMPONENT_REF
&& DECL_BIT_FIELD (TREE_OPERAND (DR_REF (dr), 1)))
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: statement is bitfield "
if (is_gimple_call (stmt))
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: dr in a call ");
tree inner_base = build_fold_indirect_ref
(fold_build_pointer_plus (base, init));
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"analyze in outer-loop: ");
if (pbitpos % BITS_PER_UNIT != 0)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"failed: bit offset alignment.\n");
return false;
if (!simple_iv (loop, loop_containing_stmt (stmt), outer_base,
&base_iv, false))
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"failed: evolution of base is not affine.\n");
return false;
else if (!simple_iv (loop, loop_containing_stmt (stmt), poffset,
&offset_iv, false))
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"evolution of offset is not affine.\n");
return false;
STMT_VINFO_DR_ALIGNED_TO (stmt_info) =
size_int (highest_pow2_factor (offset_iv.base));
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"\touter base_address: ");
if (STMT_VINFO_DATA_REF (stmt_info))
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: more than one data ref "
get_vectype_for_scalar_type (scalar_type);
if (!STMT_VINFO_VECTYPE (stmt_info))
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: no vectype for stmt: ");
{
STMT_VINFO_DATA_REF (stmt_info) = NULL;
free_data_ref (dr);
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: not suitable for gather "
if (bad)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: data dependence conflict"
= vect_check_strided_load (stmt, loop_vinfo, NULL, NULL);
if (!strided_load)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: not suitable for strided "
mark_ptr_info_alignment_unknown (SSA_NAME_PTR_INFO (vec_stmt));
}
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location, "created ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, vec_stmt);
in LOOP. */
base_name = build_fold_indirect_ref (unshare_expr (DR_BASE_ADDRESS (dr)));
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
{
tree data_ref_base = base_name;
dump_printf_loc (MSG_NOTE, vect_location,
/* vect_permute_store_chain requires the group size to be a power of two. */
if (exact_log2 (count) == -1)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"the size of the group of accesses"
" is not a power of 2");
}
}
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf (MSG_MISSED_OPTIMIZATION,
"interleave op not supported by target.");
return false;
/* vect_permute_load_chain requires the group size to be a power of two. */
if (exact_log2 (count) == -1)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"the size of the group of accesses"
" is not a power of 2");
}
}
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"extract even/odd not supported by target");
return false;
free_stmt_vec_info (orig_cond);
loop_loc = find_loop_location (loop);
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
{
if (LOCATION_LOCUS (loop_loc) != UNKNOWN_LOC)
dump_printf (MSG_NOTE, "\nloop at %s:%d: ", LOC_FILE (loop_loc),
/* Analyze phi functions of the loop header. */
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "vect_can_advance_ivs_p:");
for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
{
tree evolution_part;
phi = gsi_stmt (gsi);
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
if (virtual_operand_p (PHI_RESULT (phi)))
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"virtual phi. skip.");
continue;
if (STMT_VINFO_DEF_TYPE (vinfo_for_stmt (phi)) == vect_reduction_def)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"reduc phi. skip.");
continue;
if (!access_fn)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"No Access function.");
return false;
}
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"Access function of PHI: ");
if (evolution_part == NULL_TREE)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf (MSG_MISSED_OPTIMIZATION, "No evolution.");
return false;
}
phi = gsi_stmt (gsi);
phi1 = gsi_stmt (gsi1);
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"vect_update_ivs_after_vectorizer: phi: ");
/* Skip virtual phi's. */
if (virtual_operand_p (PHI_RESULT (phi)))
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"virtual phi. skip.");
continue;
stmt_info = vinfo_for_stmt (phi);
if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"reduc phi. skip.");
continue;
tree cond_expr = NULL_TREE;
gimple_seq cond_expr_stmt_list = NULL;
- if (dump_kind_p (MSG_OPTIMIZED_LOCATIONS))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
"=== vect_do_peeling_for_loop_bound ===");
{
int npeel = LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo);
- if (dump_kind_p (MSG_OPTIMIZED_LOCATIONS))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
"known peeling = %d.", npeel);
if (TREE_CODE (loop_niters) != INTEGER_CST)
iters = fold_build2 (MIN_EXPR, niters_type, iters, loop_niters);
- if (dump_kind_p (MSG_OPTIMIZED_LOCATIONS))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
"niters for prolog loop: ");
VEC (data_reference_p, heap) *datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
struct data_reference *dr;
- if (dump_kind_p (MSG_OPTIMIZED_LOCATIONS))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
"=== vect_update_inits_of_dr ===");
int max_iter;
int bound = 0;
- if (dump_kind_p (MSG_OPTIMIZED_LOCATIONS))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
"=== vect_do_peeling_for_alignment ===");
segment_length_a = vect_vfa_segment_size (dr_a, length_factor);
segment_length_b = vect_vfa_segment_size (dr_b, length_factor);
- if (dump_kind_p (MSG_OPTIMIZED_LOCATIONS))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
"create runtime check for data references ");
*cond_expr = part_cond_expr;
}
- if (dump_kind_p (MSG_OPTIMIZED_LOCATIONS))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
"created %u versioning for alias checks.\n",
VEC_length (ddr_p, may_alias_ddrs));
gimple_stmt_iterator pattern_def_si = gsi_none ();
bool analyze_pattern_stmt = false;
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"=== vect_determine_vectorization_factor ===");
{
phi = gsi_stmt (si);
stmt_info = vinfo_for_stmt (phi);
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location, "==> examining phi: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
gcc_assert (!STMT_VINFO_VECTYPE (stmt_info));
scalar_type = TREE_TYPE (PHI_RESULT (phi));
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"get vectype for scalar type: ");
vectype = get_vectype_for_scalar_type (scalar_type);
if (!vectype)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: unsupported "
}
STMT_VINFO_VECTYPE (stmt_info) = vectype;
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype);
}
nunits = TYPE_VECTOR_SUBPARTS (vectype);
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "nunits = %d", nunits);
if (!vectorization_factor
stmt_info = vinfo_for_stmt (stmt);
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"==> examining statement: ");
{
stmt = pattern_stmt;
stmt_info = vinfo_for_stmt (pattern_stmt);
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"==> examining pattern statement: ");
}
else
{
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "skip.");
gsi_next (&si);
continue;
if (!gsi_end_p (pattern_def_si))
{
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"==> examining pattern def stmt: ");
if (gimple_get_lhs (stmt) == NULL_TREE)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: irregular stmt.");
if (VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))))
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: vector stmt in loop:");
{
gcc_assert (!STMT_VINFO_DATA_REF (stmt_info));
scalar_type = TREE_TYPE (gimple_get_lhs (stmt));
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"get vectype for scalar type: ");
vectype = get_vectype_for_scalar_type (scalar_type);
if (!vectype)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: unsupported "
support one vector size per loop). */
scalar_type = vect_get_smallest_scalar_type (stmt, &dummy,
&dummy);
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"get vectype for scalar type: ");
vf_vectype = get_vectype_for_scalar_type (scalar_type);
if (!vf_vectype)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: unsupported data-type ");
if ((GET_MODE_SIZE (TYPE_MODE (vectype))
!= GET_MODE_SIZE (TYPE_MODE (vf_vectype))))
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: different sized vector "
return false;
}
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, vf_vectype);
}
nunits = TYPE_VECTOR_SUBPARTS (vf_vectype);
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "nunits = %d", nunits);
if (!vectorization_factor
|| (nunits > vectorization_factor))
}
/* TODO: Analyze cost. Decide if worth while to vectorize. */
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "vectorization factor = %d",
vectorization_factor);
if (vectorization_factor <= 1)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: unsupported data-type");
return false;
step_expr = evolution_part;
init_expr = unshare_expr (initial_condition_in_loop_num (access_fn, loop_nb));
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location, "step: ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, step_expr);
if (TREE_CODE (step_expr) != INTEGER_CST)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"step unknown.");
return false;
gimple_stmt_iterator gsi;
bool double_reduc;
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"=== vect_analyze_scalar_cycles ===");
tree def = PHI_RESULT (phi);
stmt_vec_info stmt_vinfo = vinfo_for_stmt (phi);
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
if (access_fn)
{
STRIP_NOPS (access_fn);
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"Access function of PHI: ");
gcc_assert (STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo) != NULL_TREE);
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "Detected induction.");
STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_induction_def;
}
gimple reduc_stmt;
bool nested_cycle;
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
{
if (double_reduc)
{
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"Detected double reduction.");
{
if (nested_cycle)
{
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"Detected vectorizable nested cycle.");
}
else
{
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"Detected reduction.");
}
}
else
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Unknown def-use cycle pattern.");
}
{
tree niters;
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"=== get_loop_niters ===");
niters = number_of_exit_cond_executions (loop);
{
*number_of_iterations = niters;
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location, "==> get_loop_niters:");
dump_generic_expr (MSG_NOTE, TDF_SLIM, *number_of_iterations);
{
loop_vec_info loop_vinfo;
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"===== analyze_loop_nest_1 =====");
loop_vinfo = vect_analyze_loop_form (loop);
if (!loop_vinfo)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"bad inner-loop form.");
return NULL;
tree number_of_iterations = NULL;
loop_vec_info inner_loop_vinfo = NULL;
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"=== vect_analyze_loop_form ===");
if (loop->num_nodes != 2)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: control flow in loop.");
return NULL;
if (empty_block_p (loop->header))
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: empty loop.");
return NULL;
if ((loop->inner)->inner || (loop->inner)->next)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: multiple nested loops.");
return NULL;
inner_loop_vinfo = vect_analyze_loop_1 (loop->inner);
if (!inner_loop_vinfo)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: Bad inner loop.");
return NULL;
if (!expr_invariant_in_loop_p (loop,
LOOP_VINFO_NITERS (inner_loop_vinfo)))
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: inner-loop count not invariant.");
destroy_loop_vec_info (inner_loop_vinfo, true);
if (loop->num_nodes != 5)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: control flow in loop.");
destroy_loop_vec_info (inner_loop_vinfo, true);
|| !single_exit (innerloop)
|| single_exit (innerloop)->dest != EDGE_PRED (loop->latch, 0)->src)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: unsupported outerloop form.");
destroy_loop_vec_info (inner_loop_vinfo, true);
return NULL;
}
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"Considering outer-loop vectorization.");
}
if (!single_exit (loop)
|| EDGE_COUNT (loop->header->preds) != 2)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
{
if (!single_exit (loop))
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
if (!empty_block_p (loop->latch)
|| !gimple_seq_empty_p (phi_nodes (loop->latch)))
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: unexpected loop form.");
if (inner_loop_vinfo)
if (!(e->flags & EDGE_ABNORMAL))
{
split_loop_exit_edge (e);
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf (MSG_NOTE, "split exit edge.");
}
else
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: abnormal loop exit edge.");
if (inner_loop_vinfo)
loop_cond = vect_get_loop_niters (loop, &number_of_iterations);
if (!loop_cond)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: complicated exit condition.");
if (inner_loop_vinfo)
if (!number_of_iterations)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: number of iterations cannot be "
"computed.");
if (chrec_contains_undetermined (number_of_iterations))
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Infinite number of iterations.");
if (inner_loop_vinfo)
if (!NITERS_KNOWN_P (number_of_iterations))
{
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"Symbolic number of iterations is ");
}
else if (TREE_INT_CST_LOW (number_of_iterations) == 0)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: number of iterations = 0.");
if (inner_loop_vinfo)
HOST_WIDE_INT estimated_niter;
int min_profitable_estimate;
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"=== vect_analyze_loop_operations ===");
LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo));
LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"Updating vectorization factor to %d ",
vectorization_factor);
ok = true;
stmt_info = vinfo_for_stmt (phi);
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location, "examining phi: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
&& STMT_VINFO_DEF_TYPE (stmt_info)
!= vect_double_reduction_def)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Unsupported loop-closed phi in "
"outer-loop.");
if (STMT_VINFO_LIVE_P (stmt_info))
{
/* FORNOW: not yet supported. */
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: value used after loop.");
return false;
&& STMT_VINFO_DEF_TYPE (stmt_info) != vect_induction_def)
{
/* A scalar-dependence cycle that we don't support. */
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: scalar dependence cycle.");
return false;
if (!ok)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: relevant phi not "
touching this loop. */
if (!need_to_vectorize)
{
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"All the computation can be taken out of the loop.");
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: redundant loop. no profit to "
"vectorize.");
return false;
}
- if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
- && dump_kind_p (MSG_NOTE))
+ if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo) && dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"vectorization_factor = %d, niters = "
HOST_WIDE_INT_PRINT_DEC, vectorization_factor,
|| ((max_niter = max_stmt_executions_int (loop)) != -1
&& (unsigned HOST_WIDE_INT) max_niter < vectorization_factor))
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: iteration count too small.");
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: iteration count smaller than "
"vectorization factor.");
if (min_profitable_iters < 0)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: vectorization not profitable.");
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: vector version will never be "
"profitable.");
if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
&& LOOP_VINFO_INT_NITERS (loop_vinfo) <= th)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: vectorization not profitable.");
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"not vectorized: iteration count smaller than user "
"specified loop bound parameter or minimum profitable "
&& ((unsigned HOST_WIDE_INT) estimated_niter
<= MAX (th, (unsigned)min_profitable_estimate)))
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: estimated iteration count too "
"small.");
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"not vectorized: estimated iteration count smaller "
"than specified loop bound parameter or minimum "
|| LOOP_VINFO_INT_NITERS (loop_vinfo) % vectorization_factor != 0
|| LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo))
{
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "epilog loop required.");
if (!vect_can_advance_ivs_p (loop_vinfo))
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: can't create epilog loop 1.");
return false;
}
if (!slpeel_can_duplicate_loop_p (loop, single_exit (loop)))
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: can't create epilog loop 2.");
return false;
ok = vect_analyze_data_refs (loop_vinfo, NULL, &min_vf);
if (!ok)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"bad data references.");
return false;
ok = vect_mark_stmts_to_be_vectorized (loop_vinfo);
if (!ok)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"unexpected pattern.");
return false;
if (!ok
|| max_vf < min_vf)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"bad data dependence.");
return false;
ok = vect_determine_vectorization_factor (loop_vinfo);
if (!ok)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"can't determine vectorization factor.");
return false;
}
if (max_vf < LOOP_VINFO_VECT_FACTOR (loop_vinfo))
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"bad data dependence.");
return false;
ok = vect_analyze_data_refs_alignment (loop_vinfo, NULL);
if (!ok)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"bad data alignment.");
return false;
ok = vect_analyze_data_ref_accesses (loop_vinfo, NULL);
if (!ok)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"bad data access.");
return false;
ok = vect_prune_runtime_alias_test_list (loop_vinfo);
if (!ok)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"too long list of versioning for alias "
"run-time tests.");
ok = vect_enhance_data_refs_alignment (loop_vinfo);
if (!ok)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"bad data alignment.");
return false;
ok = vect_analyze_loop_operations (loop_vinfo, slp);
if (!ok)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"bad operation or unsupported loop bound.");
return false;
current_vector_size = 0;
vector_sizes = targetm.vectorize.autovectorize_vector_sizes ();
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"===== analyze_loop_nest =====");
&& loop_vec_info_for_loop (loop_outer (loop))
&& LOOP_VINFO_VECTORIZABLE_P (loop_vec_info_for_loop (loop_outer (loop))))
{
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"outer-loop already vectorized.");
return NULL;
loop_vinfo = vect_analyze_loop_form (loop);
if (!loop_vinfo)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"bad loop form.");
return NULL;
/* Try the next biggest vector size. */
current_vector_size = 1 << floor_log2 (vector_sizes);
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"***** Re-trying analysis with "
"vector size %d\n", current_vector_size);
== vect_internal_def
&& !is_loop_header_bb_p (gimple_bb (def_stmt)))))
{
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location, "swapping oprnds: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, next_stmt, 0);
if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"intermediate value used outside loop.");
nloop_uses++;
if (nloop_uses > 1)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"reduction used in loop.");
return NULL;
if (TREE_CODE (loop_arg) != SSA_NAME)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"reduction: not ssa_name: ");
def_stmt = SSA_NAME_DEF_STMT (loop_arg);
if (!def_stmt)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"reduction: no def_stmt.");
return NULL;
if (!is_gimple_assign (def_stmt) && gimple_code (def_stmt) != GIMPLE_PHI)
{
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, def_stmt, 0);
return NULL;
}
nloop_uses++;
if (nloop_uses > 1)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"reduction used in loop.");
return NULL;
if (gimple_phi_num_args (def_stmt) != 1
|| TREE_CODE (op1) != SSA_NAME)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"unsupported phi node definition.");
&& flow_bb_inside_loop_p (loop->inner, gimple_bb (def1))
&& is_gimple_assign (def1))
{
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
report_vect_op (MSG_NOTE, def_stmt,
"detected double reduction: ");
if (check_reduction
&& (!commutative_tree_code (code) || !associative_tree_code (code)))
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
"reduction: not commutative/associative: ");
return NULL;
{
if (code != COND_EXPR)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
"reduction: not binary operation: ");
if (TREE_CODE (op1) != SSA_NAME && TREE_CODE (op2) != SSA_NAME)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
"reduction: uses not ssa_names: ");
if (TREE_CODE (op1) != SSA_NAME && TREE_CODE (op2) != SSA_NAME)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
"reduction: uses not ssa_names: ");
|| (op4 && TREE_CODE (op4) == SSA_NAME
&& !types_compatible_p (type, TREE_TYPE (op4))))
{
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"reduction: multiple types: operation type: ");
&& check_reduction)
{
/* Changing the order of operations changes the semantics. */
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
"reduction: unsafe fp math optimization: ");
return NULL;
&& check_reduction)
{
/* Changing the order of operations changes the semantics. */
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
"reduction: unsafe int math optimization: ");
return NULL;
else if (SAT_FIXED_POINT_TYPE_P (type) && check_reduction)
{
/* Changing the order of operations changes the semantics. */
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
"reduction: unsafe fixed-point math optimization: ");
return NULL;
if (code != COND_EXPR
&& ((!def1 || gimple_nop_p (def1)) && (!def2 || gimple_nop_p (def2))))
{
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
report_vect_op (MSG_NOTE, def_stmt, "reduction: no defs for operands: ");
return NULL;
}
== vect_internal_def
&& !is_loop_header_bb_p (gimple_bb (def1)))))))
{
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
report_vect_op (MSG_NOTE, def_stmt, "detected reduction: ");
return def_stmt;
}
/* Swap operands (just for simplicity - so that the rest of the code
can assume that the reduction variable is always the last (second)
argument). */
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
report_vect_op (MSG_NOTE, def_stmt,
"detected reduction: need to swap operands: ");
}
else
{
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
report_vect_op (MSG_NOTE, def_stmt, "detected reduction: ");
}
/* Try to find SLP reduction chain. */
if (check_reduction && vect_is_slp_reduction (loop_info, phi, def_stmt))
{
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
report_vect_op (MSG_NOTE, def_stmt,
"reduction: detected reduction chain: ");
return def_stmt;
}
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
"reduction: unknown pattern: ");
if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
{
*peel_iters_epilogue = vf/2;
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"cost model: epilogue peel iters set to vf/2 "
"because loop iterations are unknown .");
/* vector version will never be profitable. */
else
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"cost model: the vector iteration cost = %d "
"divided by the scalar iteration cost = %d "
return;
}
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location, "Cost model analysis: \n");
dump_printf (MSG_NOTE, " Vector inside of loop cost: %d\n",
then skip the vectorized loop. */
min_profitable_iters--;
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
" Runtime profitability threshold = %d\n", min_profitable_iters);
}
min_profitable_estimate --;
min_profitable_estimate = MAX (min_profitable_estimate, min_profitable_iters);
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
" Static estimate profitability threshold = %d\n",
min_profitable_iters);
vectype = get_vectype_for_scalar_type (TREE_TYPE (reduction_op));
if (!vectype)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"unsupported data-type ");
}
}
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf (MSG_NOTE,
"vect_model_reduction_cost: inside_cost = %d, "
"prologue_cost = %d, epilogue_cost = %d .", inside_cost,
prologue_cost = add_stmt_cost (target_cost_data, 2, scalar_to_vec,
stmt_info, 0, vect_prologue);
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"vect_model_induction_cost: inside_cost = %d, "
"prologue_cost = %d .", inside_cost, prologue_cost);
new_bb = gsi_insert_on_edge_immediate (pe, init_stmt);
gcc_assert (!new_bb);
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"created new init_stmt: ");
&& !STMT_VINFO_LIVE_P (stmt_vinfo));
STMT_VINFO_VEC_STMT (stmt_vinfo) = new_stmt;
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"vector of inductions after inner-loop:");
}
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"transform induction: created def-use cycle: ");
add_phi_arg (phi, def, loop_latch_edge (loop), UNKNOWN_LOCATION);
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"transform reduction: created def-use cycle: ");
/*** Case 1: Create:
v_out2 = reduc_expr <v_out1> */
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"Reduce using direct vector reduction.");
Create: va = vop <va, va'>
} */
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"Reduce using vector shifts");
Create: s = op <s, s'> // For non SLP cases
} */
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"Reduce using scalar code. ");
{
tree rhs;
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"extract scalar result");
UNKNOWN_LOCATION);
add_phi_arg (vect_phi, PHI_RESULT (inner_phi),
loop_latch_edge (outer_loop), UNKNOWN_LOCATION);
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"created double reduction phi node: ");
{
if (!vectorizable_condition (stmt, gsi, NULL, ops[reduc_index], 0, NULL))
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"unsupported condition in reduction");
optab = optab_for_tree_code (code, vectype_in, optab_default);
if (!optab)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"no optab.");
if (optab_handler (optab, vec_mode) == CODE_FOR_nothing)
{
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf (MSG_NOTE, "op not supported by target.");
if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
< vect_min_worthwhile_factor (code))
return false;
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf (MSG_NOTE, "proceeding using word mode.");
}
&& LOOP_VINFO_VECT_FACTOR (loop_vinfo)
< vect_min_worthwhile_factor (code))
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not worthwhile without SIMD support.");
optab_default);
if (!reduc_optab)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"no optab for reduction.");
if (reduc_optab
&& optab_handler (reduc_optab, vec_mode) == CODE_FOR_nothing)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"reduc op not supported by target.");
{
if (!nested_cycle || double_reduc)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"no reduc code for scalar code.");
if (double_reduc && ncopies > 1)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"multiple types in double reduction");
ops[1] = fold_convert (TREE_TYPE (ops[0]), ops[1]);
else
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"invalid types in dot-prod");
/** Transform. **/
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "transform reduction.");
/* FORNOW: Multiple types are not supported for condition. */
if (ncopies > 1)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"multiple types in nested loop.");
return false;
if (!(STMT_VINFO_RELEVANT_P (exit_phi_vinfo)
&& !STMT_VINFO_LIVE_P (exit_phi_vinfo)))
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"inner-loop induction only used outside "
"of the outer vectorized loop.");
if (!vec_stmt) /* transformation not required. */
{
STMT_VINFO_TYPE (stmt_info) = induc_vec_info_type;
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"=== vectorizable_induction ===");
vect_model_induction_cost (stmt_info, ncopies);
/** Transform. **/
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "transform induction phi.");
vec_def = get_initial_def_for_induction (phi);
&& !vect_is_simple_use (op, stmt, loop_vinfo, NULL, &def_stmt, &def,
&dt))
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"use not simple.");
return false;
{
if (gimple_debug_bind_p (ustmt))
{
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"killing debug use");
bool check_profitability = false;
int th;
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "=== vec_transform_loop ===");
/* Use the more conservative vectorization threshold. If the number
if (th >= LOOP_VINFO_VECT_FACTOR (loop_vinfo) - 1
&& !LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
{
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"Profitability threshold is %d loop iterations.", th);
check_profitability = true;
for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
{
phi = gsi_stmt (si);
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"------>vectorizing phi: ");
if ((TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info))
!= (unsigned HOST_WIDE_INT) vectorization_factor)
- && dump_kind_p (MSG_NOTE))
+ && dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "multiple-types.");
if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def)
{
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "transform phi.");
vect_transform_stmt (phi, NULL, NULL, NULL, NULL);
}
else
stmt = gsi_stmt (si);
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"------>vectorizing statement: ");
if (!gsi_end_p (pattern_def_si))
{
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"==> vectorizing pattern def "
STMT_VINFO_VECTYPE (stmt_info));
if (!STMT_SLP_TYPE (stmt_info)
&& nunits != (unsigned int) vectorization_factor
- && dump_kind_p (MSG_NOTE))
+ && dump_enabled_p ())
/* For SLP VF is set according to unrolling factor, and not to
vector size, hence for SLP this print is not valid. */
dump_printf_loc (MSG_NOTE, vect_location,
{
slp_scheduled = true;
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"=== scheduling SLP instances ===");
}
/* -------- vectorize statement ------------ */
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "transform statement.");
grouped_store = false;
until all the loops have been transformed? */
update_ssa (TODO_update_ssa);
- if (dump_kind_p (MSG_OPTIMIZED_LOCATIONS))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location, "LOOP VECTORIZED.");
- if (loop->inner && dump_kind_p (MSG_OPTIMIZED_LOCATIONS))
+ if (loop->inner && dump_enabled_p ())
dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
"OUTER LOOP VECTORIZED.");
}
pattern_stmt = gimple_build_assign_with_ops (DOT_PROD_EXPR, var,
oprnd00, oprnd01, oprnd1);
- if (dump_kind_p (MSG_OPTIMIZED_LOCATIONS))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
"vect_recog_dot_prod_pattern: detected: ");
return NULL;
/* Pattern detected. */
- if (dump_kind_p (MSG_OPTIMIZED_LOCATIONS))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
"vect_recog_widen_mult_pattern: detected: ");
pattern_stmt = gimple_build_assign_with_ops (WIDEN_MULT_EXPR, var, oprnd0,
oprnd1);
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_gimple_stmt_loc (MSG_NOTE, vect_location, TDF_SLIM, pattern_stmt, 0);
VEC_safe_push (gimple, heap, *stmts, last_stmt);
pattern_stmt = gimple_build_assign_with_ops (WIDEN_SUM_EXPR, var,
oprnd0, oprnd1);
- if (dump_kind_p (MSG_OPTIMIZED_LOCATIONS))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
"vect_recog_widen_sum_pattern: detected: ");
STMT_VINFO_RELATED_STMT (vinfo_for_stmt (stmt)) = pattern_stmt;
new_pattern_def_seq (vinfo_for_stmt (stmt), new_def_stmt);
- if (dump_kind_p (MSG_OPTIMIZED_LOCATIONS))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
"created pattern stmt: ");
return NULL;
/* Pattern detected. */
- if (dump_kind_p (MSG_OPTIMIZED_LOCATIONS))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
"vect_recog_over_widening_pattern: detected: ");
return NULL;
/* Pattern detected. */
- if (dump_kind_p (MSG_OPTIMIZED_LOCATIONS))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
"vect_recog_widen_shift_pattern: detected: ");
pattern_stmt =
gimple_build_assign_with_ops (WIDEN_LSHIFT_EXPR, var, oprnd0, oprnd1);
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_gimple_stmt_loc (MSG_NOTE, vect_location, TDF_SLIM, pattern_stmt, 0);
VEC_safe_push (gimple, heap, *stmts, last_stmt);
}
/* Pattern detected. */
- if (dump_kind_p (MSG_OPTIMIZED_LOCATIONS))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
"vect_recog_vector_vector_shift_pattern: detected: ");
var = vect_recog_temp_ssa_var (TREE_TYPE (oprnd0), NULL);
pattern_stmt = gimple_build_assign_with_ops (rhs_code, var, oprnd0, def);
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_gimple_stmt_loc (MSG_NOTE, vect_location, TDF_SLIM, pattern_stmt, 0);
VEC_safe_push (gimple, heap, *stmts, last_stmt);
return NULL;
/* Pattern detected. */
- if (dump_kind_p (MSG_OPTIMIZED_LOCATIONS))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
"vect_recog_divmod_pattern: detected: ");
signmask);
}
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_gimple_stmt_loc (MSG_NOTE, vect_location, TDF_SLIM, pattern_stmt,
0);
}
/* Pattern detected. */
- if (dump_kind_p (MSG_OPTIMIZED_LOCATIONS))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
"vect_recog_divmod_pattern: detected: ");
*type_in = vecitype;
*type_out = vectype;
- if (dump_kind_p (MSG_OPTIMIZED_LOCATIONS))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
"vect_recog_mixed_size_cond_pattern: detected: ");
*type_out = vectype;
*type_in = vectype;
VEC_safe_push (gimple, heap, *stmts, last_stmt);
- if (dump_kind_p (MSG_OPTIMIZED_LOCATIONS))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
"vect_recog_bool_pattern: detected: ");
*type_out = vectype;
*type_in = vectype;
VEC_safe_push (gimple, heap, *stmts, last_stmt);
- if (dump_kind_p (MSG_OPTIMIZED_LOCATIONS))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
"vect_recog_bool_pattern: detected: ");
return pattern_stmt;
}
/* Found a vectorizable pattern. */
- if (dump_kind_p (MSG_OPTIMIZED_LOCATIONS))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
"pattern recognized: ");
{
stmt_info = vinfo_for_stmt (stmt);
pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
- if (dump_kind_p (MSG_OPTIMIZED_LOCATIONS))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
"additional pattern stmt: ");
VEC (gimple, heap) *stmts_to_replace = VEC_alloc (gimple, heap, 1);
gimple stmt;
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"=== vect_pattern_recog ===");
&def, &dt)
|| (!def_stmt && dt != vect_constant_def))
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Build SLP failed: can't find def for ");
pattern = true;
if (!first && !oprnd_info->first_pattern)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Build SLP failed: some of the stmts"
if (dt == vect_unknown_def_type)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Unsupported pattern.");
return false;
break;
default:
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"unsupported defining stmt: ");
return false;
{
if (number_of_oprnds != 2)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Build SLP failed: different types ");
&& !types_compatible_p (oprnd_info->first_def_type,
TREE_TYPE (def_op0))))
{
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"Swapping operands of ");
}
else
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Build SLP failed: different types ");
default:
/* FORNOW: Not supported. */
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Build SLP failed: illegal type of def ");
/* For every stmt in NODE find its def stmt/s. */
FOR_EACH_VEC_ELT (gimple, stmts, i, stmt)
{
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location, "Build SLP for ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
/* Fail to vectorize statements marked as unvectorizable. */
if (!STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (stmt)))
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Build SLP failed: unvectorizable statement ");
lhs = gimple_get_lhs (stmt);
if (lhs == NULL_TREE)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Build SLP failed: not GIMPLE_ASSIGN nor "
&& (cond = gimple_assign_rhs1 (stmt))
&& !COMPARISON_CLASS_P (cond))
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Build SLP failed: condition is not "
vectype = get_vectype_for_scalar_type (scalar_type);
if (!vectype)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Build SLP failed: unsupported data-type ");
|| !gimple_call_nothrow_p (stmt)
|| gimple_call_chain (stmt))
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Build SLP failed: unsupported call type ");
if (!optab)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Build SLP failed: no optab.");
vect_free_oprnd_info (&oprnds_info);
icode = (int) optab_handler (optab, vec_mode);
if (icode == CODE_FOR_nothing)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Build SLP failed: "
"op not supported by target.");
|| first_stmt_code == COMPONENT_REF
|| first_stmt_code == MEM_REF)))
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Build SLP failed: different operation "
if (need_same_oprnds
&& !operand_equal_p (first_op1, gimple_assign_rhs2 (stmt), 0))
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Build SLP failed: different shift "
|| gimple_call_fntype (first_stmt)
!= gimple_call_fntype (stmt))
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Build SLP failed: different calls in ");
|| (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) != stmt
&& GROUP_GAP (vinfo_for_stmt (stmt)) != 1))
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Build SLP failed: grouped "
if (loop_vinfo
&& GROUP_SIZE (vinfo_for_stmt (stmt)) > ncopies * group_size)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Build SLP failed: the number "
&& rhs_code != REALPART_EXPR
&& rhs_code != IMAGPART_EXPR)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION,
vect_location,
if (vect_supportable_dr_alignment (first_dr, false)
== dr_unaligned_unsupported)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION,
vect_location,
if (TREE_CODE_CLASS (rhs_code) == tcc_reference)
{
/* Not grouped load. */
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Build SLP failed: not grouped load ");
&& rhs_code != COND_EXPR
&& rhs_code != CALL_EXPR)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Build SLP failed: operation");
first_cond_code = TREE_CODE (cond_expr);
else if (first_cond_code != TREE_CODE (cond_expr))
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Build SLP failed: different"
/* Check that the loads are all in the same interleaving chain. */
if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (scalar_stmt)) != first_load)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Build SLP failed: unsupported data "
if (!slp_instn)
return false;
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location, "Load permutation ");
FOR_EACH_VEC_ELT (int, load_permutation, i, next)
if (vect_supportable_dr_alignment (dr, false)
== dr_unaligned_unsupported)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION,
vect_location,
if (!vectype)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Build SLP failed: unsupported data-type ");
unrolling_factor = least_common_multiple (nunits, group_size) / group_size;
if (unrolling_factor != 1 && !loop_vinfo)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Build SLP failed: unrolling required in basic"
" block SLP");
if (unrolling_factor != 1 && !loop_vinfo)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Build SLP failed: unrolling required in basic"
" block SLP");
if (!vect_supported_load_permutation_p (new_instance, group_size,
load_permutation))
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Build SLP failed: unsupported load "
VEC_safe_push (slp_instance, heap, BB_VINFO_SLP_INSTANCES (bb_vinfo),
new_instance);
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
vect_print_slp_tree (MSG_NOTE, node);
return true;
gimple first_element;
bool ok = false;
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "=== vect_analyze_slp ===");
if (loop_vinfo)
if (bb_vinfo && !ok)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Failed to SLP the basic block.");
slp_instance instance;
int decided_to_slp = 0;
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "=== vect_make_slp_decision ===");
FOR_EACH_VEC_ELT (slp_instance, slp_instances, i, instance)
LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo) = unrolling_factor;
- if (decided_to_slp && dump_kind_p (MSG_OPTIMIZED_LOCATIONS))
+ if (decided_to_slp && dump_enabled_p ())
dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
"Decided to SLP %d instances. Unrolling factor %d",
decided_to_slp, unrolling_factor);
VEC (slp_instance, heap) *slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
slp_instance instance;
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "=== vect_detect_hybrid_slp ===");
FOR_EACH_VEC_ELT (slp_instance, slp_instances, i, instance)
vec_outside_cost = vec_prologue_cost + vec_epilogue_cost;
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location, "Cost model analysis: \n");
dump_printf (MSG_NOTE, " Vector inside of basic block cost: %d\n",
if (!vect_analyze_data_refs (NULL, bb_vinfo, &min_vf))
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: unhandled data-ref in basic "
"block.\n");
ddrs = BB_VINFO_DDRS (bb_vinfo);
if (!VEC_length (ddr_p, ddrs))
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: not enough data-refs in "
"basic block.\n");
if (!vect_analyze_data_ref_dependences (NULL, bb_vinfo, &max_vf)
|| min_vf > max_vf)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: unhandled data dependence "
"in basic block.\n");
if (!vect_analyze_data_refs_alignment (NULL, bb_vinfo))
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: bad data alignment in basic "
"block.\n");
if (!vect_analyze_data_ref_accesses (NULL, bb_vinfo))
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: unhandled data access in "
"basic block.\n");
trees. */
if (!vect_analyze_slp (NULL, bb_vinfo))
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: failed to find SLP opportunities "
"in basic block.\n");
if (!vect_verify_datarefs_alignment (NULL, bb_vinfo))
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: unsupported alignment in basic "
"block.\n");
if (!vect_slp_analyze_operations (bb_vinfo))
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: bad operation in basic block.\n");
if (flag_vect_cost_model
&& !vect_bb_vectorization_profitable_p (bb_vinfo))
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: vectorization is not "
"profitable.\n");
return NULL;
}
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"Basic block will be vectorized using SLP\n");
gimple_stmt_iterator gsi;
unsigned int vector_sizes;
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "===vect_slp_analyze_bb===\n");
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
if (insns > PARAM_VALUE (PARAM_SLP_MAX_INSNS_IN_BB))
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: too many instructions in "
"basic block.\n");
/* Try the next biggest vector size. */
current_vector_size = 1 << floor_log2 (vector_sizes);
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"***** Re-trying analysis with "
"vector size %d\n", current_vector_size);
stmt_info_for_cost *si;
void *data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"=== vect_update_slp_costs_according_to_vf ===");
the next vector as well. */
if (only_one_vec && *current_mask_element >= mask_nunits)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"permutation requires at least two vectors ");
/* We either need the first vector too or have already moved to the
next vector. In both cases, this permutation needs three
vectors. */
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"permutation requires at "
if (!can_vec_perm_p (mode, false, NULL))
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"no vect permute for ");
if (!can_vec_perm_p (mode, false, mask))
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION,
vect_location,
SLP_TREE_NUMBER_OF_VEC_STMTS (node) = vec_stmts_size;
}
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
{
          dump_printf_loc (MSG_NOTE, vect_location,
"------>vectorizing SLP node starting from: ");
/* Schedule the tree of INSTANCE. */
is_store = vect_schedule_slp_instance (SLP_INSTANCE_TREE (instance),
instance, vf);
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"vectorizing stmts using SLP.");
}
gcc_assert (bb_vinfo);
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "SLPing BB\n");
for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
gimple stmt = gsi_stmt (si);
stmt_vec_info stmt_info;
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"------>SLPing statement: ");
}
}
- if (dump_kind_p (MSG_OPTIMIZED_LOCATIONS))
+ if (dump_enabled_p ())
dump_printf (MSG_OPTIMIZED_LOCATIONS, "BASIC BLOCK VECTORIZED\n");
destroy_bb_vec_info (bb_vinfo);
bool save_live_p = STMT_VINFO_LIVE_P (stmt_info);
gimple pattern_stmt;
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"mark relevant %d, live %d.", relevant, live_p);
pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"last stmt in pattern. don't mark"
" relevant/live.");
if (STMT_VINFO_RELEVANT (stmt_info) == save_relevant
&& STMT_VINFO_LIVE_P (stmt_info) == save_live_p)
{
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"already marked relevant/live.");
return;
if (gimple_code (stmt) != GIMPLE_PHI)
if (gimple_vdef (stmt))
{
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"vec_stmt_relevant_p: stmt has vdefs.");
*relevant = vect_used_in_scope;
basic_block bb = gimple_bb (USE_STMT (use_p));
if (!flow_bb_inside_loop_p (loop, bb))
{
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"vec_stmt_relevant_p: used out of loop.");
if (!vect_is_simple_use (use, stmt, loop_vinfo, NULL, &def_stmt, &def, &dt))
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: unsupported use in stmt.");
return false;
def_bb = gimple_bb (def_stmt);
if (!flow_bb_inside_loop_p (loop, def_bb))
{
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "def_stmt is out of loop.");
return true;
}
&& STMT_VINFO_DEF_TYPE (dstmt_vinfo) == vect_reduction_def
&& bb->loop_father == def_bb->loop_father)
{
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"reduc-stmt defining reduc-phi in the same nest.");
if (STMT_VINFO_IN_PATTERN_P (dstmt_vinfo))
... */
if (flow_loop_nested_p (def_bb->loop_father, bb->loop_father))
{
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"outer-loop def-stmt defining inner-loop stmt.");
stmt # use (d) */
else if (flow_loop_nested_p (bb->loop_father, def_bb->loop_father))
{
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"inner-loop def-stmt defining outer-loop stmt.");
enum vect_relevant relevant, tmp_relevant;
enum vect_def_type def_type;
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"=== vect_mark_stmts_to_be_vectorized ===");
for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
{
phi = gsi_stmt (si);
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location, "init: phi relevant? ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
{
stmt = gsi_stmt (si);
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location, "init: stmt relevant? ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
ssa_op_iter iter;
stmt = VEC_pop (gimple, worklist);
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location, "worklist: examine stmt: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
/* fall through */
default:
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"unsupported use of reduction.");
VEC_free (gimple, heap, worklist);
&& tmp_relevant != vect_used_in_outer_by_reduction
&& tmp_relevant != vect_used_in_outer)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"unsupported use of nested cycle.");
if (tmp_relevant != vect_unused_in_scope
&& tmp_relevant != vect_used_by_reduction)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"unsupported use of double reduction.");
inside_cost = record_stmt_cost (body_cost_vec, ncopies, vector_stmt,
stmt_info, 0, vect_body);
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"vect_model_simple_cost: inside_cost = %d, "
"prologue_cost = %d .", inside_cost, prologue_cost);
prologue_cost += add_stmt_cost (target_cost_data, 1, vector_stmt,
stmt_info, 0, vect_prologue);
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"vect_model_promotion_demotion_cost: inside_cost = %d, "
"prologue_cost = %d .", inside_cost, prologue_cost);
inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm,
stmt_info, 0, vect_body);
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"vect_model_store_cost: strided group_size = %d .",
group_size);
/* Costs of the stores. */
vect_get_store_cost (first_dr, ncopies, &inside_cost, body_cost_vec);
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"vect_model_store_cost: inside_cost = %d, "
"prologue_cost = %d .", inside_cost, prologue_cost);
vector_store, stmt_info, 0,
vect_body);
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"vect_model_store_cost: aligned.");
break;
*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
unaligned_store, stmt_info,
DR_MISALIGNMENT (dr), vect_body);
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"vect_model_store_cost: unaligned supported by "
"hardware.");
{
*inside_cost = VECT_MAX_COST;
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"vect_model_store_cost: unsupported access.");
break;
inside_cost += record_stmt_cost (body_cost_vec, nstmts, vec_perm,
stmt_info, 0, vect_body);
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"vect_model_load_cost: strided group_size = %d .",
group_size);
&inside_cost, &prologue_cost,
prologue_cost_vec, body_cost_vec, true);
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"vect_model_load_cost: inside_cost = %d, "
"prologue_cost = %d .", inside_cost, prologue_cost);
*inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
stmt_info, 0, vect_body);
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"vect_model_load_cost: aligned.");
unaligned_load, stmt_info,
DR_MISALIGNMENT (dr), vect_body);
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"vect_model_load_cost: unaligned supported by "
"hardware.");
*inside_cost += record_stmt_cost (body_cost_vec, 1, vector_stmt,
stmt_info, 0, vect_body);
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"vect_model_load_cost: explicit realign");
}
case dr_explicit_realign_optimized:
{
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"vect_model_load_cost: unaligned software "
"pipelined.");
*inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_perm,
stmt_info, 0, vect_body);
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"vect_model_load_cost: explicit realign optimized");
{
*inside_cost = VECT_MAX_COST;
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"vect_model_load_cost: unsupported access.");
break;
}
}
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"created new init_stmt: ");
bool is_simple_use;
tree vector_type;
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"vect_get_vec_def_for_operand: ");
is_simple_use = vect_is_simple_use (op, stmt, loop_vinfo, NULL,
&def_stmt, &def, &dt);
gcc_assert (is_simple_use);
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
{
int loc_printed = 0;
if (def)
*scalar_def = op;
/* Create 'vect_cst_ = {cst,cst,...,cst}' */
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"Create vector_cst. nunits = %d", nunits);
*scalar_def = def;
/* Create 'vec_inv = {inv,inv,..,inv}' */
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "Create vector_inv.");
return vect_init_vector (stmt, def, vector_type, NULL);
set_vinfo_for_stmt (vec_stmt, new_stmt_vec_info (vec_stmt, loop_vinfo,
bb_vinfo));
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location, "add new stmt: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vec_stmt, 0);
if (rhs_type
&& !types_compatible_p (rhs_type, TREE_TYPE (op)))
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"argument types differ.");
return false;
if (!vect_is_simple_use_1 (op, stmt, loop_vinfo, bb_vinfo,
&def_stmt, &def, &dt[i], &opvectype))
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"use not simple.");
return false;
else if (opvectype
&& opvectype != vectype_in)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"argument vector types differ.");
return false;
gcc_assert (vectype_in);
if (!vectype_in)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"no vectype for scalar type ");
fndecl = vectorizable_function (stmt, vectype_out, vectype_in);
if (fndecl == NULL_TREE)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"function is not vectorizable.");
if (!vec_stmt) /* transformation not required. */
{
STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "=== vectorizable_call ===");
vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
return true;
/** Transform. **/
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "transform call.");
/* Handle def. */
&& (TYPE_PRECISION (rhs_type)
!= GET_MODE_PRECISION (TYPE_MODE (rhs_type)))))
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"type conversion to/from bit-precision unsupported.");
return false;
if (!vect_is_simple_use_1 (op0, stmt, loop_vinfo, bb_vinfo,
&def_stmt, &def, &dt[0], &vectype_in))
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"use not simple.");
return false;
if (!ok)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"use not simple.");
return false;
gcc_assert (vectype_in);
if (!vectype_in)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"no vectype for scalar type ");
break;
/* FALLTHRU */
unsupported:
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"conversion not supported by target.");
return false;
if (!vec_stmt) /* transformation not required. */
{
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"=== vectorizable_conversion ===");
if (code == FIX_TRUNC_EXPR || code == FLOAT_EXPR)
}
/** Transform. **/
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"transform conversion. ncopies = %d.", ncopies);
if (!vect_is_simple_use_1 (op, stmt, loop_vinfo, bb_vinfo,
&def_stmt, &def, &dt[0], &vectype_in))
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"use not simple.");
return false;
> TYPE_PRECISION (TREE_TYPE (op)))
&& TYPE_UNSIGNED (TREE_TYPE (op))))
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"type conversion to/from bit-precision "
"unsupported.");
if (!vec_stmt) /* transformation not required. */
{
STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type;
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"=== vectorizable_assignment ===");
vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
}
/** Transform. **/
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "transform assignment.");
/* Handle def. */
if (TYPE_PRECISION (TREE_TYPE (scalar_dest))
!= GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"bit-precision shifts not supported.");
return false;
if (!vect_is_simple_use_1 (op0, stmt, loop_vinfo, bb_vinfo,
&def_stmt, &def, &dt[0], &vectype))
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"use not simple.");
return false;
gcc_assert (vectype);
if (!vectype)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"no vectype for scalar type ");
return false;
if (!vect_is_simple_use_1 (op1, stmt, loop_vinfo, bb_vinfo, &def_stmt,
&def, &dt[1], &op1_vectype))
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"use not simple.");
return false;
}
else
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"operand mode requires invariant argument.");
return false;
if (!scalar_shift_arg)
{
optab = optab_for_tree_code (code, vectype, optab_vector);
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"vector/vector shift/rotate found.");
if (op1_vectype == NULL_TREE
|| TYPE_MODE (op1_vectype) != TYPE_MODE (vectype))
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"unusable type for last operand in"
" vector/vector shift/rotate.");
if (optab
&& optab_handler (optab, TYPE_MODE (vectype)) != CODE_FOR_nothing)
{
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"vector/scalar shift/rotate found.");
}
{
scalar_shift_arg = false;
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"vector/vector shift/rotate found.");
&& TYPE_MODE (TREE_TYPE (vectype))
!= TYPE_MODE (TREE_TYPE (op1)))
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"unusable type for last operand in"
" vector/vector shift/rotate.");
/* Supportable by target? */
if (!optab)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"no optab.");
return false;
icode = (int) optab_handler (optab, vec_mode);
if (icode == CODE_FOR_nothing)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"op not supported by target.");
/* Check only during analysis. */
|| (vf < vect_min_worthwhile_factor (code)
&& !vec_stmt))
return false;
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "proceeding using word mode.");
}
&& vf < vect_min_worthwhile_factor (code)
&& !vec_stmt)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not worthwhile without SIMD support.");
return false;
if (!vec_stmt) /* transformation not required. */
{
STMT_VINFO_TYPE (stmt_info) = shift_vec_info_type;
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "=== vectorizable_shift ===");
vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
return true;
/** Transform. **/
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"transform binary/unary operation.");
optab_op2_mode = insn_data[icode].operand[2].mode;
if (!VECTOR_MODE_P (optab_op2_mode))
{
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"operand 1 using scalar mode.");
vec_oprnd1 = op1;
op_type = TREE_CODE_LENGTH (code);
if (op_type != unary_op && op_type != binary_op && op_type != ternary_op)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"num. args = %d (not unary/binary/ternary op).",
op_type);
&& code != BIT_XOR_EXPR
&& code != BIT_AND_EXPR)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"bit-precision arithmetic not supported.");
return false;
if (!vect_is_simple_use_1 (op0, stmt, loop_vinfo, bb_vinfo,
&def_stmt, &def, &dt[0], &vectype))
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"use not simple.");
return false;
gcc_assert (vectype);
if (!vectype)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"no vectype for scalar type ");
if (!vect_is_simple_use (op1, stmt, loop_vinfo, bb_vinfo, &def_stmt,
&def, &dt[1]))
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"use not simple.");
return false;
if (!vect_is_simple_use (op2, stmt, loop_vinfo, bb_vinfo, &def_stmt,
&def, &dt[2]))
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"use not simple.");
return false;
optab = optab_for_tree_code (code, vectype, optab_default);
if (!optab)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"no optab.");
return false;
if (icode == CODE_FOR_nothing)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"op not supported by target.");
/* Check only during analysis. */
if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
|| (!vec_stmt && vf < vect_min_worthwhile_factor (code)))
return false;
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "proceeding using word mode.");
}
&& !vec_stmt
&& vf < vect_min_worthwhile_factor (code))
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not worthwhile without SIMD support.");
return false;
if (!vec_stmt) /* transformation not required. */
{
STMT_VINFO_TYPE (stmt_info) = op_vec_info_type;
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"=== vectorizable_operation ===");
vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
/** Transform. **/
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"transform binary/unary operation.");
/* FORNOW. This restriction should be relaxed. */
if (loop && nested_in_vect_loop_p (loop, stmt) && ncopies > 1)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"multiple types in nested loop.");
return false;
if (!vect_is_simple_use (op, stmt, loop_vinfo, bb_vinfo, &def_stmt,
&def, &dt))
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"use not simple.");
return false;
? STMT_VINFO_DR_STEP (stmt_info) : DR_STEP (dr),
size_zero_node) < 0)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"negative step for store.");
return false;
if (!vect_is_simple_use (op, next_stmt, loop_vinfo, bb_vinfo,
&def_stmt, &def, &dt))
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"use not simple.");
return false;
group_size = vec_num = 1;
}
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"transform store. ncopies = %d", ncopies);
/* FORNOW. This restriction should be relaxed. */
if (nested_in_vect_loop && ncopies > 1)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"multiple types in nested loop.");
return false;
(e.g. - data copies). */
if (optab_handler (mov_optab, mode) == CODE_FOR_nothing)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Aligned load, but unsupported type.");
return false;
&def_stmt, &def, &gather_dt,
&gather_off_vectype))
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"gather index use not simple.");
return false;
size_zero_node) < 0;
if (negative && ncopies > 1)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"multiple types with negative step.");
return false;
if (alignment_support_scheme != dr_aligned
&& alignment_support_scheme != dr_unaligned_supported)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"negative step but alignment required.");
return false;
}
if (!perm_mask_for_reverse (vectype))
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"negative step and reversing not supported.");
return false;
return true;
}
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"transform load. ncopies = %d", ncopies);
/* FORNOW: not yet supported. */
if (STMT_VINFO_LIVE_P (stmt_info))
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"value used after loop.");
return false;
gimple pattern_stmt;
gimple_seq pattern_def_seq;
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location, "==> examining statement: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
if (gimple_has_volatile_ops (stmt))
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: stmt has volatile operands");
/* Analyze PATTERN_STMT instead of the original stmt. */
stmt = pattern_stmt;
stmt_info = vinfo_for_stmt (pattern_stmt);
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"==> examining pattern statement: ");
}
else
{
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "irrelevant.");
return true;
|| STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
{
/* Analyze PATTERN_STMT too. */
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"==> examining pattern statement: ");
|| STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_def_stmt)))
{
/* Analyze def stmt of STMT if it's a pattern stmt. */
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"==> examining pattern def statement: ");
gcc_assert (PURE_SLP_STMT (stmt_info));
scalar_type = TREE_TYPE (gimple_get_lhs (stmt));
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"get vectype for scalar type: ");
vectype = get_vectype_for_scalar_type (scalar_type);
if (!vectype)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not SLPed: unsupported data-type ");
return false;
}
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype);
if (!ok)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: relevant stmt not ");
if (!ok)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: live stmt not ");
default:
if (!STMT_VINFO_LIVE_P (stmt_info))
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"stmt not supported.");
gcc_unreachable ();
tree scalar_dest;
gimple exit_phi;
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"Record the vdef for outer-loop vectorization.");
return NULL_TREE;
vectype = build_vector_type (scalar_type, nunits);
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"get vectype with %d units of type ", nunits);
if (!vectype)
return NULL_TREE;
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype);
if (!VECTOR_MODE_P (TYPE_MODE (vectype))
&& !INTEGRAL_MODE_P (TYPE_MODE (vectype)))
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"mode not supported by target.");
return NULL_TREE;
*def_stmt = NULL;
*def = NULL_TREE;
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"vect_is_simple_use: operand ");
if (TREE_CODE (operand) == PAREN_EXPR)
{
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "non-associatable copy.");
operand = TREE_OPERAND (operand, 0);
}
if (TREE_CODE (operand) != SSA_NAME)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not ssa-name.");
return false;
*def_stmt = SSA_NAME_DEF_STMT (operand);
if (*def_stmt == NULL)
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"no def_stmt.");
return false;
}
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location, "def_stmt: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, *def_stmt, 0);
&& *dt == vect_double_reduction_def
&& gimple_code (stmt) != GIMPLE_PHI))
{
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Unsupported pattern.");
return false;
}
- if (dump_kind_p (MSG_NOTE))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "type of def: %d.", *dt);
switch (gimple_code (*def_stmt))
break;
/* FALLTHRU */
default:
- if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"unsupported defining stmt: ");
return false;
loop_vec_info loop_vinfo;
vect_location = find_loop_location (loop);
if (LOCATION_LOCUS (vect_location) != UNKNOWN_LOC
- && dump_kind_p (MSG_ALL))
+ && dump_enabled_p ())
dump_printf (MSG_ALL, "\nAnalyzing loop at %s:%d\n",
LOC_FILE (vect_location), LOC_LINE (vect_location));
continue;
if (LOCATION_LOCUS (vect_location) != UNKNOWN_LOC
- && dump_kind_p (MSG_ALL))
+ && dump_enabled_p ())
dump_printf (MSG_ALL, "\n\nVectorizing loop at %s:%d\n",
LOC_FILE (vect_location), LOC_LINE (vect_location));
vect_transform_loop (loop_vinfo);
vect_location = UNKNOWN_LOC;
statistics_counter_event (cfun, "Vectorized loops", num_vectorized_loops);
- if (dump_kind_p (MSG_ALL)
- || (num_vectorized_loops > 0 && dump_kind_p (MSG_ALL)))
+ if (dump_enabled_p ()
+ || (num_vectorized_loops > 0 && dump_enabled_p ()))
dump_printf_loc (MSG_ALL, vect_location,
"vectorized %u loops in function.\n",
num_vectorized_loops);
if (vect_slp_analyze_bb (bb))
{
vect_slp_transform_bb (bb);
- if (dump_kind_p (MSG_OPTIMIZED_LOCATIONS))
+ if (dump_enabled_p ())
dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
"basic block vectorized using SLP\n");
}
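
The hunks above apply the same mechanical conversion at every call site: the per-kind test dump_kind_p (MSG_*) becomes a single dump_enabled_p () guard, and the MSG_* kind passed to dump_printf ()/dump_printf_loc () is left to do the per-stream filtering. What follows is only a minimal sketch of the resulting idiom, not code from the patch: it assumes the dumpfile.h interface declared earlier in this patch, redeclares the vectorizer's vect_location global (normally provided by tree-vectorizer.h) so the fragment reads on its own, and uses a hypothetical helper name, example_dump_note, purely for illustration.

/* Sketch of the dump idiom the call sites are converted to.
   Assumptions: GCC's "dumpfile.h" (dump_enabled_p, dump_printf_loc,
   MSG_* flags); vect_location is really declared in tree-vectorizer.h;
   example_dump_note is an illustrative name, not part of the patch.  */

#include "dumpfile.h"

extern source_location vect_location;  /* normally from tree-vectorizer.h */

static void
example_dump_note (int vectorization_factor)
{
  /* dump_enabled_p () is a cheap guard that is true when either dump
     stream is active; the MSG_NOTE kind is then filtered per stream
     inside dump_printf_loc ().  */
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "example: vectorization factor = %d.",
                     vectorization_factor);
}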