/* Vectorizer
   Copyright (C) 2003-2020 Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* Loop and basic block vectorizer.

  This file contains drivers for the three vectorizers:
  (1) loop vectorizer (inter-iteration parallelism),
  (2) loop-aware SLP (intra-iteration parallelism) (invoked by the loop
      vectorizer)
  (3) BB vectorizer (out-of-loops), aka SLP

  The rest of the vectorizer's code is organized as follows:
  - tree-vect-loop.c - loop specific parts such as reductions, etc.  These are
    used by drivers (1) and (2).
  - tree-vect-loop-manip.c - vectorizer's loop control-flow utilities, used by
    drivers (1) and (2).
  - tree-vect-slp.c - BB vectorization specific analysis and transformation,
    used by drivers (2) and (3).
  - tree-vect-stmts.c - statements analysis and transformation (used by all).
  - tree-vect-data-refs.c - vectorizer specific data-refs analysis and
    manipulations (used by all).
  - tree-vect-patterns.c - vectorizable code patterns detector (used by all)

  Here's a poor attempt at illustrating that:

     tree-vectorizer.c:
     loop_vect()  loop_aware_slp()  slp_vect()
          |        /           \          /
          |       /             \        /
          tree-vect-loop.c  tree-vect-slp.c
                | \      \  /      /   |
                |  \      \/      /    |
                |   \     /\     /     |
                |    \   /  \   /      |
         tree-vect-stmts.c  tree-vect-data-refs.c
                       \      /
                    tree-vect-patterns.c
*/
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "tree.h"
#include "gimple.h"
#include "predict.h"
#include "tree-pass.h"
#include "ssa.h"
#include "cgraph.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "gimple-iterator.h"
#include "gimple-walk.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop-niter.h"
#include "tree-cfg.h"
#include "cfgloop.h"
#include "tree-vectorizer.h"
#include "tree-ssa-propagate.h"
#include "dbgcnt.h"
#include "tree-scalar-evolution.h"
#include "stringpool.h"
#include "attribs.h"
#include "gimple-pretty-print.h"
#include "opt-problem.h"
#include "internal-fn.h"
/* Loop or bb location, with hotness information.  */
dump_user_location_t vect_location;

/* auto_purge_vect_location's dtor: reset the vect_location
   global, to avoid stale location_t values that could reference
   GC-ed blocks.  */

auto_purge_vect_location::~auto_purge_vect_location ()
{
  vect_location = dump_user_location_t ();
}
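
/* A minimal usage sketch (illustrative only): drivers declare the
   sentinel on the stack so vect_location is reset on every exit path,
   e.g.

     {
       auto_purge_vect_location sentinel;
       vect_location = find_loop_location (loop);
       ...   analysis that calls dump_printf_loc (..., vect_location, ...)
     }   <- sentinel's dtor clears vect_location here

   try_vectorize_loop_1 below follows exactly this pattern.  */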
/* Dump a cost entry according to args to F.  */

void
dump_stmt_cost (FILE *f, void *data, int count, enum vect_cost_for_stmt kind,
		stmt_vec_info stmt_info, tree, int misalign, unsigned cost,
		enum vect_cost_model_location where)
{
  fprintf (f, "%p ", data);
  if (stmt_info)
    {
      print_gimple_expr (f, STMT_VINFO_STMT (stmt_info), 0, TDF_SLIM);
      fprintf (f, " ");
    }
  else
    fprintf (f, "<unknown> ");
  fprintf (f, "%d times ", count);
  const char *ks = "unknown";
  switch (kind)
    {
    case scalar_stmt:
      ks = "scalar_stmt";
      break;
    case scalar_load:
      ks = "scalar_load";
      break;
    case scalar_store:
      ks = "scalar_store";
      break;
    case vector_stmt:
      ks = "vector_stmt";
      break;
    case vector_load:
      ks = "vector_load";
      break;
    case vector_gather_load:
      ks = "vector_gather_load";
      break;
    case unaligned_load:
      ks = "unaligned_load";
      break;
    case unaligned_store:
      ks = "unaligned_store";
      break;
    case vector_store:
      ks = "vector_store";
      break;
    case vector_scatter_store:
      ks = "vector_scatter_store";
      break;
    case vec_to_scalar:
      ks = "vec_to_scalar";
      break;
    case scalar_to_vec:
      ks = "scalar_to_vec";
      break;
    case cond_branch_not_taken:
      ks = "cond_branch_not_taken";
      break;
    case cond_branch_taken:
      ks = "cond_branch_taken";
      break;
    case vec_perm:
      ks = "vec_perm";
      break;
    case vec_promote_demote:
      ks = "vec_promote_demote";
      break;
    case vec_construct:
      ks = "vec_construct";
      break;
    }
  fprintf (f, "%s ", ks);
  if (kind == unaligned_load || kind == unaligned_store)
    fprintf (f, "(misalign %d) ", misalign);
  fprintf (f, "costs %u ", cost);
  const char *ws = "unknown";
  switch (where)
    {
    case vect_prologue:
      ws = "prologue";
      break;
    case vect_body:
      ws = "body";
      break;
    case vect_epilogue:
      ws = "epilogue";
      break;
    }
  fprintf (f, "in %s\n", ws);
}
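
/* For reference, one entry printed by the function above looks roughly
   like this (the pointer, statement and numbers are made up):

     0x38a2d40 _5 = *p_4 1 times unaligned_load (misalign 0) costs 2 in body

   i.e. the cost-model data pointer, the statement, the count, the kind
   (with misalignment for unaligned accesses), the cost and where in the
   vectorized loop the cost is incurred.  */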
/* For mapping simduid to vectorization factor.  */

class simduid_to_vf : public free_ptr_hash<simduid_to_vf>
{
public:
  unsigned int simduid;
  poly_uint64 vf;

  /* hash_table support.  */
  static inline hashval_t hash (const simduid_to_vf *);
  static inline int equal (const simduid_to_vf *, const simduid_to_vf *);
};

inline hashval_t
simduid_to_vf::hash (const simduid_to_vf *p)
{
  return p->simduid;
}

inline int
simduid_to_vf::equal (const simduid_to_vf *p1, const simduid_to_vf *p2)
{
  return p1->simduid == p2->simduid;
}
/* This hash maps the OMP simd array to the corresponding simduid used
   to index into it.  Like thus,

        _7 = GOMP_SIMD_LANE (simduid.0)
        ...
        ...
        D.1737[_7] = stuff;

   This hash maps from the OMP simd array (D.1737[]) to DECL_UID of
   simduid.0.  */

struct simd_array_to_simduid : free_ptr_hash<simd_array_to_simduid>
{
  tree decl;
  unsigned int simduid;

  /* hash_table support.  */
  static inline hashval_t hash (const simd_array_to_simduid *);
  static inline int equal (const simd_array_to_simduid *,
			   const simd_array_to_simduid *);
};

inline hashval_t
simd_array_to_simduid::hash (const simd_array_to_simduid *p)
{
  return DECL_UID (p->decl);
}

inline int
simd_array_to_simduid::equal (const simd_array_to_simduid *p1,
			      const simd_array_to_simduid *p2)
{
  return p1->decl == p2->decl;
}
/* Fold IFN_GOMP_SIMD_LANE, IFN_GOMP_SIMD_VF, IFN_GOMP_SIMD_LAST_LANE
   into their corresponding constants and remove
   IFN_GOMP_SIMD_ORDERED_{START,END}.  */

static void
adjust_simduid_builtins (hash_table<simduid_to_vf> *htab)
{
  basic_block bb;

  FOR_EACH_BB_FN (bb, cfun)
    {
      gimple_stmt_iterator i;

      for (i = gsi_start_bb (bb); !gsi_end_p (i); )
	{
	  poly_uint64 vf = 1;
	  enum internal_fn ifn;
	  gimple *stmt = gsi_stmt (i);
	  tree t;
	  if (!is_gimple_call (stmt)
	      || !gimple_call_internal_p (stmt))
	    {
	      gsi_next (&i);
	      continue;
	    }
	  ifn = gimple_call_internal_fn (stmt);
	  switch (ifn)
	    {
	    case IFN_GOMP_SIMD_LANE:
	    case IFN_GOMP_SIMD_VF:
	    case IFN_GOMP_SIMD_LAST_LANE:
	      break;
	    case IFN_GOMP_SIMD_ORDERED_START:
	    case IFN_GOMP_SIMD_ORDERED_END:
	      if (integer_onep (gimple_call_arg (stmt, 0)))
		{
		  enum built_in_function bcode
		    = (ifn == IFN_GOMP_SIMD_ORDERED_START
		       ? BUILT_IN_GOMP_ORDERED_START
		       : BUILT_IN_GOMP_ORDERED_END);
		  gimple *g
		    = gimple_build_call (builtin_decl_explicit (bcode), 0);
		  gimple_move_vops (g, stmt);
		  gsi_replace (&i, g, true);
		  continue;
		}
	      gsi_remove (&i, true);
	      unlink_stmt_vdef (stmt);
	      continue;
	    default:
	      gsi_next (&i);
	      continue;
	    }
	  tree arg = gimple_call_arg (stmt, 0);
	  gcc_assert (arg != NULL_TREE);
	  gcc_assert (TREE_CODE (arg) == SSA_NAME);
	  simduid_to_vf *p = NULL, data;
	  data.simduid = DECL_UID (SSA_NAME_VAR (arg));
	  /* Need to nullify loop safelen field since its value is not
	     valid after transformation.  */
	  if (bb->loop_father && bb->loop_father->safelen > 0)
	    bb->loop_father->safelen = 0;
	  if (htab)
	    {
	      p = htab->find (&data);
	      if (p)
		vf = p->vf;
	    }
	  switch (ifn)
	    {
	    case IFN_GOMP_SIMD_VF:
	      t = build_int_cst (unsigned_type_node, vf);
	      break;
	    case IFN_GOMP_SIMD_LANE:
	      t = build_int_cst (unsigned_type_node, 0);
	      break;
	    case IFN_GOMP_SIMD_LAST_LANE:
	      t = gimple_call_arg (stmt, 1);
	      break;
	    default:
	      gcc_unreachable ();
	    }
	  tree lhs = gimple_call_lhs (stmt);
	  if (lhs)
	    replace_uses_by (lhs, t);
	  release_defs (stmt);
	  gsi_remove (&i, true);
	}
    }
}
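
/* Illustrative effect of the folding above (a sketch; the SSA names are
   invented).  With a recorded vectorization factor of 8 for simduid.0:

     _3 = GOMP_SIMD_VF (simduid.0);            -> uses of _3 replaced by 8
     _5 = GOMP_SIMD_LANE (simduid.0);          -> uses of _5 replaced by 0
     _7 = GOMP_SIMD_LAST_LANE (simduid.0, _6); -> uses of _7 replaced by _6

   and IFN_GOMP_SIMD_ORDERED_{START,END} calls either turn into the
   corresponding GOMP ordered builtin calls or are removed outright.  */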
/* Helper structure for note_simd_array_uses.  */

struct note_simd_array_uses_struct
{
  hash_table<simd_array_to_simduid> **htab;
  unsigned int simduid;
};

/* Callback for note_simd_array_uses, called through walk_gimple_op.  */

static tree
note_simd_array_uses_cb (tree *tp, int *walk_subtrees, void *data)
{
  struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
  struct note_simd_array_uses_struct *ns
    = (struct note_simd_array_uses_struct *) wi->info;

  if (TYPE_P (*tp))
    *walk_subtrees = 0;
  else if (VAR_P (*tp)
	   && lookup_attribute ("omp simd array", DECL_ATTRIBUTES (*tp))
	   && DECL_CONTEXT (*tp) == current_function_decl)
    {
      simd_array_to_simduid data;
      if (!*ns->htab)
	*ns->htab = new hash_table<simd_array_to_simduid> (15);
      data.decl = *tp;
      data.simduid = ns->simduid;
      simd_array_to_simduid **slot = (*ns->htab)->find_slot (&data, INSERT);
      if (*slot == NULL)
	{
	  simd_array_to_simduid *p = XNEW (simd_array_to_simduid);
	  *p = data;
	  *slot = p;
	}
      else if ((*slot)->simduid != ns->simduid)
	(*slot)->simduid = -1U;
      *walk_subtrees = 0;
    }
  return NULL_TREE;
}
/* Find "omp simd array" temporaries and map them to corresponding
   simduid.  */

static void
note_simd_array_uses (hash_table<simd_array_to_simduid> **htab)
{
  basic_block bb;
  gimple_stmt_iterator gsi;
  struct walk_stmt_info wi;
  struct note_simd_array_uses_struct ns;

  memset (&wi, 0, sizeof (wi));
  wi.info = &ns;
  ns.htab = htab;

  FOR_EACH_BB_FN (bb, cfun)
    for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
      {
	gimple *stmt = gsi_stmt (gsi);
	if (!is_gimple_call (stmt) || !gimple_call_internal_p (stmt))
	  continue;
	switch (gimple_call_internal_fn (stmt))
	  {
	  case IFN_GOMP_SIMD_LANE:
	  case IFN_GOMP_SIMD_VF:
	  case IFN_GOMP_SIMD_LAST_LANE:
	    break;
	  default:
	    continue;
	  }
	tree lhs = gimple_call_lhs (stmt);
	if (lhs == NULL_TREE)
	  continue;
	imm_use_iterator use_iter;
	gimple *use_stmt;
	ns.simduid = DECL_UID (SSA_NAME_VAR (gimple_call_arg (stmt, 0)));
	FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, lhs)
	  if (!is_gimple_debug (use_stmt))
	    walk_gimple_op (use_stmt, note_simd_array_uses_cb, &wi);
      }
}
/* Shrink arrays with "omp simd array" attribute to the corresponding
   vectorization factor.  */

static void
shrink_simd_arrays
  (hash_table<simd_array_to_simduid> *simd_array_to_simduid_htab,
   hash_table<simduid_to_vf> *simduid_to_vf_htab)
{
  for (hash_table<simd_array_to_simduid>::iterator iter
	 = simd_array_to_simduid_htab->begin ();
       iter != simd_array_to_simduid_htab->end (); ++iter)
    if ((*iter)->simduid != -1U)
      {
	tree decl = (*iter)->decl;
	poly_uint64 vf = 1;
	if (simduid_to_vf_htab)
	  {
	    simduid_to_vf *p = NULL, data;
	    data.simduid = (*iter)->simduid;
	    p = simduid_to_vf_htab->find (&data);
	    if (p)
	      vf = p->vf;
	  }
	tree atype
	  = build_array_type_nelts (TREE_TYPE (TREE_TYPE (decl)), vf);
	TREE_TYPE (decl) = atype;
	relayout_decl (decl);
      }

  delete simd_array_to_simduid_htab;
}
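
/* Worked example for the two hash tables above (a sketch; the names and
   sizes are invented): if loop analysis recorded vf = 8 for simduid.0 and
   note_simd_array_uses mapped the "omp simd array" D.1737[] to simduid.0,
   then shrink_simd_arrays rewrites

     unsigned char D.1737[64];    sized for the maximum safelen

   into

     unsigned char D.1737[8];     sized for the actual VF

   Arrays whose simduid was ambiguous (simduid == -1U) are left alone.  */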
/* Initialize the vec_info with kind KIND_IN and target cost data
   TARGET_COST_DATA_IN.  */

vec_info::vec_info (vec_info::vec_kind kind_in, void *target_cost_data_in,
		    vec_info_shared *shared_)
  : kind (kind_in),
    shared (shared_),
    stmt_vec_info_ro (false),
    target_cost_data (target_cost_data_in)
{
  stmt_vec_infos.create (50);
}
vec_info::~vec_info ()
{
  slp_instance instance;
  unsigned int i;

  FOR_EACH_VEC_ELT (slp_instances, i, instance)
    vect_free_slp_instance (instance, true);

  destroy_cost_data (target_cost_data);
  free_stmt_vec_infos ();
}
vec_info_shared::vec_info_shared ()
  : datarefs (vNULL),
    datarefs_copy (vNULL),
    ddrs (vNULL)
{
}

vec_info_shared::~vec_info_shared ()
{
  free_data_refs (datarefs);
  free_dependence_relations (ddrs);
  datarefs_copy.release ();
}

void
vec_info_shared::save_datarefs ()
{
  if (!flag_checking)
    return;
  datarefs_copy.reserve_exact (datarefs.length ());
  for (unsigned i = 0; i < datarefs.length (); ++i)
    datarefs_copy.quick_push (*datarefs[i]);
}

void
vec_info_shared::check_datarefs ()
{
  if (!flag_checking)
    return;
  gcc_assert (datarefs.length () == datarefs_copy.length ());
  for (unsigned i = 0; i < datarefs.length (); ++i)
    if (memcmp (&datarefs_copy[i], datarefs[i], sizeof (data_reference)) != 0)
      gcc_unreachable ();
}
/* Record that STMT belongs to the vectorizable region.  Create and return
   an associated stmt_vec_info.  */

stmt_vec_info
vec_info::add_stmt (gimple *stmt)
{
  stmt_vec_info res = new_stmt_vec_info (stmt);
  set_vinfo_for_stmt (stmt, res);
  return res;
}
/* If STMT has an associated stmt_vec_info, return that vec_info, otherwise
   return null.  It is safe to call this function on any statement, even if
   it might not be part of the vectorizable region.  */

stmt_vec_info
vec_info::lookup_stmt (gimple *stmt)
{
  unsigned int uid = gimple_uid (stmt);
  if (uid > 0 && uid - 1 < stmt_vec_infos.length ())
    {
      stmt_vec_info res = stmt_vec_infos[uid - 1];
      if (res && res->stmt == stmt)
	return res;
    }
  return NULL;
}
/* If NAME is an SSA_NAME and its definition has an associated stmt_vec_info,
   return that stmt_vec_info, otherwise return null.  It is safe to call
   this on arbitrary operands.  */

stmt_vec_info
vec_info::lookup_def (tree name)
{
  if (TREE_CODE (name) == SSA_NAME
      && !SSA_NAME_IS_DEFAULT_DEF (name))
    return lookup_stmt (SSA_NAME_DEF_STMT (name));
  return NULL;
}
/* See whether there is a single non-debug statement that uses LHS and
   whether that statement has an associated stmt_vec_info.  Return the
   stmt_vec_info if so, otherwise return null.  */

stmt_vec_info
vec_info::lookup_single_use (tree lhs)
{
  use_operand_p dummy;
  gimple *use_stmt;
  if (single_imm_use (lhs, &dummy, &use_stmt))
    return lookup_stmt (use_stmt);
  return NULL;
}
/* Return vectorization information about DR.  */

dr_vec_info *
vec_info::lookup_dr (data_reference *dr)
{
  stmt_vec_info stmt_info = lookup_stmt (DR_STMT (dr));
  /* DR_STMT should never refer to a stmt in a pattern replacement.  */
  gcc_checking_assert (!is_pattern_stmt_p (stmt_info));
  return STMT_VINFO_DR_INFO (stmt_info->dr_aux.stmt);
}
/* Record that NEW_STMT_INFO now implements the same data reference
   as OLD_STMT_INFO.  */

void
vec_info::move_dr (stmt_vec_info new_stmt_info, stmt_vec_info old_stmt_info)
{
  gcc_assert (!is_pattern_stmt_p (old_stmt_info));
  STMT_VINFO_DR_INFO (old_stmt_info)->stmt = new_stmt_info;
  new_stmt_info->dr_aux = old_stmt_info->dr_aux;
  STMT_VINFO_DR_WRT_VEC_LOOP (new_stmt_info)
    = STMT_VINFO_DR_WRT_VEC_LOOP (old_stmt_info);
  STMT_VINFO_GATHER_SCATTER_P (new_stmt_info)
    = STMT_VINFO_GATHER_SCATTER_P (old_stmt_info);
}
/* Permanently remove the statement described by STMT_INFO from the
   function.  */

void
vec_info::remove_stmt (stmt_vec_info stmt_info)
{
  gcc_assert (!stmt_info->pattern_stmt_p);
  set_vinfo_for_stmt (stmt_info->stmt, NULL);
  gimple_stmt_iterator si = gsi_for_stmt (stmt_info->stmt);
  unlink_stmt_vdef (stmt_info->stmt);
  gsi_remove (&si, true);
  release_defs (stmt_info->stmt);
  free_stmt_vec_info (stmt_info);
}
/* Replace the statement at GSI by NEW_STMT, both the vectorization
   information and the function itself.  STMT_INFO describes the statement
   at GSI.  */

void
vec_info::replace_stmt (gimple_stmt_iterator *gsi, stmt_vec_info stmt_info,
			gimple *new_stmt)
{
  gimple *old_stmt = stmt_info->stmt;
  gcc_assert (!stmt_info->pattern_stmt_p && old_stmt == gsi_stmt (*gsi));
  gimple_set_uid (new_stmt, gimple_uid (old_stmt));
  stmt_info->stmt = new_stmt;
  gsi_replace (gsi, new_stmt, true);
}
/* Create and initialize a new stmt_vec_info struct for STMT.  */

stmt_vec_info
vec_info::new_stmt_vec_info (gimple *stmt)
{
  stmt_vec_info res = XCNEW (class _stmt_vec_info);
  res->stmt = stmt;

  STMT_VINFO_TYPE (res) = undef_vec_info_type;
  STMT_VINFO_RELEVANT (res) = vect_unused_in_scope;
  STMT_VINFO_VECTORIZABLE (res) = true;
  STMT_VINFO_REDUC_TYPE (res) = TREE_CODE_REDUCTION;
  STMT_VINFO_REDUC_CODE (res) = ERROR_MARK;
  STMT_VINFO_REDUC_FN (res) = IFN_LAST;
  STMT_VINFO_REDUC_IDX (res) = -1;
  STMT_VINFO_SLP_VECT_ONLY (res) = false;
  STMT_VINFO_VEC_STMTS (res) = vNULL;

  if (gimple_code (stmt) == GIMPLE_PHI
      && is_loop_header_bb_p (gimple_bb (stmt)))
    STMT_VINFO_DEF_TYPE (res) = vect_unknown_def_type;
  else
    STMT_VINFO_DEF_TYPE (res) = vect_internal_def;

  STMT_VINFO_SAME_ALIGN_REFS (res).create (0);
  STMT_SLP_TYPE (res) = loop_vect;

  /* This is really "uninitialized" until vect_compute_data_ref_alignment.  */
  res->dr_aux.misalignment = DR_MISALIGNMENT_UNINITIALIZED;

  return res;
}
/* Associate STMT with INFO.  */

void
vec_info::set_vinfo_for_stmt (gimple *stmt, stmt_vec_info info)
{
  unsigned int uid = gimple_uid (stmt);
  if (uid == 0)
    {
      gcc_assert (!stmt_vec_info_ro);
      gcc_checking_assert (info);
      uid = stmt_vec_infos.length () + 1;
      gimple_set_uid (stmt, uid);
      stmt_vec_infos.safe_push (info);
    }
  else
    {
      gcc_checking_assert (info == NULL);
      stmt_vec_infos[uid - 1] = info;
    }
}
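
/* The uid scheme used by the lookup/set functions above, in short: a
   gimple uid of 0 means "no stmt_vec_info", and a nonzero uid N means the
   info lives at stmt_vec_infos[N - 1].  Schematically:

     stmt_vec_info info = vinfo->add_stmt (stmt);    assigns a fresh uid N
     gcc_checking_assert (vinfo->lookup_stmt (stmt) == info);
     vinfo->set_vinfo_for_stmt (stmt, NULL);         drops the association

   This is why lookup_stmt is safe on statements outside the region:
   their uid is either 0 or out of range for stmt_vec_infos.  */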
/* Free the contents of stmt_vec_infos.  */

void
vec_info::free_stmt_vec_infos (void)
{
  unsigned int i;
  stmt_vec_info info;
  FOR_EACH_VEC_ELT (stmt_vec_infos, i, info)
    if (info != NULL)
      free_stmt_vec_info (info);
  stmt_vec_infos.release ();
}
/* Free STMT_INFO.  */

void
vec_info::free_stmt_vec_info (stmt_vec_info stmt_info)
{
  if (stmt_info->pattern_stmt_p)
    {
      gimple_set_bb (stmt_info->stmt, NULL);
      tree lhs = gimple_get_lhs (stmt_info->stmt);
      if (lhs && TREE_CODE (lhs) == SSA_NAME)
	release_ssa_name (lhs);
    }

  STMT_VINFO_SAME_ALIGN_REFS (stmt_info).release ();
  STMT_VINFO_SIMD_CLONE_INFO (stmt_info).release ();
  STMT_VINFO_VEC_STMTS (stmt_info).release ();
  free (stmt_info);
}
/* A helper function to free scev and LOOP niter information, as well as
   clear loop constraint LOOP_C_FINITE.  */

void
vect_free_loop_info_assumptions (class loop *loop)
{
  scev_reset_htab ();
  /* We need to explicitly reset upper bound information since they are
     used even after free_numbers_of_iterations_estimates.  */
  loop->any_upper_bound = false;
  loop->any_likely_upper_bound = false;
  free_numbers_of_iterations_estimates (loop);
  loop_constraint_clear (loop, LOOP_C_FINITE);
}
/* If LOOP has been versioned during ifcvt, return the internal call
   guarding it.  */

gimple *
vect_loop_vectorized_call (class loop *loop, gcond **cond)
{
  basic_block bb = loop_preheader_edge (loop)->src;
  gimple *g;
  do
    {
      g = last_stmt (bb);
      if (g)
	break;
      if (!single_pred_p (bb))
	break;
      bb = single_pred (bb);
    }
  while (1);
  if (g && gimple_code (g) == GIMPLE_COND)
    {
      if (cond)
	*cond = as_a <gcond *> (g);
      gimple_stmt_iterator gsi = gsi_for_stmt (g);
      gsi_prev (&gsi);
      if (!gsi_end_p (gsi))
	{
	  g = gsi_stmt (gsi);
	  if (gimple_call_internal_p (g, IFN_LOOP_VECTORIZED)
	      && (tree_to_shwi (gimple_call_arg (g, 0)) == loop->num
		  || tree_to_shwi (gimple_call_arg (g, 1)) == loop->num))
	    return g;
	}
    }
  return NULL;
}
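
/* Shape of the CFG this function matches (a sketch): after if-conversion
   versions a loop, the block dominating the preheader ends in

     _1 = LOOP_VECTORIZED (2, 3);    args are the two loop numbers
     if (_1 != 0) goto <loop 2>; else goto <loop 3>;

   so we walk from the preheader to the controlling GIMPLE_COND and check
   the preceding internal call's arguments against loop->num.  */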
/* If LOOP has been versioned during loop distribution, return the guarding
   internal call.  */

gimple *
vect_loop_dist_alias_call (class loop *loop)
{
  basic_block bb;
  basic_block entry;
  class loop *outer, *orig;
  gimple_stmt_iterator gsi;
  gimple *g;

  if (loop->orig_loop_num == 0)
    return NULL;

  orig = get_loop (cfun, loop->orig_loop_num);
  if (orig == NULL)
    {
      /* The original loop is somehow destroyed.  Clear the information.  */
      loop->orig_loop_num = 0;
      return NULL;
    }

  if (loop != orig)
    bb = nearest_common_dominator (CDI_DOMINATORS, loop->header, orig->header);
  else
    bb = loop_preheader_edge (loop)->src;

  outer = bb->loop_father;
  entry = ENTRY_BLOCK_PTR_FOR_FN (cfun);

  /* Look upward in dominance tree.  */
  for (; bb != entry && flow_bb_inside_loop_p (outer, bb);
       bb = get_immediate_dominator (CDI_DOMINATORS, bb))
    {
      g = last_stmt (bb);
      if (g == NULL || gimple_code (g) != GIMPLE_COND)
	continue;

      gsi = gsi_for_stmt (g);
      gsi_prev (&gsi);
      if (gsi_end_p (gsi))
	continue;

      g = gsi_stmt (gsi);
      /* The guarding internal function call must have the same distribution
	 alias id.  */
      if (gimple_call_internal_p (g, IFN_LOOP_DIST_ALIAS)
	  && (tree_to_shwi (gimple_call_arg (g, 0)) == loop->orig_loop_num))
	return g;
    }

  return NULL;
}
/* Set the uids of all the statements in basic blocks inside loop
   represented by LOOP_VINFO.  LOOP_VECTORIZED_CALL is the internal
   call guarding the loop which has been if converted.  */

static void
set_uid_loop_bbs (loop_vec_info loop_vinfo, gimple *loop_vectorized_call)
{
  tree arg = gimple_call_arg (loop_vectorized_call, 1);
  basic_block *bbs;
  unsigned int i;
  class loop *scalar_loop = get_loop (cfun, tree_to_shwi (arg));

  LOOP_VINFO_SCALAR_LOOP (loop_vinfo) = scalar_loop;
  gcc_checking_assert (vect_loop_vectorized_call (scalar_loop)
		       == loop_vectorized_call);
  /* If we are going to vectorize outer loop, prevent vectorization
     of the inner loop in the scalar loop - either the scalar loop is
     thrown away, so it is a wasted work, or is used only for
     versioning.  */
  if (scalar_loop->inner)
    {
      gimple *g = vect_loop_vectorized_call (scalar_loop->inner);
      if (g)
	{
	  arg = gimple_call_arg (g, 0);
	  get_loop (cfun, tree_to_shwi (arg))->dont_vectorize = true;
	  fold_loop_internal_call (g, boolean_false_node);
	}
    }
  bbs = get_loop_body (scalar_loop);
  for (i = 0; i < scalar_loop->num_nodes; i++)
    {
      basic_block bb = bbs[i];
      gimple_stmt_iterator gsi;
      for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple *phi = gsi_stmt (gsi);
	  gimple_set_uid (phi, 0);
	}
      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple *stmt = gsi_stmt (gsi);
	  gimple_set_uid (stmt, 0);
	}
    }
  free (bbs);
}
/* Try to vectorize LOOP.  */

static unsigned
try_vectorize_loop_1 (hash_table<simduid_to_vf> *&simduid_to_vf_htab,
		      unsigned *num_vectorized_loops, loop_p loop,
		      gimple *loop_vectorized_call,
		      gimple *loop_dist_alias_call)
{
  unsigned ret = 0;
  vec_info_shared shared;
  auto_purge_vect_location sentinel;
  vect_location = find_loop_location (loop);

  if (LOCATION_LOCUS (vect_location.get_location_t ()) != UNKNOWN_LOCATION
      && dump_enabled_p ())
    dump_printf (MSG_NOTE | MSG_PRIORITY_INTERNALS,
		 "\nAnalyzing loop at %s:%d\n",
		 LOCATION_FILE (vect_location.get_location_t ()),
		 LOCATION_LINE (vect_location.get_location_t ()));

  opt_loop_vec_info loop_vinfo = opt_loop_vec_info::success (NULL);
  /* In the case of epilogue vectorization the loop already has its
     loop_vec_info set, we do not require to analyze the loop in this case.  */
  if (loop_vec_info vinfo = loop_vec_info_for_loop (loop))
    loop_vinfo = opt_loop_vec_info::success (vinfo);
  else
    {
      /* Try to analyze the loop, retaining an opt_problem if dump_enabled_p.  */
      loop_vinfo = vect_analyze_loop (loop, &shared);
      loop->aux = loop_vinfo;
    }

  if (!loop_vinfo)
    if (dump_enabled_p ())
      if (opt_problem *problem = loop_vinfo.get_problem ())
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "couldn't vectorize loop\n");
	  problem->emit_and_clear ();
	}

  if (!loop_vinfo || !LOOP_VINFO_VECTORIZABLE_P (loop_vinfo))
    {
      /* Free existing information if loop is analyzed with some
	 assumptions.  */
      if (loop_constraint_set_p (loop, LOOP_C_FINITE))
	vect_free_loop_info_assumptions (loop);

      /* If we applied if-conversion then try to vectorize the
	 BB of innermost loops.
	 ??? Ideally BB vectorization would learn to vectorize
	 control flow by applying if-conversion on-the-fly, the
	 following retains the if-converted loop body even when
	 only non-if-converted parts took part in BB vectorization.  */
      if (flag_tree_slp_vectorize != 0
	  && loop_vectorized_call
	  && ! loop->inner)
	{
	  basic_block bb = loop->header;
	  bool require_loop_vectorize = false;
	  for (gimple_stmt_iterator gsi = gsi_start_bb (bb);
	       !gsi_end_p (gsi); gsi_next (&gsi))
	    {
	      gimple *stmt = gsi_stmt (gsi);
	      gcall *call = dyn_cast <gcall *> (stmt);
	      if (call && gimple_call_internal_p (call))
		{
		  internal_fn ifn = gimple_call_internal_fn (call);
		  if (ifn == IFN_MASK_LOAD || ifn == IFN_MASK_STORE
		      /* Don't keep the if-converted parts when the ifn with
			 specific type is not supported by the backend.  */
		      || (direct_internal_fn_p (ifn)
			  && !direct_internal_fn_supported_p
			       (call, OPTIMIZE_FOR_SPEED)))
		    {
		      require_loop_vectorize = true;
		      break;
		    }
		}
	      gimple_set_uid (stmt, -1);
	      gimple_set_visited (stmt, false);
	    }
	  if (!require_loop_vectorize && vect_slp_bb (bb))
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_NOTE, vect_location,
				 "basic block vectorized\n");
	      fold_loop_internal_call (loop_vectorized_call,
				       boolean_true_node);
	      loop_vectorized_call = NULL;
	      ret |= TODO_cleanup_cfg | TODO_update_ssa_only_virtuals;
	    }
	}
      /* If outer loop vectorization fails for LOOP_VECTORIZED guarded
	 loop, don't vectorize its inner loop; we'll attempt to
	 vectorize LOOP_VECTORIZED guarded inner loop of the scalar
	 loop version.  */
      if (loop_vectorized_call && loop->inner)
	loop->inner->dont_vectorize = true;
      return ret;
    }

  if (!dbg_cnt (vect_loop))
    {
      /* Free existing information if loop is analyzed with some
	 assumptions.  */
      if (loop_constraint_set_p (loop, LOOP_C_FINITE))
	vect_free_loop_info_assumptions (loop);
      return ret;
    }

  if (loop_vectorized_call)
    set_uid_loop_bbs (loop_vinfo, loop_vectorized_call);

  unsigned HOST_WIDE_INT bytes;
  if (dump_enabled_p ())
    {
      if (GET_MODE_SIZE (loop_vinfo->vector_mode).is_constant (&bytes))
	dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
			 "loop vectorized using %wu byte vectors\n", bytes);
      else
	dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
			 "loop vectorized using variable length vectors\n");
    }

  loop_p new_loop = vect_transform_loop (loop_vinfo,
					 loop_vectorized_call);
  (*num_vectorized_loops)++;
  /* Now that the loop has been vectorized, allow it to be unrolled
     etc.  */
  loop->force_vectorize = false;

  if (loop->simduid)
    {
      simduid_to_vf *simduid_to_vf_data = XNEW (simduid_to_vf);
      if (!simduid_to_vf_htab)
	simduid_to_vf_htab = new hash_table<simduid_to_vf> (15);
      simduid_to_vf_data->simduid = DECL_UID (loop->simduid);
      simduid_to_vf_data->vf = loop_vinfo->vectorization_factor;
      *simduid_to_vf_htab->find_slot (simduid_to_vf_data, INSERT)
	= simduid_to_vf_data;
    }

  if (loop_vectorized_call)
    {
      fold_loop_internal_call (loop_vectorized_call, boolean_true_node);
      loop_vectorized_call = NULL;
      ret |= TODO_cleanup_cfg;
    }
  if (loop_dist_alias_call)
    {
      tree value = gimple_call_arg (loop_dist_alias_call, 1);
      fold_loop_internal_call (loop_dist_alias_call, value);
      loop_dist_alias_call = NULL;
      ret |= TODO_cleanup_cfg;
    }

  /* Epilogue of vectorized loop must be vectorized too.  */
  if (new_loop)
    {
      /* Don't include vectorized epilogues in the "vectorized loops"
	 count.  */
      unsigned dont_count = *num_vectorized_loops;
      ret |= try_vectorize_loop_1 (simduid_to_vf_htab, &dont_count,
				   new_loop, NULL, NULL);
    }

  return ret;
}
/* Try to vectorize LOOP.  */

static unsigned
try_vectorize_loop (hash_table<simduid_to_vf> *&simduid_to_vf_htab,
		    unsigned *num_vectorized_loops, loop_p loop)
{
  if (!((flag_tree_loop_vectorize
	 && optimize_loop_nest_for_speed_p (loop))
	|| loop->force_vectorize))
    return 0;

  return try_vectorize_loop_1 (simduid_to_vf_htab, num_vectorized_loops, loop,
			       vect_loop_vectorized_call (loop),
			       vect_loop_dist_alias_call (loop));
}
/* Function vectorize_loops.

   Entry point to loop vectorization phase.  */

unsigned
vectorize_loops (void)
{
  unsigned int i;
  unsigned int num_vectorized_loops = 0;
  unsigned int vect_loops_num;
  class loop *loop;
  hash_table<simduid_to_vf> *simduid_to_vf_htab = NULL;
  hash_table<simd_array_to_simduid> *simd_array_to_simduid_htab = NULL;
  bool any_ifcvt_loops = false;
  unsigned ret = 0;

  vect_loops_num = number_of_loops (cfun);

  /* Bail out if there are no loops.  */
  if (vect_loops_num <= 1)
    return 0;

  if (cfun->has_simduid_loops)
    note_simd_array_uses (&simd_array_to_simduid_htab);

  /* ----------- Analyze loops. -----------  */

  /* If some loop was duplicated, it gets bigger number
     than all previously defined loops.  This fact allows us to run
     only over initial loops skipping newly generated ones.  */
  FOR_EACH_LOOP (loop, 0)
    if (loop->dont_vectorize)
      {
	any_ifcvt_loops = true;
	/* If-conversion sometimes versions both the outer loop
	   (for the case when outer loop vectorization might be
	   desirable) as well as the inner loop in the scalar version
	   of the loop.  So we have:
	    if (LOOP_VECTORIZED (1, 3))
	      {
		loop1
		  loop2
	      }
	    else
	      loop3 (copy of loop1)
		if (LOOP_VECTORIZED (4, 5))
		  loop4 (copy of loop2)
		else
		  loop5 (copy of loop4)
	   If FOR_EACH_LOOP gives us loop3 first (which has
	   dont_vectorize set), make sure to process loop1 before loop4;
	   so that we can prevent vectorization of loop4 if loop1
	   is successfully vectorized.  */
	if (loop->inner)
	  {
	    gimple *loop_vectorized_call
	      = vect_loop_vectorized_call (loop);
	    if (loop_vectorized_call
		&& vect_loop_vectorized_call (loop->inner))
	      {
		tree arg = gimple_call_arg (loop_vectorized_call, 0);
		class loop *vector_loop
		  = get_loop (cfun, tree_to_shwi (arg));
		if (vector_loop && vector_loop != loop)
		  {
		    /* Make sure we don't vectorize it twice.  */
		    vector_loop->dont_vectorize = true;
		    ret |= try_vectorize_loop (simduid_to_vf_htab,
					       &num_vectorized_loops,
					       vector_loop);
		  }
	      }
	  }
      }
    else
      ret |= try_vectorize_loop (simduid_to_vf_htab, &num_vectorized_loops,
				 loop);

  vect_location = dump_user_location_t ();

  statistics_counter_event (cfun, "Vectorized loops", num_vectorized_loops);
  if (dump_enabled_p ()
      || (num_vectorized_loops > 0 && dump_enabled_p ()))
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vectorized %u loops in function.\n",
		     num_vectorized_loops);

  /* ----------- Finalize. -----------  */

  if (any_ifcvt_loops)
    for (i = 1; i < number_of_loops (cfun); i++)
      {
	loop = get_loop (cfun, i);
	if (loop && loop->dont_vectorize)
	  {
	    gimple *g = vect_loop_vectorized_call (loop);
	    if (g)
	      {
		fold_loop_internal_call (g, boolean_false_node);
		ret |= TODO_cleanup_cfg;
		g = NULL;
	      }
	    else
	      g = vect_loop_dist_alias_call (loop);

	    if (g)
	      {
		fold_loop_internal_call (g, boolean_false_node);
		ret |= TODO_cleanup_cfg;
	      }
	  }
      }

  for (i = 1; i < number_of_loops (cfun); i++)
    {
      loop_vec_info loop_vinfo;
      bool has_mask_store;

      loop = get_loop (cfun, i);
      if (!loop || !loop->aux)
	continue;
      loop_vinfo = (loop_vec_info) loop->aux;
      has_mask_store = LOOP_VINFO_HAS_MASK_STORE (loop_vinfo);
      delete loop_vinfo;
      if (has_mask_store
	  && targetm.vectorize.empty_mask_is_expensive (IFN_MASK_STORE))
	optimize_mask_stores (loop);
      loop->aux = NULL;
    }

  /* Fold IFN_GOMP_SIMD_{VF,LANE,LAST_LANE,ORDERED_{START,END}} builtins.  */
  if (cfun->has_simduid_loops)
    adjust_simduid_builtins (simduid_to_vf_htab);

  /* Shrink any "omp array simd" temporary arrays to the
     actual vectorization factors.  */
  if (simd_array_to_simduid_htab)
    shrink_simd_arrays (simd_array_to_simduid_htab, simduid_to_vf_htab);
  delete simduid_to_vf_htab;
  cfun->has_simduid_loops = false;

  if (num_vectorized_loops > 0)
    {
      /* If we vectorized any loop only virtual SSA form needs to be updated.
	 ??? Also while we try hard to update loop-closed SSA form we fail
	 to properly do this in some corner-cases (see PR56286).  */
      rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa_only_virtuals);
      return TODO_cleanup_cfg;
    }

  return ret;
}
/* Entry point to the simduid cleanup pass.  */

namespace {

const pass_data pass_data_simduid_cleanup =
{
  GIMPLE_PASS, /* type */
  "simduid", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_NONE, /* tv_id */
  ( PROP_ssa | PROP_cfg ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_simduid_cleanup : public gimple_opt_pass
{
public:
  pass_simduid_cleanup (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_simduid_cleanup, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_simduid_cleanup (m_ctxt); }
  virtual bool gate (function *fun) { return fun->has_simduid_loops; }
  virtual unsigned int execute (function *);

}; // class pass_simduid_cleanup
unsigned int
pass_simduid_cleanup::execute (function *fun)
{
  hash_table<simd_array_to_simduid> *simd_array_to_simduid_htab = NULL;

  note_simd_array_uses (&simd_array_to_simduid_htab);

  /* Fold IFN_GOMP_SIMD_{VF,LANE,LAST_LANE,ORDERED_{START,END}} builtins.  */
  adjust_simduid_builtins (NULL);

  /* Shrink any "omp array simd" temporary arrays to the
     actual vectorization factors.  */
  if (simd_array_to_simduid_htab)
    shrink_simd_arrays (simd_array_to_simduid_htab, NULL);
  fun->has_simduid_loops = false;
  return 0;
}

}  // anon namespace

gimple_opt_pass *
make_pass_simduid_cleanup (gcc::context *ctxt)
{
  return new pass_simduid_cleanup (ctxt);
}
/* Entry point to basic block SLP phase.  */

namespace {

const pass_data pass_data_slp_vectorize =
{
  GIMPLE_PASS, /* type */
  "slp", /* name */
  OPTGROUP_LOOP | OPTGROUP_VEC, /* optinfo_flags */
  TV_TREE_SLP_VECTORIZATION, /* tv_id */
  ( PROP_ssa | PROP_cfg ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_update_ssa, /* todo_flags_finish */
};

class pass_slp_vectorize : public gimple_opt_pass
{
public:
  pass_slp_vectorize (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_slp_vectorize, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_slp_vectorize (m_ctxt); }
  virtual bool gate (function *) { return flag_tree_slp_vectorize != 0; }
  virtual unsigned int execute (function *);

}; // class pass_slp_vectorize
unsigned int
pass_slp_vectorize::execute (function *fun)
{
  auto_purge_vect_location sentinel;
  basic_block bb;

  bool in_loop_pipeline = scev_initialized_p ();
  if (!in_loop_pipeline)
    {
      loop_optimizer_init (LOOPS_NORMAL);
      scev_initialize ();
    }

  /* Mark all stmts as not belonging to the current region and unvisited.  */
  FOR_EACH_BB_FN (bb, fun)
    {
      for (gimple_stmt_iterator gsi = gsi_start_bb (bb); !gsi_end_p (gsi);
	   gsi_next (&gsi))
	{
	  gimple *stmt = gsi_stmt (gsi);
	  gimple_set_uid (stmt, -1);
	  gimple_set_visited (stmt, false);
	}
    }

  FOR_EACH_BB_FN (bb, fun)
    {
      if (vect_slp_bb (bb))
	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location, "basic block vectorized\n");
    }

  if (!in_loop_pipeline)
    {
      scev_finalize ();
      loop_optimizer_finalize ();
    }

  return 0;
}

}  // anon namespace

gimple_opt_pass *
make_pass_slp_vectorize (gcc::context *ctxt)
{
  return new pass_slp_vectorize (ctxt);
}
/* Increase alignment of global arrays to improve vectorization potential.
   TODO:
     - Consider also structs that have an array field.
     - Use ipa analysis to prune arrays that can't be vectorized?
       This should involve global alignment analysis and in the future also
       array padding.  */

static unsigned get_vec_alignment_for_type (tree);
static hash_map<tree, unsigned> *type_align_map;
/* Return alignment of array's vector type corresponding to scalar type.
   0 if no vector type exists.  */
static unsigned
get_vec_alignment_for_array_type (tree type)
{
  gcc_assert (TREE_CODE (type) == ARRAY_TYPE);
  poly_uint64 array_size, vector_size;

  tree scalar_type = strip_array_types (type);
  tree vectype = get_related_vectype_for_scalar_type (VOIDmode, scalar_type);
  if (!vectype
      || !poly_int_tree_p (TYPE_SIZE (type), &array_size)
      || !poly_int_tree_p (TYPE_SIZE (vectype), &vector_size)
      || maybe_lt (array_size, vector_size))
    return 0;

  return TYPE_ALIGN (vectype);
}
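
/* Example (assuming a target whose preferred vector type for 'int' is a
   16-byte vector): for 'int a[32]', array_size is 1024 bits and
   vector_size is 128 bits, so the function returns
   TYPE_ALIGN (vectype) == 128.  For 'int b[2]' the array (64 bits) is
   smaller than one vector and the result is 0.  */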
/* Return alignment of field having maximum alignment of vector type
   corresponding to its scalar type.  For now, we only consider fields whose
   offset is a multiple of its vector alignment.
   0 if no suitable field is found.  */
static unsigned
get_vec_alignment_for_record_type (tree type)
{
  gcc_assert (TREE_CODE (type) == RECORD_TYPE);

  unsigned max_align = 0, alignment;
  HOST_WIDE_INT offset;
  tree offset_tree;

  if (TYPE_PACKED (type))
    return 0;

  unsigned *slot = type_align_map->get (type);
  if (slot)
    return *slot;

  for (tree field = first_field (type);
       field != NULL_TREE;
       field = DECL_CHAIN (field))
    {
      /* Skip if not FIELD_DECL or if alignment is set by user.  */
      if (TREE_CODE (field) != FIELD_DECL
	  || DECL_USER_ALIGN (field)
	  || DECL_ARTIFICIAL (field))
	continue;

      /* We don't need to process the type further if offset is variable,
	 since the offsets of remaining members will also be variable.  */
      if (TREE_CODE (DECL_FIELD_OFFSET (field)) != INTEGER_CST
	  || TREE_CODE (DECL_FIELD_BIT_OFFSET (field)) != INTEGER_CST)
	break;

      /* Similarly stop processing the type if offset_tree
	 does not fit in unsigned HOST_WIDE_INT.  */
      offset_tree = bit_position (field);
      if (!tree_fits_uhwi_p (offset_tree))
	break;

      offset = tree_to_uhwi (offset_tree);
      alignment = get_vec_alignment_for_type (TREE_TYPE (field));

      /* Get maximum alignment of vectorized field/array among those members
	 whose offset is multiple of the vector alignment.  */
      if (alignment
	  && (offset % alignment == 0)
	  && (alignment > max_align))
	max_align = alignment;
    }

  type_align_map->put (type, max_align);
  return max_align;
}
/* Return alignment of vector type corresponding to decl's scalar type
   or 0 if it doesn't exist or the vector alignment is less than
   decl's alignment.  */
static unsigned
get_vec_alignment_for_type (tree type)
{
  if (type == NULL_TREE)
    return 0;

  gcc_assert (TYPE_P (type));

  static unsigned alignment = 0;
  switch (TREE_CODE (type))
    {
    case ARRAY_TYPE:
      alignment = get_vec_alignment_for_array_type (type);
      break;
    case RECORD_TYPE:
      alignment = get_vec_alignment_for_record_type (type);
      break;
    default:
      alignment = 0;
      break;
    }

  return (alignment > TYPE_ALIGN (type)) ? alignment : 0;
}
/* Entry point to increase_alignment pass.  */
static unsigned int
increase_alignment (void)
{
  varpool_node *vnode;

  vect_location = dump_user_location_t ();
  type_align_map = new hash_map<tree, unsigned>;

  /* Increase the alignment of all global arrays for vectorization.  */
  FOR_EACH_DEFINED_VARIABLE (vnode)
    {
      tree decl = vnode->decl;
      unsigned int alignment;

      if ((decl_in_symtab_p (decl)
	   && !symtab_node::get (decl)->can_increase_alignment_p ())
	  || DECL_USER_ALIGN (decl) || DECL_ARTIFICIAL (decl))
	continue;

      alignment = get_vec_alignment_for_type (TREE_TYPE (decl));
      if (alignment && vect_can_force_dr_alignment_p (decl, alignment))
	{
	  vnode->increase_alignment (alignment);
	  if (dump_enabled_p ())
	    dump_printf (MSG_NOTE, "Increasing alignment of decl: %T\n", decl);
	}
    }

  delete type_align_map;
  return 0;
}
namespace {

const pass_data pass_data_ipa_increase_alignment =
{
  SIMPLE_IPA_PASS, /* type */
  "increase_alignment", /* name */
  OPTGROUP_LOOP | OPTGROUP_VEC, /* optinfo_flags */
  TV_IPA_OPT, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_ipa_increase_alignment : public simple_ipa_opt_pass
{
public:
  pass_ipa_increase_alignment (gcc::context *ctxt)
    : simple_ipa_opt_pass (pass_data_ipa_increase_alignment, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *)
  {
    return flag_section_anchors && flag_tree_loop_vectorize;
  }

  virtual unsigned int execute (function *) { return increase_alignment (); }

}; // class pass_ipa_increase_alignment

} // anon namespace

simple_ipa_opt_pass *
make_pass_ipa_increase_alignment (gcc::context *ctxt)
{
  return new pass_ipa_increase_alignment (ctxt);
}
/* If the condition represented by T is a comparison or the SSA name
   result of a comparison, extract the comparison's operands.  Represent
   T as NE_EXPR <T, 0> otherwise.  */

void
scalar_cond_masked_key::get_cond_ops_from_tree (tree t)
{
  if (TREE_CODE_CLASS (TREE_CODE (t)) == tcc_comparison)
    {
      this->code = TREE_CODE (t);
      this->op0 = TREE_OPERAND (t, 0);
      this->op1 = TREE_OPERAND (t, 1);
      return;
    }

  if (TREE_CODE (t) == SSA_NAME)
    if (gassign *stmt = dyn_cast <gassign *> (SSA_NAME_DEF_STMT (t)))
      {
	tree_code code = gimple_assign_rhs_code (stmt);
	if (TREE_CODE_CLASS (code) == tcc_comparison)
	  {
	    this->code = code;
	    this->op0 = gimple_assign_rhs1 (stmt);
	    this->op1 = gimple_assign_rhs2 (stmt);
	    return;
	  }
      }

  this->code = NE_EXPR;
  this->op0 = t;
  this->op1 = build_zero_cst (TREE_TYPE (t));
}