/* Vectorizer
   Copyright (C) 2003-2020 Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* Loop and basic block vectorizer.

  This file contains drivers for the three vectorizers:
  (1) loop vectorizer (inter-iteration parallelism),
  (2) loop-aware SLP (intra-iteration parallelism) (invoked by the loop
      vectorizer)
  (3) BB vectorizer (out-of-loops), aka SLP

  The rest of the vectorizer's code is organized as follows:
  - tree-vect-loop.c - loop specific parts such as reductions, etc.  These are
    used by drivers (1) and (2).
  - tree-vect-loop-manip.c - vectorizer's loop control-flow utilities, used by
    drivers (1) and (2).
  - tree-vect-slp.c - BB vectorization specific analysis and transformation,
    used by drivers (2) and (3).
  - tree-vect-stmts.c - statements analysis and transformation (used by all).
  - tree-vect-data-refs.c - vectorizer specific data-refs analysis and
    manipulations (used by all).
  - tree-vect-patterns.c - vectorizable code patterns detector (used by all)
  Here's a poor attempt at illustrating that:

     tree-vectorizer.c:
     loop_vect ()   loop_aware_slp ()   slp_vect ()
          |               |                 |
          +--- tree-vect-loop.c    tree-vect-slp.c
          |               \                /
          +--- tree-vect-stmts.c   tree-vect-data-refs.c  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tree-pass.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "gimple-iterator.h"
#include "gimple-walk.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop-niter.h"
#include "tree-vectorizer.h"
#include "tree-ssa-propagate.h"
#include "tree-scalar-evolution.h"
#include "stringpool.h"
#include "gimple-pretty-print.h"
#include "opt-problem.h"
#include "internal-fn.h"
/* Loop or bb location, with hotness information.  */
dump_user_location_t vect_location;

/* auto_purge_vect_location's dtor: reset the vect_location
   global, to avoid stale location_t values that could reference
   GC-freed blocks.  */

auto_purge_vect_location::~auto_purge_vect_location ()
{
  vect_location = dump_user_location_t ();
}
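
/* A minimal usage sketch (illustrative only, mirroring the use in
   try_vectorize_loop_1 below): declaring the sentinel on the stack
   guarantees that the global is reset on every exit path:

       {
	 auto_purge_vect_location sentinel;
	 vect_location = find_loop_location (loop);
	 ...
       }    <-- vect_location reset here by the dtor  */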
/* Dump a cost entry according to args to F.  */

void
dump_stmt_cost (FILE *f, void *data, int count, enum vect_cost_for_stmt kind,
		stmt_vec_info stmt_info, tree, int misalign, unsigned cost,
		enum vect_cost_model_location where)
{
  fprintf (f, "%p ", data);
  if (stmt_info)
    print_gimple_expr (f, STMT_VINFO_STMT (stmt_info), 0, TDF_SLIM);
  else
    fprintf (f, "<unknown> ");
  fprintf (f, "%d times ", count);
  const char *ks = "unknown";
  switch (kind)
    {
    case vector_gather_load:
      ks = "vector_gather_load";
      break;
    case unaligned_load:
      ks = "unaligned_load";
      break;
    case unaligned_store:
      ks = "unaligned_store";
      break;
    case vector_scatter_store:
      ks = "vector_scatter_store";
      break;
    case vec_to_scalar:
      ks = "vec_to_scalar";
      break;
    case scalar_to_vec:
      ks = "scalar_to_vec";
      break;
    case cond_branch_not_taken:
      ks = "cond_branch_not_taken";
      break;
    case cond_branch_taken:
      ks = "cond_branch_taken";
      break;
    case vec_promote_demote:
      ks = "vec_promote_demote";
      break;
    case vec_construct:
      ks = "vec_construct";
      break;
    default:
      break;
    }
  fprintf (f, "%s ", ks);
  if (kind == unaligned_load || kind == unaligned_store)
    fprintf (f, "(misalign %d) ", misalign);
  fprintf (f, "costs %u ", cost);
  const char *ws = "unknown";
  switch (where)
    {
    case vect_prologue:
      ws = "prologue";
      break;
    case vect_body:
      ws = "body";
      break;
    case vect_epilogue:
      ws = "epilogue";
      break;
    }
  fprintf (f, "in %s\n", ws);
}
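
/* For illustration, a single entry dumped by the function above might
   look like this (hypothetical pointer, statement and counts):

       0x2a5f3c0 a[i_5] 2 times unaligned_load (misalign 4) costs 2 in body
*/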
/* For mapping simduid to vectorization factor.  */

class simduid_to_vf : public free_ptr_hash<simduid_to_vf>
{
public:
  unsigned int simduid;
  poly_uint64 vf;

  /* hash_table support.  */
  static inline hashval_t hash (const simduid_to_vf *);
  static inline int equal (const simduid_to_vf *, const simduid_to_vf *);
};

inline hashval_t
simduid_to_vf::hash (const simduid_to_vf *p)
{
  return p->simduid;
}

inline int
simduid_to_vf::equal (const simduid_to_vf *p1, const simduid_to_vf *p2)
{
  return p1->simduid == p2->simduid;
}
/* This hash maps the OMP simd array to the corresponding simduid used
   to index into it.  Like thus,

        _7 = GOMP_SIMD_LANE (simduid.0)
        ...
        ...
        D.1737[_7] = stuff;

   This hash maps from the OMP simd array (D.1737[]) to DECL_UID of
   simduid.0.  */

struct simd_array_to_simduid : free_ptr_hash<simd_array_to_simduid>
{
  tree decl;
  unsigned int simduid;

  /* hash_table support.  */
  static inline hashval_t hash (const simd_array_to_simduid *);
  static inline int equal (const simd_array_to_simduid *,
			   const simd_array_to_simduid *);
};

inline hashval_t
simd_array_to_simduid::hash (const simd_array_to_simduid *p)
{
  return DECL_UID (p->decl);
}

inline int
simd_array_to_simduid::equal (const simd_array_to_simduid *p1,
			      const simd_array_to_simduid *p2)
{
  return p1->decl == p2->decl;
}
/* Fold IFN_GOMP_SIMD_LANE, IFN_GOMP_SIMD_VF and IFN_GOMP_SIMD_LAST_LANE
   into their corresponding constants and remove
   IFN_GOMP_SIMD_ORDERED_{START,END}.  */

static void
adjust_simduid_builtins (hash_table<simduid_to_vf> *htab)
{
  basic_block bb;

  FOR_EACH_BB_FN (bb, cfun)
    {
      gimple_stmt_iterator i;

      for (i = gsi_start_bb (bb); !gsi_end_p (i); )
	{
	  poly_uint64 vf = 1;
	  enum internal_fn ifn;
	  gimple *stmt = gsi_stmt (i);
	  tree t;
	  if (!is_gimple_call (stmt)
	      || !gimple_call_internal_p (stmt))
	    {
	      gsi_next (&i);
	      continue;
	    }
	  ifn = gimple_call_internal_fn (stmt);
	  switch (ifn)
	    {
	    case IFN_GOMP_SIMD_LANE:
	    case IFN_GOMP_SIMD_VF:
	    case IFN_GOMP_SIMD_LAST_LANE:
	      break;
	    case IFN_GOMP_SIMD_ORDERED_START:
	    case IFN_GOMP_SIMD_ORDERED_END:
	      if (integer_onep (gimple_call_arg (stmt, 0)))
		{
		  enum built_in_function bcode
		    = (ifn == IFN_GOMP_SIMD_ORDERED_START
		       ? BUILT_IN_GOMP_ORDERED_START
		       : BUILT_IN_GOMP_ORDERED_END);
		  gimple *g
		    = gimple_build_call (builtin_decl_explicit (bcode), 0);
		  gimple_move_vops (g, stmt);
		  gsi_replace (&i, g, true);
		  continue;
		}
	      gsi_remove (&i, true);
	      unlink_stmt_vdef (stmt);
	      continue;
	    default:
	      gsi_next (&i);
	      continue;
	    }
	  tree arg = gimple_call_arg (stmt, 0);
	  gcc_assert (arg != NULL_TREE);
	  gcc_assert (TREE_CODE (arg) == SSA_NAME);
	  simduid_to_vf *p = NULL, data;
	  data.simduid = DECL_UID (SSA_NAME_VAR (arg));
	  /* Need to nullify loop safelen field since its value is not
	     valid after transformation.  */
	  if (bb->loop_father && bb->loop_father->safelen > 0)
	    bb->loop_father->safelen = 0;
	  if (htab)
	    {
	      p = htab->find (&data);
	      if (p)
		vf = p->vf;
	    }
	  switch (ifn)
	    {
	    case IFN_GOMP_SIMD_VF:
	      t = build_int_cst (unsigned_type_node, vf);
	      break;
	    case IFN_GOMP_SIMD_LANE:
	      t = build_int_cst (unsigned_type_node, 0);
	      break;
	    case IFN_GOMP_SIMD_LAST_LANE:
	      t = gimple_call_arg (stmt, 1);
	      break;
	    default:
	      gcc_unreachable ();
	    }
	  tree lhs = gimple_call_lhs (stmt);
	  if (lhs)
	    replace_uses_by (lhs, t);
	  release_defs (stmt);
	  gsi_remove (&i, true);
	}
    }
}
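
/* For illustration, assuming a simd loop with simduid.0 that was
   vectorized with VF 4, the folding above performs (hypothetical IL):

       _7 = GOMP_SIMD_VF (simduid.0);    -->  uses of _7 replaced by 4
       _8 = GOMP_SIMD_LANE (simduid.0);  -->  uses of _8 replaced by 0

   while GOMP_SIMD_ORDERED_{START,END} calls are either rewritten to
   the real GOMP_ordered_{start,end} builtins (when their argument is
   one) or simply removed.  */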
/* Helper structure for note_simd_array_uses.  */

struct note_simd_array_uses_struct
{
  hash_table<simd_array_to_simduid> **htab;
  unsigned int simduid;
};
/* Callback for note_simd_array_uses, called through walk_gimple_op.  */

static tree
note_simd_array_uses_cb (tree *tp, int *walk_subtrees, void *data)
{
  struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
  struct note_simd_array_uses_struct *ns
    = (struct note_simd_array_uses_struct *) wi->info;

  if (TYPE_P (*tp))
    *walk_subtrees = 0;
  else if (VAR_P (*tp)
	   && lookup_attribute ("omp simd array", DECL_ATTRIBUTES (*tp))
	   && DECL_CONTEXT (*tp) == current_function_decl)
    {
      simd_array_to_simduid data;
      if (!*ns->htab)
	*ns->htab = new hash_table<simd_array_to_simduid> (15);
      data.decl = *tp;
      data.simduid = ns->simduid;
      simd_array_to_simduid **slot = (*ns->htab)->find_slot (&data, INSERT);
      if (*slot == NULL)
	{
	  simd_array_to_simduid *p = XNEW (simd_array_to_simduid);
	  *p = data;
	  *slot = p;
	}
      else if ((*slot)->simduid != ns->simduid)
	(*slot)->simduid = -1U;
      *walk_subtrees = 0;
    }
  return NULL_TREE;
}
/* Find "omp simd array" temporaries and map them to the corresponding
   simduid.  */

static void
note_simd_array_uses (hash_table<simd_array_to_simduid> **htab)
{
  basic_block bb;
  gimple_stmt_iterator gsi;
  struct walk_stmt_info wi;
  struct note_simd_array_uses_struct ns;

  memset (&wi, 0, sizeof (wi));
  wi.info = &ns;
  ns.htab = htab;

  FOR_EACH_BB_FN (bb, cfun)
    for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
      {
	gimple *stmt = gsi_stmt (gsi);
	if (!is_gimple_call (stmt) || !gimple_call_internal_p (stmt))
	  continue;
	switch (gimple_call_internal_fn (stmt))
	  {
	  case IFN_GOMP_SIMD_LANE:
	  case IFN_GOMP_SIMD_VF:
	  case IFN_GOMP_SIMD_LAST_LANE:
	    break;
	  default:
	    continue;
	  }
	tree lhs = gimple_call_lhs (stmt);
	if (lhs == NULL_TREE)
	  continue;
	imm_use_iterator use_iter;
	gimple *use_stmt;
	ns.simduid = DECL_UID (SSA_NAME_VAR (gimple_call_arg (stmt, 0)));
	FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, lhs)
	  if (!is_gimple_debug (use_stmt))
	    walk_gimple_op (use_stmt, note_simd_array_uses_cb, &wi);
      }
}
/* Shrink arrays with "omp simd array" attribute to the corresponding
   vectorization factor.  */

static void
shrink_simd_arrays
  (hash_table<simd_array_to_simduid> *simd_array_to_simduid_htab,
   hash_table<simduid_to_vf> *simduid_to_vf_htab)
{
  for (hash_table<simd_array_to_simduid>::iterator iter
	 = simd_array_to_simduid_htab->begin ();
       iter != simd_array_to_simduid_htab->end (); ++iter)
    if ((*iter)->simduid != -1U)
      {
	tree decl = (*iter)->decl;
	poly_uint64 vf = 1;
	if (simduid_to_vf_htab)
	  {
	    simduid_to_vf *p = NULL, data;
	    data.simduid = (*iter)->simduid;
	    p = simduid_to_vf_htab->find (&data);
	    if (p)
	      vf = p->vf;
	  }
	tree atype
	  = build_array_type_nelts (TREE_TYPE (TREE_TYPE (decl)), vf);
	TREE_TYPE (decl) = atype;
	relayout_decl (decl);
      }

  delete simd_array_to_simduid_htab;
}
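
/* For illustration (hypothetical numbers): an "omp simd array"
   temporary declared as D.1737[16] whose simduid was recorded with a
   vectorization factor of 4 is re-typed above as D.1737[4] and then
   laid out again by relayout_decl.  */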
/* Initialize the vec_info with kind KIND_IN and target cost data
   TARGET_COST_DATA_IN.  */

vec_info::vec_info (vec_info::vec_kind kind_in, void *target_cost_data_in,
		    vec_info_shared *shared_)
  : kind (kind_in),
    shared (shared_),
    stmt_vec_info_ro (false),
    target_cost_data (target_cost_data_in)
{
  stmt_vec_infos.create (50);
}

vec_info::~vec_info ()
{
  slp_instance instance;
  unsigned int i;

  FOR_EACH_VEC_ELT (slp_instances, i, instance)
    vect_free_slp_instance (instance, true);

  destroy_cost_data (target_cost_data);
  free_stmt_vec_infos ();
}
vec_info_shared::vec_info_shared ()
  : datarefs (vNULL),
    datarefs_copy (vNULL),
    ddrs (vNULL)
{
}

vec_info_shared::~vec_info_shared ()
{
  free_data_refs (datarefs);
  free_dependence_relations (ddrs);
  datarefs_copy.release ();
}

void
vec_info_shared::save_datarefs ()
{
  if (!flag_checking)
    return;
  datarefs_copy.reserve_exact (datarefs.length ());
  for (unsigned i = 0; i < datarefs.length (); ++i)
    datarefs_copy.quick_push (*datarefs[i]);
}

void
vec_info_shared::check_datarefs ()
{
  if (!flag_checking)
    return;
  gcc_assert (datarefs.length () == datarefs_copy.length ());
  for (unsigned i = 0; i < datarefs.length (); ++i)
    if (memcmp (&datarefs_copy[i], datarefs[i], sizeof (data_reference)) != 0)
      gcc_unreachable ();
}
/* Record that STMT belongs to the vectorizable region.  Create and return
   an associated stmt_vec_info.  */

stmt_vec_info
vec_info::add_stmt (gimple *stmt)
{
  stmt_vec_info res = new_stmt_vec_info (stmt);
  set_vinfo_for_stmt (stmt, res);
  return res;
}
/* If STMT has an associated stmt_vec_info, return that vec_info, otherwise
   return null.  It is safe to call this function on any statement, even if
   it might not be part of the vectorizable region.  */

stmt_vec_info
vec_info::lookup_stmt (gimple *stmt)
{
  unsigned int uid = gimple_uid (stmt);
  if (uid > 0 && uid - 1 < stmt_vec_infos.length ())
    {
      stmt_vec_info res = stmt_vec_infos[uid - 1];
      if (res && res->stmt == stmt)
	return res;
    }
  return NULL;
}
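
/* For illustration (hypothetical uids): a statement with gimple_uid 3
   maps to stmt_vec_infos[2]; uid 0 means the statement was never
   entered into the table, so the lookup safely returns null.  */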
/* If NAME is an SSA_NAME and its definition has an associated stmt_vec_info,
   return that stmt_vec_info, otherwise return null.  It is safe to call
   this on arbitrary operands.  */

stmt_vec_info
vec_info::lookup_def (tree name)
{
  if (TREE_CODE (name) == SSA_NAME
      && !SSA_NAME_IS_DEFAULT_DEF (name))
    return lookup_stmt (SSA_NAME_DEF_STMT (name));
  return NULL;
}
/* See whether there is a single non-debug statement that uses LHS and
   whether that statement has an associated stmt_vec_info.  Return the
   stmt_vec_info if so, otherwise return null.  */

stmt_vec_info
vec_info::lookup_single_use (tree lhs)
{
  use_operand_p dummy;
  gimple *use_stmt;
  if (single_imm_use (lhs, &dummy, &use_stmt))
    return lookup_stmt (use_stmt);
  return NULL;
}
/* Return vectorization information about DR.  */

dr_vec_info *
vec_info::lookup_dr (data_reference *dr)
{
  stmt_vec_info stmt_info = lookup_stmt (DR_STMT (dr));
  /* DR_STMT should never refer to a stmt in a pattern replacement.  */
  gcc_checking_assert (!is_pattern_stmt_p (stmt_info));
  return STMT_VINFO_DR_INFO (stmt_info->dr_aux.stmt);
}
/* Record that NEW_STMT_INFO now implements the same data reference
   as OLD_STMT_INFO.  */

void
vec_info::move_dr (stmt_vec_info new_stmt_info, stmt_vec_info old_stmt_info)
{
  gcc_assert (!is_pattern_stmt_p (old_stmt_info));
  STMT_VINFO_DR_INFO (old_stmt_info)->stmt = new_stmt_info;
  new_stmt_info->dr_aux = old_stmt_info->dr_aux;
  STMT_VINFO_DR_WRT_VEC_LOOP (new_stmt_info)
    = STMT_VINFO_DR_WRT_VEC_LOOP (old_stmt_info);
  STMT_VINFO_GATHER_SCATTER_P (new_stmt_info)
    = STMT_VINFO_GATHER_SCATTER_P (old_stmt_info);
}
/* Permanently remove the statement described by STMT_INFO from the
   function.  */

void
vec_info::remove_stmt (stmt_vec_info stmt_info)
{
  gcc_assert (!stmt_info->pattern_stmt_p);
  set_vinfo_for_stmt (stmt_info->stmt, NULL);
  gimple_stmt_iterator si = gsi_for_stmt (stmt_info->stmt);
  unlink_stmt_vdef (stmt_info->stmt);
  gsi_remove (&si, true);
  release_defs (stmt_info->stmt);
  free_stmt_vec_info (stmt_info);
}
/* Replace the statement at GSI by NEW_STMT, both the vectorization
   information and the function itself.  STMT_INFO describes the statement
   at GSI.  */

void
vec_info::replace_stmt (gimple_stmt_iterator *gsi, stmt_vec_info stmt_info,
			gimple *new_stmt)
{
  gimple *old_stmt = stmt_info->stmt;
  gcc_assert (!stmt_info->pattern_stmt_p && old_stmt == gsi_stmt (*gsi));
  gimple_set_uid (new_stmt, gimple_uid (old_stmt));
  stmt_info->stmt = new_stmt;
  gsi_replace (gsi, new_stmt, true);
}
/* Create and initialize a new stmt_vec_info struct for STMT.  */

stmt_vec_info
vec_info::new_stmt_vec_info (gimple *stmt)
{
  stmt_vec_info res = XCNEW (class _stmt_vec_info);
  res->stmt = stmt;

  STMT_VINFO_TYPE (res) = undef_vec_info_type;
  STMT_VINFO_RELEVANT (res) = vect_unused_in_scope;
  STMT_VINFO_VECTORIZABLE (res) = true;
  STMT_VINFO_REDUC_TYPE (res) = TREE_CODE_REDUCTION;
  STMT_VINFO_REDUC_CODE (res) = ERROR_MARK;
  STMT_VINFO_REDUC_FN (res) = IFN_LAST;
  STMT_VINFO_REDUC_IDX (res) = -1;
  STMT_VINFO_SLP_VECT_ONLY (res) = false;
  STMT_VINFO_VEC_STMTS (res) = vNULL;

  if (gimple_code (stmt) == GIMPLE_PHI
      && is_loop_header_bb_p (gimple_bb (stmt)))
    STMT_VINFO_DEF_TYPE (res) = vect_unknown_def_type;
  else
    STMT_VINFO_DEF_TYPE (res) = vect_internal_def;

  STMT_VINFO_SAME_ALIGN_REFS (res).create (0);
  STMT_SLP_TYPE (res) = loop_vect;

  /* This is really "uninitialized" until vect_compute_data_ref_alignment.  */
  res->dr_aux.misalignment = DR_MISALIGNMENT_UNINITIALIZED;

  return res;
}
/* Associate STMT with INFO.  */

void
vec_info::set_vinfo_for_stmt (gimple *stmt, stmt_vec_info info)
{
  unsigned int uid = gimple_uid (stmt);
  if (uid == 0)
    {
      gcc_assert (!stmt_vec_info_ro);
      gcc_checking_assert (info);
      uid = stmt_vec_infos.length () + 1;
      gimple_set_uid (stmt, uid);
      stmt_vec_infos.safe_push (info);
    }
  else
    {
      gcc_checking_assert (info == NULL);
      stmt_vec_infos[uid - 1] = info;
    }
}
/* Free the contents of stmt_vec_infos.  */

void
vec_info::free_stmt_vec_infos (void)
{
  unsigned int i;
  stmt_vec_info info;
  FOR_EACH_VEC_ELT (stmt_vec_infos, i, info)
    if (info != NULL)
      free_stmt_vec_info (info);
  stmt_vec_infos.release ();
}
/* Free STMT_INFO.  */

void
vec_info::free_stmt_vec_info (stmt_vec_info stmt_info)
{
  if (stmt_info->pattern_stmt_p)
    {
      gimple_set_bb (stmt_info->stmt, NULL);
      tree lhs = gimple_get_lhs (stmt_info->stmt);
      if (lhs && TREE_CODE (lhs) == SSA_NAME)
	release_ssa_name (lhs);
    }

  STMT_VINFO_SAME_ALIGN_REFS (stmt_info).release ();
  STMT_VINFO_SIMD_CLONE_INFO (stmt_info).release ();
  STMT_VINFO_VEC_STMTS (stmt_info).release ();
  free (stmt_info);
}
/* Returns true if S1 dominates S2.  */

bool
vect_stmt_dominates_stmt_p (gimple *s1, gimple *s2)
{
  basic_block bb1 = gimple_bb (s1), bb2 = gimple_bb (s2);

  /* If bb1 is NULL, it should be a GIMPLE_NOP def stmt of an (D)
     SSA_NAME.  Assume it lives at the beginning of function and
     thus dominates everything.  */
  if (!bb1 || s1 == s2)
    return true;

  /* If bb2 is NULL, it doesn't dominate any stmt with a bb.  */
  if (!bb2)
    return false;

  if (bb1 != bb2)
    return dominated_by_p (CDI_DOMINATORS, bb2, bb1);

  /* PHIs in the same basic block are assumed to be
     executed all in parallel, if only one stmt is a PHI,
     it dominates the other stmt in the same basic block.  */
  if (gimple_code (s1) == GIMPLE_PHI)
    return true;

  if (gimple_code (s2) == GIMPLE_PHI)
    return false;

  /* Inserted vectorized stmts all have UID 0 while the original stmts
     in the IL have UID increasing within a BB.  Walk from both sides
     until we find the other stmt or a stmt with UID != 0.  */
  gimple_stmt_iterator gsi1 = gsi_for_stmt (s1);
  while (gimple_uid (gsi_stmt (gsi1)) == 0)
    {
      gsi_next (&gsi1);
      if (gsi_end_p (gsi1))
	return false;
      if (gsi_stmt (gsi1) == s2)
	return true;
      if (gimple_uid (gsi_stmt (gsi1)) == -1u)
	return false;
    }

  gimple_stmt_iterator gsi2 = gsi_for_stmt (s2);
  while (gimple_uid (gsi_stmt (gsi2)) == 0)
    {
      gsi_prev (&gsi2);
      if (gsi_end_p (gsi2))
	return false;
      if (gsi_stmt (gsi2) == s1)
	return true;
      if (gimple_uid (gsi_stmt (gsi2)) == -1u)
	return false;
    }

  if (gimple_uid (gsi_stmt (gsi1)) <= gimple_uid (gsi_stmt (gsi2)))
    return true;
  return false;
}
/* A helper function to free scev and LOOP niter information, as well as
   clear loop constraint LOOP_C_FINITE.  */

void
vect_free_loop_info_assumptions (class loop *loop)
{
  scev_reset_htab ();
  /* We need to explicitly reset upper bound information since it is
     used even after free_numbers_of_iterations_estimates.  */
  loop->any_upper_bound = false;
  loop->any_likely_upper_bound = false;
  free_numbers_of_iterations_estimates (loop);
  loop_constraint_clear (loop, LOOP_C_FINITE);
}
/* If LOOP has been versioned during ifcvt, return the internal call
   guarding it.  */

gimple *
vect_loop_vectorized_call (class loop *loop, gcond **cond)
{
  basic_block bb = loop_preheader_edge (loop)->src;
  gimple *g;
  do
    {
      g = last_stmt (bb);
      if (g)
	break;
      if (!single_pred_p (bb))
	break;
      bb = single_pred (bb);
    }
  while (1);
  if (g && gimple_code (g) == GIMPLE_COND)
    {
      if (cond)
	*cond = as_a <gcond *> (g);
      gimple_stmt_iterator gsi = gsi_for_stmt (g);
      gsi_prev (&gsi);
      if (!gsi_end_p (gsi))
	{
	  g = gsi_stmt (gsi);
	  if (gimple_call_internal_p (g, IFN_LOOP_VECTORIZED)
	      && (tree_to_shwi (gimple_call_arg (g, 0)) == loop->num
		  || tree_to_shwi (gimple_call_arg (g, 1)) == loop->num))
	    return g;
	}
    }
  return NULL;
}
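
/* For illustration, if-conversion leaves a guard like the following
   (hypothetical loop numbers), which the walk above recognizes:

       if (LOOP_VECTORIZED (1, 2))
	 <loop 1>    <-- if-converted copy, to be vectorized
       else
	 <loop 2>    <-- scalar copy  */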
/* If LOOP has been versioned during loop distribution, return the guarding
   internal call.  */

gimple *
vect_loop_dist_alias_call (class loop *loop)
{
  basic_block bb;
  basic_block entry;
  class loop *outer, *orig;
  gimple_stmt_iterator gsi;
  gimple *g;

  if (loop->orig_loop_num == 0)
    return NULL;

  orig = get_loop (cfun, loop->orig_loop_num);
  if (orig == NULL)
    {
      /* The original loop is somehow destroyed.  Clear the information.  */
      loop->orig_loop_num = 0;
      return NULL;
    }

  if (loop != orig)
    bb = nearest_common_dominator (CDI_DOMINATORS, loop->header, orig->header);
  else
    bb = loop_preheader_edge (loop)->src;

  outer = bb->loop_father;
  entry = ENTRY_BLOCK_PTR_FOR_FN (cfun);

  /* Look upward in dominance tree.  */
  for (; bb != entry && flow_bb_inside_loop_p (outer, bb);
       bb = get_immediate_dominator (CDI_DOMINATORS, bb))
    {
      g = last_stmt (bb);
      if (g == NULL || gimple_code (g) != GIMPLE_COND)
	continue;

      gsi = gsi_for_stmt (g);
      gsi_prev (&gsi);
      if (gsi_end_p (gsi))
	continue;

      g = gsi_stmt (gsi);
      /* The guarding internal function call must have the same distribution
	 alias id.  */
      if (gimple_call_internal_p (g, IFN_LOOP_DIST_ALIAS)
	  && (tree_to_shwi (gimple_call_arg (g, 0)) == loop->orig_loop_num))
	return g;
    }

  return NULL;
}
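
/* For illustration (hypothetical numbers): loop distribution leaves a
   guard of the form

       if (LOOP_DIST_ALIAS (orig_loop_num, <runtime alias check>))

   dominating both loop versions; folding it later substitutes the
   second argument (see try_vectorize_loop_1 below).  */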
/* Set the uids of all the statements in basic blocks inside loop
   represented by LOOP_VINFO.  LOOP_VECTORIZED_CALL is the internal
   call guarding the loop which has been if converted.  */

static void
set_uid_loop_bbs (loop_vec_info loop_vinfo, gimple *loop_vectorized_call)
{
  tree arg = gimple_call_arg (loop_vectorized_call, 1);
  basic_block *bbs;
  unsigned int i;
  class loop *scalar_loop = get_loop (cfun, tree_to_shwi (arg));

  LOOP_VINFO_SCALAR_LOOP (loop_vinfo) = scalar_loop;
  gcc_checking_assert (vect_loop_vectorized_call (scalar_loop)
		       == loop_vectorized_call);
  /* If we are going to vectorize outer loop, prevent vectorization
     of the inner loop in the scalar loop - either the scalar loop is
     thrown away, so it is a wasted work, or is used only for
     versioning.  */
  if (scalar_loop->inner)
    {
      gimple *g = vect_loop_vectorized_call (scalar_loop->inner);
      if (g)
	{
	  arg = gimple_call_arg (g, 0);
	  get_loop (cfun, tree_to_shwi (arg))->dont_vectorize = true;
	  fold_loop_internal_call (g, boolean_false_node);
	}
    }
  bbs = get_loop_body (scalar_loop);
  for (i = 0; i < scalar_loop->num_nodes; i++)
    {
      basic_block bb = bbs[i];
      gimple_stmt_iterator gsi;
      for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple *phi = gsi_stmt (gsi);
	  gimple_set_uid (phi, 0);
	}
      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple *stmt = gsi_stmt (gsi);
	  gimple_set_uid (stmt, 0);
	}
    }
  free (bbs);
}
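
/* For illustration (hypothetical uids): after the reset above every
   statement in the scalar loop copy carries uid 0, the value that
   vect_stmt_dominates_stmt_p treats as "inserted" when it walks a
   basic block, while statements added to a vec_info get uids
   1, 2, 3, ... via set_vinfo_for_stmt.  */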
/* Try to vectorize LOOP.  */

static unsigned
try_vectorize_loop_1 (hash_table<simduid_to_vf> *&simduid_to_vf_htab,
		      unsigned *num_vectorized_loops, loop_p loop,
		      gimple *loop_vectorized_call,
		      gimple *loop_dist_alias_call)
{
  unsigned ret = 0;
  vec_info_shared shared;
  auto_purge_vect_location sentinel;
  vect_location = find_loop_location (loop);

  if (LOCATION_LOCUS (vect_location.get_location_t ()) != UNKNOWN_LOCATION
      && dump_enabled_p ())
    dump_printf (MSG_NOTE | MSG_PRIORITY_INTERNALS,
		 "\nAnalyzing loop at %s:%d\n",
		 LOCATION_FILE (vect_location.get_location_t ()),
		 LOCATION_LINE (vect_location.get_location_t ()));

  opt_loop_vec_info loop_vinfo = opt_loop_vec_info::success (NULL);
  /* In the case of epilogue vectorization the loop already has its
     loop_vec_info set, we do not require to analyze the loop in this case.  */
  if (loop_vec_info vinfo = loop_vec_info_for_loop (loop))
    loop_vinfo = opt_loop_vec_info::success (vinfo);
  else
    {
      /* Try to analyze the loop, retaining an opt_problem if dump_enabled_p.  */
      loop_vinfo = vect_analyze_loop (loop, &shared);
      loop->aux = loop_vinfo;
    }

  if (!loop_vinfo)
    if (dump_enabled_p ())
      if (opt_problem *problem = loop_vinfo.get_problem ())
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "couldn't vectorize loop\n");
	  problem->emit_and_clear ();
	}

  if (!loop_vinfo || !LOOP_VINFO_VECTORIZABLE_P (loop_vinfo))
    {
      /* Free existing information if loop is analyzed with some
	 assumptions.  */
      if (loop_constraint_set_p (loop, LOOP_C_FINITE))
	vect_free_loop_info_assumptions (loop);

      /* If we applied if-conversion then try to vectorize the
	 BB of innermost loops.
	 ??? Ideally BB vectorization would learn to vectorize
	 control flow by applying if-conversion on-the-fly, the
	 following retains the if-converted loop body even when
	 only non-if-converted parts took part in BB vectorization.  */
      if (flag_tree_slp_vectorize != 0
	  && loop_vectorized_call
	  && ! loop->inner)
	{
	  basic_block bb = loop->header;
	  bool require_loop_vectorize = false;
	  for (gimple_stmt_iterator gsi = gsi_start_bb (bb);
	       !gsi_end_p (gsi); gsi_next (&gsi))
	    {
	      gimple *stmt = gsi_stmt (gsi);
	      gcall *call = dyn_cast <gcall *> (stmt);
	      if (call && gimple_call_internal_p (call))
		{
		  internal_fn ifn = gimple_call_internal_fn (call);
		  if (ifn == IFN_MASK_LOAD || ifn == IFN_MASK_STORE
		      /* Don't keep the if-converted parts when the ifn with
			 specific type is not supported by the backend.  */
		      || (direct_internal_fn_p (ifn)
			  && !direct_internal_fn_supported_p
			       (call, OPTIMIZE_FOR_SPEED)))
		    {
		      require_loop_vectorize = true;
		      break;
		    }
		}
	      gimple_set_uid (stmt, -1);
	      gimple_set_visited (stmt, false);
	    }
	  if (!require_loop_vectorize && vect_slp_bb (bb))
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_NOTE, vect_location,
				 "basic block vectorized\n");
	      fold_loop_internal_call (loop_vectorized_call,
				       boolean_true_node);
	      loop_vectorized_call = NULL;
	      ret |= TODO_cleanup_cfg | TODO_update_ssa_only_virtuals;
	    }
	}
      /* If outer loop vectorization fails for LOOP_VECTORIZED guarded
	 loop, don't vectorize its inner loop; we'll attempt to
	 vectorize LOOP_VECTORIZED guarded inner loop of the scalar
	 loop version.  */
      if (loop_vectorized_call && loop->inner)
	loop->inner->dont_vectorize = true;
      return ret;
    }

  if (!dbg_cnt (vect_loop))
    {
      /* Free existing information if loop is analyzed with some
	 assumptions.  */
      if (loop_constraint_set_p (loop, LOOP_C_FINITE))
	vect_free_loop_info_assumptions (loop);
      return ret;
    }

  if (loop_vectorized_call)
    set_uid_loop_bbs (loop_vinfo, loop_vectorized_call);

  unsigned HOST_WIDE_INT bytes;
  if (dump_enabled_p ())
    {
      if (GET_MODE_SIZE (loop_vinfo->vector_mode).is_constant (&bytes))
	dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
			 "loop vectorized using %wu byte vectors\n", bytes);
      else
	dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
			 "loop vectorized using variable length vectors\n");
    }

  loop_p new_loop = vect_transform_loop (loop_vinfo,
					 loop_vectorized_call);
  (*num_vectorized_loops)++;
  /* Now that the loop has been vectorized, allow it to be unrolled
     etc.  */
  loop->force_vectorize = false;

  if (loop->simduid)
    {
      simduid_to_vf *simduid_to_vf_data = XNEW (simduid_to_vf);
      if (!simduid_to_vf_htab)
	simduid_to_vf_htab = new hash_table<simduid_to_vf> (15);
      simduid_to_vf_data->simduid = DECL_UID (loop->simduid);
      simduid_to_vf_data->vf = loop_vinfo->vectorization_factor;
      *simduid_to_vf_htab->find_slot (simduid_to_vf_data, INSERT)
	= simduid_to_vf_data;
    }

  if (loop_vectorized_call)
    {
      fold_loop_internal_call (loop_vectorized_call, boolean_true_node);
      loop_vectorized_call = NULL;
      ret |= TODO_cleanup_cfg;
    }
  if (loop_dist_alias_call)
    {
      tree value = gimple_call_arg (loop_dist_alias_call, 1);
      fold_loop_internal_call (loop_dist_alias_call, value);
      loop_dist_alias_call = NULL;
      ret |= TODO_cleanup_cfg;
    }

  /* Epilogue of vectorized loop must be vectorized too.  */
  if (new_loop)
    {
      /* Don't include vectorized epilogues in the "vectorized loops"
	 count.  */
      unsigned dont_count = *num_vectorized_loops;
      ret |= try_vectorize_loop_1 (simduid_to_vf_htab, &dont_count,
				   new_loop, NULL, NULL);
    }

  return ret;
}
/* Try to vectorize LOOP.  */

static unsigned
try_vectorize_loop (hash_table<simduid_to_vf> *&simduid_to_vf_htab,
		    unsigned *num_vectorized_loops, loop_p loop)
{
  if (!((flag_tree_loop_vectorize
	 && optimize_loop_nest_for_speed_p (loop))
	|| loop->force_vectorize))
    return 0;

  return try_vectorize_loop_1 (simduid_to_vf_htab, num_vectorized_loops, loop,
			       vect_loop_vectorized_call (loop),
			       vect_loop_dist_alias_call (loop));
}
/* Function vectorize_loops.

   Entry point to loop vectorization phase.  */

unsigned
vectorize_loops (void)
{
  unsigned int i;
  unsigned int num_vectorized_loops = 0;
  unsigned int vect_loops_num;
  class loop *loop;
  hash_table<simduid_to_vf> *simduid_to_vf_htab = NULL;
  hash_table<simd_array_to_simduid> *simd_array_to_simduid_htab = NULL;
  bool any_ifcvt_loops = false;
  unsigned ret = 0;

  vect_loops_num = number_of_loops (cfun);

  /* Bail out if there are no loops.  */
  if (vect_loops_num <= 1)
    return 0;

  if (cfun->has_simduid_loops)
    note_simd_array_uses (&simd_array_to_simduid_htab);

  /* ----------- Analyze loops. ----------- */

  /* If some loop was duplicated, it gets bigger number
     than all previously defined loops.  This fact allows us to run
     only over initial loops skipping newly generated ones.  */
  FOR_EACH_LOOP (loop, 0)
    if (loop->dont_vectorize)
      {
	any_ifcvt_loops = true;
	/* If-conversion sometimes versions both the outer loop
	   (for the case when outer loop vectorization might be
	   desirable) as well as the inner loop in the scalar version
	   of the loop.  So we have:
	    if (LOOP_VECTORIZED (1, 3))
	      {
		loop1
		  loop2
	      }
	    else
	      loop3 (copy of loop1)
		if (LOOP_VECTORIZED (4, 5))
		  loop4 (copy of loop2)
		else
		  loop5 (copy of loop4)
	   If FOR_EACH_LOOP gives us loop3 first (which has
	   dont_vectorize set), make sure to process loop1 before loop4;
	   so that we can prevent vectorization of loop4 if loop1
	   is successfully vectorized.  */
	if (loop->inner)
	  {
	    gimple *loop_vectorized_call
	      = vect_loop_vectorized_call (loop);
	    if (loop_vectorized_call
		&& vect_loop_vectorized_call (loop->inner))
	      {
		tree arg = gimple_call_arg (loop_vectorized_call, 0);
		class loop *vector_loop
		  = get_loop (cfun, tree_to_shwi (arg));
		if (vector_loop && vector_loop != loop)
		  {
		    /* Make sure we don't vectorize it twice.  */
		    vector_loop->dont_vectorize = true;
		    ret |= try_vectorize_loop (simduid_to_vf_htab,
					       &num_vectorized_loops,
					       vector_loop);
		  }
	      }
	  }
      }
    else
      ret |= try_vectorize_loop (simduid_to_vf_htab, &num_vectorized_loops,
				 loop);

  vect_location = dump_user_location_t ();

  statistics_counter_event (cfun, "Vectorized loops", num_vectorized_loops);
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vectorized %u loops in function.\n",
		     num_vectorized_loops);

  /* ----------- Finalize. ----------- */

  if (any_ifcvt_loops)
    for (i = 1; i < number_of_loops (cfun); i++)
      {
	loop = get_loop (cfun, i);
	if (loop && loop->dont_vectorize)
	  {
	    gimple *g = vect_loop_vectorized_call (loop);
	    if (g)
	      {
		fold_loop_internal_call (g, boolean_false_node);
		ret |= TODO_cleanup_cfg;
		g = NULL;
	      }
	    else
	      g = vect_loop_dist_alias_call (loop);

	    if (g)
	      {
		fold_loop_internal_call (g, boolean_false_node);
		ret |= TODO_cleanup_cfg;
	      }
	  }
      }

  for (i = 1; i < number_of_loops (cfun); i++)
    {
      loop_vec_info loop_vinfo;
      bool has_mask_store;

      loop = get_loop (cfun, i);
      if (!loop || !loop->aux)
	continue;
      loop_vinfo = (loop_vec_info) loop->aux;
      has_mask_store = LOOP_VINFO_HAS_MASK_STORE (loop_vinfo);
      delete loop_vinfo;
      if (has_mask_store
	  && targetm.vectorize.empty_mask_is_expensive (IFN_MASK_STORE))
	optimize_mask_stores (loop);
      loop->aux = NULL;
    }

  /* Fold IFN_GOMP_SIMD_{VF,LANE,LAST_LANE,ORDERED_{START,END}} builtins.  */
  if (cfun->has_simduid_loops)
    adjust_simduid_builtins (simduid_to_vf_htab);

  /* Shrink any "omp array simd" temporary arrays to the
     actual vectorization factors.  */
  if (simd_array_to_simduid_htab)
    shrink_simd_arrays (simd_array_to_simduid_htab, simduid_to_vf_htab);
  delete simduid_to_vf_htab;
  cfun->has_simduid_loops = false;

  if (num_vectorized_loops > 0)
    {
      /* If we vectorized any loop only virtual SSA form needs to be updated.
	 ??? Also while we try hard to update loop-closed SSA form we fail
	 to properly do this in some corner-cases (see PR56286).  */
      rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa_only_virtuals);
      return TODO_cleanup_cfg;
    }

  return ret;
}
/* Entry point to the simduid cleanup pass.  */

namespace {

const pass_data pass_data_simduid_cleanup =
{
  GIMPLE_PASS, /* type */
  "simduid", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_NONE, /* tv_id */
  ( PROP_ssa | PROP_cfg ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_simduid_cleanup : public gimple_opt_pass
{
public:
  pass_simduid_cleanup (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_simduid_cleanup, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_simduid_cleanup (m_ctxt); }
  virtual bool gate (function *fun) { return fun->has_simduid_loops; }
  virtual unsigned int execute (function *);

}; // class pass_simduid_cleanup

unsigned int
pass_simduid_cleanup::execute (function *fun)
{
  hash_table<simd_array_to_simduid> *simd_array_to_simduid_htab = NULL;

  note_simd_array_uses (&simd_array_to_simduid_htab);

  /* Fold IFN_GOMP_SIMD_{VF,LANE,LAST_LANE,ORDERED_{START,END}} builtins.  */
  adjust_simduid_builtins (NULL);

  /* Shrink any "omp array simd" temporary arrays to the
     actual vectorization factors.  */
  if (simd_array_to_simduid_htab)
    shrink_simd_arrays (simd_array_to_simduid_htab, NULL);
  fun->has_simduid_loops = false;
  return 0;
}

}  // anon namespace

gimple_opt_pass *
make_pass_simduid_cleanup (gcc::context *ctxt)
{
  return new pass_simduid_cleanup (ctxt);
}
/* Entry point to basic block SLP phase.  */

namespace {

const pass_data pass_data_slp_vectorize =
{
  GIMPLE_PASS, /* type */
  "slp", /* name */
  OPTGROUP_LOOP | OPTGROUP_VEC, /* optinfo_flags */
  TV_TREE_SLP_VECTORIZATION, /* tv_id */
  ( PROP_ssa | PROP_cfg ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_update_ssa, /* todo_flags_finish */
};

class pass_slp_vectorize : public gimple_opt_pass
{
public:
  pass_slp_vectorize (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_slp_vectorize, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_slp_vectorize (m_ctxt); }
  virtual bool gate (function *) { return flag_tree_slp_vectorize != 0; }
  virtual unsigned int execute (function *);

}; // class pass_slp_vectorize

unsigned int
pass_slp_vectorize::execute (function *fun)
{
  auto_purge_vect_location sentinel;
  basic_block bb;

  bool in_loop_pipeline = scev_initialized_p ();
  if (!in_loop_pipeline)
    {
      loop_optimizer_init (LOOPS_NORMAL);
      scev_initialize ();
    }

  /* Mark all stmts as not belonging to the current region and unvisited.  */
  FOR_EACH_BB_FN (bb, fun)
    {
      for (gimple_stmt_iterator gsi = gsi_start_bb (bb); !gsi_end_p (gsi);
	   gsi_next (&gsi))
	{
	  gimple *stmt = gsi_stmt (gsi);
	  gimple_set_uid (stmt, -1);
	  gimple_set_visited (stmt, false);
	}
    }

  FOR_EACH_BB_FN (bb, fun)
    {
      if (vect_slp_bb (bb))
	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location, "basic block vectorized\n");
    }

  if (!in_loop_pipeline)
    {
      scev_finalize ();
      loop_optimizer_finalize ();
    }

  return 0;
}

}  // anon namespace

gimple_opt_pass *
make_pass_slp_vectorize (gcc::context *ctxt)
{
  return new pass_slp_vectorize (ctxt);
}
/* Increase alignment of global arrays to improve vectorization potential.
   TODO:
   - Consider also structs that have an array field.
   - Use ipa analysis to prune arrays that can't be vectorized?
     This should involve global alignment analysis and in the future also
     array padding.  */

static unsigned get_vec_alignment_for_type (tree);
static hash_map<tree, unsigned> *type_align_map;

/* Return alignment of array's vector type corresponding to scalar type.
   0 if no vector type exists.  */
static unsigned
get_vec_alignment_for_array_type (tree type)
{
  gcc_assert (TREE_CODE (type) == ARRAY_TYPE);
  poly_uint64 array_size, vector_size;

  tree scalar_type = strip_array_types (type);
  tree vectype = get_related_vectype_for_scalar_type (VOIDmode, scalar_type);
  if (!vectype
      || !poly_int_tree_p (TYPE_SIZE (type), &array_size)
      || !poly_int_tree_p (TYPE_SIZE (vectype), &vector_size)
      || maybe_lt (array_size, vector_size))
    return 0;

  return TYPE_ALIGN (vectype);
}
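
/* A worked example (hypothetical target): for "int a[256]" with a
   V4SI vector type, TYPE_SIZE (type) is 8192 bits and
   TYPE_SIZE (vectype) is 128 bits, so the checks above pass and the
   function returns TYPE_ALIGN (vectype), i.e. 128 bits.  */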
/* Return alignment of field having maximum alignment of vector type
   corresponding to its scalar type.  For now, we only consider fields whose
   offset is a multiple of its vector alignment.
   0 if no suitable field is found.  */
static unsigned
get_vec_alignment_for_record_type (tree type)
{
  gcc_assert (TREE_CODE (type) == RECORD_TYPE);

  unsigned max_align = 0, alignment;
  HOST_WIDE_INT offset;
  tree offset_tree;

  if (TYPE_PACKED (type))
    return 0;

  unsigned *slot = type_align_map->get (type);
  if (slot)
    return *slot;

  for (tree field = first_field (type);
       field != NULL_TREE;
       field = DECL_CHAIN (field))
    {
      /* Skip if not FIELD_DECL or if alignment is set by user.  */
      if (TREE_CODE (field) != FIELD_DECL
	  || DECL_USER_ALIGN (field)
	  || DECL_ARTIFICIAL (field))
	continue;

      /* We don't need to process the type further if offset is variable,
	 since the offsets of remaining members will also be variable.  */
      if (TREE_CODE (DECL_FIELD_OFFSET (field)) != INTEGER_CST
	  || TREE_CODE (DECL_FIELD_BIT_OFFSET (field)) != INTEGER_CST)
	break;

      /* Similarly stop processing the type if offset_tree
	 does not fit in unsigned HOST_WIDE_INT.  */
      offset_tree = bit_position (field);
      if (!tree_fits_uhwi_p (offset_tree))
	break;

      offset = tree_to_uhwi (offset_tree);
      alignment = get_vec_alignment_for_type (TREE_TYPE (field));

      /* Get maximum alignment of vectorized field/array among those members
	 whose offset is multiple of the vector alignment.  */
      if (alignment
	  && (offset % alignment == 0)
	  && (alignment > max_align))
	max_align = alignment;
    }

  type_align_map->put (type, max_align);
  return max_align;
}
/* Return alignment of vector type corresponding to decl's scalar type
   or 0 if it doesn't exist or the vector alignment is lesser than
   decl's alignment.  */
static unsigned
get_vec_alignment_for_type (tree type)
{
  if (type == NULL_TREE)
    return 0;

  gcc_assert (TYPE_P (type));

  static unsigned alignment = 0;
  switch (TREE_CODE (type))
    {
    case ARRAY_TYPE:
      alignment = get_vec_alignment_for_array_type (type);
      break;
    case RECORD_TYPE:
      alignment = get_vec_alignment_for_record_type (type);
      break;
    default:
      alignment = 0;
      break;
    }

  return (alignment > TYPE_ALIGN (type)) ? alignment : 0;
}
/* Entry point to increase_alignment pass.  */
static unsigned int
increase_alignment (void)
{
  varpool_node *vnode;

  vect_location = dump_user_location_t ();
  type_align_map = new hash_map<tree, unsigned>;

  /* Increase the alignment of all global arrays for vectorization.  */
  FOR_EACH_DEFINED_VARIABLE (vnode)
    {
      tree decl = vnode->decl;
      unsigned int alignment;

      if ((decl_in_symtab_p (decl)
	   && !symtab_node::get (decl)->can_increase_alignment_p ())
	  || DECL_USER_ALIGN (decl) || DECL_ARTIFICIAL (decl))
	continue;

      alignment = get_vec_alignment_for_type (TREE_TYPE (decl));
      if (alignment && vect_can_force_dr_alignment_p (decl, alignment))
	{
	  vnode->increase_alignment (alignment);
	  if (dump_enabled_p ())
	    dump_printf (MSG_NOTE, "Increasing alignment of decl: %T\n", decl);
	}
    }

  delete type_align_map;
  return 0;
}
namespace {

const pass_data pass_data_ipa_increase_alignment =
{
  SIMPLE_IPA_PASS, /* type */
  "increase_alignment", /* name */
  OPTGROUP_LOOP | OPTGROUP_VEC, /* optinfo_flags */
  TV_IPA_OPT, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_ipa_increase_alignment : public simple_ipa_opt_pass
{
public:
  pass_ipa_increase_alignment (gcc::context *ctxt)
    : simple_ipa_opt_pass (pass_data_ipa_increase_alignment, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *)
  {
    return flag_section_anchors && flag_tree_loop_vectorize;
  }

  virtual unsigned int execute (function *) { return increase_alignment (); }

}; // class pass_ipa_increase_alignment

}  // anon namespace

simple_ipa_opt_pass *
make_pass_ipa_increase_alignment (gcc::context *ctxt)
{
  return new pass_ipa_increase_alignment (ctxt);
}
/* If the condition represented by T is a comparison or the SSA name
   result of a comparison, extract the comparison's operands.  Represent
   T as NE_EXPR <T, 0> otherwise.  */

void
scalar_cond_masked_key::get_cond_ops_from_tree (tree t)
{
  if (TREE_CODE_CLASS (TREE_CODE (t)) == tcc_comparison)
    {
      this->code = TREE_CODE (t);
      this->op0 = TREE_OPERAND (t, 0);
      this->op1 = TREE_OPERAND (t, 1);
      return;
    }

  if (TREE_CODE (t) == SSA_NAME)
    if (gassign *stmt = dyn_cast <gassign *> (SSA_NAME_DEF_STMT (t)))
      {
	tree_code code = gimple_assign_rhs_code (stmt);
	if (TREE_CODE_CLASS (code) == tcc_comparison)
	  {
	    this->code = code;
	    this->op0 = gimple_assign_rhs1 (stmt);
	    this->op1 = gimple_assign_rhs2 (stmt);
	    return;
	  }
      }

  this->code = NE_EXPR;
  this->op0 = t;
  this->op1 = build_zero_cst (TREE_TYPE (t));
}
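
/* For illustration: given the definition _5 = a_1 < b_2, a mask
   condition _5 is recorded as { LT_EXPR, a_1, b_2 }, while a name c_3
   that is not defined by a comparison is canonicalized to
   { NE_EXPR, c_3, 0 }.  */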