/* Loop Vectorization
   Copyright (C) 2003-2019 Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com> and
   Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "tree-pass.h"
#include "optabs-tree.h"
#include "diagnostic-core.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "tree-ssa-loop-ivopts.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop-niter.h"
#include "tree-ssa-loop.h"
#include "tree-scalar-evolution.h"
#include "tree-vectorizer.h"
#include "gimple-fold.h"
#include "tree-if-conv.h"
#include "internal-fn.h"
#include "tree-vector-builder.h"
#include "vec-perm-indices.h"
/* Loop Vectorization Pass.

   This pass tries to vectorize loops.

   For example, the vectorizer transforms the following simple loop:

        short a[N]; short b[N]; short c[N]; int i;

        for (i=0; i<N; i++){
          a[i] = b[i] + c[i];
        }

   as if it was manually vectorized by rewriting the source code into:

        typedef int __attribute__((mode(V8HI))) v8hi;
        short a[N]; short b[N]; short c[N]; int i;
        v8hi *pa = (v8hi*)a, *pb = (v8hi*)b, *pc = (v8hi*)c;
        v8hi va, vb, vc;

        for (i=0; i<N/8; i++){
          vb = pb[i];
          vc = pc[i];
          va = vb + vc;
          pa[i] = va;
        }
   The main entry to this pass is vectorize_loops(), in which
   the vectorizer applies a set of analyses on a given set of loops,
   followed by the actual vectorization transformation for the loops that
   had successfully passed the analysis phase.
   Throughout this pass we make a distinction between two types of
   data: scalars (which are represented by SSA_NAMES), and memory references
   ("data-refs").  These two types of data require different handling both
   during analysis and transformation.  The types of data-refs that the
   vectorizer currently supports are ARRAY_REFS whose base is an array DECL
   (not a pointer), and INDIRECT_REFS through pointers; both array and pointer
   accesses are required to have a simple (consecutive) access pattern.
   Analysis phase:
   ===============
   The driver for the analysis phase is vect_analyze_loop().
   It applies a set of analyses, some of which rely on the scalar evolution
   analyzer (scev) developed by Sebastian Pop.

   During the analysis phase the vectorizer records some information
   per stmt in a "stmt_vec_info" struct which is attached to each stmt in the
   loop, as well as general information about the loop as a whole, which is
   recorded in a "loop_vec_info" struct attached to each loop.
   Transformation phase:
   =====================
   The loop transformation phase scans all the stmts in the loop, and
   creates a vector stmt (or a sequence of stmts) for each scalar stmt S in
   the loop that needs to be vectorized.  It inserts the vector code sequence
   just before the scalar stmt S, and records a pointer to the vector code
   in STMT_VINFO_VEC_STMT (stmt_info) (stmt_info is the stmt_vec_info struct
   attached to S).  This pointer will be used for the vectorization of
   following stmts which use the def of stmt S.  Stmt S is removed if it
   writes to memory; otherwise, we rely on dead code elimination for
   removing it.
   For example, say stmt S1 was vectorized into stmt VS1:

   VS1: vb = px[i];
   S1:  b = x[i];       STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
   S2:  a = b;

   To vectorize stmt S2, the vectorizer first finds the stmt that defines
   the operand 'b' (S1), and gets the relevant vector def 'vb' from the
   vector stmt VS1 pointed to by STMT_VINFO_VEC_STMT (stmt_info (S1)).  The
   resulting sequence would be:

   VS1: vb = px[i];
   S1:  b = x[i];       STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
   VS2: va = vb;
   S2:  a = b;          STMT_VINFO_VEC_STMT (stmt_info (S2)) = VS2

   Operands that are not SSA_NAMEs, are data-refs that appear in
   load/store operations (like 'x[i]' in S1), and are handled differently.
   Target modeling:
   =================
   Currently the only target specific information that is used is the
   size of the vector (in bytes) - "TARGET_VECTORIZE_UNITS_PER_SIMD_WORD".
   Targets that can support different sizes of vectors, for now will need
   to specify one value for "TARGET_VECTORIZE_UNITS_PER_SIMD_WORD".  More
   flexibility will be added in the future.

   Since we only vectorize operations whose vector form can be
   expressed using existing tree codes, to verify that an operation is
   supported, the vectorizer checks the relevant optab at the relevant
   machine_mode (e.g., optab_handler (add_optab, V8HImode)).  If
   the value found is CODE_FOR_nothing, then there's no target support, and
   we can't vectorize the stmt.

   For additional information on this project see:
   http://gcc.gnu.org/projects/tree-ssa/vectorization.html
*/
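/* Illustrative sketch of the optab query described above (an assumption
   added for clarity, not code from this pass); V8HImode is just an example
   mode:

     if (optab_handler (add_optab, V8HImode) == CODE_FOR_nothing)
       ;  // no target support: an addition on V8HI cannot be vectorized
*/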
static void vect_estimate_min_profitable_iters (loop_vec_info, int *, int *);
/* Subroutine of vect_determine_vf_for_stmt that handles only one
   statement.  VECTYPE_MAYBE_SET_P is true if STMT_VINFO_VECTYPE
   may already be set for general statements (not just data refs).  */

static opt_result
vect_determine_vf_for_stmt_1 (stmt_vec_info stmt_info,
                              bool vectype_maybe_set_p,
                              poly_uint64 *vf,
                              vec<stmt_vec_info> *mask_producers)
{
  gimple *stmt = stmt_info->stmt;

  if ((!STMT_VINFO_RELEVANT_P (stmt_info)
       && !STMT_VINFO_LIVE_P (stmt_info))
      || gimple_clobber_p (stmt))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location, "skip.\n");
      return opt_result::success ();
    }

  tree stmt_vectype, nunits_vectype;
  opt_result res = vect_get_vector_types_for_stmt (stmt_info, &stmt_vectype,
                                                   &nunits_vectype);
  if (!res)
    return res;

  if (stmt_vectype)
    {
      if (STMT_VINFO_VECTYPE (stmt_info))
        /* The only case when a vectype had been already set is for stmts
           that contain a data ref, or for "pattern-stmts" (stmts generated
           by the vectorizer to represent/replace a certain idiom).  */
        gcc_assert ((STMT_VINFO_DATA_REF (stmt_info)
                     || vectype_maybe_set_p)
                    && STMT_VINFO_VECTYPE (stmt_info) == stmt_vectype);
      else if (stmt_vectype == boolean_type_node)
        mask_producers->safe_push (stmt_info);
      else
        STMT_VINFO_VECTYPE (stmt_info) = stmt_vectype;
    }

  if (nunits_vectype)
    vect_update_max_nunits (vf, nunits_vectype);

  return opt_result::success ();
}
/* Subroutine of vect_determine_vectorization_factor.  Set the vector
   types of STMT_INFO and all attached pattern statements and update
   the vectorization factor VF accordingly.  If some of the statements
   produce a mask result whose vector type can only be calculated later,
   add them to MASK_PRODUCERS.  Return true on success or false if
   something prevented vectorization.  */

static opt_result
vect_determine_vf_for_stmt (stmt_vec_info stmt_info, poly_uint64 *vf,
                            vec<stmt_vec_info> *mask_producers)
{
  vec_info *vinfo = stmt_info->vinfo;
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "==> examining statement: %G",
                     stmt_info->stmt);
  opt_result res
    = vect_determine_vf_for_stmt_1 (stmt_info, false, vf, mask_producers);
  if (!res)
    return res;

  if (STMT_VINFO_IN_PATTERN_P (stmt_info)
      && STMT_VINFO_RELATED_STMT (stmt_info))
    {
      gimple *pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info);
      stmt_info = STMT_VINFO_RELATED_STMT (stmt_info);

      /* If a pattern statement has def stmts, analyze them too.  */
      for (gimple_stmt_iterator si = gsi_start (pattern_def_seq);
           !gsi_end_p (si); gsi_next (&si))
        {
          stmt_vec_info def_stmt_info = vinfo->lookup_stmt (gsi_stmt (si));
          if (dump_enabled_p ())
            dump_printf_loc (MSG_NOTE, vect_location,
                             "==> examining pattern def stmt: %G",
                             def_stmt_info->stmt);
          res = vect_determine_vf_for_stmt_1 (def_stmt_info, true,
                                              vf, mask_producers);
          if (!res)
            return res;
        }

      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "==> examining pattern statement: %G",
                         stmt_info->stmt);
      res = vect_determine_vf_for_stmt_1 (stmt_info, true, vf, mask_producers);
      if (!res)
        return res;
    }

  return opt_result::success ();
}
/* Function vect_determine_vectorization_factor

   Determine the vectorization factor (VF).  VF is the number of data elements
   that are operated upon in parallel in a single iteration of the vectorized
   loop.  For example, when vectorizing a loop that operates on 4byte elements,
   on a target with vector size (VS) 16byte, the VF is set to 4, since 4
   elements can fit in a single vector register.

   We currently support vectorization of loops in which all types operated upon
   are of the same size.  Therefore this function currently sets VF according to
   the size of the types operated upon, and fails if there are multiple sizes
   in the loop.

   VF is also the factor by which the loop iterations are strip-mined, e.g.:
   original loop:
        for (i=0; i<N; i++){
          a[i] = b[i] + c[i];
        }

   vectorized loop:
        for (i=0; i<N; i+=VF){
          a[i:VF] = b[i:VF] + c[i:VF];
        }
*/
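/* Worked example (an illustrative assumption, not from the original source):
   with 16-byte vectors and 4-byte ints, VF = 4, so a loop of N = 103
   iterations is strip-mined into 103 / 4 = 25 vector iterations, leaving
   103 % 4 = 3 scalar iterations for an epilogue loop.  */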
static opt_result
vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  unsigned nbbs = loop->num_nodes;
  poly_uint64 vectorization_factor = 1;
  tree scalar_type = NULL_TREE;
  tree vectype;
  stmt_vec_info stmt_info;
  unsigned i;
  auto_vec<stmt_vec_info> mask_producers;

  DUMP_VECT_SCOPE ("vect_determine_vectorization_factor");

  for (i = 0; i < nbbs; i++)
    {
      basic_block bb = bbs[i];

      for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
           gsi_next (&si))
        {
          gphi *phi = si.phi ();
          stmt_info = loop_vinfo->lookup_stmt (phi);
          if (dump_enabled_p ())
            dump_printf_loc (MSG_NOTE, vect_location, "==> examining phi: %G",
                             phi);

          gcc_assert (stmt_info);

          if (STMT_VINFO_RELEVANT_P (stmt_info)
              || STMT_VINFO_LIVE_P (stmt_info))
            {
              gcc_assert (!STMT_VINFO_VECTYPE (stmt_info));
              scalar_type = TREE_TYPE (PHI_RESULT (phi));

              if (dump_enabled_p ())
                dump_printf_loc (MSG_NOTE, vect_location,
                                 "get vectype for scalar type: %T\n",
                                 scalar_type);

              vectype = get_vectype_for_scalar_type (scalar_type);
              if (!vectype)
                return opt_result::failure_at (phi,
                                               "not vectorized: unsupported "
                                               "data-type %T\n",
                                               scalar_type);
              STMT_VINFO_VECTYPE (stmt_info) = vectype;

              if (dump_enabled_p ())
                dump_printf_loc (MSG_NOTE, vect_location, "vectype: %T\n",
                                 vectype);

              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_NOTE, vect_location, "nunits = ");
                  dump_dec (MSG_NOTE, TYPE_VECTOR_SUBPARTS (vectype));
                  dump_printf (MSG_NOTE, "\n");
                }

              vect_update_max_nunits (&vectorization_factor, vectype);
            }
        }

      for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
           gsi_next (&si))
        {
          stmt_info = loop_vinfo->lookup_stmt (gsi_stmt (si));
          opt_result res
            = vect_determine_vf_for_stmt (stmt_info, &vectorization_factor,
                                          &mask_producers);
          if (!res)
            return res;
        }
    }

  /* TODO: Analyze cost.  Decide if worth while to vectorize.  */
  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "vectorization factor = ");
      dump_dec (MSG_NOTE, vectorization_factor);
      dump_printf (MSG_NOTE, "\n");
    }

  if (known_le (vectorization_factor, 1U))
    return opt_result::failure_at (vect_location,
                                   "not vectorized: unsupported data-type\n");
  LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;

  for (i = 0; i < mask_producers.length (); i++)
    {
      stmt_info = mask_producers[i];
      opt_tree mask_type = vect_get_mask_type_for_stmt (stmt_info);
      if (!mask_type)
        return opt_result::propagate_failure (mask_type);
      STMT_VINFO_VECTYPE (stmt_info) = mask_type;
    }

  return opt_result::success ();
}
/* Function vect_is_simple_iv_evolution.

   FORNOW: A simple evolution of an induction variable in the loop is
   considered a polynomial evolution.  */

static bool
vect_is_simple_iv_evolution (unsigned loop_nb, tree access_fn, tree * init,
                             tree * step)
{
  tree init_expr;
  tree step_expr;
  tree evolution_part = evolution_part_in_loop_num (access_fn, loop_nb);
  basic_block bb;

  /* When there is no evolution in this loop, the evolution function
     is not "simple".  */
  if (evolution_part == NULL_TREE)
    return false;

  /* When the evolution is a polynomial of degree >= 2
     the evolution function is not "simple".  */
  if (tree_is_chrec (evolution_part))
    return false;

  step_expr = evolution_part;
  init_expr = unshare_expr (initial_condition_in_loop_num (access_fn, loop_nb));

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "step: %T, init: %T\n",
                     step_expr, init_expr);

  *init = init_expr;
  *step = step_expr;

  if (TREE_CODE (step_expr) != INTEGER_CST
      && (TREE_CODE (step_expr) != SSA_NAME
          || ((bb = gimple_bb (SSA_NAME_DEF_STMT (step_expr)))
              && flow_bb_inside_loop_p (get_loop (cfun, loop_nb), bb))
          || (!INTEGRAL_TYPE_P (TREE_TYPE (step_expr))
              && (!SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr))
                  || !flag_associative_math)))
      && (TREE_CODE (step_expr) != REAL_CST
          || !flag_associative_math))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "step unknown.\n");
      return false;
    }

  return true;
}
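/* Illustrative example (an assumption added for clarity, not from the
   original source): for a counter updated as "i = i + 4", scev computes the
   access function {init, +, 4}_loop; its evolution part is the INTEGER_CST
   step 4 and its initial condition is 'init', so the checks above accept it
   as a simple IV evolution.  */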
/* Return true if PHI, described by STMT_INFO, is the inner PHI in
   what we are assuming is a double reduction.  For example, given
   a structure like this:

      outer1:
        x_1 = PHI <x_4(outer2), ...>;
        ...

      inner:
        x_2 = PHI <x_1(outer1), ...>;
        ...
        x_3 = ...;

      outer2:
        x_4 = PHI <x_3(inner)>;
        ...

   outer loop analysis would treat x_1 as a double reduction phi and
   this function would then return true for x_2.  */

static bool
vect_inner_phi_in_double_reduction_p (stmt_vec_info stmt_info, gphi *phi)
{
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  use_operand_p use_p;
  ssa_op_iter op_iter;
  FOR_EACH_PHI_ARG (use_p, phi, op_iter, SSA_OP_USE)
    if (stmt_vec_info def_info = loop_vinfo->lookup_def (USE_FROM_PTR (use_p)))
      if (STMT_VINFO_DEF_TYPE (def_info) == vect_double_reduction_def)
        return true;
  return false;
}
/* Function vect_analyze_scalar_cycles_1.

   Examine the cross iteration def-use cycles of scalar variables
   in LOOP.  LOOP_VINFO represents the loop that is now being
   considered for vectorization (can be LOOP, or an outer-loop
   enclosing LOOP).  */
static void
vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, struct loop *loop)
{
  basic_block bb = loop->header;
  tree init, step;
  auto_vec<stmt_vec_info, 64> worklist;
  gphi_iterator gsi;
  bool double_reduc;

  DUMP_VECT_SCOPE ("vect_analyze_scalar_cycles");

  /* First - identify all inductions.  Reduction detection assumes that all the
     inductions have been identified, therefore, this order must not be
     changed.  */
  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gphi *phi = gsi.phi ();
      tree access_fn = NULL;
      tree def = PHI_RESULT (phi);
      stmt_vec_info stmt_vinfo = loop_vinfo->lookup_stmt (phi);

      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: %G", phi);

      /* Skip virtual phi's.  The data dependences that are associated with
         virtual defs/uses (i.e., memory accesses) are analyzed elsewhere.  */
      if (virtual_operand_p (def))
        continue;

      STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_unknown_def_type;

      /* Analyze the evolution function.  */
      access_fn = analyze_scalar_evolution (loop, def);
      if (access_fn)
        {
          STRIP_NOPS (access_fn);
          if (dump_enabled_p ())
            dump_printf_loc (MSG_NOTE, vect_location,
                             "Access function of PHI: %T\n", access_fn);
          STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (stmt_vinfo)
            = initial_condition_in_loop_num (access_fn, loop->num);
          STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo)
            = evolution_part_in_loop_num (access_fn, loop->num);
        }

      if (!access_fn
          || vect_inner_phi_in_double_reduction_p (stmt_vinfo, phi)
          || !vect_is_simple_iv_evolution (loop->num, access_fn, &init, &step)
          || (LOOP_VINFO_LOOP (loop_vinfo) != loop
              && TREE_CODE (step) != INTEGER_CST))
        {
          worklist.safe_push (stmt_vinfo);
          continue;
        }

      gcc_assert (STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (stmt_vinfo)
                  != NULL_TREE);
      gcc_assert (STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo) != NULL_TREE);

      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location, "Detected induction.\n");
      STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_induction_def;
    }
  /* Second - identify all reductions and nested cycles.  */
  while (worklist.length () > 0)
    {
      stmt_vec_info stmt_vinfo = worklist.pop ();
      gphi *phi = as_a <gphi *> (stmt_vinfo->stmt);
      tree def = PHI_RESULT (phi);

      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: %G", phi);

      gcc_assert (!virtual_operand_p (def)
                  && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_unknown_def_type);

      stmt_vec_info reduc_stmt_info
        = vect_force_simple_reduction (loop_vinfo, stmt_vinfo,
                                       &double_reduc, false);
      if (reduc_stmt_info)
        {
          if (double_reduc)
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_NOTE, vect_location,
                                 "Detected double reduction.\n");

              STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_double_reduction_def;
              STMT_VINFO_DEF_TYPE (reduc_stmt_info)
                = vect_double_reduction_def;
            }
          else
            {
              if (loop != LOOP_VINFO_LOOP (loop_vinfo))
                {
                  if (dump_enabled_p ())
                    dump_printf_loc (MSG_NOTE, vect_location,
                                     "Detected vectorizable nested cycle.\n");

                  STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_nested_cycle;
                  STMT_VINFO_DEF_TYPE (reduc_stmt_info) = vect_nested_cycle;
                }
              else
                {
                  if (dump_enabled_p ())
                    dump_printf_loc (MSG_NOTE, vect_location,
                                     "Detected reduction.\n");

                  STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_reduction_def;
                  STMT_VINFO_DEF_TYPE (reduc_stmt_info) = vect_reduction_def;
                  /* Store the reduction cycles for possible vectorization in
                     loop-aware SLP if it was not detected as reduction
                     chain.  */
                  if (! REDUC_GROUP_FIRST_ELEMENT (reduc_stmt_info))
                    LOOP_VINFO_REDUCTIONS (loop_vinfo).safe_push
                      (reduc_stmt_info);
                }
            }
        }
      else
        if (dump_enabled_p ())
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "Unknown def-use cycle pattern.\n");
    }
}
/* Function vect_analyze_scalar_cycles.

   Examine the cross iteration def-use cycles of scalar variables, by
   analyzing the loop-header PHIs of scalar variables.  Classify each
   cycle as one of the following: invariant, induction, reduction, unknown.
   We do that for the loop represented by LOOP_VINFO, and also to its
   inner-loop, if it exists.
   Examples for scalar cycles: see the illustrative sketch below.  */
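/* The examples that originally followed the comment above are not part of
   this extract; the following sketch (an assumption, not verbatim from the
   source) illustrates the two common kinds of scalar cycles referred to:

     Example1: reduction:
                for (i = 0; i < N; i++)
                  sum += a[i];          // 'sum' forms a reduction cycle

     Example2: induction:
                for (i = 0; i < N; i++)
                  a[i] = i;             // 'i' forms an induction cycle
*/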
static void
vect_analyze_scalar_cycles (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);

  vect_analyze_scalar_cycles_1 (loop_vinfo, loop);

  /* When vectorizing an outer-loop, the inner-loop is executed sequentially.
     Reductions in such inner-loop therefore have different properties than
     the reductions in the nest that gets vectorized:
     1. When vectorized, they are executed in the same order as in the original
        scalar loop, so we can't change the order of computation when
        vectorizing them.
     2. FIXME: Inner-loop reductions can be used in the inner-loop, so the
        current checks are too strict.  */

  if (loop->inner)
    vect_analyze_scalar_cycles_1 (loop_vinfo, loop->inner);
}
/* Transfer group and reduction information from STMT_INFO to its
   pattern stmt.  */

static void
vect_fixup_reduc_chain (stmt_vec_info stmt_info)
{
  stmt_vec_info firstp = STMT_VINFO_RELATED_STMT (stmt_info);
  stmt_vec_info stmtp;
  gcc_assert (!REDUC_GROUP_FIRST_ELEMENT (firstp)
              && REDUC_GROUP_FIRST_ELEMENT (stmt_info));
  REDUC_GROUP_SIZE (firstp) = REDUC_GROUP_SIZE (stmt_info);
  do
    {
      stmtp = STMT_VINFO_RELATED_STMT (stmt_info);
      REDUC_GROUP_FIRST_ELEMENT (stmtp) = firstp;
      stmt_info = REDUC_GROUP_NEXT_ELEMENT (stmt_info);
      if (stmt_info)
        REDUC_GROUP_NEXT_ELEMENT (stmtp)
          = STMT_VINFO_RELATED_STMT (stmt_info);
    }
  while (stmt_info);
  STMT_VINFO_DEF_TYPE (stmtp) = vect_reduction_def;
}
/* Fixup scalar cycles that now have their stmts detected as patterns.  */

static void
vect_fixup_scalar_cycles_with_patterns (loop_vec_info loop_vinfo)
{
  stmt_vec_info first;
  unsigned i;

  FOR_EACH_VEC_ELT (LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo), i, first)
    if (STMT_VINFO_IN_PATTERN_P (first))
      {
        stmt_vec_info next = REDUC_GROUP_NEXT_ELEMENT (first);
        while (next)
          {
            if (! STMT_VINFO_IN_PATTERN_P (next))
              break;
            next = REDUC_GROUP_NEXT_ELEMENT (next);
          }
        /* If not all stmts in the chain are patterns try to handle
           the chain without patterns.  */
        if (! next)
          {
            vect_fixup_reduc_chain (first);
            LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo)[i]
              = STMT_VINFO_RELATED_STMT (first);
          }
      }
}
/* Function vect_get_loop_niters.

   Determine how many iterations the loop is executed and place it
   in NUMBER_OF_ITERATIONS.  Place the number of latch iterations
   in NUMBER_OF_ITERATIONSM1.  Place the condition under which the
   niter information holds in ASSUMPTIONS.

   Return the loop exit condition.  */
static gcond *
vect_get_loop_niters (struct loop *loop, tree *assumptions,
                      tree *number_of_iterations, tree *number_of_iterationsm1)
{
  edge exit = single_exit (loop);
  struct tree_niter_desc niter_desc;
  tree niter_assumptions, niter, may_be_zero;
  gcond *cond = get_loop_exit_condition (loop);

  *assumptions = boolean_true_node;
  *number_of_iterationsm1 = chrec_dont_know;
  *number_of_iterations = chrec_dont_know;
  DUMP_VECT_SCOPE ("get_loop_niters");

  if (!exit)
    return cond;

  may_be_zero = NULL_TREE;
  if (!number_of_iterations_exit_assumptions (loop, exit, &niter_desc, NULL)
      || chrec_contains_undetermined (niter_desc.niter))
    return cond;

  niter_assumptions = niter_desc.assumptions;
  may_be_zero = niter_desc.may_be_zero;
  niter = niter_desc.niter;

  if (may_be_zero && integer_zerop (may_be_zero))
    may_be_zero = NULL_TREE;

  if (may_be_zero)
    {
      if (COMPARISON_CLASS_P (may_be_zero))
        {
          /* Try to combine may_be_zero with assumptions, this can simplify
             computation of niter expression.  */
          if (niter_assumptions && !integer_nonzerop (niter_assumptions))
            niter_assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
                                             niter_assumptions,
                                             fold_build1 (TRUTH_NOT_EXPR,
                                                          boolean_type_node,
                                                          may_be_zero));
          else
            niter = fold_build3 (COND_EXPR, TREE_TYPE (niter), may_be_zero,
                                 build_int_cst (TREE_TYPE (niter), 0),
                                 rewrite_to_non_trapping_overflow (niter));

          may_be_zero = NULL_TREE;
        }
      else if (integer_nonzerop (may_be_zero))
        {
          *number_of_iterationsm1 = build_int_cst (TREE_TYPE (niter), 0);
          *number_of_iterations = build_int_cst (TREE_TYPE (niter), 1);
          return cond;
        }
      else
        return cond;
    }

  *assumptions = niter_assumptions;
  *number_of_iterationsm1 = niter;

  /* We want the number of loop header executions which is the number
     of latch executions plus one.
     ??? For UINT_MAX latch executions this number overflows to zero
     for loops like do { n++; } while (n != 0);  */
  if (niter && !chrec_contains_undetermined (niter))
    niter = fold_build2 (PLUS_EXPR, TREE_TYPE (niter), unshare_expr (niter),
                         build_int_cst (TREE_TYPE (niter), 1));
  *number_of_iterations = niter;

  return cond;
}
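/* Illustrative arithmetic (an assumption, not from the original source):
   if the latch of a counted loop executes 99 times, the value stored in
   *NUMBER_OF_ITERATIONSM1 above is 99, while *NUMBER_OF_ITERATIONS becomes
   99 + 1 = 100, the number of times the loop header runs.  */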
/* Function bb_in_loop_p

   Used as predicate for dfs order traversal of the loop bbs.  */

static bool
bb_in_loop_p (const_basic_block bb, const void *data)
{
  const struct loop *const loop = (const struct loop *)data;
  if (flow_bb_inside_loop_p (loop, bb))
    return true;
  return false;
}
/* Create and initialize a new loop_vec_info struct for LOOP_IN, as well as
   stmt_vec_info structs for all the stmts in LOOP_IN.  */

_loop_vec_info::_loop_vec_info (struct loop *loop_in, vec_info_shared *shared)
  : vec_info (vec_info::loop, init_cost (loop_in), shared),
    loop (loop_in),
    bbs (XCNEWVEC (basic_block, loop->num_nodes)),
    num_itersm1 (NULL_TREE),
    num_iters (NULL_TREE),
    num_iters_unchanged (NULL_TREE),
    num_iters_assumptions (NULL_TREE),
    versioning_threshold (0),
    vectorization_factor (0),
    max_vectorization_factor (0),
    mask_skip_niters (NULL_TREE),
    mask_compare_type (NULL_TREE),
    simd_if_cond (NULL_TREE),
    peeling_for_alignment (0),
    slp_unrolling_factor (1),
    single_scalar_iteration_cost (0),
    vectorizable (false),
    can_fully_mask_p (true),
    fully_masked_p (false),
    peeling_for_gaps (false),
    peeling_for_niter (false),
    operands_swapped (false),
    no_data_dependencies (false),
    has_mask_store (false),
    orig_loop_info (NULL)
{
  /* CHECKME: We want to visit all BBs before their successors (except for
     latch blocks, for which this assertion wouldn't hold).  In the simple
     case of the loop forms we allow, a dfs order of the BBs would be the
     same as reversed postorder traversal, so we are safe.  */

  unsigned int nbbs = dfs_enumerate_from (loop->header, 0, bb_in_loop_p,
                                          bbs, loop->num_nodes, loop);
  gcc_assert (nbbs == loop->num_nodes);

  for (unsigned int i = 0; i < nbbs; i++)
    {
      basic_block bb = bbs[i];
      gimple_stmt_iterator si;

      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
        {
          gimple *phi = gsi_stmt (si);
          gimple_set_uid (phi, 0);
          add_stmt (phi);
        }

      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
        {
          gimple *stmt = gsi_stmt (si);
          gimple_set_uid (stmt, 0);
          add_stmt (stmt);
          /* If .GOMP_SIMD_LANE call for the current loop has 3 arguments, the
             third argument is the #pragma omp simd if (x) condition, when 0,
             loop shouldn't be vectorized, when non-zero constant, it should
             be vectorized normally, otherwise versioned with vectorized loop
             done if the condition is non-zero at runtime.  */
          if (loop_in->simduid
              && is_gimple_call (stmt)
              && gimple_call_internal_p (stmt)
              && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE
              && gimple_call_num_args (stmt) >= 3
              && TREE_CODE (gimple_call_arg (stmt, 0)) == SSA_NAME
              && (loop_in->simduid
                  == SSA_NAME_VAR (gimple_call_arg (stmt, 0))))
            {
              tree arg = gimple_call_arg (stmt, 2);
              if (integer_zerop (arg) || TREE_CODE (arg) == SSA_NAME)
                simd_if_cond = arg;
              else
                gcc_assert (integer_nonzerop (arg));
            }
        }
    }
}
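/* Illustrative note (an assumption, not from the original source): for a
   loop written as

     #pragma omp simd if (x)
     for (int i = 0; i < n; i++)
       a[i] += b[i];

   the third argument of the .GOMP_SIMD_LANE call inspected above carries
   'x'; a constant 0 disables vectorization, a non-zero constant keeps it
   unconditional, and an SSA_NAME leads to a runtime-versioned loop.  */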
/* Free all levels of MASKS.  */

void
release_vec_loop_masks (vec_loop_masks *masks)
{
  rgroup_masks *rgm;
  unsigned int i;
  FOR_EACH_VEC_ELT (*masks, i, rgm)
    rgm->masks.release ();
  masks->release ();
}
/* Free all memory used by the _loop_vec_info, as well as all the
   stmt_vec_info structs of all the stmts in the loop.  */

_loop_vec_info::~_loop_vec_info ()
{
  int nbbs;
  gimple_stmt_iterator si;
  int j;

  nbbs = loop->num_nodes;
  for (j = 0; j < nbbs; j++)
    {
      basic_block bb = bbs[j];
      for (si = gsi_start_bb (bb); !gsi_end_p (si); )
        {
          gimple *stmt = gsi_stmt (si);

          /* We may have broken canonical form by moving a constant
             into RHS1 of a commutative op.  Fix such occurrences.  */
          if (operands_swapped && is_gimple_assign (stmt))
            {
              enum tree_code code = gimple_assign_rhs_code (stmt);

              if ((code == PLUS_EXPR
                   || code == POINTER_PLUS_EXPR
                   || code == MULT_EXPR)
                  && CONSTANT_CLASS_P (gimple_assign_rhs1 (stmt)))
                swap_ssa_operands (stmt,
                                   gimple_assign_rhs1_ptr (stmt),
                                   gimple_assign_rhs2_ptr (stmt));
              else if (code == COND_EXPR
                       && CONSTANT_CLASS_P (gimple_assign_rhs2 (stmt)))
                {
                  tree cond_expr = gimple_assign_rhs1 (stmt);
                  enum tree_code cond_code = TREE_CODE (cond_expr);

                  if (TREE_CODE_CLASS (cond_code) == tcc_comparison)
                    {
                      bool honor_nans
                        = HONOR_NANS (TREE_OPERAND (cond_expr, 0));
                      cond_code = invert_tree_comparison (cond_code,
                                                          honor_nans);
                      if (cond_code != ERROR_MARK)
                        {
                          TREE_SET_CODE (cond_expr, cond_code);
                          swap_ssa_operands (stmt,
                                             gimple_assign_rhs2_ptr (stmt),
                                             gimple_assign_rhs3_ptr (stmt));
                        }
                    }
                }
            }
          gsi_next (&si);
        }
    }

  free (bbs);

  release_vec_loop_masks (&masks);

  loop->aux = NULL;
}
/* Return an invariant or register for EXPR and emit necessary
   computations in the LOOP_VINFO loop preheader.  */

tree
cse_and_gimplify_to_preheader (loop_vec_info loop_vinfo, tree expr)
{
  if (is_gimple_reg (expr)
      || is_gimple_min_invariant (expr))
    return expr;

  if (! loop_vinfo->ivexpr_map)
    loop_vinfo->ivexpr_map = new hash_map<tree_operand_hash, tree>;
  tree &cached = loop_vinfo->ivexpr_map->get_or_insert (expr);
  if (! cached)
    {
      gimple_seq stmts = NULL;
      cached = force_gimple_operand (unshare_expr (expr),
                                     &stmts, true, NULL_TREE);
      if (stmts)
        {
          edge e = loop_preheader_edge (LOOP_VINFO_LOOP (loop_vinfo));
          gsi_insert_seq_on_edge_immediate (e, stmts);
        }
    }
  return cached;
}
/* Return true if we can use CMP_TYPE as the comparison type to produce
   all masks required to mask LOOP_VINFO.  */

static bool
can_produce_all_loop_masks_p (loop_vec_info loop_vinfo, tree cmp_type)
{
  rgroup_masks *rgm;
  unsigned int i;
  FOR_EACH_VEC_ELT (LOOP_VINFO_MASKS (loop_vinfo), i, rgm)
    if (rgm->mask_type != NULL_TREE
        && !direct_internal_fn_supported_p (IFN_WHILE_ULT,
                                            cmp_type, rgm->mask_type,
                                            OPTIMIZE_FOR_SPEED))
      return false;
  return true;
}
/* Calculate the maximum number of scalars per iteration for every
   rgroup in LOOP_VINFO.  */

static unsigned int
vect_get_max_nscalars_per_iter (loop_vec_info loop_vinfo)
{
  unsigned int res = 1;
  unsigned int i;
  rgroup_masks *rgm;
  FOR_EACH_VEC_ELT (LOOP_VINFO_MASKS (loop_vinfo), i, rgm)
    res = MAX (res, rgm->max_nscalars_per_iter);
  return res;
}
1024 /* Each statement in LOOP_VINFO can be masked where necessary. Check
1025 whether we can actually generate the masks required. Return true if so,
1026 storing the type of the scalar IV in LOOP_VINFO_MASK_COMPARE_TYPE. */
1029 vect_verify_full_masking (loop_vec_info loop_vinfo
)
1031 struct loop
*loop
= LOOP_VINFO_LOOP (loop_vinfo
);
1032 unsigned int min_ni_width
;
1033 unsigned int max_nscalars_per_iter
1034 = vect_get_max_nscalars_per_iter (loop_vinfo
);
1036 /* Use a normal loop if there are no statements that need masking.
1037 This only happens in rare degenerate cases: it means that the loop
1038 has no loads, no stores, and no live-out values. */
1039 if (LOOP_VINFO_MASKS (loop_vinfo
).is_empty ())
1042 /* Get the maximum number of iterations that is representable
1043 in the counter type. */
1044 tree ni_type
= TREE_TYPE (LOOP_VINFO_NITERSM1 (loop_vinfo
));
1045 widest_int max_ni
= wi::to_widest (TYPE_MAX_VALUE (ni_type
)) + 1;
1047 /* Get a more refined estimate for the number of iterations. */
1048 widest_int max_back_edges
;
1049 if (max_loop_iterations (loop
, &max_back_edges
))
1050 max_ni
= wi::smin (max_ni
, max_back_edges
+ 1);
1052 /* Account for rgroup masks, in which each bit is replicated N times. */
1053 max_ni
*= max_nscalars_per_iter
;
1055 /* Work out how many bits we need to represent the limit. */
1056 min_ni_width
= wi::min_precision (max_ni
, UNSIGNED
);
1058 /* Find a scalar mode for which WHILE_ULT is supported. */
1059 opt_scalar_int_mode cmp_mode_iter
;
1060 tree cmp_type
= NULL_TREE
;
1061 tree iv_type
= NULL_TREE
;
1062 widest_int iv_limit
= vect_iv_limit_for_full_masking (loop_vinfo
);
1063 unsigned int iv_precision
= UINT_MAX
;
1066 iv_precision
= wi::min_precision (iv_limit
* max_nscalars_per_iter
,
1069 FOR_EACH_MODE_IN_CLASS (cmp_mode_iter
, MODE_INT
)
1071 unsigned int cmp_bits
= GET_MODE_BITSIZE (cmp_mode_iter
.require ());
1072 if (cmp_bits
>= min_ni_width
1073 && targetm
.scalar_mode_supported_p (cmp_mode_iter
.require ()))
1075 tree this_type
= build_nonstandard_integer_type (cmp_bits
, true);
1077 && can_produce_all_loop_masks_p (loop_vinfo
, this_type
))
1079 /* Although we could stop as soon as we find a valid mode,
1080 there are at least two reasons why that's not always the
1083 - An IV that's Pmode or wider is more likely to be reusable
1084 in address calculations than an IV that's narrower than
1087 - Doing the comparison in IV_PRECISION or wider allows
1088 a natural 0-based IV, whereas using a narrower comparison
1089 type requires mitigations against wrap-around.
1091 Conversely, if the IV limit is variable, doing the comparison
1092 in a wider type than the original type can introduce
1093 unnecessary extensions, so picking the widest valid mode
1094 is not always a good choice either.
1096 Here we prefer the first IV type that's Pmode or wider,
1097 and the first comparison type that's IV_PRECISION or wider.
1098 (The comparison type must be no wider than the IV type,
1099 to avoid extensions in the vector loop.)
1101 ??? We might want to try continuing beyond Pmode for ILP32
1102 targets if CMP_BITS < IV_PRECISION. */
1103 iv_type
= this_type
;
1104 if (!cmp_type
|| iv_precision
> TYPE_PRECISION (cmp_type
))
1105 cmp_type
= this_type
;
1106 if (cmp_bits
>= GET_MODE_BITSIZE (Pmode
))
1115 LOOP_VINFO_MASK_COMPARE_TYPE (loop_vinfo
) = cmp_type
;
1116 LOOP_VINFO_MASK_IV_TYPE (loop_vinfo
) = iv_type
;
1120 /* Calculate the cost of one scalar iteration of the loop. */
1122 vect_compute_single_scalar_iteration_cost (loop_vec_info loop_vinfo
)
1124 struct loop
*loop
= LOOP_VINFO_LOOP (loop_vinfo
);
1125 basic_block
*bbs
= LOOP_VINFO_BBS (loop_vinfo
);
1126 int nbbs
= loop
->num_nodes
, factor
;
1127 int innerloop_iters
, i
;
1129 DUMP_VECT_SCOPE ("vect_compute_single_scalar_iteration_cost");
1131 /* Gather costs for statements in the scalar loop. */
1134 innerloop_iters
= 1;
1136 innerloop_iters
= 50; /* FIXME */
1138 for (i
= 0; i
< nbbs
; i
++)
1140 gimple_stmt_iterator si
;
1141 basic_block bb
= bbs
[i
];
1143 if (bb
->loop_father
== loop
->inner
)
1144 factor
= innerloop_iters
;
1148 for (si
= gsi_start_bb (bb
); !gsi_end_p (si
); gsi_next (&si
))
1150 gimple
*stmt
= gsi_stmt (si
);
1151 stmt_vec_info stmt_info
= loop_vinfo
->lookup_stmt (stmt
);
1153 if (!is_gimple_assign (stmt
) && !is_gimple_call (stmt
))
1156 /* Skip stmts that are not vectorized inside the loop. */
1157 stmt_vec_info vstmt_info
= vect_stmt_to_vectorize (stmt_info
);
1158 if (!STMT_VINFO_RELEVANT_P (vstmt_info
)
1159 && (!STMT_VINFO_LIVE_P (vstmt_info
)
1160 || !VECTORIZABLE_CYCLE_DEF
1161 (STMT_VINFO_DEF_TYPE (vstmt_info
))))
1164 vect_cost_for_stmt kind
;
1165 if (STMT_VINFO_DATA_REF (stmt_info
))
1167 if (DR_IS_READ (STMT_VINFO_DATA_REF (stmt_info
)))
1170 kind
= scalar_store
;
1175 record_stmt_cost (&LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo
),
1176 factor
, kind
, stmt_info
, 0, vect_prologue
);
1180 /* Now accumulate cost. */
1181 void *target_cost_data
= init_cost (loop
);
1182 stmt_info_for_cost
*si
;
1184 FOR_EACH_VEC_ELT (LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo
),
1186 (void) add_stmt_cost (target_cost_data
, si
->count
,
1187 si
->kind
, si
->stmt_info
, si
->misalign
,
1189 unsigned dummy
, body_cost
= 0;
1190 finish_cost (target_cost_data
, &dummy
, &body_cost
, &dummy
);
1191 destroy_cost_data (target_cost_data
);
1192 LOOP_VINFO_SINGLE_SCALAR_ITERATION_COST (loop_vinfo
) = body_cost
;
1196 /* Function vect_analyze_loop_form_1.
1198 Verify that certain CFG restrictions hold, including:
1199 - the loop has a pre-header
1200 - the loop has a single entry and exit
1201 - the loop exit condition is simple enough
1202 - the number of iterations can be analyzed, i.e, a countable loop. The
1203 niter could be analyzed under some assumptions. */
1206 vect_analyze_loop_form_1 (struct loop
*loop
, gcond
**loop_cond
,
1207 tree
*assumptions
, tree
*number_of_iterationsm1
,
1208 tree
*number_of_iterations
, gcond
**inner_loop_cond
)
1210 DUMP_VECT_SCOPE ("vect_analyze_loop_form");
1212 /* Different restrictions apply when we are considering an inner-most loop,
1213 vs. an outer (nested) loop.
1214 (FORNOW. May want to relax some of these restrictions in the future). */
1218 /* Inner-most loop. We currently require that the number of BBs is
1219 exactly 2 (the header and latch). Vectorizable inner-most loops
1230 if (loop
->num_nodes
!= 2)
1231 return opt_result::failure_at (vect_location
,
1233 " control flow in loop.\n");
1235 if (empty_block_p (loop
->header
))
1236 return opt_result::failure_at (vect_location
,
1237 "not vectorized: empty loop.\n");
1241 struct loop
*innerloop
= loop
->inner
;
1244 /* Nested loop. We currently require that the loop is doubly-nested,
1245 contains a single inner loop, and the number of BBs is exactly 5.
1246 Vectorizable outer-loops look like this:
1258 The inner-loop has the properties expected of inner-most loops
1259 as described above. */
1261 if ((loop
->inner
)->inner
|| (loop
->inner
)->next
)
1262 return opt_result::failure_at (vect_location
,
1264 " multiple nested loops.\n");
1266 if (loop
->num_nodes
!= 5)
1267 return opt_result::failure_at (vect_location
,
1269 " control flow in loop.\n");
1271 entryedge
= loop_preheader_edge (innerloop
);
1272 if (entryedge
->src
!= loop
->header
1273 || !single_exit (innerloop
)
1274 || single_exit (innerloop
)->dest
!= EDGE_PRED (loop
->latch
, 0)->src
)
1275 return opt_result::failure_at (vect_location
,
1277 " unsupported outerloop form.\n");
1279 /* Analyze the inner-loop. */
1280 tree inner_niterm1
, inner_niter
, inner_assumptions
;
1282 = vect_analyze_loop_form_1 (loop
->inner
, inner_loop_cond
,
1283 &inner_assumptions
, &inner_niterm1
,
1284 &inner_niter
, NULL
);
1287 if (dump_enabled_p ())
1288 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
1289 "not vectorized: Bad inner loop.\n");
1293 /* Don't support analyzing niter under assumptions for inner
1295 if (!integer_onep (inner_assumptions
))
1296 return opt_result::failure_at (vect_location
,
1297 "not vectorized: Bad inner loop.\n");
1299 if (!expr_invariant_in_loop_p (loop
, inner_niter
))
1300 return opt_result::failure_at (vect_location
,
1301 "not vectorized: inner-loop count not"
1304 if (dump_enabled_p ())
1305 dump_printf_loc (MSG_NOTE
, vect_location
,
1306 "Considering outer-loop vectorization.\n");
1309 if (!single_exit (loop
))
1310 return opt_result::failure_at (vect_location
,
1311 "not vectorized: multiple exits.\n");
1312 if (EDGE_COUNT (loop
->header
->preds
) != 2)
1313 return opt_result::failure_at (vect_location
,
1315 " too many incoming edges.\n");
1317 /* We assume that the loop exit condition is at the end of the loop. i.e,
1318 that the loop is represented as a do-while (with a proper if-guard
1319 before the loop if needed), where the loop header contains all the
1320 executable statements, and the latch is empty. */
1321 if (!empty_block_p (loop
->latch
)
1322 || !gimple_seq_empty_p (phi_nodes (loop
->latch
)))
1323 return opt_result::failure_at (vect_location
,
1324 "not vectorized: latch block not empty.\n");
1326 /* Make sure the exit is not abnormal. */
1327 edge e
= single_exit (loop
);
1328 if (e
->flags
& EDGE_ABNORMAL
)
1329 return opt_result::failure_at (vect_location
,
1331 " abnormal loop exit edge.\n");
1333 *loop_cond
= vect_get_loop_niters (loop
, assumptions
, number_of_iterations
,
1334 number_of_iterationsm1
);
1336 return opt_result::failure_at
1338 "not vectorized: complicated exit condition.\n");
1340 if (integer_zerop (*assumptions
)
1341 || !*number_of_iterations
1342 || chrec_contains_undetermined (*number_of_iterations
))
1343 return opt_result::failure_at
1345 "not vectorized: number of iterations cannot be computed.\n");
1347 if (integer_zerop (*number_of_iterations
))
1348 return opt_result::failure_at
1350 "not vectorized: number of iterations = 0.\n");
1352 return opt_result::success ();
1355 /* Analyze LOOP form and return a loop_vec_info if it is of suitable form. */
1358 vect_analyze_loop_form (struct loop
*loop
, vec_info_shared
*shared
)
1360 tree assumptions
, number_of_iterations
, number_of_iterationsm1
;
1361 gcond
*loop_cond
, *inner_loop_cond
= NULL
;
1364 = vect_analyze_loop_form_1 (loop
, &loop_cond
,
1365 &assumptions
, &number_of_iterationsm1
,
1366 &number_of_iterations
, &inner_loop_cond
);
1368 return opt_loop_vec_info::propagate_failure (res
);
1370 loop_vec_info loop_vinfo
= new _loop_vec_info (loop
, shared
);
1371 LOOP_VINFO_NITERSM1 (loop_vinfo
) = number_of_iterationsm1
;
1372 LOOP_VINFO_NITERS (loop_vinfo
) = number_of_iterations
;
1373 LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo
) = number_of_iterations
;
1374 if (!integer_onep (assumptions
))
1376 /* We consider to vectorize this loop by versioning it under
1377 some assumptions. In order to do this, we need to clear
1378 existing information computed by scev and niter analyzer. */
1380 free_numbers_of_iterations_estimates (loop
);
1381 /* Also set flag for this loop so that following scev and niter
1382 analysis are done under the assumptions. */
1383 loop_constraint_set (loop
, LOOP_C_FINITE
);
1384 /* Also record the assumptions for versioning. */
1385 LOOP_VINFO_NITERS_ASSUMPTIONS (loop_vinfo
) = assumptions
;
1388 if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo
))
1390 if (dump_enabled_p ())
1392 dump_printf_loc (MSG_NOTE
, vect_location
,
1393 "Symbolic number of iterations is ");
1394 dump_generic_expr (MSG_NOTE
, TDF_DETAILS
, number_of_iterations
);
1395 dump_printf (MSG_NOTE
, "\n");
1399 stmt_vec_info loop_cond_info
= loop_vinfo
->lookup_stmt (loop_cond
);
1400 STMT_VINFO_TYPE (loop_cond_info
) = loop_exit_ctrl_vec_info_type
;
1401 if (inner_loop_cond
)
1403 stmt_vec_info inner_loop_cond_info
1404 = loop_vinfo
->lookup_stmt (inner_loop_cond
);
1405 STMT_VINFO_TYPE (inner_loop_cond_info
) = loop_exit_ctrl_vec_info_type
;
1408 gcc_assert (!loop
->aux
);
1409 loop
->aux
= loop_vinfo
;
1410 return opt_loop_vec_info::success (loop_vinfo
);
1415 /* Scan the loop stmts and dependent on whether there are any (non-)SLP
1416 statements update the vectorization factor. */
1419 vect_update_vf_for_slp (loop_vec_info loop_vinfo
)
1421 struct loop
*loop
= LOOP_VINFO_LOOP (loop_vinfo
);
1422 basic_block
*bbs
= LOOP_VINFO_BBS (loop_vinfo
);
1423 int nbbs
= loop
->num_nodes
;
1424 poly_uint64 vectorization_factor
;
1427 DUMP_VECT_SCOPE ("vect_update_vf_for_slp");
1429 vectorization_factor
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
);
1430 gcc_assert (known_ne (vectorization_factor
, 0U));
1432 /* If all the stmts in the loop can be SLPed, we perform only SLP, and
1433 vectorization factor of the loop is the unrolling factor required by
1434 the SLP instances. If that unrolling factor is 1, we say, that we
1435 perform pure SLP on loop - cross iteration parallelism is not
1437 bool only_slp_in_loop
= true;
1438 for (i
= 0; i
< nbbs
; i
++)
1440 basic_block bb
= bbs
[i
];
1441 for (gimple_stmt_iterator si
= gsi_start_bb (bb
); !gsi_end_p (si
);
1444 stmt_vec_info stmt_info
= loop_vinfo
->lookup_stmt (gsi_stmt (si
));
1445 stmt_info
= vect_stmt_to_vectorize (stmt_info
);
1446 if ((STMT_VINFO_RELEVANT_P (stmt_info
)
1447 || VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_info
)))
1448 && !PURE_SLP_STMT (stmt_info
))
1449 /* STMT needs both SLP and loop-based vectorization. */
1450 only_slp_in_loop
= false;
1454 if (only_slp_in_loop
)
1456 if (dump_enabled_p ())
1457 dump_printf_loc (MSG_NOTE
, vect_location
,
1458 "Loop contains only SLP stmts\n");
1459 vectorization_factor
= LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo
);
1463 if (dump_enabled_p ())
1464 dump_printf_loc (MSG_NOTE
, vect_location
,
1465 "Loop contains SLP and non-SLP stmts\n");
1466 /* Both the vectorization factor and unroll factor have the form
1467 current_vector_size * X for some rational X, so they must have
1468 a common multiple. */
1469 vectorization_factor
1470 = force_common_multiple (vectorization_factor
,
1471 LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo
));
1474 LOOP_VINFO_VECT_FACTOR (loop_vinfo
) = vectorization_factor
;
1475 if (dump_enabled_p ())
1477 dump_printf_loc (MSG_NOTE
, vect_location
,
1478 "Updating vectorization factor to ");
1479 dump_dec (MSG_NOTE
, vectorization_factor
);
1480 dump_printf (MSG_NOTE
, ".\n");
1484 /* Return true if STMT_INFO describes a double reduction phi and if
1485 the other phi in the reduction is also relevant for vectorization.
1486 This rejects cases such as:
1489 x_1 = PHI <x_3(outer2), ...>;
1497 x_3 = PHI <x_2(inner)>;
1499 if nothing in x_2 or elsewhere makes x_1 relevant. */
1502 vect_active_double_reduction_p (stmt_vec_info stmt_info
)
1504 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_double_reduction_def
)
1507 return STMT_VINFO_RELEVANT_P (STMT_VINFO_REDUC_DEF (stmt_info
));
1510 /* Function vect_analyze_loop_operations.
1512 Scan the loop stmts and make sure they are all vectorizable. */
1515 vect_analyze_loop_operations (loop_vec_info loop_vinfo
)
1517 struct loop
*loop
= LOOP_VINFO_LOOP (loop_vinfo
);
1518 basic_block
*bbs
= LOOP_VINFO_BBS (loop_vinfo
);
1519 int nbbs
= loop
->num_nodes
;
1521 stmt_vec_info stmt_info
;
1522 bool need_to_vectorize
= false;
1525 DUMP_VECT_SCOPE ("vect_analyze_loop_operations");
1527 auto_vec
<stmt_info_for_cost
> cost_vec
;
1529 for (i
= 0; i
< nbbs
; i
++)
1531 basic_block bb
= bbs
[i
];
1533 for (gphi_iterator si
= gsi_start_phis (bb
); !gsi_end_p (si
);
1536 gphi
*phi
= si
.phi ();
1539 stmt_info
= loop_vinfo
->lookup_stmt (phi
);
1540 if (dump_enabled_p ())
1541 dump_printf_loc (MSG_NOTE
, vect_location
, "examining phi: %G", phi
);
1542 if (virtual_operand_p (gimple_phi_result (phi
)))
1545 /* Inner-loop loop-closed exit phi in outer-loop vectorization
1546 (i.e., a phi in the tail of the outer-loop). */
1547 if (! is_loop_header_bb_p (bb
))
1549 /* FORNOW: we currently don't support the case that these phis
1550 are not used in the outerloop (unless it is double reduction,
1551 i.e., this phi is vect_reduction_def), cause this case
1552 requires to actually do something here. */
1553 if (STMT_VINFO_LIVE_P (stmt_info
)
1554 && !vect_active_double_reduction_p (stmt_info
))
1555 return opt_result::failure_at (phi
,
1556 "Unsupported loop-closed phi"
1557 " in outer-loop.\n");
1559 /* If PHI is used in the outer loop, we check that its operand
1560 is defined in the inner loop. */
1561 if (STMT_VINFO_RELEVANT_P (stmt_info
))
1565 if (gimple_phi_num_args (phi
) != 1)
1566 return opt_result::failure_at (phi
, "unsupported phi");
1568 phi_op
= PHI_ARG_DEF (phi
, 0);
1569 stmt_vec_info op_def_info
= loop_vinfo
->lookup_def (phi_op
);
1571 return opt_result::failure_at (phi
, "unsupported phi");
1573 if (STMT_VINFO_RELEVANT (op_def_info
) != vect_used_in_outer
1574 && (STMT_VINFO_RELEVANT (op_def_info
)
1575 != vect_used_in_outer_by_reduction
))
1576 return opt_result::failure_at (phi
, "unsupported phi");
1582 gcc_assert (stmt_info
);
1584 if ((STMT_VINFO_RELEVANT (stmt_info
) == vect_used_in_scope
1585 || STMT_VINFO_LIVE_P (stmt_info
))
1586 && STMT_VINFO_DEF_TYPE (stmt_info
) != vect_induction_def
)
1587 /* A scalar-dependence cycle that we don't support. */
1588 return opt_result::failure_at (phi
,
1590 " scalar dependence cycle.\n");
1592 if (STMT_VINFO_RELEVANT_P (stmt_info
))
1594 need_to_vectorize
= true;
1595 if (STMT_VINFO_DEF_TYPE (stmt_info
) == vect_induction_def
1596 && ! PURE_SLP_STMT (stmt_info
))
1597 ok
= vectorizable_induction (stmt_info
, NULL
, NULL
, NULL
,
1599 else if ((STMT_VINFO_DEF_TYPE (stmt_info
) == vect_reduction_def
1600 || STMT_VINFO_DEF_TYPE (stmt_info
) == vect_nested_cycle
)
1601 && ! PURE_SLP_STMT (stmt_info
))
1602 ok
= vectorizable_reduction (stmt_info
, NULL
, NULL
, NULL
, NULL
,
1606 /* SLP PHIs are tested by vect_slp_analyze_node_operations. */
1608 && STMT_VINFO_LIVE_P (stmt_info
)
1609 && !PURE_SLP_STMT (stmt_info
))
1610 ok
= vectorizable_live_operation (stmt_info
, NULL
, NULL
, -1, NULL
,
1614 return opt_result::failure_at (phi
,
1615 "not vectorized: relevant phi not "
1617 static_cast <gimple
*> (phi
));
1620 for (gimple_stmt_iterator si
= gsi_start_bb (bb
); !gsi_end_p (si
);
1623 gimple
*stmt
= gsi_stmt (si
);
1624 if (!gimple_clobber_p (stmt
))
1627 = vect_analyze_stmt (loop_vinfo
->lookup_stmt (stmt
),
1629 NULL
, NULL
, &cost_vec
);
1636 add_stmt_costs (loop_vinfo
->target_cost_data
, &cost_vec
);
1638 /* All operations in the loop are either irrelevant (deal with loop
1639 control, or dead), or only used outside the loop and can be moved
1640 out of the loop (e.g. invariants, inductions). The loop can be
1641 optimized away by scalar optimizations. We're better off not
1642 touching this loop. */
1643 if (!need_to_vectorize
)
1645 if (dump_enabled_p ())
1646 dump_printf_loc (MSG_NOTE
, vect_location
,
1647 "All the computation can be taken out of the loop.\n");
1648 return opt_result::failure_at
1650 "not vectorized: redundant loop. no profit to vectorize.\n");
1653 return opt_result::success ();
1656 /* Analyze the cost of the loop described by LOOP_VINFO. Decide if it
1657 is worthwhile to vectorize. Return 1 if definitely yes, 0 if
1658 definitely no, or -1 if it's worth retrying. */
1661 vect_analyze_loop_costing (loop_vec_info loop_vinfo
)
1663 struct loop
*loop
= LOOP_VINFO_LOOP (loop_vinfo
);
1664 unsigned int assumed_vf
= vect_vf_for_cost (loop_vinfo
);
1666 /* Only fully-masked loops can have iteration counts less than the
1667 vectorization factor. */
1668 if (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo
))
1670 HOST_WIDE_INT max_niter
;
1672 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo
))
1673 max_niter
= LOOP_VINFO_INT_NITERS (loop_vinfo
);
1675 max_niter
= max_stmt_executions_int (loop
);
1678 && (unsigned HOST_WIDE_INT
) max_niter
< assumed_vf
)
1680 if (dump_enabled_p ())
1681 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
1682 "not vectorized: iteration count smaller than "
1683 "vectorization factor.\n");
1688 int min_profitable_iters
, min_profitable_estimate
;
1689 vect_estimate_min_profitable_iters (loop_vinfo
, &min_profitable_iters
,
1690 &min_profitable_estimate
);
1692 if (min_profitable_iters
< 0)
1694 if (dump_enabled_p ())
1695 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
1696 "not vectorized: vectorization not profitable.\n");
1697 if (dump_enabled_p ())
1698 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
1699 "not vectorized: vector version will never be "
1704 int min_scalar_loop_bound
= (PARAM_VALUE (PARAM_MIN_VECT_LOOP_BOUND
)
1707 /* Use the cost model only if it is more conservative than user specified
1709 unsigned int th
= (unsigned) MAX (min_scalar_loop_bound
,
1710 min_profitable_iters
);
1712 LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo
) = th
;
1714 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo
)
1715 && LOOP_VINFO_INT_NITERS (loop_vinfo
) < th
)
1717 if (dump_enabled_p ())
1718 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
1719 "not vectorized: vectorization not profitable.\n");
1720 if (dump_enabled_p ())
1721 dump_printf_loc (MSG_NOTE
, vect_location
,
1722 "not vectorized: iteration count smaller than user "
1723 "specified loop bound parameter or minimum profitable "
1724 "iterations (whichever is more conservative).\n");
1728 HOST_WIDE_INT estimated_niter
= estimated_stmt_executions_int (loop
);
1729 if (estimated_niter
== -1)
1730 estimated_niter
= likely_max_stmt_executions_int (loop
);
1731 if (estimated_niter
!= -1
1732 && ((unsigned HOST_WIDE_INT
) estimated_niter
1733 < MAX (th
, (unsigned) min_profitable_estimate
)))
1735 if (dump_enabled_p ())
1736 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
1737 "not vectorized: estimated iteration count too "
1739 if (dump_enabled_p ())
1740 dump_printf_loc (MSG_NOTE
, vect_location
,
1741 "not vectorized: estimated iteration count smaller "
1742 "than specified loop bound parameter or minimum "
1743 "profitable iterations (whichever is more "
1744 "conservative).\n");
1752 vect_get_datarefs_in_loop (loop_p loop
, basic_block
*bbs
,
1753 vec
<data_reference_p
> *datarefs
,
1754 unsigned int *n_stmts
)
1757 for (unsigned i
= 0; i
< loop
->num_nodes
; i
++)
1758 for (gimple_stmt_iterator gsi
= gsi_start_bb (bbs
[i
]);
1759 !gsi_end_p (gsi
); gsi_next (&gsi
))
1761 gimple
*stmt
= gsi_stmt (gsi
);
1762 if (is_gimple_debug (stmt
))
1765 opt_result res
= vect_find_stmt_data_reference (loop
, stmt
, datarefs
);
1768 if (is_gimple_call (stmt
) && loop
->safelen
)
1770 tree fndecl
= gimple_call_fndecl (stmt
), op
;
1771 if (fndecl
!= NULL_TREE
)
1773 cgraph_node
*node
= cgraph_node::get (fndecl
);
1774 if (node
!= NULL
&& node
->simd_clones
!= NULL
)
1776 unsigned int j
, n
= gimple_call_num_args (stmt
);
1777 for (j
= 0; j
< n
; j
++)
1779 op
= gimple_call_arg (stmt
, j
);
1781 || (REFERENCE_CLASS_P (op
)
1782 && get_base_address (op
)))
1785 op
= gimple_call_lhs (stmt
);
1786 /* Ignore #pragma omp declare simd functions
1787 if they don't have data references in the
1788 call stmt itself. */
1792 || (REFERENCE_CLASS_P (op
)
1793 && get_base_address (op
)))))
1800 /* If dependence analysis will give up due to the limit on the
1801 number of datarefs stop here and fail fatally. */
1802 if (datarefs
->length ()
1803 > (unsigned)PARAM_VALUE (PARAM_LOOP_MAX_DATAREFS_FOR_DATADEPS
))
1804 return opt_result::failure_at (stmt
, "exceeded param "
1805 "loop-max-datarefs-for-datadeps\n");
1807 return opt_result::success ();
1810 /* Look for SLP-only access groups and turn each individual access into its own
1813 vect_dissolve_slp_only_groups (loop_vec_info loop_vinfo
)
1816 struct data_reference
*dr
;
1818 DUMP_VECT_SCOPE ("vect_dissolve_slp_only_groups");
1820 vec
<data_reference_p
> datarefs
= loop_vinfo
->shared
->datarefs
;
1821 FOR_EACH_VEC_ELT (datarefs
, i
, dr
)
1823 gcc_assert (DR_REF (dr
));
1824 stmt_vec_info stmt_info
= loop_vinfo
->lookup_stmt (DR_STMT (dr
));
1826 /* Check if the load is a part of an interleaving chain. */
1827 if (STMT_VINFO_GROUPED_ACCESS (stmt_info
))
1829 stmt_vec_info first_element
= DR_GROUP_FIRST_ELEMENT (stmt_info
);
1830 unsigned int group_size
= DR_GROUP_SIZE (first_element
);
1832 /* Check if SLP-only groups. */
1833 if (!STMT_SLP_TYPE (stmt_info
)
1834 && STMT_VINFO_SLP_VECT_ONLY (first_element
))
1836 /* Dissolve the group. */
1837 STMT_VINFO_SLP_VECT_ONLY (first_element
) = false;
1839 stmt_vec_info vinfo
= first_element
;
1842 stmt_vec_info next
= DR_GROUP_NEXT_ELEMENT (vinfo
);
1843 DR_GROUP_FIRST_ELEMENT (vinfo
) = vinfo
;
1844 DR_GROUP_NEXT_ELEMENT (vinfo
) = NULL
;
1845 DR_GROUP_SIZE (vinfo
) = 1;
1846 DR_GROUP_GAP (vinfo
) = group_size
- 1;
1854 /* Function vect_analyze_loop_2.
1856 Apply a set of analyses on LOOP, and create a loop_vec_info struct
1857 for it. The different analyses will record information in the
1858 loop_vec_info struct. */
1860 vect_analyze_loop_2 (loop_vec_info loop_vinfo
, bool &fatal
, unsigned *n_stmts
)
1862 opt_result ok
= opt_result::success ();
1864 unsigned int max_vf
= MAX_VECTORIZATION_FACTOR
;
1865 poly_uint64 min_vf
= 2;
1867 /* The first group of checks is independent of the vector size. */
1870 if (LOOP_VINFO_SIMD_IF_COND (loop_vinfo
)
1871 && integer_zerop (LOOP_VINFO_SIMD_IF_COND (loop_vinfo
)))
1872 return opt_result::failure_at (vect_location
,
1873 "not vectorized: simd if(0)\n");
1875 /* Find all data references in the loop (which correspond to vdefs/vuses)
1876 and analyze their evolution in the loop. */
1878 loop_p loop
= LOOP_VINFO_LOOP (loop_vinfo
);
1880 /* Gather the data references and count stmts in the loop. */
1881 if (!LOOP_VINFO_DATAREFS (loop_vinfo
).exists ())
1884 = vect_get_datarefs_in_loop (loop
, LOOP_VINFO_BBS (loop_vinfo
),
1885 &LOOP_VINFO_DATAREFS (loop_vinfo
),
1889 if (dump_enabled_p ())
1890 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
1891 "not vectorized: loop contains function "
1892 "calls or data references that cannot "
1896 loop_vinfo
->shared
->save_datarefs ();
1899 loop_vinfo
->shared
->check_datarefs ();
1901 /* Analyze the data references and also adjust the minimal
1902 vectorization factor according to the loads and stores. */
1904 ok
= vect_analyze_data_refs (loop_vinfo
, &min_vf
, &fatal
);
1907 if (dump_enabled_p ())
1908 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
1909 "bad data references.\n");
1913 /* Classify all cross-iteration scalar data-flow cycles.
1914 Cross-iteration cycles caused by virtual phis are analyzed separately. */
1915 vect_analyze_scalar_cycles (loop_vinfo
);
1917 vect_pattern_recog (loop_vinfo
);
1919 vect_fixup_scalar_cycles_with_patterns (loop_vinfo
);
1921 /* Analyze the access patterns of the data-refs in the loop (consecutive,
1922 complex, etc.). FORNOW: Only handle consecutive access pattern. */
1924 ok
= vect_analyze_data_ref_accesses (loop_vinfo
);
1927 if (dump_enabled_p ())
1928 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
1929 "bad data access.\n");
1933 /* Data-flow analysis to detect stmts that do not need to be vectorized. */
1935 ok
= vect_mark_stmts_to_be_vectorized (loop_vinfo
, &fatal
);
1938 if (dump_enabled_p ())
1939 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
1940 "unexpected pattern.\n");
1944 /* While the rest of the analysis below depends on it in some way. */
1947 /* Analyze data dependences between the data-refs in the loop
1948 and adjust the maximum vectorization factor according to
1950 FORNOW: fail at the first data dependence that we encounter. */
1952 ok
= vect_analyze_data_ref_dependences (loop_vinfo
, &max_vf
);
1955 if (dump_enabled_p ())
1956 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
1957 "bad data dependence.\n");
1960 if (max_vf
!= MAX_VECTORIZATION_FACTOR
1961 && maybe_lt (max_vf
, min_vf
))
1962 return opt_result::failure_at (vect_location
, "bad data dependence.\n");
1963 LOOP_VINFO_MAX_VECT_FACTOR (loop_vinfo
) = max_vf
;
1965 ok
= vect_determine_vectorization_factor (loop_vinfo
);
1968 if (dump_enabled_p ())
1969 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
1970 "can't determine vectorization factor.\n");
1973 if (max_vf
!= MAX_VECTORIZATION_FACTOR
1974 && maybe_lt (max_vf
, LOOP_VINFO_VECT_FACTOR (loop_vinfo
)))
1975 return opt_result::failure_at (vect_location
, "bad data dependence.\n");
1977 /* Compute the scalar iteration cost. */
1978 vect_compute_single_scalar_iteration_cost (loop_vinfo
);
1980 poly_uint64 saved_vectorization_factor
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
);
  /* Check the SLP opportunities in the loop, analyze and build SLP trees.  */
  ok = vect_analyze_slp (loop_vinfo, *n_stmts);

  /* If there are any SLP instances mark them as pure_slp.  */
  bool slp = vect_make_slp_decision (loop_vinfo);

  /* Find stmts that need to be both vectorized and SLPed.  */
  vect_detect_hybrid_slp (loop_vinfo);

  /* Update the vectorization factor based on the SLP decision.  */
  vect_update_vf_for_slp (loop_vinfo);
  bool saved_can_fully_mask_p = LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo);

  /* We don't expect to have to roll back to anything other than an empty
     set of rgroups.  */
  gcc_assert (LOOP_VINFO_MASKS (loop_vinfo).is_empty ());

  /* This is the point where we can re-start analysis with SLP forced off.  */
start_over:

  /* Now the vectorization factor is final.  */
  poly_uint64 vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  gcc_assert (known_ne (vectorization_factor, 0U));
  if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo) && dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
                       "vectorization_factor = ");
      dump_dec (MSG_NOTE, vectorization_factor);
      dump_printf (MSG_NOTE, ", niters = %wd\n",
                   LOOP_VINFO_INT_NITERS (loop_vinfo));
    }

  HOST_WIDE_INT max_niter
    = likely_max_stmt_executions_int (LOOP_VINFO_LOOP (loop_vinfo));
  /* Analyze the alignment of the data-refs in the loop.
     Fail if a data reference is found that cannot be vectorized.  */

  ok = vect_analyze_data_refs_alignment (loop_vinfo);
  if (!ok)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "bad data alignment.\n");
      return ok;
    }
  /* Prune the list of ddrs to be tested at run-time by versioning for alias.
     It is important to call pruning after vect_analyze_data_ref_accesses,
     since we use grouping information gathered by interleaving analysis.  */
  ok = vect_prune_runtime_alias_test_list (loop_vinfo);
  if (!ok)
    return ok;

  /* Do not invoke vect_enhance_data_refs_alignment for epilogue
     vectorization, since we do not want to add extra peeling or
     add versioning for alignment.  */
  if (!LOOP_VINFO_EPILOGUE_P (loop_vinfo))
    /* This pass will decide on using loop versioning and/or loop peeling in
       order to enhance the alignment of data references in the loop.  */
    ok = vect_enhance_data_refs_alignment (loop_vinfo);
  else
    ok = vect_verify_datarefs_alignment (loop_vinfo);
  if (!ok)
    return ok;
  /* Analyze operations in the SLP instances.  Note this may
     remove unsupported SLP instances which makes the above
     SLP kind detection invalid.  */
  unsigned old_size = LOOP_VINFO_SLP_INSTANCES (loop_vinfo).length ();
  vect_slp_analyze_operations (loop_vinfo);
  if (LOOP_VINFO_SLP_INSTANCES (loop_vinfo).length () != old_size)
    {
      ok = opt_result::failure_at (vect_location,
                                   "unsupported SLP instances\n");
      goto again;
    }

  /* Dissolve SLP-only groups.  */
  vect_dissolve_slp_only_groups (loop_vinfo);
  /* Scan all the remaining operations in the loop that are not subject
     to SLP and make sure they are vectorizable.  */
  ok = vect_analyze_loop_operations (loop_vinfo);
  if (!ok)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "bad operation or unsupported loop bound.\n");
      return ok;
    }
  /* Decide whether to use a fully-masked loop for this vectorization
     factor.  */
  LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
    = (LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo)
       && vect_verify_full_masking (loop_vinfo));
  if (dump_enabled_p ())
    {
      if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
        dump_printf_loc (MSG_NOTE, vect_location,
                         "using a fully-masked loop.\n");
      else
        dump_printf_loc (MSG_NOTE, vect_location,
                         "not using a fully-masked loop.\n");
    }
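  /* Illustrative example (numbers not taken from this code): with VF = 4
     and 10 scalar iterations, a fully-masked loop runs 3 vector iterations
     under the predicates {1,1,1,1}, {1,1,1,1} and {1,1,0,0}, so no scalar
     epilogue is needed for the 2 leftover iterations.  */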
  /* If an epilog loop is required because of data accesses with gaps,
     one additional iteration needs to be peeled.  Check if there are
     enough iterations for vectorization.  */
  if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo)
      && LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
      && !LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
    {
      poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
      tree scalar_niters = LOOP_VINFO_NITERSM1 (loop_vinfo);

      if (known_lt (wi::to_widest (scalar_niters), vf))
        return opt_result::failure_at (vect_location,
                                       "loop does not have enough iterations"
                                       " to support peeling for gaps.\n");
    }
  /* Check the costings of the loop make vectorizing worthwhile.  */
  res = vect_analyze_loop_costing (loop_vinfo);
  if (res < 0)
    {
      ok = opt_result::failure_at (vect_location,
                                   "Loop costings may not be worthwhile.\n");
      goto again;
    }
  if (!res)
    return opt_result::failure_at (vect_location,
                                   "Loop costings not worthwhile.\n");
  /* Decide whether we need to create an epilogue loop to handle
     remaining scalar iterations.  */
  th = LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo);

  unsigned HOST_WIDE_INT const_vf;
  if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
    /* The main loop handles all iterations.  */
    LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = false;
  else if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
           && LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) >= 0)
    {
      /* Work out the (constant) number of iterations that need to be
         peeled for reasons other than niters.  */
      unsigned int peel_niter = LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo);
      if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo))
        peel_niter += 1;
      if (!multiple_p (LOOP_VINFO_INT_NITERS (loop_vinfo) - peel_niter,
                       LOOP_VINFO_VECT_FACTOR (loop_vinfo)))
        LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = true;
    }
  else if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo)
           /* ??? When peeling for gaps but not alignment, we could
              try to check whether the (variable) niters is known to be
              VF * N + 1.  That's something of a niche case though.  */
           || LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo)
           || !LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant (&const_vf)
           || ((tree_ctz (LOOP_VINFO_NITERS (loop_vinfo))
                < (unsigned) exact_log2 (const_vf))
               /* In case of versioning, check if the maximum number of
                  iterations is greater than th.  If they are identical,
                  the epilogue is unnecessary.  */
               && (!LOOP_REQUIRES_VERSIONING (loop_vinfo)
                   || ((unsigned HOST_WIDE_INT) max_niter
                       > (th / const_vf) * const_vf))))
    LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = true;
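  /* Worked example (illustrative numbers): with known niters = 100, VF = 8
     and 3 iterations peeled for alignment, 100 - 3 = 97 is not a multiple
     of 8, so PEELING_FOR_NITER is set and an epilogue loop handles the
     remaining 97 % 8 = 1 scalar iteration.  */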
  /* If an epilogue loop is required make sure we can create one.  */
  if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo)
      || LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location, "epilog loop required\n");
      if (!vect_can_advance_ivs_p (loop_vinfo)
          || !slpeel_can_duplicate_loop_p (LOOP_VINFO_LOOP (loop_vinfo),
                                           single_exit (LOOP_VINFO_LOOP
                                                         (loop_vinfo))))
        {
          ok = opt_result::failure_at (vect_location,
                                       "not vectorized: can't create required "
                                       "epilog loop\n");
          goto again;
        }
    }
  /* During peeling, we need to check if the number of loop iterations is
     enough for both the peeled prolog loop and the vector loop.  This check
     can be merged along with the threshold check of loop versioning, so
     increase the threshold for this case if necessary.  */
  if (LOOP_REQUIRES_VERSIONING (loop_vinfo))
    {
      poly_uint64 niters_th = 0;

      if (!vect_use_loop_mask_for_alignment_p (loop_vinfo))
        {
          /* Niters for peeled prolog loop.  */
          if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) < 0)
            {
              dr_vec_info *dr_info = LOOP_VINFO_UNALIGNED_DR (loop_vinfo);
              tree vectype = STMT_VINFO_VECTYPE (dr_info->stmt);
              niters_th += TYPE_VECTOR_SUBPARTS (vectype) - 1;
            }
          else
            niters_th += LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo);
        }

      /* Niters for at least one iteration of vectorized loop.  */
      if (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
        niters_th += LOOP_VINFO_VECT_FACTOR (loop_vinfo);
      /* One additional iteration because of peeling for gap.  */
      if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo))
        niters_th += 1;
      LOOP_VINFO_VERSIONING_THRESHOLD (loop_vinfo) = niters_th;
    }
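  /* Worked example (illustrative numbers): with VF = 4, 2 prologue
     iterations peeled for alignment and peeling for gaps required, the
     versioning threshold becomes 2 + 4 + 1 = 7, i.e. the versioned vector
     path is only taken when at least that many iterations are available.  */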
  gcc_assert (known_eq (vectorization_factor,
                        LOOP_VINFO_VECT_FACTOR (loop_vinfo)));

  /* Ok to vectorize!  */
  return opt_result::success ();
again:
  /* Ensure that "ok" is false (with an opt_problem if dumping is enabled).  */
  gcc_assert (!ok);

  /* Try again with SLP forced off but if we didn't do any SLP there is
     no point in re-trying.  */
  if (!slp)
    return ok;

  /* If there are reduction chains re-trying will fail anyway.  */
  if (! LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo).is_empty ())
    return ok;
  /* Likewise if the grouped loads or stores in the SLP cannot be handled
     via interleaving or lane instructions.  */
  slp_instance instance;
  slp_tree node;
  unsigned i, j;
  FOR_EACH_VEC_ELT (LOOP_VINFO_SLP_INSTANCES (loop_vinfo), i, instance)
    {
      stmt_vec_info vinfo;
      vinfo = SLP_TREE_SCALAR_STMTS (SLP_INSTANCE_TREE (instance))[0];
      if (! STMT_VINFO_GROUPED_ACCESS (vinfo))
        continue;
      vinfo = DR_GROUP_FIRST_ELEMENT (vinfo);
      unsigned int size = DR_GROUP_SIZE (vinfo);
      tree vectype = STMT_VINFO_VECTYPE (vinfo);
      if (! vect_store_lanes_supported (vectype, size, false)
          && ! known_eq (TYPE_VECTOR_SUBPARTS (vectype), 1U)
          && ! vect_grouped_store_supported (vectype, size))
        return opt_result::failure_at (vinfo->stmt,
                                       "unsupported grouped store\n");
      FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (instance), j, node)
        {
          vinfo = SLP_TREE_SCALAR_STMTS (node)[0];
          vinfo = DR_GROUP_FIRST_ELEMENT (vinfo);
          bool single_element_p = !DR_GROUP_NEXT_ELEMENT (vinfo);
          size = DR_GROUP_SIZE (vinfo);
          vectype = STMT_VINFO_VECTYPE (vinfo);
          if (! vect_load_lanes_supported (vectype, size, false)
              && ! vect_grouped_load_supported (vectype, single_element_p,
                                                size))
            return opt_result::failure_at (vinfo->stmt,
                                           "unsupported grouped load\n");
        }
    }
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "re-trying with SLP disabled\n");

  /* Roll back state appropriately.  No SLP this time.  */
  slp = false;
  /* Restore the vectorization factor as it was without SLP.  */
  LOOP_VINFO_VECT_FACTOR (loop_vinfo) = saved_vectorization_factor;
  /* Free the SLP instances.  */
  FOR_EACH_VEC_ELT (LOOP_VINFO_SLP_INSTANCES (loop_vinfo), j, instance)
    vect_free_slp_instance (instance, false);
  LOOP_VINFO_SLP_INSTANCES (loop_vinfo).release ();
  /* Reset SLP type to loop_vect on all stmts.  */
  for (i = 0; i < LOOP_VINFO_LOOP (loop_vinfo)->num_nodes; ++i)
    {
      basic_block bb = LOOP_VINFO_BBS (loop_vinfo)[i];
      for (gimple_stmt_iterator si = gsi_start_phis (bb);
           !gsi_end_p (si); gsi_next (&si))
        {
          stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (gsi_stmt (si));
          STMT_SLP_TYPE (stmt_info) = loop_vect;
        }
      for (gimple_stmt_iterator si = gsi_start_bb (bb);
           !gsi_end_p (si); gsi_next (&si))
        {
          stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (gsi_stmt (si));
          STMT_SLP_TYPE (stmt_info) = loop_vect;
          if (STMT_VINFO_IN_PATTERN_P (stmt_info))
            {
              gimple *pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info);
              stmt_info = STMT_VINFO_RELATED_STMT (stmt_info);
              STMT_SLP_TYPE (stmt_info) = loop_vect;
              for (gimple_stmt_iterator pi = gsi_start (pattern_def_seq);
                   !gsi_end_p (pi); gsi_next (&pi))
                STMT_SLP_TYPE (loop_vinfo->lookup_stmt (gsi_stmt (pi)))
                  = loop_vect;
            }
        }
    }
  /* Free optimized alias test DDRS.  */
  LOOP_VINFO_LOWER_BOUNDS (loop_vinfo).truncate (0);
  LOOP_VINFO_COMP_ALIAS_DDRS (loop_vinfo).release ();
  LOOP_VINFO_CHECK_UNEQUAL_ADDRS (loop_vinfo).release ();
  /* Reset target cost data.  */
  destroy_cost_data (LOOP_VINFO_TARGET_COST_DATA (loop_vinfo));
  LOOP_VINFO_TARGET_COST_DATA (loop_vinfo)
    = init_cost (LOOP_VINFO_LOOP (loop_vinfo));
  /* Reset accumulated rgroup information.  */
  release_vec_loop_masks (&LOOP_VINFO_MASKS (loop_vinfo));
  /* Reset assorted flags.  */
  LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = false;
  LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = false;
  LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo) = 0;
  LOOP_VINFO_VERSIONING_THRESHOLD (loop_vinfo) = 0;
  LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = saved_can_fully_mask_p;

  goto start_over;
}
/* Function vect_analyze_loop.

   Apply a set of analyses on LOOP, and create a loop_vec_info struct
   for it.  The different analyses will record information in the
   loop_vec_info struct.  If ORIG_LOOP_VINFO is not NULL epilogue must
   be vectorized.  */

opt_loop_vec_info
vect_analyze_loop (struct loop *loop, loop_vec_info orig_loop_vinfo,
                   vec_info_shared *shared)
{
  auto_vector_sizes vector_sizes;

  /* Autodetect first vector size we try.  */
  current_vector_size = 0;
  targetm.vectorize.autovectorize_vector_sizes (&vector_sizes,
                                                loop->simdlen != 0);
  unsigned int next_size = 0;

  DUMP_VECT_SCOPE ("analyze_loop_nest");

  if (loop_outer (loop)
      && loop_vec_info_for_loop (loop_outer (loop))
      && LOOP_VINFO_VECTORIZABLE_P (loop_vec_info_for_loop (loop_outer (loop))))
    return opt_loop_vec_info::failure_at (vect_location,
                                          "outer-loop already vectorized.\n");

  if (!find_loop_nest (loop, &shared->loop_nest))
    return opt_loop_vec_info::failure_at
      (vect_location,
       "not vectorized: loop nest containing two or more consecutive inner"
       " loops cannot be vectorized\n");

  unsigned n_stmts = 0;
  poly_uint64 autodetected_vector_size = 0;
  opt_loop_vec_info first_loop_vinfo = opt_loop_vec_info::success (NULL);
  poly_uint64 first_vector_size = 0;
2361 /* Check the CFG characteristics of the loop (nesting, entry/exit). */
2362 opt_loop_vec_info loop_vinfo
2363 = vect_analyze_loop_form (loop
, shared
);
2366 if (dump_enabled_p ())
2367 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
2368 "bad loop form.\n");
2369 gcc_checking_assert (first_loop_vinfo
== NULL
);
2375 if (orig_loop_vinfo
)
2376 LOOP_VINFO_ORIG_LOOP_INFO (loop_vinfo
) = orig_loop_vinfo
;
2378 opt_result res
= vect_analyze_loop_2 (loop_vinfo
, fatal
, &n_stmts
);
2381 LOOP_VINFO_VECTORIZABLE_P (loop_vinfo
) = 1;
2384 && maybe_ne (LOOP_VINFO_VECT_FACTOR (loop_vinfo
),
2385 (unsigned HOST_WIDE_INT
) loop
->simdlen
))
2387 if (first_loop_vinfo
== NULL
)
2389 first_loop_vinfo
= loop_vinfo
;
2390 first_vector_size
= current_vector_size
;
2398 delete first_loop_vinfo
;
2406 autodetected_vector_size
= current_vector_size
;
2408 if (next_size
< vector_sizes
.length ()
2409 && known_eq (vector_sizes
[next_size
], autodetected_vector_size
))
2414 gcc_checking_assert (first_loop_vinfo
== NULL
);
2415 return opt_loop_vec_info::propagate_failure (res
);
2418 if (next_size
== vector_sizes
.length ()
2419 || known_eq (current_vector_size
, 0U))
2421 if (first_loop_vinfo
)
2423 current_vector_size
= first_vector_size
;
2424 loop
->aux
= (loop_vec_info
) first_loop_vinfo
;
2425 if (dump_enabled_p ())
2427 dump_printf_loc (MSG_NOTE
, vect_location
,
2428 "***** Choosing vector size ");
2429 dump_dec (MSG_NOTE
, current_vector_size
);
2430 dump_printf (MSG_NOTE
, "\n");
2432 return first_loop_vinfo
;
2435 return opt_loop_vec_info::propagate_failure (res
);
2438 /* Try the next biggest vector size. */
2439 current_vector_size
= vector_sizes
[next_size
++];
2440 if (dump_enabled_p ())
2442 dump_printf_loc (MSG_NOTE
, vect_location
,
2443 "***** Re-trying analysis with "
2445 dump_dec (MSG_NOTE
, current_vector_size
);
2446 dump_printf (MSG_NOTE
, "\n");
/* Return true if there is an in-order reduction function for CODE, storing
   it in *REDUC_FN if so.  */

static bool
fold_left_reduction_fn (tree_code code, internal_fn *reduc_fn)
{
  switch (code)
    {
    case PLUS_EXPR:
      *reduc_fn = IFN_FOLD_LEFT_PLUS;
      return true;

    default:
      return false;
    }
}
/* Function reduction_fn_for_scalar_code

   Input:
   CODE - tree_code of a reduction operation.

   Output:
   REDUC_FN - the corresponding internal function to be used to reduce the
      vector of partial results into a single scalar result, or IFN_LAST
      if the operation is a supported reduction operation, but does not have
      such an internal function.

   Return FALSE if CODE currently cannot be vectorized as a reduction.  */

static bool
reduction_fn_for_scalar_code (enum tree_code code, internal_fn *reduc_fn)
{
  switch (code)
    {
    case MAX_EXPR:
      *reduc_fn = IFN_REDUC_MAX;
      return true;

    case MIN_EXPR:
      *reduc_fn = IFN_REDUC_MIN;
      return true;

    case PLUS_EXPR:
      *reduc_fn = IFN_REDUC_PLUS;
      return true;

    case BIT_AND_EXPR:
      *reduc_fn = IFN_REDUC_AND;
      return true;

    case BIT_IOR_EXPR:
      *reduc_fn = IFN_REDUC_IOR;
      return true;

    case BIT_XOR_EXPR:
      *reduc_fn = IFN_REDUC_XOR;
      return true;

    case MULT_EXPR:
    case MINUS_EXPR:
      *reduc_fn = IFN_LAST;
      return true;

    default:
      return false;
    }
}
/* If there is a neutral value X such that SLP reduction NODE would not
   be affected by the introduction of additional X elements, return that X,
   otherwise return null.  CODE is the code of the reduction.  REDUC_CHAIN
   is true if the SLP statements perform a single reduction, false if each
   statement performs an independent reduction.  */
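/* For example, for the reduction "for (i = 0; i < n; i++) prod *= a[i]"
   (a MULT_EXPR reduction) the neutral value is 1: padding the SLP group
   with additional 1 elements does not change the final product.  */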
static tree
neutral_op_for_slp_reduction (slp_tree slp_node, tree_code code,
                              bool reduc_chain)
{
  vec<stmt_vec_info> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
  stmt_vec_info stmt_vinfo = stmts[0];
  tree vector_type = STMT_VINFO_VECTYPE (stmt_vinfo);
  tree scalar_type = TREE_TYPE (vector_type);
  struct loop *loop = gimple_bb (stmt_vinfo->stmt)->loop_father;
  gcc_assert (loop);

  switch (code)
    {
    case WIDEN_SUM_EXPR:
    case DOT_PROD_EXPR:
    case SAD_EXPR:
    case PLUS_EXPR:
    case MINUS_EXPR:
    case BIT_IOR_EXPR:
    case BIT_XOR_EXPR:
      return build_zero_cst (scalar_type);

    case MULT_EXPR:
      return build_one_cst (scalar_type);

    case BIT_AND_EXPR:
      return build_all_ones_cst (scalar_type);

    case MAX_EXPR:
    case MIN_EXPR:
      /* For MIN/MAX the initial values are neutral.  A reduction chain
         has only a single initial value, so that value is neutral for
         all statements.  */
      if (reduc_chain)
        return PHI_ARG_DEF_FROM_EDGE (stmt_vinfo->stmt,
                                      loop_preheader_edge (loop));
      return NULL_TREE;

    default:
      return NULL_TREE;
    }
}
/* Error reporting helper for vect_is_simple_reduction below.  GIMPLE statement
   STMT is printed with a message MSG.  */

static void
report_vect_op (dump_flags_t msg_type, gimple *stmt, const char *msg)
{
  dump_printf_loc (msg_type, vect_location, "%s%G", msg, stmt);
}
/* DEF_STMT_INFO occurs in a loop that contains a potential reduction
   operation.  Return true if the results of DEF_STMT_INFO are something
   that can be accumulated by such a reduction.  */

static bool
vect_valid_reduction_input_p (stmt_vec_info def_stmt_info)
{
  return (is_gimple_assign (def_stmt_info->stmt)
          || is_gimple_call (def_stmt_info->stmt)
          || STMT_VINFO_DEF_TYPE (def_stmt_info) == vect_induction_def
          || (gimple_code (def_stmt_info->stmt) == GIMPLE_PHI
              && STMT_VINFO_DEF_TYPE (def_stmt_info) == vect_internal_def
              && !is_loop_header_bb_p (gimple_bb (def_stmt_info->stmt))));
}
/* Detect SLP reduction of the form:

   #a1 = phi <a5, a0>
   a2 = operation (a1)
   a3 = operation (a2)
   a4 = operation (a3)
   a5 = operation (a4)

   #a = phi <a5>

   PHI is the reduction phi node (#a1 = phi <a5, a0> above)
   FIRST_STMT is the first reduction stmt in the chain
   (a2 = operation (a1)).

   Return TRUE if a reduction chain was detected.  */
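/* A source-level example of such a chain (illustrative only):

     s = 0;
     for (i = 0; i < N; i++)
       s = s + a[i] + b[i] + c[i];

   Each addition feeds the next one within the same iteration, and the last
   result feeds the PHI of the next iteration, forming a reduction chain of
   size three.  */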
2610 vect_is_slp_reduction (loop_vec_info loop_info
, gimple
*phi
,
2613 struct loop
*loop
= (gimple_bb (phi
))->loop_father
;
2614 struct loop
*vect_loop
= LOOP_VINFO_LOOP (loop_info
);
2615 enum tree_code code
;
2616 gimple
*loop_use_stmt
= NULL
;
2617 stmt_vec_info use_stmt_info
;
2619 imm_use_iterator imm_iter
;
2620 use_operand_p use_p
;
2621 int nloop_uses
, size
= 0, n_out_of_loop_uses
;
2624 if (loop
!= vect_loop
)
2627 auto_vec
<stmt_vec_info
, 8> reduc_chain
;
2628 lhs
= PHI_RESULT (phi
);
2629 code
= gimple_assign_rhs_code (first_stmt
);
2633 n_out_of_loop_uses
= 0;
2634 FOR_EACH_IMM_USE_FAST (use_p
, imm_iter
, lhs
)
2636 gimple
*use_stmt
= USE_STMT (use_p
);
2637 if (is_gimple_debug (use_stmt
))
2640 /* Check if we got back to the reduction phi. */
2641 if (use_stmt
== phi
)
2643 loop_use_stmt
= use_stmt
;
2648 if (flow_bb_inside_loop_p (loop
, gimple_bb (use_stmt
)))
2650 loop_use_stmt
= use_stmt
;
2654 n_out_of_loop_uses
++;
      /* There can be either a single use in the loop or two uses in
         phi nodes.  */
      if (nloop_uses > 1 || (n_out_of_loop_uses && nloop_uses))
2665 /* We reached a statement with no loop uses. */
2666 if (nloop_uses
== 0)
2669 /* This is a loop exit phi, and we haven't reached the reduction phi. */
2670 if (gimple_code (loop_use_stmt
) == GIMPLE_PHI
)
2673 if (!is_gimple_assign (loop_use_stmt
)
2674 || code
!= gimple_assign_rhs_code (loop_use_stmt
)
2675 || !flow_bb_inside_loop_p (loop
, gimple_bb (loop_use_stmt
)))
2678 /* Insert USE_STMT into reduction chain. */
2679 use_stmt_info
= loop_info
->lookup_stmt (loop_use_stmt
);
2680 reduc_chain
.safe_push (use_stmt_info
);
2682 lhs
= gimple_assign_lhs (loop_use_stmt
);
2686 if (!found
|| loop_use_stmt
!= phi
|| size
< 2)
2689 /* Swap the operands, if needed, to make the reduction operand be the second
2691 lhs
= PHI_RESULT (phi
);
2692 for (unsigned i
= 0; i
< reduc_chain
.length (); ++i
)
2694 gassign
*next_stmt
= as_a
<gassign
*> (reduc_chain
[i
]->stmt
);
2695 if (gimple_assign_rhs2 (next_stmt
) == lhs
)
2697 tree op
= gimple_assign_rhs1 (next_stmt
);
2698 stmt_vec_info def_stmt_info
= loop_info
->lookup_def (op
);
2700 /* Check that the other def is either defined in the loop
2701 ("vect_internal_def"), or it's an induction (defined by a
2702 loop-header phi-node). */
2704 && flow_bb_inside_loop_p (loop
, gimple_bb (def_stmt_info
->stmt
))
2705 && vect_valid_reduction_input_p (def_stmt_info
))
2707 lhs
= gimple_assign_lhs (next_stmt
);
2715 tree op
= gimple_assign_rhs2 (next_stmt
);
2716 stmt_vec_info def_stmt_info
= loop_info
->lookup_def (op
);
2718 /* Check that the other def is either defined in the loop
2719 ("vect_internal_def"), or it's an induction (defined by a
2720 loop-header phi-node). */
2722 && flow_bb_inside_loop_p (loop
, gimple_bb (def_stmt_info
->stmt
))
2723 && vect_valid_reduction_input_p (def_stmt_info
))
2725 if (dump_enabled_p ())
2726 dump_printf_loc (MSG_NOTE
, vect_location
, "swapping oprnds: %G",
2729 swap_ssa_operands (next_stmt
,
2730 gimple_assign_rhs1_ptr (next_stmt
),
2731 gimple_assign_rhs2_ptr (next_stmt
));
2732 update_stmt (next_stmt
);
2734 if (CONSTANT_CLASS_P (gimple_assign_rhs1 (next_stmt
)))
2735 LOOP_VINFO_OPERANDS_SWAPPED (loop_info
) = true;
2741 lhs
= gimple_assign_lhs (next_stmt
);
2744 /* Build up the actual chain. */
2745 for (unsigned i
= 0; i
< reduc_chain
.length () - 1; ++i
)
2747 REDUC_GROUP_FIRST_ELEMENT (reduc_chain
[i
]) = reduc_chain
[0];
2748 REDUC_GROUP_NEXT_ELEMENT (reduc_chain
[i
]) = reduc_chain
[i
+1];
2750 REDUC_GROUP_FIRST_ELEMENT (reduc_chain
.last ()) = reduc_chain
[0];
2751 REDUC_GROUP_NEXT_ELEMENT (reduc_chain
.last ()) = NULL
;
2753 /* Save the chain for further analysis in SLP detection. */
2754 LOOP_VINFO_REDUCTION_CHAINS (loop_info
).safe_push (reduc_chain
[0]);
2755 REDUC_GROUP_SIZE (reduc_chain
[0]) = size
;
/* Return true if we need an in-order reduction for operation CODE
   on type TYPE.  NEED_WRAPPING_INTEGRAL_OVERFLOW is true if integer
   overflow must wrap.  */
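/* For example, a float accumulation "s += a[i]" compiled without
   -fassociative-math must preserve the original evaluation order, so it can
   only be vectorized as an in-order (fold-left) reduction.  */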
static bool
needs_fold_left_reduction_p (tree type, tree_code code,
                             bool need_wrapping_integral_overflow)
{
  /* CHECKME: check for !flag_finite_math_only too?  */
  if (SCALAR_FLOAT_TYPE_P (type))
    switch (code)
      {
      case MIN_EXPR:
      case MAX_EXPR:
        return false;

      default:
        return !flag_associative_math;
      }

  if (INTEGRAL_TYPE_P (type))
    {
      if (!operation_no_trapping_overflow (type, code))
        return true;
      if (need_wrapping_integral_overflow
          && !TYPE_OVERFLOW_WRAPS (type)
          && operation_can_overflow (code))
        return true;
      return false;
    }

  if (SAT_FIXED_POINT_TYPE_P (type))
    return true;

  return false;
}
2797 /* Return true if the reduction PHI in LOOP with latch arg LOOP_ARG and
2798 reduction operation CODE has a handled computation expression. */
2801 check_reduction_path (dump_user_location_t loc
, loop_p loop
, gphi
*phi
,
2802 tree loop_arg
, enum tree_code code
)
2804 auto_vec
<std::pair
<ssa_op_iter
, use_operand_p
> > path
;
2805 auto_bitmap visited
;
2806 tree lookfor
= PHI_RESULT (phi
);
2808 use_operand_p curr
= op_iter_init_phiuse (&curri
, phi
, SSA_OP_USE
);
2809 while (USE_FROM_PTR (curr
) != loop_arg
)
2810 curr
= op_iter_next_use (&curri
);
2811 curri
.i
= curri
.numops
;
2814 path
.safe_push (std::make_pair (curri
, curr
));
2815 tree use
= USE_FROM_PTR (curr
);
2818 gimple
*def
= SSA_NAME_DEF_STMT (use
);
2819 if (gimple_nop_p (def
)
2820 || ! flow_bb_inside_loop_p (loop
, gimple_bb (def
)))
2825 std::pair
<ssa_op_iter
, use_operand_p
> x
= path
.pop ();
2829 curr
= op_iter_next_use (&curri
);
2830 /* Skip already visited or non-SSA operands (from iterating
2832 while (curr
!= NULL_USE_OPERAND_P
2833 && (TREE_CODE (USE_FROM_PTR (curr
)) != SSA_NAME
2834 || ! bitmap_set_bit (visited
,
2836 (USE_FROM_PTR (curr
)))));
2838 while (curr
== NULL_USE_OPERAND_P
&& ! path
.is_empty ());
2839 if (curr
== NULL_USE_OPERAND_P
)
2844 if (gimple_code (def
) == GIMPLE_PHI
)
2845 curr
= op_iter_init_phiuse (&curri
, as_a
<gphi
*>(def
), SSA_OP_USE
);
2847 curr
= op_iter_init_use (&curri
, def
, SSA_OP_USE
);
2848 while (curr
!= NULL_USE_OPERAND_P
2849 && (TREE_CODE (USE_FROM_PTR (curr
)) != SSA_NAME
2850 || ! bitmap_set_bit (visited
,
2852 (USE_FROM_PTR (curr
)))))
2853 curr
= op_iter_next_use (&curri
);
2854 if (curr
== NULL_USE_OPERAND_P
)
2859 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
2861 dump_printf_loc (MSG_NOTE
, loc
, "reduction path: ");
2863 std::pair
<ssa_op_iter
, use_operand_p
> *x
;
2864 FOR_EACH_VEC_ELT (path
, i
, x
)
2865 dump_printf (MSG_NOTE
, "%T ", USE_FROM_PTR (x
->second
));
2866 dump_printf (MSG_NOTE
, "\n");
2869 /* Check whether the reduction path detected is valid. */
2870 bool fail
= path
.length () == 0;
2872 for (unsigned i
= 1; i
< path
.length (); ++i
)
2874 gimple
*use_stmt
= USE_STMT (path
[i
].second
);
2875 tree op
= USE_FROM_PTR (path
[i
].second
);
2876 if (! has_single_use (op
)
2877 || ! is_gimple_assign (use_stmt
))
2882 if (gimple_assign_rhs_code (use_stmt
) != code
)
2884 if (code
== PLUS_EXPR
2885 && gimple_assign_rhs_code (use_stmt
) == MINUS_EXPR
)
2887 /* Track whether we negate the reduction value each iteration. */
2888 if (gimple_assign_rhs2 (use_stmt
) == op
)
2898 return ! fail
&& ! neg
;
2902 /* Function vect_is_simple_reduction
2904 (1) Detect a cross-iteration def-use cycle that represents a simple
2905 reduction computation. We look for the following pattern:
2910 a2 = operation (a3, a1)
2917 a2 = operation (a3, a1)
2920 1. operation is commutative and associative and it is safe to
2921 change the order of the computation
2922 2. no uses for a2 in the loop (a2 is used out of the loop)
2923 3. no uses of a1 in the loop besides the reduction operation
2924 4. no uses of a1 outside the loop.
2926 Conditions 1,4 are tested here.
2927 Conditions 2,3 are tested in vect_mark_stmts_to_be_vectorized.
2929 (2) Detect a cross-iteration def-use cycle in nested loops, i.e.,
2932 (3) Detect cycles of phi nodes in outer-loop vectorization, i.e., double
2936 inner loop (def of a3)
   (4) Detect condition expressions, i.e.:

         for (int i = 0; i < N; i++)
           if (a[i] < val)
             ret_val = a[i];

*/

static stmt_vec_info
2947 vect_is_simple_reduction (loop_vec_info loop_info
, stmt_vec_info phi_info
,
2949 bool need_wrapping_integral_overflow
,
2950 enum vect_reduction_type
*v_reduc_type
)
2952 gphi
*phi
= as_a
<gphi
*> (phi_info
->stmt
);
2953 struct loop
*loop
= (gimple_bb (phi
))->loop_father
;
2954 struct loop
*vect_loop
= LOOP_VINFO_LOOP (loop_info
);
2955 bool nested_in_vect_loop
= flow_loop_nested_p (vect_loop
, loop
);
2956 gimple
*phi_use_stmt
= NULL
;
2957 enum tree_code orig_code
, code
;
2958 tree op1
, op2
, op3
= NULL_TREE
, op4
= NULL_TREE
;
2961 imm_use_iterator imm_iter
;
2962 use_operand_p use_p
;
2965 *double_reduc
= false;
2966 *v_reduc_type
= TREE_CODE_REDUCTION
;
2968 tree phi_name
= PHI_RESULT (phi
);
2969 /* ??? If there are no uses of the PHI result the inner loop reduction
2970 won't be detected as possibly double-reduction by vectorizable_reduction
2971 because that tries to walk the PHI arg from the preheader edge which
2972 can be constant. See PR60382. */
2973 if (has_zero_uses (phi_name
))
2975 unsigned nphi_def_loop_uses
= 0;
2976 FOR_EACH_IMM_USE_FAST (use_p
, imm_iter
, phi_name
)
2978 gimple
*use_stmt
= USE_STMT (use_p
);
2979 if (is_gimple_debug (use_stmt
))
2982 if (!flow_bb_inside_loop_p (loop
, gimple_bb (use_stmt
)))
2984 if (dump_enabled_p ())
2985 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
2986 "intermediate value used outside loop.\n");
2991 nphi_def_loop_uses
++;
2992 phi_use_stmt
= use_stmt
;
2995 edge latch_e
= loop_latch_edge (loop
);
2996 tree loop_arg
= PHI_ARG_DEF_FROM_EDGE (phi
, latch_e
);
2997 if (TREE_CODE (loop_arg
) != SSA_NAME
)
2999 if (dump_enabled_p ())
3000 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
3001 "reduction: not ssa_name: %T\n", loop_arg
);
3005 stmt_vec_info def_stmt_info
= loop_info
->lookup_def (loop_arg
);
3007 || !flow_bb_inside_loop_p (loop
, gimple_bb (def_stmt_info
->stmt
)))
3010 if (gassign
*def_stmt
= dyn_cast
<gassign
*> (def_stmt_info
->stmt
))
3012 name
= gimple_assign_lhs (def_stmt
);
3015 else if (gphi
*def_stmt
= dyn_cast
<gphi
*> (def_stmt_info
->stmt
))
3017 name
= PHI_RESULT (def_stmt
);
3022 if (dump_enabled_p ())
3023 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
3024 "reduction: unhandled reduction operation: %G",
3025 def_stmt_info
->stmt
);
3029 unsigned nlatch_def_loop_uses
= 0;
3030 auto_vec
<gphi
*, 3> lcphis
;
3031 bool inner_loop_of_double_reduc
= false;
3032 FOR_EACH_IMM_USE_FAST (use_p
, imm_iter
, name
)
3034 gimple
*use_stmt
= USE_STMT (use_p
);
3035 if (is_gimple_debug (use_stmt
))
3037 if (flow_bb_inside_loop_p (loop
, gimple_bb (use_stmt
)))
3038 nlatch_def_loop_uses
++;
3041 /* We can have more than one loop-closed PHI. */
3042 lcphis
.safe_push (as_a
<gphi
*> (use_stmt
));
3043 if (nested_in_vect_loop
3044 && (STMT_VINFO_DEF_TYPE (loop_info
->lookup_stmt (use_stmt
))
3045 == vect_double_reduction_def
))
3046 inner_loop_of_double_reduc
= true;
  /* If this isn't a nested cycle or if the nested cycle reduction value
     is used outside of the inner loop we cannot handle uses of the reduction
     value.  */
  if ((!nested_in_vect_loop || inner_loop_of_double_reduc)
      && (nlatch_def_loop_uses > 1 || nphi_def_loop_uses > 1))
3056 if (dump_enabled_p ())
3057 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
3058 "reduction used in loop.\n");
3062 /* If DEF_STMT is a phi node itself, we expect it to have a single argument
3063 defined in the inner loop. */
3066 gphi
*def_stmt
= as_a
<gphi
*> (def_stmt_info
->stmt
);
3067 op1
= PHI_ARG_DEF (def_stmt
, 0);
3069 if (gimple_phi_num_args (def_stmt
) != 1
3070 || TREE_CODE (op1
) != SSA_NAME
)
3072 if (dump_enabled_p ())
3073 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
3074 "unsupported phi node definition.\n");
3079 gimple
*def1
= SSA_NAME_DEF_STMT (op1
);
3080 if (gimple_bb (def1
)
3081 && flow_bb_inside_loop_p (loop
, gimple_bb (def_stmt
))
3083 && flow_bb_inside_loop_p (loop
->inner
, gimple_bb (def1
))
3084 && is_gimple_assign (def1
)
3085 && is_a
<gphi
*> (phi_use_stmt
)
3086 && flow_bb_inside_loop_p (loop
->inner
, gimple_bb (phi_use_stmt
)))
3088 if (dump_enabled_p ())
3089 report_vect_op (MSG_NOTE
, def_stmt
,
3090 "detected double reduction: ");
3092 *double_reduc
= true;
3093 return def_stmt_info
;
3099 /* If we are vectorizing an inner reduction we are executing that
3100 in the original order only in case we are not dealing with a
3101 double reduction. */
3102 bool check_reduction
= true;
3103 if (flow_loop_nested_p (vect_loop
, loop
))
3107 check_reduction
= false;
3108 FOR_EACH_VEC_ELT (lcphis
, i
, lcphi
)
3109 FOR_EACH_IMM_USE_FAST (use_p
, imm_iter
, gimple_phi_result (lcphi
))
3111 gimple
*use_stmt
= USE_STMT (use_p
);
3112 if (is_gimple_debug (use_stmt
))
3114 if (! flow_bb_inside_loop_p (vect_loop
, gimple_bb (use_stmt
)))
3115 check_reduction
= true;
3119 gassign
*def_stmt
= as_a
<gassign
*> (def_stmt_info
->stmt
);
3120 code
= orig_code
= gimple_assign_rhs_code (def_stmt
);
3122 if (nested_in_vect_loop
&& !check_reduction
)
3124 /* FIXME: Even for non-reductions code generation is funneled
3125 through vectorizable_reduction for the stmt defining the
3126 PHI latch value. So we have to artificially restrict ourselves
3127 for the supported operations. */
3128 switch (get_gimple_rhs_class (code
))
3130 case GIMPLE_BINARY_RHS
:
3131 case GIMPLE_TERNARY_RHS
:
3134 /* Not supported by vectorizable_reduction. */
3135 if (dump_enabled_p ())
3136 report_vect_op (MSG_MISSED_OPTIMIZATION
, def_stmt
,
3137 "nested cycle: not handled operation: ");
3140 if (dump_enabled_p ())
3141 report_vect_op (MSG_NOTE
, def_stmt
, "detected nested cycle: ");
3142 return def_stmt_info
;
3145 /* We can handle "res -= x[i]", which is non-associative by
3146 simply rewriting this into "res += -x[i]". Avoid changing
3147 gimple instruction for the first simple tests and only do this
3148 if we're allowed to change code at all. */
3149 if (code
== MINUS_EXPR
&& gimple_assign_rhs2 (def_stmt
) != phi_name
)
3152 if (code
== COND_EXPR
)
3154 if (! nested_in_vect_loop
)
3155 *v_reduc_type
= COND_REDUCTION
;
3157 op3
= gimple_assign_rhs1 (def_stmt
);
3158 if (COMPARISON_CLASS_P (op3
))
3160 op4
= TREE_OPERAND (op3
, 1);
3161 op3
= TREE_OPERAND (op3
, 0);
3163 if (op3
== phi_name
|| op4
== phi_name
)
3165 if (dump_enabled_p ())
3166 report_vect_op (MSG_MISSED_OPTIMIZATION
, def_stmt
,
3167 "reduction: condition depends on previous"
3172 op1
= gimple_assign_rhs2 (def_stmt
);
3173 op2
= gimple_assign_rhs3 (def_stmt
);
3175 else if (!commutative_tree_code (code
) || !associative_tree_code (code
))
3177 if (dump_enabled_p ())
3178 report_vect_op (MSG_MISSED_OPTIMIZATION
, def_stmt
,
3179 "reduction: not commutative/associative: ");
3182 else if (get_gimple_rhs_class (code
) == GIMPLE_BINARY_RHS
)
3184 op1
= gimple_assign_rhs1 (def_stmt
);
3185 op2
= gimple_assign_rhs2 (def_stmt
);
3189 if (dump_enabled_p ())
3190 report_vect_op (MSG_MISSED_OPTIMIZATION
, def_stmt
,
3191 "reduction: not handled operation: ");
3195 if (TREE_CODE (op1
) != SSA_NAME
&& TREE_CODE (op2
) != SSA_NAME
)
3197 if (dump_enabled_p ())
3198 report_vect_op (MSG_MISSED_OPTIMIZATION
, def_stmt
,
3199 "reduction: both uses not ssa_names: ");
3204 type
= TREE_TYPE (gimple_assign_lhs (def_stmt
));
3205 if ((TREE_CODE (op1
) == SSA_NAME
3206 && !types_compatible_p (type
,TREE_TYPE (op1
)))
3207 || (TREE_CODE (op2
) == SSA_NAME
3208 && !types_compatible_p (type
, TREE_TYPE (op2
)))
3209 || (op3
&& TREE_CODE (op3
) == SSA_NAME
3210 && !types_compatible_p (type
, TREE_TYPE (op3
)))
3211 || (op4
&& TREE_CODE (op4
) == SSA_NAME
3212 && !types_compatible_p (type
, TREE_TYPE (op4
))))
3214 if (dump_enabled_p ())
3216 dump_printf_loc (MSG_NOTE
, vect_location
,
3217 "reduction: multiple types: operation type: "
3218 "%T, operands types: %T,%T",
3219 type
, TREE_TYPE (op1
), TREE_TYPE (op2
));
3221 dump_printf (MSG_NOTE
, ",%T", TREE_TYPE (op3
));
3224 dump_printf (MSG_NOTE
, ",%T", TREE_TYPE (op4
));
3225 dump_printf (MSG_NOTE
, "\n");
3231 /* Check whether it's ok to change the order of the computation.
3232 Generally, when vectorizing a reduction we change the order of the
3233 computation. This may change the behavior of the program in some
3234 cases, so we need to check that this is ok. One exception is when
3235 vectorizing an outer-loop: the inner-loop is executed sequentially,
3236 and therefore vectorizing reductions in the inner-loop during
3237 outer-loop vectorization is safe. */
3239 && *v_reduc_type
== TREE_CODE_REDUCTION
3240 && needs_fold_left_reduction_p (type
, code
,
3241 need_wrapping_integral_overflow
))
3242 *v_reduc_type
= FOLD_LEFT_REDUCTION
;
3244 /* Reduction is safe. We're dealing with one of the following:
3245 1) integer arithmetic and no trapv
3246 2) floating point arithmetic, and special flags permit this optimization
3247 3) nested cycle (i.e., outer loop vectorization). */
3248 stmt_vec_info def1_info
= loop_info
->lookup_def (op1
);
3249 stmt_vec_info def2_info
= loop_info
->lookup_def (op2
);
3250 if (code
!= COND_EXPR
&& !def1_info
&& !def2_info
)
3252 if (dump_enabled_p ())
3253 report_vect_op (MSG_NOTE
, def_stmt
, "reduction: no defs for operands: ");
3257 /* Check that one def is the reduction def, defined by PHI,
3258 the other def is either defined in the loop ("vect_internal_def"),
3259 or it's an induction (defined by a loop-header phi-node). */
3262 && def2_info
->stmt
== phi
3263 && (code
== COND_EXPR
3265 || !flow_bb_inside_loop_p (loop
, gimple_bb (def1_info
->stmt
))
3266 || vect_valid_reduction_input_p (def1_info
)))
3268 if (dump_enabled_p ())
3269 report_vect_op (MSG_NOTE
, def_stmt
, "detected reduction: ");
3270 return def_stmt_info
;
3274 && def1_info
->stmt
== phi
3275 && (code
== COND_EXPR
3277 || !flow_bb_inside_loop_p (loop
, gimple_bb (def2_info
->stmt
))
3278 || vect_valid_reduction_input_p (def2_info
)))
3280 if (! nested_in_vect_loop
&& orig_code
!= MINUS_EXPR
)
3282 /* Check if we can swap operands (just for simplicity - so that
3283 the rest of the code can assume that the reduction variable
3284 is always the last (second) argument). */
3285 if (code
== COND_EXPR
)
3287 /* Swap cond_expr by inverting the condition. */
3288 tree cond_expr
= gimple_assign_rhs1 (def_stmt
);
3289 enum tree_code invert_code
= ERROR_MARK
;
3290 enum tree_code cond_code
= TREE_CODE (cond_expr
);
3292 if (TREE_CODE_CLASS (cond_code
) == tcc_comparison
)
3294 bool honor_nans
= HONOR_NANS (TREE_OPERAND (cond_expr
, 0));
3295 invert_code
= invert_tree_comparison (cond_code
, honor_nans
);
3297 if (invert_code
!= ERROR_MARK
)
3299 TREE_SET_CODE (cond_expr
, invert_code
);
3300 swap_ssa_operands (def_stmt
,
3301 gimple_assign_rhs2_ptr (def_stmt
),
3302 gimple_assign_rhs3_ptr (def_stmt
));
3306 if (dump_enabled_p ())
3307 report_vect_op (MSG_NOTE
, def_stmt
,
3308 "detected reduction: cannot swap operands "
3314 swap_ssa_operands (def_stmt
, gimple_assign_rhs1_ptr (def_stmt
),
3315 gimple_assign_rhs2_ptr (def_stmt
));
3317 if (dump_enabled_p ())
3318 report_vect_op (MSG_NOTE
, def_stmt
,
3319 "detected reduction: need to swap operands: ");
3321 if (CONSTANT_CLASS_P (gimple_assign_rhs1 (def_stmt
)))
3322 LOOP_VINFO_OPERANDS_SWAPPED (loop_info
) = true;
3326 if (dump_enabled_p ())
3327 report_vect_op (MSG_NOTE
, def_stmt
, "detected reduction: ");
3330 return def_stmt_info
;
3333 /* Try to find SLP reduction chain. */
3334 if (! nested_in_vect_loop
3335 && code
!= COND_EXPR
3336 && orig_code
!= MINUS_EXPR
3337 && vect_is_slp_reduction (loop_info
, phi
, def_stmt
))
3339 if (dump_enabled_p ())
3340 report_vect_op (MSG_NOTE
, def_stmt
,
3341 "reduction: detected reduction chain: ");
3343 return def_stmt_info
;
3346 /* Look for the expression computing loop_arg from loop PHI result. */
3347 if (check_reduction_path (vect_location
, loop
, phi
, loop_arg
, code
))
3348 return def_stmt_info
;
3350 if (dump_enabled_p ())
3352 report_vect_op (MSG_MISSED_OPTIMIZATION
, def_stmt
,
3353 "reduction: unknown pattern: ");
/* Wrapper around vect_is_simple_reduction, which will modify code
   in-place if it enables detection of more reductions.  Arguments
   as there.  */

stmt_vec_info
vect_force_simple_reduction (loop_vec_info loop_info, stmt_vec_info phi_info,
                             bool *double_reduc,
                             bool need_wrapping_integral_overflow)
{
  enum vect_reduction_type v_reduc_type;
  stmt_vec_info def_info
    = vect_is_simple_reduction (loop_info, phi_info, double_reduc,
                                need_wrapping_integral_overflow,
                                &v_reduc_type);
  if (def_info)
    {
      STMT_VINFO_REDUC_TYPE (phi_info) = v_reduc_type;
      STMT_VINFO_REDUC_DEF (phi_info) = def_info;
      STMT_VINFO_REDUC_TYPE (def_info) = v_reduc_type;
      STMT_VINFO_REDUC_DEF (def_info) = phi_info;
    }
  return def_info;
}
3383 /* Calculate cost of peeling the loop PEEL_ITERS_PROLOGUE times. */
3385 vect_get_known_peeling_cost (loop_vec_info loop_vinfo
, int peel_iters_prologue
,
3386 int *peel_iters_epilogue
,
3387 stmt_vector_for_cost
*scalar_cost_vec
,
3388 stmt_vector_for_cost
*prologue_cost_vec
,
3389 stmt_vector_for_cost
*epilogue_cost_vec
)
3392 int assumed_vf
= vect_vf_for_cost (loop_vinfo
);
3394 if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo
))
3396 *peel_iters_epilogue
= assumed_vf
/ 2;
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "cost model: epilogue peel iters set to vf/2 "
                         "because loop iterations are unknown.\n");
3402 /* If peeled iterations are known but number of scalar loop
3403 iterations are unknown, count a taken branch per peeled loop. */
3404 retval
= record_stmt_cost (prologue_cost_vec
, 1, cond_branch_taken
,
3405 NULL
, 0, vect_prologue
);
3406 retval
+= record_stmt_cost (epilogue_cost_vec
, 1, cond_branch_taken
,
3407 NULL
, 0, vect_epilogue
);
3411 int niters
= LOOP_VINFO_INT_NITERS (loop_vinfo
);
3412 peel_iters_prologue
= niters
< peel_iters_prologue
?
3413 niters
: peel_iters_prologue
;
3414 *peel_iters_epilogue
= (niters
- peel_iters_prologue
) % assumed_vf
;
3415 /* If we need to peel for gaps, but no peeling is required, we have to
3416 peel VF iterations. */
3417 if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo
) && !*peel_iters_epilogue
)
3418 *peel_iters_epilogue
= assumed_vf
;
3421 stmt_info_for_cost
*si
;
3423 if (peel_iters_prologue
)
3424 FOR_EACH_VEC_ELT (*scalar_cost_vec
, j
, si
)
3425 retval
+= record_stmt_cost (prologue_cost_vec
,
3426 si
->count
* peel_iters_prologue
,
3427 si
->kind
, si
->stmt_info
, si
->misalign
,
3429 if (*peel_iters_epilogue
)
3430 FOR_EACH_VEC_ELT (*scalar_cost_vec
, j
, si
)
3431 retval
+= record_stmt_cost (epilogue_cost_vec
,
3432 si
->count
* *peel_iters_epilogue
,
3433 si
->kind
, si
->stmt_info
, si
->misalign
,
/* Function vect_estimate_min_profitable_iters

   Return the number of iterations required for the vector version of the
   loop to be profitable relative to the cost of the scalar version of the
   loop.

   *RET_MIN_PROFITABLE_NITERS is a cost model profitability threshold
   of iterations for vectorization.  A value of -1 means loop vectorization
   is not profitable.  This returned value may be used for dynamic
   profitability check.

   *RET_MIN_PROFITABLE_ESTIMATE is a profitability threshold to be used
   for static check against estimated number of iterations.  */
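/* Illustrative use: if the analysis below computes a dynamic threshold of
   12, the generated guard is conceptually "if (niters >= 12) use the vector
   loop, else fall back to the scalar loop", while the static estimate is
   compared against the compile-time estimated trip count.  */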
3454 vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo
,
3455 int *ret_min_profitable_niters
,
3456 int *ret_min_profitable_estimate
)
3458 int min_profitable_iters
;
3459 int min_profitable_estimate
;
3460 int peel_iters_prologue
;
3461 int peel_iters_epilogue
;
3462 unsigned vec_inside_cost
= 0;
3463 int vec_outside_cost
= 0;
3464 unsigned vec_prologue_cost
= 0;
3465 unsigned vec_epilogue_cost
= 0;
3466 int scalar_single_iter_cost
= 0;
3467 int scalar_outside_cost
= 0;
3468 int assumed_vf
= vect_vf_for_cost (loop_vinfo
);
3469 int npeel
= LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo
);
3470 void *target_cost_data
= LOOP_VINFO_TARGET_COST_DATA (loop_vinfo
);
3472 /* Cost model disabled. */
3473 if (unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo
)))
3475 if (dump_enabled_p ())
3476 dump_printf_loc (MSG_NOTE
, vect_location
, "cost model disabled.\n");
3477 *ret_min_profitable_niters
= 0;
3478 *ret_min_profitable_estimate
= 0;
3482 /* Requires loop versioning tests to handle misalignment. */
3483 if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo
))
3485 /* FIXME: Make cost depend on complexity of individual check. */
3486 unsigned len
= LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo
).length ();
3487 (void) add_stmt_cost (target_cost_data
, len
, vector_stmt
, NULL
, 0,
3489 if (dump_enabled_p ())
3490 dump_printf (MSG_NOTE
,
3491 "cost model: Adding cost of checks for loop "
3492 "versioning to treat misalignment.\n");
3495 /* Requires loop versioning with alias checks. */
3496 if (LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo
))
3498 /* FIXME: Make cost depend on complexity of individual check. */
3499 unsigned len
= LOOP_VINFO_COMP_ALIAS_DDRS (loop_vinfo
).length ();
3500 (void) add_stmt_cost (target_cost_data
, len
, vector_stmt
, NULL
, 0,
3502 len
= LOOP_VINFO_CHECK_UNEQUAL_ADDRS (loop_vinfo
).length ();
3504 /* Count LEN - 1 ANDs and LEN comparisons. */
3505 (void) add_stmt_cost (target_cost_data
, len
* 2 - 1, scalar_stmt
,
3506 NULL
, 0, vect_prologue
);
3507 len
= LOOP_VINFO_LOWER_BOUNDS (loop_vinfo
).length ();
3510 /* Count LEN - 1 ANDs and LEN comparisons. */
3511 unsigned int nstmts
= len
* 2 - 1;
3512 /* +1 for each bias that needs adding. */
3513 for (unsigned int i
= 0; i
< len
; ++i
)
3514 if (!LOOP_VINFO_LOWER_BOUNDS (loop_vinfo
)[i
].unsigned_p
)
3516 (void) add_stmt_cost (target_cost_data
, nstmts
, scalar_stmt
,
3517 NULL
, 0, vect_prologue
);
3519 if (dump_enabled_p ())
3520 dump_printf (MSG_NOTE
,
3521 "cost model: Adding cost of checks for loop "
3522 "versioning aliasing.\n");
3525 /* Requires loop versioning with niter checks. */
3526 if (LOOP_REQUIRES_VERSIONING_FOR_NITERS (loop_vinfo
))
3528 /* FIXME: Make cost depend on complexity of individual check. */
3529 (void) add_stmt_cost (target_cost_data
, 1, vector_stmt
, NULL
, 0,
3531 if (dump_enabled_p ())
3532 dump_printf (MSG_NOTE
,
3533 "cost model: Adding cost of checks for loop "
3534 "versioning niters.\n");
3537 if (LOOP_REQUIRES_VERSIONING (loop_vinfo
))
3538 (void) add_stmt_cost (target_cost_data
, 1, cond_branch_taken
, NULL
, 0,
3541 /* Count statements in scalar loop. Using this as scalar cost for a single
3544 TODO: Add outer loop support.
3546 TODO: Consider assigning different costs to different scalar
3549 scalar_single_iter_cost
3550 = LOOP_VINFO_SINGLE_SCALAR_ITERATION_COST (loop_vinfo
);
3552 /* Add additional cost for the peeled instructions in prologue and epilogue
3553 loop. (For fully-masked loops there will be no peeling.)
3555 FORNOW: If we don't know the value of peel_iters for prologue or epilogue
3556 at compile-time - we assume it's vf/2 (the worst would be vf-1).
3558 TODO: Build an expression that represents peel_iters for prologue and
3559 epilogue to be used in a run-time test. */
3561 if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo
))
3563 peel_iters_prologue
= 0;
3564 peel_iters_epilogue
= 0;
3566 if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo
))
3568 /* We need to peel exactly one iteration. */
3569 peel_iters_epilogue
+= 1;
3570 stmt_info_for_cost
*si
;
3572 FOR_EACH_VEC_ELT (LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo
),
3574 (void) add_stmt_cost (target_cost_data
, si
->count
,
3575 si
->kind
, si
->stmt_info
, si
->misalign
,
3581 peel_iters_prologue
= assumed_vf
/ 2;
3582 if (dump_enabled_p ())
3583 dump_printf (MSG_NOTE
, "cost model: "
3584 "prologue peel iters set to vf/2.\n");
3586 /* If peeling for alignment is unknown, loop bound of main loop becomes
3588 peel_iters_epilogue
= assumed_vf
/ 2;
3589 if (dump_enabled_p ())
3590 dump_printf (MSG_NOTE
, "cost model: "
3591 "epilogue peel iters set to vf/2 because "
3592 "peeling for alignment is unknown.\n");
3594 /* If peeled iterations are unknown, count a taken branch and a not taken
3595 branch per peeled loop. Even if scalar loop iterations are known,
3596 vector iterations are not known since peeled prologue iterations are
3597 not known. Hence guards remain the same. */
3598 (void) add_stmt_cost (target_cost_data
, 1, cond_branch_taken
,
3599 NULL
, 0, vect_prologue
);
3600 (void) add_stmt_cost (target_cost_data
, 1, cond_branch_not_taken
,
3601 NULL
, 0, vect_prologue
);
3602 (void) add_stmt_cost (target_cost_data
, 1, cond_branch_taken
,
3603 NULL
, 0, vect_epilogue
);
3604 (void) add_stmt_cost (target_cost_data
, 1, cond_branch_not_taken
,
3605 NULL
, 0, vect_epilogue
);
3606 stmt_info_for_cost
*si
;
3608 FOR_EACH_VEC_ELT (LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo
), j
, si
)
3610 (void) add_stmt_cost (target_cost_data
,
3611 si
->count
* peel_iters_prologue
,
3612 si
->kind
, si
->stmt_info
, si
->misalign
,
3614 (void) add_stmt_cost (target_cost_data
,
3615 si
->count
* peel_iters_epilogue
,
3616 si
->kind
, si
->stmt_info
, si
->misalign
,
3622 stmt_vector_for_cost prologue_cost_vec
, epilogue_cost_vec
;
3623 stmt_info_for_cost
*si
;
3625 void *data
= LOOP_VINFO_TARGET_COST_DATA (loop_vinfo
);
3627 prologue_cost_vec
.create (2);
3628 epilogue_cost_vec
.create (2);
3629 peel_iters_prologue
= npeel
;
3631 (void) vect_get_known_peeling_cost (loop_vinfo
, peel_iters_prologue
,
3632 &peel_iters_epilogue
,
3633 &LOOP_VINFO_SCALAR_ITERATION_COST
3636 &epilogue_cost_vec
);
3638 FOR_EACH_VEC_ELT (prologue_cost_vec
, j
, si
)
3639 (void) add_stmt_cost (data
, si
->count
, si
->kind
, si
->stmt_info
,
3640 si
->misalign
, vect_prologue
);
3642 FOR_EACH_VEC_ELT (epilogue_cost_vec
, j
, si
)
3643 (void) add_stmt_cost (data
, si
->count
, si
->kind
, si
->stmt_info
,
3644 si
->misalign
, vect_epilogue
);
3646 prologue_cost_vec
.release ();
3647 epilogue_cost_vec
.release ();
3650 /* FORNOW: The scalar outside cost is incremented in one of the
3653 1. The vectorizer checks for alignment and aliasing and generates
3654 a condition that allows dynamic vectorization. A cost model
3655 check is ANDED with the versioning condition. Hence scalar code
3656 path now has the added cost of the versioning check.
3658 if (cost > th & versioning_check)
3661 Hence run-time scalar is incremented by not-taken branch cost.
3663 2. The vectorizer then checks if a prologue is required. If the
3664 cost model check was not done before during versioning, it has to
3665 be done before the prologue check.
3668 prologue = scalar_iters
3673 if (prologue == num_iters)
3676 Hence the run-time scalar cost is incremented by a taken branch,
3677 plus a not-taken branch, plus a taken branch cost.
3679 3. The vectorizer then checks if an epilogue is required. If the
3680 cost model check was not done before during prologue check, it
3681 has to be done with the epilogue check.
3687 if (prologue == num_iters)
3690 if ((cost <= th) | (scalar_iters-prologue-epilogue == 0))
3693 Hence the run-time scalar cost should be incremented by 2 taken
3696 TODO: The back end may reorder the BBS's differently and reverse
3697 conditions/branch directions. Change the estimates below to
3698 something more reasonable. */
3700 /* If the number of iterations is known and we do not do versioning, we can
3701 decide whether to vectorize at compile time. Hence the scalar version
3702 do not carry cost model guard costs. */
3703 if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo
)
3704 || LOOP_REQUIRES_VERSIONING (loop_vinfo
))
3706 /* Cost model check occurs at versioning. */
3707 if (LOOP_REQUIRES_VERSIONING (loop_vinfo
))
3708 scalar_outside_cost
+= vect_get_stmt_cost (cond_branch_not_taken
);
3711 /* Cost model check occurs at prologue generation. */
3712 if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo
) < 0)
3713 scalar_outside_cost
+= 2 * vect_get_stmt_cost (cond_branch_taken
)
3714 + vect_get_stmt_cost (cond_branch_not_taken
);
3715 /* Cost model check occurs at epilogue generation. */
3717 scalar_outside_cost
+= 2 * vect_get_stmt_cost (cond_branch_taken
);
3721 /* Complete the target-specific cost calculations. */
3722 finish_cost (LOOP_VINFO_TARGET_COST_DATA (loop_vinfo
), &vec_prologue_cost
,
3723 &vec_inside_cost
, &vec_epilogue_cost
);
3725 vec_outside_cost
= (int)(vec_prologue_cost
+ vec_epilogue_cost
);
3727 if (dump_enabled_p ())
3729 dump_printf_loc (MSG_NOTE
, vect_location
, "Cost model analysis: \n");
3730 dump_printf (MSG_NOTE
, " Vector inside of loop cost: %d\n",
3732 dump_printf (MSG_NOTE
, " Vector prologue cost: %d\n",
3734 dump_printf (MSG_NOTE
, " Vector epilogue cost: %d\n",
3736 dump_printf (MSG_NOTE
, " Scalar iteration cost: %d\n",
3737 scalar_single_iter_cost
);
3738 dump_printf (MSG_NOTE
, " Scalar outside cost: %d\n",
3739 scalar_outside_cost
);
3740 dump_printf (MSG_NOTE
, " Vector outside cost: %d\n",
3742 dump_printf (MSG_NOTE
, " prologue iterations: %d\n",
3743 peel_iters_prologue
);
3744 dump_printf (MSG_NOTE
, " epilogue iterations: %d\n",
3745 peel_iters_epilogue
);
  /* Calculate the number of iterations required to make the vector version
     profitable, relative to the loop bodies only.  The following condition
     must hold true:

     SIC * niters + SOC > VIC * ((niters - NPEEL) / VF) + VOC

     where
     SIC = scalar iteration cost, VIC = vector iteration cost,
     VOC = vector outside cost, VF = vectorization factor,
     NPEEL = prologue iterations + epilogue iterations,
     SOC = scalar outside cost for run time cost model check.  */
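  /* Worked example with illustrative numbers: SIC = 4, VIC = 6, VF = 4,
     NPEEL = 0, VOC = 20 and SOC = 0 gives

       4 * niters > 6 * (niters / 4) + 20

     i.e. 2.5 * niters > 20, so the vector version starts to win for
     niters > 8.  */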
3758 int saving_per_viter
= (scalar_single_iter_cost
* assumed_vf
3760 if (saving_per_viter
<= 0)
3762 if (LOOP_VINFO_LOOP (loop_vinfo
)->force_vectorize
)
3763 warning_at (vect_location
.get_location_t (), OPT_Wopenmp_simd
,
3764 "vectorization did not happen for a simd loop");
3766 if (dump_enabled_p ())
3767 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
3768 "cost model: the vector iteration cost = %d "
3769 "divided by the scalar iteration cost = %d "
3770 "is greater or equal to the vectorization factor = %d"
3772 vec_inside_cost
, scalar_single_iter_cost
, assumed_vf
);
3773 *ret_min_profitable_niters
= -1;
3774 *ret_min_profitable_estimate
= -1;
  /* ??? The "if" arm is written to handle all cases; see below for what
     we would do for !LOOP_VINFO_FULLY_MASKED_P.  */
  if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
    {
      /* Rewriting the condition above in terms of the number of
         vector iterations (vniters) rather than the number of
         scalar iterations (niters) gives:

         SIC * (vniters * VF + NPEEL) + SOC > VIC * vniters + VOC

         <==> vniters * (SIC * VF - VIC) > VOC - SIC * NPEEL - SOC

         For integer N, X and Y when X > 0:

         N * X > Y <==> N >= (Y /[floor] X) + 1.  */
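      /* For instance, with X = 10 and Y = 25, N * 10 > 25 holds exactly for
         N >= (25 /[floor] 10) + 1 = 3.  */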
3793 int outside_overhead
= (vec_outside_cost
3794 - scalar_single_iter_cost
* peel_iters_prologue
3795 - scalar_single_iter_cost
* peel_iters_epilogue
3796 - scalar_outside_cost
);
3797 /* We're only interested in cases that require at least one
3798 vector iteration. */
3799 int min_vec_niters
= 1;
3800 if (outside_overhead
> 0)
3801 min_vec_niters
= outside_overhead
/ saving_per_viter
+ 1;
3803 if (dump_enabled_p ())
3804 dump_printf (MSG_NOTE
, " Minimum number of vector iterations: %d\n",
3807 if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo
))
3809 /* Now that we know the minimum number of vector iterations,
3810 find the minimum niters for which the scalar cost is larger:
3812 SIC * niters > VIC * vniters + VOC - SOC
3814 We know that the minimum niters is no more than
3815 vniters * VF + NPEEL, but it might be (and often is) less
3816 than that if a partial vector iteration is cheaper than the
3817 equivalent scalar code. */
3818 int threshold
= (vec_inside_cost
* min_vec_niters
3820 - scalar_outside_cost
);
3822 min_profitable_iters
= 1;
3824 min_profitable_iters
= threshold
/ scalar_single_iter_cost
+ 1;
3827 /* Convert the number of vector iterations into a number of
3828 scalar iterations. */
3829 min_profitable_iters
= (min_vec_niters
* assumed_vf
3830 + peel_iters_prologue
3831 + peel_iters_epilogue
);
3835 min_profitable_iters
= ((vec_outside_cost
- scalar_outside_cost
)
3837 - vec_inside_cost
* peel_iters_prologue
3838 - vec_inside_cost
* peel_iters_epilogue
);
3839 if (min_profitable_iters
<= 0)
3840 min_profitable_iters
= 0;
3843 min_profitable_iters
/= saving_per_viter
;
3845 if ((scalar_single_iter_cost
* assumed_vf
* min_profitable_iters
)
3846 <= (((int) vec_inside_cost
* min_profitable_iters
)
3847 + (((int) vec_outside_cost
- scalar_outside_cost
)
3849 min_profitable_iters
++;
3853 if (dump_enabled_p ())
3854 dump_printf (MSG_NOTE
,
3855 " Calculated minimum iters for profitability: %d\n",
3856 min_profitable_iters
);
3858 if (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo
)
3859 && min_profitable_iters
< (assumed_vf
+ peel_iters_prologue
))
3860 /* We want the vectorized loop to execute at least once. */
3861 min_profitable_iters
= assumed_vf
+ peel_iters_prologue
;
3863 if (dump_enabled_p ())
3864 dump_printf_loc (MSG_NOTE
, vect_location
,
3865 " Runtime profitability threshold = %d\n",
3866 min_profitable_iters
);
3868 *ret_min_profitable_niters
= min_profitable_iters
;
3870 /* Calculate number of iterations required to make the vector version
3871 profitable, relative to the loop bodies only.
3873 Non-vectorized variant is SIC * niters and it must win over vector
3874 variant on the expected loop trip count. The following condition must hold true:
3875 SIC * niters > VIC * ((niters - NPEEL) / VF) + VOC + SOC */
  if (vec_outside_cost <= 0)
    min_profitable_estimate = 0;
  else if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
    {
      /* This is a repeat of the code above, but with + SOC rather
	 than - SOC.  */
      int outside_overhead = (vec_outside_cost
			      - scalar_single_iter_cost * peel_iters_prologue
			      - scalar_single_iter_cost * peel_iters_epilogue
			      + scalar_outside_cost);
      int min_vec_niters = 1;
      if (outside_overhead > 0)
	min_vec_niters = outside_overhead / saving_per_viter + 1;

      if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
	{
	  int threshold = (vec_inside_cost * min_vec_niters
			   + vec_outside_cost
			   + scalar_outside_cost);
	  min_profitable_estimate = threshold / scalar_single_iter_cost + 1;
	}
      else
	min_profitable_estimate = (min_vec_niters * assumed_vf
				   + peel_iters_prologue
				   + peel_iters_epilogue);
    }
  else
    {
      min_profitable_estimate = ((vec_outside_cost + scalar_outside_cost)
				 * assumed_vf
				 - vec_inside_cost * peel_iters_prologue
				 - vec_inside_cost * peel_iters_epilogue)
				/ ((scalar_single_iter_cost * assumed_vf)
				   - vec_inside_cost);
    }
  min_profitable_estimate = MAX (min_profitable_estimate, min_profitable_iters);
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "  Static estimate profitability threshold = %d\n",
		     min_profitable_estimate);

  *ret_min_profitable_estimate = min_profitable_estimate;
}
/* Writes into SEL a mask for a vec_perm, equivalent to a vec_shr by OFFSET
   vector elements (not bits) for a vector with NELT elements.  */
static void
calc_vec_perm_mask_for_shift (unsigned int offset, unsigned int nelt,
			      vec_perm_builder *sel)
{
  /* The encoding is a single stepped pattern.  Any wrap-around is handled
     by vec_perm_indices.  */
  sel->new_vector (nelt, 1, 3);
  for (unsigned int i = 0; i < 3; i++)
    sel->quick_push (i + offset);
}
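/* Example (added commentary, not part of the original source): with NELT == 8
   and OFFSET == 4 the stepped encoding above expands to the selector
   {4, 5, 6, 7, 8, 9, 10, 11}.  When that selector is used in a two-input
   VEC_PERM_EXPR whose second input is a zero vector, as in the reduction
   epilogue further below, lanes 8..11 select zeros, so the permute acts as a
   whole-vector shift right by four elements.  */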
/* Checks whether the target supports whole-vector shifts for vectors of mode
   MODE.  This is the case if _either_ the platform handles vec_shr_optab, _or_
   it supports vec_perm_const with masks for all necessary shift amounts.  */
static bool
have_whole_vector_shift (machine_mode mode)
{
  if (optab_handler (vec_shr_optab, mode) != CODE_FOR_nothing)
    return true;

  /* Variable-length vectors should be handled via the optab.  */
  unsigned int nelt;
  if (!GET_MODE_NUNITS (mode).is_constant (&nelt))
    return false;

  vec_perm_builder sel;
  vec_perm_indices indices;
  for (unsigned int i = nelt / 2; i >= 1; i /= 2)
    {
      calc_vec_perm_mask_for_shift (i, nelt, &sel);
      indices.new_vector (sel, 2, nelt);
      if (!can_vec_perm_const_p (mode, indices, false))
	return false;
    }
  return true;
}
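/* Note (added commentary): only the power-of-two shift amounts nelt/2,
   nelt/4, ..., 1 need to be checked above, because the shift-based reduction
   scheme in vect_create_epilog_for_reduction halves the live portion of the
   vector on every step and therefore never uses any other shift amount.  */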
/* TODO: Close dependency between vect_model_*_cost and vectorizable_*
   functions.  Design better to avoid maintenance issues.  */

/* Function vect_model_reduction_cost.

   Models cost for a reduction operation, including the vector ops
   generated within the strip-mine loop, the initial definition before
   the loop, and the epilogue code that must be generated.  */

static void
vect_model_reduction_cost (stmt_vec_info stmt_info, internal_fn reduc_fn,
			   int ncopies, stmt_vector_for_cost *cost_vec)
{
  int prologue_cost = 0, epilogue_cost = 0, inside_cost;
  enum tree_code code;
  optab optab;
  tree vectype;
  machine_mode mode;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = NULL;

  if (loop_vinfo)
    loop = LOOP_VINFO_LOOP (loop_vinfo);

  /* Condition reductions generate two reductions in the loop.  */
  vect_reduction_type reduction_type
    = STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info);
  if (reduction_type == COND_REDUCTION)
    ncopies *= 2;

  vectype = STMT_VINFO_VECTYPE (stmt_info);
  mode = TYPE_MODE (vectype);
  stmt_vec_info orig_stmt_info = vect_orig_stmt (stmt_info);

  code = gimple_assign_rhs_code (orig_stmt_info->stmt);

  if (reduction_type == EXTRACT_LAST_REDUCTION
      || reduction_type == FOLD_LEFT_REDUCTION)
    {
      /* No extra instructions needed in the prologue.  */
      prologue_cost = 0;

      if (reduction_type == EXTRACT_LAST_REDUCTION || reduc_fn != IFN_LAST)
	/* Count one reduction-like operation per vector.  */
	inside_cost = record_stmt_cost (cost_vec, ncopies, vec_to_scalar,
					stmt_info, 0, vect_body);
      else
	{
	  /* Use NELEMENTS extracts and NELEMENTS scalar ops.  */
	  unsigned int nelements = ncopies * vect_nunits_for_cost (vectype);
	  inside_cost = record_stmt_cost (cost_vec, nelements,
					  vec_to_scalar, stmt_info, 0,
					  vect_body);
	  inside_cost += record_stmt_cost (cost_vec, nelements,
					   scalar_stmt, stmt_info, 0,
					   vect_body);
	}
    }
  else
    {
      /* Add in cost for initial definition.
	 For cond reduction we have four vectors: initial index, step,
	 initial result of the data reduction, initial value of the index
	 reduction.  */
      int prologue_stmts = reduction_type == COND_REDUCTION ? 4 : 1;
      prologue_cost += record_stmt_cost (cost_vec, prologue_stmts,
					 scalar_to_vec, stmt_info, 0,
					 vect_prologue);

      /* Cost of reduction op inside loop.  */
      inside_cost = record_stmt_cost (cost_vec, ncopies, vector_stmt,
				      stmt_info, 0, vect_body);
    }

  /* Determine cost of epilogue code.

     We have a reduction operator that will reduce the vector in one statement.
     Also requires scalar extract.  */

  if (!loop || !nested_in_vect_loop_p (loop, orig_stmt_info))
    {
      if (reduc_fn != IFN_LAST)
	{
	  if (reduction_type == COND_REDUCTION)
	    {
	      /* An EQ stmt and a COND_EXPR stmt.  */
	      epilogue_cost += record_stmt_cost (cost_vec, 2,
						 vector_stmt, stmt_info, 0,
						 vect_epilogue);
	      /* Reduction of the max index and a reduction of the found
		 values.  */
	      epilogue_cost += record_stmt_cost (cost_vec, 2,
						 vec_to_scalar, stmt_info, 0,
						 vect_epilogue);
	      /* A broadcast of the max value.  */
	      epilogue_cost += record_stmt_cost (cost_vec, 1,
						 scalar_to_vec, stmt_info, 0,
						 vect_epilogue);
	    }
	  else
	    {
	      epilogue_cost += record_stmt_cost (cost_vec, 1, vector_stmt,
						 stmt_info, 0, vect_epilogue);
	      epilogue_cost += record_stmt_cost (cost_vec, 1,
						 vec_to_scalar, stmt_info, 0,
						 vect_epilogue);
	    }
	}
      else if (reduction_type == COND_REDUCTION)
	{
	  unsigned estimated_nunits = vect_nunits_for_cost (vectype);
	  /* Extraction of scalar elements.  */
	  epilogue_cost += record_stmt_cost (cost_vec,
					     2 * estimated_nunits,
					     vec_to_scalar, stmt_info, 0,
					     vect_epilogue);
	  /* Scalar max reductions via COND_EXPR / MAX_EXPR.  */
	  epilogue_cost += record_stmt_cost (cost_vec,
					     2 * estimated_nunits - 3,
					     scalar_stmt, stmt_info, 0,
					     vect_epilogue);
	}
      else if (reduction_type == EXTRACT_LAST_REDUCTION
	       || reduction_type == FOLD_LEFT_REDUCTION)
	/* No extra instructions needed in the epilogue.  */
	;
      else
	{
	  int vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype));
	  tree bitsize =
	    TYPE_SIZE (TREE_TYPE (gimple_assign_lhs (orig_stmt_info->stmt)));
	  int element_bitsize = tree_to_uhwi (bitsize);
	  int nelements = vec_size_in_bits / element_bitsize;

	  if (code == COND_EXPR)
	    code = MAX_EXPR;

	  optab = optab_for_tree_code (code, vectype, optab_default);

	  /* We have a whole vector shift available.  */
	  if (optab != unknown_optab
	      && VECTOR_MODE_P (mode)
	      && optab_handler (optab, mode) != CODE_FOR_nothing
	      && have_whole_vector_shift (mode))
	    {
	      /* Final reduction via vector shifts and the reduction operator.
		 Also requires scalar extract.  */
	      epilogue_cost += record_stmt_cost (cost_vec,
						 exact_log2 (nelements) * 2,
						 vector_stmt, stmt_info, 0,
						 vect_epilogue);
	      epilogue_cost += record_stmt_cost (cost_vec, 1,
						 vec_to_scalar, stmt_info, 0,
						 vect_epilogue);
	    }
	  else
	    /* Use extracts and reduction op for final reduction.  For N
	       elements, we have N extracts and N-1 reduction ops.  */
	    epilogue_cost += record_stmt_cost (cost_vec,
					       nelements + nelements - 1,
					       vector_stmt, stmt_info, 0,
					       vect_epilogue);
	}
    }

  if (dump_enabled_p ())
    dump_printf (MSG_NOTE,
		 "vect_model_reduction_cost: inside_cost = %d, "
		 "prologue_cost = %d, epilogue_cost = %d .\n", inside_cost,
		 prologue_cost, epilogue_cost);
}
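/* Illustration (added commentary): for a plain V4SI plus-reduction with
   ncopies == 1 and a target-supported REDUC_FN, the model above records one
   scalar_to_vec in the prologue (the initial vector definition), one
   vector_stmt in the loop body (the vector add), and one vector_stmt plus
   one vec_to_scalar in the epilogue (the final reduction and the scalar
   extract).  */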
/* Function vect_model_induction_cost.

   Models cost for induction operations.  */

static void
vect_model_induction_cost (stmt_vec_info stmt_info, int ncopies,
			   stmt_vector_for_cost *cost_vec)
{
  unsigned inside_cost, prologue_cost;

  if (PURE_SLP_STMT (stmt_info))
    return;

  /* loop cost for vec_loop.  */
  inside_cost = record_stmt_cost (cost_vec, ncopies, vector_stmt,
				  stmt_info, 0, vect_body);

  /* prologue cost for vec_init and vec_step.  */
  prologue_cost = record_stmt_cost (cost_vec, 2, scalar_to_vec,
				    stmt_info, 0, vect_prologue);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_model_induction_cost: inside_cost = %d, "
		     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}
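/* Illustration (added commentary): with ncopies == 1 this charges a single
   vector_stmt to the loop body (the vector IV update) and two scalar_to_vec
   operations to the prologue (building the initial vector and the step
   vector).  */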
/* Function get_initial_def_for_reduction

   Input:
   STMT_VINFO - a stmt that performs a reduction operation in the loop.
   INIT_VAL - the initial value of the reduction variable

   Output:
   ADJUSTMENT_DEF - a tree that holds a value to be added to the final result
	of the reduction (used for adjusting the epilog - see below).
   Return a vector variable, initialized according to the operation that
	STMT_VINFO performs.  This vector will be used as the initial value
	of the vector of partial results.

   Option1 (adjust in epilog): Initialize the vector as follows:
     add/bit or/xor:    [0,0,...,0,0]
     mult/bit and:      [1,1,...,1,1]
     min/max/cond_expr: [init_val,init_val,..,init_val,init_val]
   and when necessary (e.g. add/mult case) let the caller know
   that it needs to adjust the result by init_val.

   Option2: Initialize the vector as follows:
     add/bit or/xor:    [init_val,0,0,...,0]
     mult/bit and:      [init_val,1,1,...,1]
     min/max/cond_expr: [init_val,init_val,...,init_val]
   and no adjustments are needed.

   For example, for the following code:

   s = init_val;
   for (i = 0; i < n; i++)
     s = s + a[i];

   STMT_VINFO is 's = s + a[i]', and the reduction variable is 's'.
   For a vector of 4 units, we want to return either [0,0,0,init_val],
   or [0,0,0,0] and let the caller know that it needs to adjust
   the result at the end by 'init_val'.

   FORNOW, we are using the 'adjust in epilog' scheme, because this way the
   initialization vector is simpler (same element in all entries), if
   ADJUSTMENT_DEF is not NULL, and Option2 otherwise.

   A cost model should help decide between these two schemes.  */
static tree
get_initial_def_for_reduction (stmt_vec_info stmt_vinfo, tree init_val,
			       tree *adjustment_def)
{
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  tree scalar_type = TREE_TYPE (init_val);
  tree vectype = get_vectype_for_scalar_type (scalar_type);
  enum tree_code code = gimple_assign_rhs_code (stmt_vinfo->stmt);
  tree def_for_init;
  tree init_def;
  REAL_VALUE_TYPE real_init_val = dconst0;
  int int_init_val = 0;
  gimple_seq stmts = NULL;

  gcc_assert (vectype);

  gcc_assert (POINTER_TYPE_P (scalar_type) || INTEGRAL_TYPE_P (scalar_type)
	      || SCALAR_FLOAT_TYPE_P (scalar_type));

  gcc_assert (nested_in_vect_loop_p (loop, stmt_vinfo)
	      || loop == (gimple_bb (stmt_vinfo->stmt))->loop_father);

  vect_reduction_type reduction_type
    = STMT_VINFO_VEC_REDUCTION_TYPE (stmt_vinfo);

  switch (code)
    {
    case WIDEN_SUM_EXPR:
    case DOT_PROD_EXPR:
    case SAD_EXPR:
    case PLUS_EXPR:
    case MINUS_EXPR:
    case BIT_IOR_EXPR:
    case BIT_XOR_EXPR:
    case MULT_EXPR:
    case BIT_AND_EXPR:
      {
	/* ADJUSTMENT_DEF is NULL when called from
	   vect_create_epilog_for_reduction to vectorize double reduction.  */
	if (adjustment_def)
	  *adjustment_def = init_val;

	if (code == MULT_EXPR)
	  {
	    real_init_val = dconst1;
	    int_init_val = 1;
	  }

	if (code == BIT_AND_EXPR)
	  int_init_val = -1;

	if (SCALAR_FLOAT_TYPE_P (scalar_type))
	  def_for_init = build_real (scalar_type, real_init_val);
	else
	  def_for_init = build_int_cst (scalar_type, int_init_val);

	if (adjustment_def)
	  /* Option1: the first element is '0' or '1' as well.  */
	  init_def = gimple_build_vector_from_val (&stmts, vectype,
						   def_for_init);
	else if (!TYPE_VECTOR_SUBPARTS (vectype).is_constant ())
	  {
	    /* Option2 (variable length): the first element is INIT_VAL.  */
	    init_def = gimple_build_vector_from_val (&stmts, vectype,
						     def_for_init);
	    init_def = gimple_build (&stmts, CFN_VEC_SHL_INSERT,
				     vectype, init_def, init_val);
	  }
	else
	  {
	    /* Option2: the first element is INIT_VAL.  */
	    tree_vector_builder elts (vectype, 1, 2);
	    elts.quick_push (init_val);
	    elts.quick_push (def_for_init);
	    init_def = gimple_build_vector (&stmts, &elts);
	  }
      }
      break;

    case MIN_EXPR:
    case MAX_EXPR:
    case COND_EXPR:
      {
	if (adjustment_def)
	  {
	    *adjustment_def = NULL_TREE;
	    if (reduction_type != COND_REDUCTION
		&& reduction_type != EXTRACT_LAST_REDUCTION)
	      {
		init_def = vect_get_vec_def_for_operand (init_val, stmt_vinfo);
		break;
	      }
	  }
	init_val = gimple_convert (&stmts, TREE_TYPE (vectype), init_val);
	init_def = gimple_build_vector_from_val (&stmts, vectype, init_val);
      }
      break;

    default:
      gcc_unreachable ();
    }

  if (stmts)
    gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
  return init_def;
}
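/* Example (added commentary): for a product reduction "s *= a[i]" with the
   adjust-in-epilog scheme, the code above builds the initial vector
   {1,1,...,1} and reports the original init_val through ADJUSTMENT_DEF so
   that the epilogue can fold it back into the final scalar result.  */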
/* Get at the initial defs for the reduction PHIs in SLP_NODE.
   NUMBER_OF_VECTORS is the number of vector defs to create.
   If NEUTRAL_OP is nonnull, introducing extra elements of that
   value will not change the result.  */

static void
get_initial_defs_for_reduction (slp_tree slp_node,
				vec<tree> *vec_oprnds,
				unsigned int number_of_vectors,
				bool reduc_chain, tree neutral_op)
{
  vec<stmt_vec_info> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
  stmt_vec_info stmt_vinfo = stmts[0];
  unsigned HOST_WIDE_INT nunits;
  unsigned j, number_of_places_left_in_vector;
  tree vector_type;
  unsigned int group_size = stmts.length ();
  unsigned int i;
  struct loop *loop;

  vector_type = STMT_VINFO_VECTYPE (stmt_vinfo);

  gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def);

  loop = (gimple_bb (stmt_vinfo->stmt))->loop_father;
  gcc_assert (loop);
  edge pe = loop_preheader_edge (loop);

  gcc_assert (!reduc_chain || neutral_op);

  /* NUMBER_OF_COPIES is the number of times we need to use the same values in
     created vectors.  It is greater than 1 if unrolling is performed.

     For example, we have two scalar operands, s1 and s2 (e.g., group of
     strided accesses of size two), while NUNITS is four (i.e., four scalars
     of this type can be packed in a vector).  The output vector will contain
     two copies of each scalar operand: {s1, s2, s1, s2}.  (NUMBER_OF_COPIES
     will be 2).

     If REDUC_GROUP_SIZE > NUNITS, the scalars will be split into several
     vectors containing the operands.

     For example, NUNITS is four as before, and the group size is 8
     (s1, s2, ..., s8).  We will create two vectors {s1, s2, s3, s4} and
     {s5, s6, s7, s8}.  */

  if (!TYPE_VECTOR_SUBPARTS (vector_type).is_constant (&nunits))
    nunits = group_size;

  number_of_places_left_in_vector = nunits;
  bool constant_p = true;
  tree_vector_builder elts (vector_type, nunits, 1);
  elts.quick_grow (nunits);
  gimple_seq ctor_seq = NULL;
  for (j = 0; j < nunits * number_of_vectors; ++j)
    {
      tree op;
      i = j % group_size;
      stmt_vinfo = stmts[i];

      /* Get the def before the loop.  In reduction chain we have only
	 one initial value.  Else we have as many as PHIs in the group.  */
      if (reduc_chain)
	op = j != 0 ? neutral_op : PHI_ARG_DEF_FROM_EDGE (stmt_vinfo->stmt, pe);
      else if (((vec_oprnds->length () + 1) * nunits
		- number_of_places_left_in_vector >= group_size)
	       && neutral_op)
	op = neutral_op;
      else
	op = PHI_ARG_DEF_FROM_EDGE (stmt_vinfo->stmt, pe);

      /* Create 'vect_ = {op0,op1,...,opn}'.  */
      number_of_places_left_in_vector--;
      elts[nunits - number_of_places_left_in_vector - 1] = op;
      if (!CONSTANT_CLASS_P (op))
	constant_p = false;

      if (number_of_places_left_in_vector == 0)
	{
	  tree init;
	  if (constant_p && !neutral_op
	      ? multiple_p (TYPE_VECTOR_SUBPARTS (vector_type), nunits)
	      : known_eq (TYPE_VECTOR_SUBPARTS (vector_type), nunits))
	    /* Build the vector directly from ELTS.  */
	    init = gimple_build_vector (&ctor_seq, &elts);
	  else if (neutral_op)
	    {
	      /* Build a vector of the neutral value and shift the
		 other elements into place.  */
	      init = gimple_build_vector_from_val (&ctor_seq, vector_type,
						   neutral_op);
	      int k = nunits;
	      while (k > 0 && elts[k - 1] == neutral_op)
		k -= 1;
	      while (k > 0)
		{
		  k -= 1;
		  init = gimple_build (&ctor_seq, CFN_VEC_SHL_INSERT,
				       vector_type, init, elts[k]);
		}
	    }
	  else
	    {
	      /* First time round, duplicate ELTS to fill the
		 required number of vectors.  */
	      duplicate_and_interleave (&ctor_seq, vector_type, elts,
					number_of_vectors, *vec_oprnds);
	      break;
	    }
	  vec_oprnds->quick_push (init);

	  number_of_places_left_in_vector = nunits;
	  elts.new_vector (vector_type, nunits, 1);
	  elts.quick_grow (nunits);
	  constant_p = true;
	}
    }

  if (ctor_seq != NULL)
    gsi_insert_seq_on_edge_immediate (pe, ctor_seq);
}
/* Function vect_create_epilog_for_reduction

   Create code at the loop-epilog to finalize the result of a reduction
   computation.

   VECT_DEFS is list of vector of partial results, i.e., the lhs's of vector
     reduction statements.
   STMT_INFO is the scalar reduction stmt that is being vectorized.
   NCOPIES is > 1 in case the vectorization factor (VF) is bigger than the
     number of elements that we can fit in a vectype (nunits).  In this case
     we have to generate more than one vector stmt - i.e - we need to "unroll"
     the vector stmt by a factor VF/nunits.  For more details see documentation
     in vectorizable_operation.
   REDUC_FN is the internal function for the epilog reduction.
   REDUCTION_PHIS is a list of the phi-nodes that carry the reduction
     computation.
   REDUC_INDEX is the index of the operand in the right hand side of the
     statement that is defined by REDUCTION_PHI.
   DOUBLE_REDUC is TRUE if double reduction phi nodes should be handled.
   SLP_NODE is an SLP node containing a group of reduction statements.  The
     first one in this group is STMT_INFO.
   INDUC_VAL is for INTEGER_INDUC_COND_REDUCTION the value to use for the case
     when the COND_EXPR is never true in the loop.  For MAX_EXPR, it needs to
     be smaller than any value of the IV in the loop, for MIN_EXPR larger than
     any value of the IV in the loop.
   INDUC_CODE is the code for epilog reduction if INTEGER_INDUC_COND_REDUCTION.
   NEUTRAL_OP is the value given by neutral_op_for_slp_reduction; it is
     null if this is not an SLP reduction.

   This function:
   1. Creates the reduction def-use cycles: sets the arguments for
      REDUCTION_PHIS:
      The loop-entry argument is the vectorized initial-value of the reduction.
      The loop-latch argument is taken from VECT_DEFS - the vector of partial
      sums.
   2. "Reduces" each vector of partial results VECT_DEFS into a single result,
      by calling the function specified by REDUC_FN if available, or by
      other means (whole-vector shifts or a scalar loop).
      The function also creates a new phi node at the loop exit to preserve
      loop-closed form, as illustrated below.

   The flow at the entry to this function:

        loop:
          vec_def = phi <null, null>            # REDUCTION_PHI
          VECT_DEF = vector_stmt                # vectorized form of STMT_INFO
          s_loop = scalar_stmt                  # (scalar) STMT_INFO
        loop_exit:
          s_out0 = phi <s_loop>                 # (scalar) EXIT_PHI
          use <s_out0>
          use <s_out0>

   The above is transformed by this function into:

        loop:
          vec_def = phi <vec_init, VECT_DEF>    # REDUCTION_PHI
          VECT_DEF = vector_stmt                # vectorized form of STMT_INFO
          s_loop = scalar_stmt                  # (scalar) STMT_INFO
        loop_exit:
          s_out0 = phi <s_loop>                 # (scalar) EXIT_PHI
          v_out1 = phi <VECT_DEF>               # NEW_EXIT_PHI
          v_out2 = reduce <v_out1>
          s_out3 = extract_field <v_out2, 0>
          s_out4 = adjust_result <s_out3>
          use <s_out4>
          use <s_out4>
*/
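/* Illustration (added commentary, not part of the original comment): for a
   simple integer sum reduced with reduc_fn == IFN_REDUC_PLUS, the "reduce"
   and "extract_field" steps in the diagram above collapse into a single
   .REDUC_PLUS call whose result is already scalar; the "adjust_result" step
   then only adds back the initial value of the sum when the adjust-in-epilog
   initialization scheme was used.  */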
4504 vect_create_epilog_for_reduction (vec
<tree
> vect_defs
,
4505 stmt_vec_info stmt_info
,
4506 gimple
*reduc_def_stmt
,
4507 int ncopies
, internal_fn reduc_fn
,
4508 vec
<stmt_vec_info
> reduction_phis
,
4511 slp_instance slp_node_instance
,
4512 tree induc_val
, enum tree_code induc_code
,
4515 stmt_vec_info prev_phi_info
;
4518 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
4519 struct loop
*loop
= LOOP_VINFO_LOOP (loop_vinfo
), *outer_loop
= NULL
;
4520 basic_block exit_bb
;
4523 gimple
*new_phi
= NULL
, *phi
;
4524 stmt_vec_info phi_info
;
4525 gimple_stmt_iterator exit_gsi
;
4527 tree new_temp
= NULL_TREE
, new_dest
, new_name
, new_scalar_dest
;
4528 gimple
*epilog_stmt
= NULL
;
4529 enum tree_code code
= gimple_assign_rhs_code (stmt_info
->stmt
);
4532 tree adjustment_def
= NULL
;
4533 tree vec_initial_def
= NULL
;
4534 tree expr
, def
, initial_def
= NULL
;
4535 tree orig_name
, scalar_result
;
4536 imm_use_iterator imm_iter
, phi_imm_iter
;
4537 use_operand_p use_p
, phi_use_p
;
4539 stmt_vec_info reduction_phi_info
= NULL
;
4540 bool nested_in_vect_loop
= false;
4541 auto_vec
<gimple
*> new_phis
;
4542 auto_vec
<stmt_vec_info
> inner_phis
;
4544 auto_vec
<tree
> scalar_results
;
4545 unsigned int group_size
= 1, k
, ratio
;
4546 auto_vec
<tree
> vec_initial_defs
;
4547 auto_vec
<gimple
*> phis
;
4548 bool slp_reduc
= false;
4549 bool direct_slp_reduc
;
4550 tree new_phi_result
;
4551 stmt_vec_info inner_phi
= NULL
;
4552 tree induction_index
= NULL_TREE
;
4555 group_size
= SLP_TREE_SCALAR_STMTS (slp_node
).length ();
4557 if (nested_in_vect_loop_p (loop
, stmt_info
))
4561 nested_in_vect_loop
= true;
4562 gcc_assert (!slp_node
);
4565 vectype
= STMT_VINFO_VECTYPE (stmt_info
);
4566 gcc_assert (vectype
);
4567 mode
= TYPE_MODE (vectype
);
4569 /* 1. Create the reduction def-use cycle:
4570 Set the arguments of REDUCTION_PHIS, i.e., transform
4573 vec_def = phi <null, null> # REDUCTION_PHI
4574 VECT_DEF = vector_stmt # vectorized form of STMT
4580 vec_def = phi <vec_init, VECT_DEF> # REDUCTION_PHI
4581 VECT_DEF = vector_stmt # vectorized form of STMT
4584 (in case of SLP, do it for all the phis). */
4586 /* Get the loop-entry arguments. */
4587 enum vect_def_type initial_def_dt
= vect_unknown_def_type
;
4590 unsigned vec_num
= SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node
);
4591 vec_initial_defs
.reserve (vec_num
);
4592 get_initial_defs_for_reduction (slp_node_instance
->reduc_phis
,
4593 &vec_initial_defs
, vec_num
,
4594 REDUC_GROUP_FIRST_ELEMENT (stmt_info
),
4599 /* Get at the scalar def before the loop, that defines the initial value
4600 of the reduction variable. */
4601 initial_def
= PHI_ARG_DEF_FROM_EDGE (reduc_def_stmt
,
4602 loop_preheader_edge (loop
));
4603 /* Optimize: if initial_def is for REDUC_MAX smaller than the base
4604 and we can't use zero for induc_val, use initial_def. Similarly
4605 for REDUC_MIN and initial_def larger than the base. */
4606 if (TREE_CODE (initial_def
) == INTEGER_CST
4607 && (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info
)
4608 == INTEGER_INDUC_COND_REDUCTION
)
4609 && !integer_zerop (induc_val
)
4610 && ((induc_code
== MAX_EXPR
4611 && tree_int_cst_lt (initial_def
, induc_val
))
4612 || (induc_code
== MIN_EXPR
4613 && tree_int_cst_lt (induc_val
, initial_def
))))
4614 induc_val
= initial_def
;
4617 /* In case of double reduction we only create a vector variable
4618 to be put in the reduction phi node. The actual statement
4619 creation is done later in this function. */
4620 vec_initial_def
= vect_create_destination_var (initial_def
, vectype
);
4621 else if (nested_in_vect_loop
)
4623 /* Do not use an adjustment def as that case is not supported
4624 correctly if ncopies is not one. */
4625 vect_is_simple_use (initial_def
, loop_vinfo
, &initial_def_dt
);
4626 vec_initial_def
= vect_get_vec_def_for_operand (initial_def
,
4631 = get_initial_def_for_reduction (stmt_info
, initial_def
,
4633 vec_initial_defs
.create (1);
4634 vec_initial_defs
.quick_push (vec_initial_def
);
4637 /* Set phi nodes arguments. */
4638 FOR_EACH_VEC_ELT (reduction_phis
, i
, phi_info
)
4640 tree vec_init_def
= vec_initial_defs
[i
];
4641 tree def
= vect_defs
[i
];
4642 for (j
= 0; j
< ncopies
; j
++)
4646 phi_info
= STMT_VINFO_RELATED_STMT (phi_info
);
4647 if (nested_in_vect_loop
)
4649 = vect_get_vec_def_for_stmt_copy (loop_vinfo
, vec_init_def
);
4652 /* Set the loop-entry arg of the reduction-phi. */
4654 gphi
*phi
= as_a
<gphi
*> (phi_info
->stmt
);
4655 if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info
)
4656 == INTEGER_INDUC_COND_REDUCTION
)
4658 /* Initialise the reduction phi to zero. This prevents initial
4659 values of non-zero interfering with the reduction op. */
4660 gcc_assert (ncopies
== 1);
4661 gcc_assert (i
== 0);
4663 tree vec_init_def_type
= TREE_TYPE (vec_init_def
);
4665 = build_vector_from_val (vec_init_def_type
, induc_val
);
4667 add_phi_arg (phi
, induc_val_vec
, loop_preheader_edge (loop
),
4671 add_phi_arg (phi
, vec_init_def
, loop_preheader_edge (loop
),
4674 /* Set the loop-latch arg for the reduction-phi. */
4676 def
= vect_get_vec_def_for_stmt_copy (loop_vinfo
, def
);
4678 add_phi_arg (phi
, def
, loop_latch_edge (loop
), UNKNOWN_LOCATION
);
4680 if (dump_enabled_p ())
4681 dump_printf_loc (MSG_NOTE
, vect_location
,
4682 "transform reduction: created def-use cycle: %G%G",
4683 phi
, SSA_NAME_DEF_STMT (def
));
4687 /* For cond reductions we want to create a new vector (INDEX_COND_EXPR)
4688 which is updated with the current index of the loop for every match of
4689 the original loop's cond_expr (VEC_STMT). This results in a vector
4690 containing the last time the condition passed for that vector lane.
4691 The first match will be a 1 to allow 0 to be used for non-matching
4692 indexes. If there are no matches at all then the vector will be all
4694 if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info
) == COND_REDUCTION
)
4696 tree indx_before_incr
, indx_after_incr
;
4697 poly_uint64 nunits_out
= TYPE_VECTOR_SUBPARTS (vectype
);
4699 gimple
*vec_stmt
= STMT_VINFO_VEC_STMT (stmt_info
)->stmt
;
4700 gcc_assert (gimple_assign_rhs_code (vec_stmt
) == VEC_COND_EXPR
);
4702 int scalar_precision
4703 = GET_MODE_PRECISION (SCALAR_TYPE_MODE (TREE_TYPE (vectype
)));
4704 tree cr_index_scalar_type
= make_unsigned_type (scalar_precision
);
4705 tree cr_index_vector_type
= build_vector_type
4706 (cr_index_scalar_type
, TYPE_VECTOR_SUBPARTS (vectype
));
4708 /* First we create a simple vector induction variable which starts
4709 with the values {1,2,3,...} (SERIES_VECT) and increments by the
4710 vector size (STEP). */
4712 /* Create a {1,2,3,...} vector. */
4713 tree series_vect
= build_index_vector (cr_index_vector_type
, 1, 1);
4715 /* Create a vector of the step value. */
4716 tree step
= build_int_cst (cr_index_scalar_type
, nunits_out
);
4717 tree vec_step
= build_vector_from_val (cr_index_vector_type
, step
);
4719 /* Create an induction variable. */
4720 gimple_stmt_iterator incr_gsi
;
4722 standard_iv_increment_position (loop
, &incr_gsi
, &insert_after
);
4723 create_iv (series_vect
, vec_step
, NULL_TREE
, loop
, &incr_gsi
,
4724 insert_after
, &indx_before_incr
, &indx_after_incr
);
4726 /* Next create a new phi node vector (NEW_PHI_TREE) which starts
4727 filled with zeros (VEC_ZERO). */
4729 /* Create a vector of 0s. */
4730 tree zero
= build_zero_cst (cr_index_scalar_type
);
4731 tree vec_zero
= build_vector_from_val (cr_index_vector_type
, zero
);
4733 /* Create a vector phi node. */
4734 tree new_phi_tree
= make_ssa_name (cr_index_vector_type
);
4735 new_phi
= create_phi_node (new_phi_tree
, loop
->header
);
4736 loop_vinfo
->add_stmt (new_phi
);
4737 add_phi_arg (as_a
<gphi
*> (new_phi
), vec_zero
,
4738 loop_preheader_edge (loop
), UNKNOWN_LOCATION
);
4740 /* Now take the condition from the loops original cond_expr
4741 (VEC_STMT) and produce a new cond_expr (INDEX_COND_EXPR) which for
4742 every match uses values from the induction variable
4743 (INDEX_BEFORE_INCR) otherwise uses values from the phi node
4745 Finally, we update the phi (NEW_PHI_TREE) to take the value of
4746 the new cond_expr (INDEX_COND_EXPR). */
4748 /* Duplicate the condition from vec_stmt. */
4749 tree ccompare
= unshare_expr (gimple_assign_rhs1 (vec_stmt
));
4751 /* Create a conditional, where the condition is taken from vec_stmt
4752 (CCOMPARE), then is the induction index (INDEX_BEFORE_INCR) and
4753 else is the phi (NEW_PHI_TREE). */
4754 tree index_cond_expr
= build3 (VEC_COND_EXPR
, cr_index_vector_type
,
4755 ccompare
, indx_before_incr
,
4757 induction_index
= make_ssa_name (cr_index_vector_type
);
4758 gimple
*index_condition
= gimple_build_assign (induction_index
,
4760 gsi_insert_before (&incr_gsi
, index_condition
, GSI_SAME_STMT
);
4761 stmt_vec_info index_vec_info
= loop_vinfo
->add_stmt (index_condition
);
4762 STMT_VINFO_VECTYPE (index_vec_info
) = cr_index_vector_type
;
4764 /* Update the phi with the vec cond. */
4765 add_phi_arg (as_a
<gphi
*> (new_phi
), induction_index
,
4766 loop_latch_edge (loop
), UNKNOWN_LOCATION
);
4769 /* 2. Create epilog code.
4770 The reduction epilog code operates across the elements of the vector
4771 of partial results computed by the vectorized loop.
4772 The reduction epilog code consists of:
4774 step 1: compute the scalar result in a vector (v_out2)
4775 step 2: extract the scalar result (s_out3) from the vector (v_out2)
4776 step 3: adjust the scalar result (s_out3) if needed.
4778 Step 1 can be accomplished using one of the following three schemes:
4779 (scheme 1) using reduc_fn, if available.
4780 (scheme 2) using whole-vector shifts, if available.
4781 (scheme 3) using a scalar loop. In this case steps 1+2 above are
4784 The overall epilog code looks like this:
4786 s_out0 = phi <s_loop> # original EXIT_PHI
4787 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
4788 v_out2 = reduce <v_out1> # step 1
4789 s_out3 = extract_field <v_out2, 0> # step 2
4790 s_out4 = adjust_result <s_out3> # step 3
4792 (step 3 is optional, and steps 1 and 2 may be combined).
4793 Lastly, the uses of s_out0 are replaced by s_out4. */
4796 /* 2.1 Create new loop-exit-phis to preserve loop-closed form:
4797 v_out1 = phi <VECT_DEF>
4798 Store them in NEW_PHIS. */
4800 exit_bb
= single_exit (loop
)->dest
;
4801 prev_phi_info
= NULL
;
4802 new_phis
.create (vect_defs
.length ());
4803 FOR_EACH_VEC_ELT (vect_defs
, i
, def
)
4805 for (j
= 0; j
< ncopies
; j
++)
4807 tree new_def
= copy_ssa_name (def
);
4808 phi
= create_phi_node (new_def
, exit_bb
);
4809 stmt_vec_info phi_info
= loop_vinfo
->add_stmt (phi
);
4811 new_phis
.quick_push (phi
);
4814 def
= vect_get_vec_def_for_stmt_copy (loop_vinfo
, def
);
4815 STMT_VINFO_RELATED_STMT (prev_phi_info
) = phi_info
;
4818 SET_PHI_ARG_DEF (phi
, single_exit (loop
)->dest_idx
, def
);
4819 prev_phi_info
= phi_info
;
4823 /* The epilogue is created for the outer-loop, i.e., for the loop being
4824 vectorized. Create exit phis for the outer loop. */
4828 exit_bb
= single_exit (loop
)->dest
;
4829 inner_phis
.create (vect_defs
.length ());
4830 FOR_EACH_VEC_ELT (new_phis
, i
, phi
)
4832 stmt_vec_info phi_info
= loop_vinfo
->lookup_stmt (phi
);
4833 tree new_result
= copy_ssa_name (PHI_RESULT (phi
));
4834 gphi
*outer_phi
= create_phi_node (new_result
, exit_bb
);
4835 SET_PHI_ARG_DEF (outer_phi
, single_exit (loop
)->dest_idx
,
4837 prev_phi_info
= loop_vinfo
->add_stmt (outer_phi
);
4838 inner_phis
.quick_push (phi_info
);
4839 new_phis
[i
] = outer_phi
;
4840 while (STMT_VINFO_RELATED_STMT (phi_info
))
4842 phi_info
= STMT_VINFO_RELATED_STMT (phi_info
);
4843 new_result
= copy_ssa_name (PHI_RESULT (phi_info
->stmt
));
4844 outer_phi
= create_phi_node (new_result
, exit_bb
);
4845 SET_PHI_ARG_DEF (outer_phi
, single_exit (loop
)->dest_idx
,
4846 PHI_RESULT (phi_info
->stmt
));
4847 stmt_vec_info outer_phi_info
= loop_vinfo
->add_stmt (outer_phi
);
4848 STMT_VINFO_RELATED_STMT (prev_phi_info
) = outer_phi_info
;
4849 prev_phi_info
= outer_phi_info
;
4854 exit_gsi
= gsi_after_labels (exit_bb
);
4856 /* 2.2 Get the relevant tree-code to use in the epilog for schemes 2,3
4857 (i.e. when reduc_fn is not available) and in the final adjustment
4858 code (if needed). Also get the original scalar reduction variable as
4859 defined in the loop. In case STMT is a "pattern-stmt" (i.e. - it
4860 represents a reduction pattern), the tree-code and scalar-def are
4861 taken from the original stmt that the pattern-stmt (STMT) replaces.
4862 Otherwise (it is a regular reduction) - the tree-code and scalar-def
4863 are taken from STMT. */
4865 stmt_vec_info orig_stmt_info
= vect_orig_stmt (stmt_info
);
4866 if (orig_stmt_info
!= stmt_info
)
4868 /* Reduction pattern */
4869 gcc_assert (STMT_VINFO_IN_PATTERN_P (orig_stmt_info
));
4870 gcc_assert (STMT_VINFO_RELATED_STMT (orig_stmt_info
) == stmt_info
);
4873 code
= gimple_assign_rhs_code (orig_stmt_info
->stmt
);
4874 /* For MINUS_EXPR the initial vector is [init_val,0,...,0], therefore,
4875 partial results are added and not subtracted. */
4876 if (code
== MINUS_EXPR
)
4879 scalar_dest
= gimple_assign_lhs (orig_stmt_info
->stmt
);
4880 scalar_type
= TREE_TYPE (scalar_dest
);
4881 scalar_results
.create (group_size
);
4882 new_scalar_dest
= vect_create_destination_var (scalar_dest
, NULL
);
4883 bitsize
= TYPE_SIZE (scalar_type
);
4885 /* In case this is a reduction in an inner-loop while vectorizing an outer
4886 loop - we don't need to extract a single scalar result at the end of the
4887 inner-loop (unless it is double reduction, i.e., the use of reduction is
4888 outside the outer-loop). The final vector of partial results will be used
4889 in the vectorized outer-loop, or reduced to a scalar result at the end of
4891 if (nested_in_vect_loop
&& !double_reduc
)
4892 goto vect_finalize_reduction
;
4894 /* SLP reduction without reduction chain, e.g.,
4898 b2 = operation (b1) */
4899 slp_reduc
= (slp_node
&& !REDUC_GROUP_FIRST_ELEMENT (stmt_info
));
4901 /* True if we should implement SLP_REDUC using native reduction operations
4902 instead of scalar operations. */
4903 direct_slp_reduc
= (reduc_fn
!= IFN_LAST
4905 && !TYPE_VECTOR_SUBPARTS (vectype
).is_constant ());
4907 /* In case of reduction chain, e.g.,
4910 a3 = operation (a2),
4912 we may end up with more than one vector result. Here we reduce them to
4914 if (REDUC_GROUP_FIRST_ELEMENT (stmt_info
) || direct_slp_reduc
)
4916 tree first_vect
= PHI_RESULT (new_phis
[0]);
4917 gassign
*new_vec_stmt
= NULL
;
4918 vec_dest
= vect_create_destination_var (scalar_dest
, vectype
);
4919 for (k
= 1; k
< new_phis
.length (); k
++)
4921 gimple
*next_phi
= new_phis
[k
];
4922 tree second_vect
= PHI_RESULT (next_phi
);
4923 tree tem
= make_ssa_name (vec_dest
, new_vec_stmt
);
4924 new_vec_stmt
= gimple_build_assign (tem
, code
,
4925 first_vect
, second_vect
);
4926 gsi_insert_before (&exit_gsi
, new_vec_stmt
, GSI_SAME_STMT
);
4930 new_phi_result
= first_vect
;
4933 new_phis
.truncate (0);
4934 new_phis
.safe_push (new_vec_stmt
);
4937 /* Likewise if we couldn't use a single defuse cycle. */
4938 else if (ncopies
> 1)
4940 gcc_assert (new_phis
.length () == 1);
4941 tree first_vect
= PHI_RESULT (new_phis
[0]);
4942 gassign
*new_vec_stmt
= NULL
;
4943 vec_dest
= vect_create_destination_var (scalar_dest
, vectype
);
4944 stmt_vec_info next_phi_info
= loop_vinfo
->lookup_stmt (new_phis
[0]);
4945 for (int k
= 1; k
< ncopies
; ++k
)
4947 next_phi_info
= STMT_VINFO_RELATED_STMT (next_phi_info
);
4948 tree second_vect
= PHI_RESULT (next_phi_info
->stmt
);
4949 tree tem
= make_ssa_name (vec_dest
, new_vec_stmt
);
4950 new_vec_stmt
= gimple_build_assign (tem
, code
,
4951 first_vect
, second_vect
);
4952 gsi_insert_before (&exit_gsi
, new_vec_stmt
, GSI_SAME_STMT
);
4955 new_phi_result
= first_vect
;
4956 new_phis
.truncate (0);
4957 new_phis
.safe_push (new_vec_stmt
);
4960 new_phi_result
= PHI_RESULT (new_phis
[0]);
4962 if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info
) == COND_REDUCTION
4963 && reduc_fn
!= IFN_LAST
)
4965 /* For condition reductions, we have a vector (NEW_PHI_RESULT) containing
4966 various data values where the condition matched and another vector
4967 (INDUCTION_INDEX) containing all the indexes of those matches. We
4968 need to extract the last matching index (which will be the index with
4969 highest value) and use this to index into the data vector.
4970 For the case where there were no matches, the data vector will contain
4971 all default values and the index vector will be all zeros. */
4973 /* Get various versions of the type of the vector of indexes. */
4974 tree index_vec_type
= TREE_TYPE (induction_index
);
4975 gcc_checking_assert (TYPE_UNSIGNED (index_vec_type
));
4976 tree index_scalar_type
= TREE_TYPE (index_vec_type
);
4977 tree index_vec_cmp_type
= build_same_sized_truth_vector_type
4980 /* Get an unsigned integer version of the type of the data vector. */
4981 int scalar_precision
4982 = GET_MODE_PRECISION (SCALAR_TYPE_MODE (scalar_type
));
4983 tree scalar_type_unsigned
= make_unsigned_type (scalar_precision
);
4984 tree vectype_unsigned
= build_vector_type
4985 (scalar_type_unsigned
, TYPE_VECTOR_SUBPARTS (vectype
));
4987 /* First we need to create a vector (ZERO_VEC) of zeros and another
4988 vector (MAX_INDEX_VEC) filled with the last matching index, which we
4989 can create using a MAX reduction and then expanding.
4990 In the case where the loop never made any matches, the max index will
4993 /* Vector of {0, 0, 0,...}. */
4994 tree zero_vec
= make_ssa_name (vectype
);
4995 tree zero_vec_rhs
= build_zero_cst (vectype
);
4996 gimple
*zero_vec_stmt
= gimple_build_assign (zero_vec
, zero_vec_rhs
);
4997 gsi_insert_before (&exit_gsi
, zero_vec_stmt
, GSI_SAME_STMT
);
4999 /* Find maximum value from the vector of found indexes. */
5000 tree max_index
= make_ssa_name (index_scalar_type
);
5001 gcall
*max_index_stmt
= gimple_build_call_internal (IFN_REDUC_MAX
,
5002 1, induction_index
);
5003 gimple_call_set_lhs (max_index_stmt
, max_index
);
5004 gsi_insert_before (&exit_gsi
, max_index_stmt
, GSI_SAME_STMT
);
5006 /* Vector of {max_index, max_index, max_index,...}. */
5007 tree max_index_vec
= make_ssa_name (index_vec_type
);
5008 tree max_index_vec_rhs
= build_vector_from_val (index_vec_type
,
5010 gimple
*max_index_vec_stmt
= gimple_build_assign (max_index_vec
,
5012 gsi_insert_before (&exit_gsi
, max_index_vec_stmt
, GSI_SAME_STMT
);
5014 /* Next we compare the new vector (MAX_INDEX_VEC) full of max indexes
5015 with the vector (INDUCTION_INDEX) of found indexes, choosing values
5016 from the data vector (NEW_PHI_RESULT) for matches, 0 (ZERO_VEC)
5017 otherwise. Only one value should match, resulting in a vector
5018 (VEC_COND) with one data value and the rest zeros.
5019 In the case where the loop never made any matches, every index will
5020 match, resulting in a vector with all data values (which will all be
5021 the default value). */
5023 /* Compare the max index vector to the vector of found indexes to find
5024 the position of the max value. */
5025 tree vec_compare
= make_ssa_name (index_vec_cmp_type
);
5026 gimple
*vec_compare_stmt
= gimple_build_assign (vec_compare
, EQ_EXPR
,
5029 gsi_insert_before (&exit_gsi
, vec_compare_stmt
, GSI_SAME_STMT
);
5031 /* Use the compare to choose either values from the data vector or
5033 tree vec_cond
= make_ssa_name (vectype
);
5034 gimple
*vec_cond_stmt
= gimple_build_assign (vec_cond
, VEC_COND_EXPR
,
5035 vec_compare
, new_phi_result
,
5037 gsi_insert_before (&exit_gsi
, vec_cond_stmt
, GSI_SAME_STMT
);
5039 /* Finally we need to extract the data value from the vector (VEC_COND)
5040 into a scalar (MATCHED_DATA_REDUC). Logically we want to do an OR
5041 reduction, but because this doesn't exist, we can use a MAX reduction
5042 instead. The data value might be signed or a float so we need to cast
5044 In the case where the loop never made any matches, the data values are
5045 all identical, and so will reduce down correctly. */
5047 /* Make the matched data values unsigned. */
5048 tree vec_cond_cast
= make_ssa_name (vectype_unsigned
);
5049 tree vec_cond_cast_rhs
= build1 (VIEW_CONVERT_EXPR
, vectype_unsigned
,
5051 gimple
*vec_cond_cast_stmt
= gimple_build_assign (vec_cond_cast
,
5054 gsi_insert_before (&exit_gsi
, vec_cond_cast_stmt
, GSI_SAME_STMT
);
5056 /* Reduce down to a scalar value. */
5057 tree data_reduc
= make_ssa_name (scalar_type_unsigned
);
5058 gcall
*data_reduc_stmt
= gimple_build_call_internal (IFN_REDUC_MAX
,
5060 gimple_call_set_lhs (data_reduc_stmt
, data_reduc
);
5061 gsi_insert_before (&exit_gsi
, data_reduc_stmt
, GSI_SAME_STMT
);
5063 /* Convert the reduced value back to the result type and set as the
5065 gimple_seq stmts
= NULL
;
5066 new_temp
= gimple_build (&stmts
, VIEW_CONVERT_EXPR
, scalar_type
,
5068 gsi_insert_seq_before (&exit_gsi
, stmts
, GSI_SAME_STMT
);
5069 scalar_results
.safe_push (new_temp
);
5071 else if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info
) == COND_REDUCTION
5072 && reduc_fn
== IFN_LAST
)
5074 /* Condition reduction without supported IFN_REDUC_MAX. Generate
5076 idx_val = induction_index[0];
5077 val = data_reduc[0];
5078 for (idx = 0, val = init, i = 0; i < nelts; ++i)
5079 if (induction_index[i] > idx_val)
5080 val = data_reduc[i], idx_val = induction_index[i];
5083 tree data_eltype
= TREE_TYPE (TREE_TYPE (new_phi_result
));
5084 tree idx_eltype
= TREE_TYPE (TREE_TYPE (induction_index
));
5085 unsigned HOST_WIDE_INT el_size
= tree_to_uhwi (TYPE_SIZE (idx_eltype
));
5086 poly_uint64 nunits
= TYPE_VECTOR_SUBPARTS (TREE_TYPE (induction_index
));
5087 /* Enforced by vectorizable_reduction, which ensures we have target
5088 support before allowing a conditional reduction on variable-length
5090 unsigned HOST_WIDE_INT v_size
= el_size
* nunits
.to_constant ();
5091 tree idx_val
= NULL_TREE
, val
= NULL_TREE
;
5092 for (unsigned HOST_WIDE_INT off
= 0; off
< v_size
; off
+= el_size
)
5094 tree old_idx_val
= idx_val
;
5096 idx_val
= make_ssa_name (idx_eltype
);
5097 epilog_stmt
= gimple_build_assign (idx_val
, BIT_FIELD_REF
,
5098 build3 (BIT_FIELD_REF
, idx_eltype
,
5100 bitsize_int (el_size
),
5101 bitsize_int (off
)));
5102 gsi_insert_before (&exit_gsi
, epilog_stmt
, GSI_SAME_STMT
);
5103 val
= make_ssa_name (data_eltype
);
5104 epilog_stmt
= gimple_build_assign (val
, BIT_FIELD_REF
,
5105 build3 (BIT_FIELD_REF
,
5108 bitsize_int (el_size
),
5109 bitsize_int (off
)));
5110 gsi_insert_before (&exit_gsi
, epilog_stmt
, GSI_SAME_STMT
);
5113 tree new_idx_val
= idx_val
;
5114 if (off
!= v_size
- el_size
)
5116 new_idx_val
= make_ssa_name (idx_eltype
);
5117 epilog_stmt
= gimple_build_assign (new_idx_val
,
5120 gsi_insert_before (&exit_gsi
, epilog_stmt
, GSI_SAME_STMT
);
5122 tree new_val
= make_ssa_name (data_eltype
);
5123 epilog_stmt
= gimple_build_assign (new_val
,
5130 gsi_insert_before (&exit_gsi
, epilog_stmt
, GSI_SAME_STMT
);
5131 idx_val
= new_idx_val
;
5135 /* Convert the reduced value back to the result type and set as the
5137 gimple_seq stmts
= NULL
;
5138 val
= gimple_convert (&stmts
, scalar_type
, val
);
5139 gsi_insert_seq_before (&exit_gsi
, stmts
, GSI_SAME_STMT
);
5140 scalar_results
.safe_push (val
);
5143 /* 2.3 Create the reduction code, using one of the three schemes described
5144 above. In SLP we simply need to extract all the elements from the
5145 vector (without reducing them), so we use scalar shifts. */
5146 else if (reduc_fn
!= IFN_LAST
&& !slp_reduc
)
5152 v_out2 = reduc_expr <v_out1> */
5154 if (dump_enabled_p ())
5155 dump_printf_loc (MSG_NOTE
, vect_location
,
5156 "Reduce using direct vector reduction.\n");
5158 vec_elem_type
= TREE_TYPE (TREE_TYPE (new_phi_result
));
5159 if (!useless_type_conversion_p (scalar_type
, vec_elem_type
))
5162 = vect_create_destination_var (scalar_dest
, vec_elem_type
);
5163 epilog_stmt
= gimple_build_call_internal (reduc_fn
, 1,
5165 gimple_set_lhs (epilog_stmt
, tmp_dest
);
5166 new_temp
= make_ssa_name (tmp_dest
, epilog_stmt
);
5167 gimple_set_lhs (epilog_stmt
, new_temp
);
5168 gsi_insert_before (&exit_gsi
, epilog_stmt
, GSI_SAME_STMT
);
5170 epilog_stmt
= gimple_build_assign (new_scalar_dest
, NOP_EXPR
,
5175 epilog_stmt
= gimple_build_call_internal (reduc_fn
, 1,
5177 gimple_set_lhs (epilog_stmt
, new_scalar_dest
);
5180 new_temp
= make_ssa_name (new_scalar_dest
, epilog_stmt
);
5181 gimple_set_lhs (epilog_stmt
, new_temp
);
5182 gsi_insert_before (&exit_gsi
, epilog_stmt
, GSI_SAME_STMT
);
5184 if ((STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info
)
5185 == INTEGER_INDUC_COND_REDUCTION
)
5186 && !operand_equal_p (initial_def
, induc_val
, 0))
5188 /* Earlier we set the initial value to be a vector of induc_val
5189 values. Check the result and if it is induc_val then replace
5190 with the original initial value, unless induc_val is
5191 the same as initial_def already. */
5192 tree zcompare
= build2 (EQ_EXPR
, boolean_type_node
, new_temp
,
5195 tmp
= make_ssa_name (new_scalar_dest
);
5196 epilog_stmt
= gimple_build_assign (tmp
, COND_EXPR
, zcompare
,
5197 initial_def
, new_temp
);
5198 gsi_insert_before (&exit_gsi
, epilog_stmt
, GSI_SAME_STMT
);
5202 scalar_results
.safe_push (new_temp
);
5204 else if (direct_slp_reduc
)
5206 /* Here we create one vector for each of the REDUC_GROUP_SIZE results,
5207 with the elements for other SLP statements replaced with the
5208 neutral value. We can then do a normal reduction on each vector. */
5210 /* Enforced by vectorizable_reduction. */
5211 gcc_assert (new_phis
.length () == 1);
5212 gcc_assert (pow2p_hwi (group_size
));
5214 slp_tree orig_phis_slp_node
= slp_node_instance
->reduc_phis
;
5215 vec
<stmt_vec_info
> orig_phis
5216 = SLP_TREE_SCALAR_STMTS (orig_phis_slp_node
);
5217 gimple_seq seq
= NULL
;
5219 /* Build a vector {0, 1, 2, ...}, with the same number of elements
5220 and the same element size as VECTYPE. */
5221 tree index
= build_index_vector (vectype
, 0, 1);
5222 tree index_type
= TREE_TYPE (index
);
5223 tree index_elt_type
= TREE_TYPE (index_type
);
5224 tree mask_type
= build_same_sized_truth_vector_type (index_type
);
5226 /* Create a vector that, for each element, identifies which of
5227 the REDUC_GROUP_SIZE results should use it. */
5228 tree index_mask
= build_int_cst (index_elt_type
, group_size
- 1);
5229 index
= gimple_build (&seq
, BIT_AND_EXPR
, index_type
, index
,
5230 build_vector_from_val (index_type
, index_mask
));
5232 /* Get a neutral vector value. This is simply a splat of the neutral
5233 scalar value if we have one, otherwise the initial scalar value
5234 is itself a neutral value. */
5235 tree vector_identity
= NULL_TREE
;
5237 vector_identity
= gimple_build_vector_from_val (&seq
, vectype
,
5239 for (unsigned int i
= 0; i
< group_size
; ++i
)
5241 /* If there's no universal neutral value, we can use the
5242 initial scalar value from the original PHI. This is used
5243 for MIN and MAX reduction, for example. */
5247 = PHI_ARG_DEF_FROM_EDGE (orig_phis
[i
]->stmt
,
5248 loop_preheader_edge (loop
));
5249 vector_identity
= gimple_build_vector_from_val (&seq
, vectype
,
5253 /* Calculate the equivalent of:
5255 sel[j] = (index[j] == i);
5257 which selects the elements of NEW_PHI_RESULT that should
5258 be included in the result. */
5259 tree compare_val
= build_int_cst (index_elt_type
, i
);
5260 compare_val
= build_vector_from_val (index_type
, compare_val
);
5261 tree sel
= gimple_build (&seq
, EQ_EXPR
, mask_type
,
5262 index
, compare_val
);
5264 /* Calculate the equivalent of:
5266 vec = seq ? new_phi_result : vector_identity;
5268 VEC is now suitable for a full vector reduction. */
5269 tree vec
= gimple_build (&seq
, VEC_COND_EXPR
, vectype
,
5270 sel
, new_phi_result
, vector_identity
);
5272 /* Do the reduction and convert it to the appropriate type. */
5273 tree scalar
= gimple_build (&seq
, as_combined_fn (reduc_fn
),
5274 TREE_TYPE (vectype
), vec
);
5275 scalar
= gimple_convert (&seq
, scalar_type
, scalar
);
5276 scalar_results
.safe_push (scalar
);
5278 gsi_insert_seq_before (&exit_gsi
, seq
, GSI_SAME_STMT
);
5282 bool reduce_with_shift
;
5285 /* COND reductions all do the final reduction with MAX_EXPR
5287 if (code
== COND_EXPR
)
5289 if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info
)
5290 == INTEGER_INDUC_COND_REDUCTION
)
5292 else if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info
)
5293 == CONST_COND_REDUCTION
)
5294 code
= STMT_VINFO_VEC_CONST_COND_REDUC_CODE (stmt_info
);
5299 /* See if the target wants to do the final (shift) reduction
5300 in a vector mode of smaller size and first reduce upper/lower
5301 halves against each other. */
5302 enum machine_mode mode1
= mode
;
5303 unsigned sz
= tree_to_uhwi (TYPE_SIZE_UNIT (vectype
));
5306 && (mode1
= targetm
.vectorize
.split_reduction (mode
)) != mode
)
5307 sz1
= GET_MODE_SIZE (mode1
).to_constant ();
5309 tree vectype1
= get_vectype_for_scalar_type_and_size (scalar_type
, sz1
);
5310 reduce_with_shift
= have_whole_vector_shift (mode1
);
5311 if (!VECTOR_MODE_P (mode1
))
5312 reduce_with_shift
= false;
5315 optab optab
= optab_for_tree_code (code
, vectype1
, optab_default
);
5316 if (optab_handler (optab
, mode1
) == CODE_FOR_nothing
)
5317 reduce_with_shift
= false;
5320 /* First reduce the vector to the desired vector size we should
5321 do shift reduction on by combining upper and lower halves. */
5322 new_temp
= new_phi_result
;
5325 gcc_assert (!slp_reduc
);
5327 vectype1
= get_vectype_for_scalar_type_and_size (scalar_type
, sz
);
5329 /* The target has to make sure we support lowpart/highpart
5330 extraction, either via direct vector extract or through
5331 an integer mode punning. */
5333 if (convert_optab_handler (vec_extract_optab
,
5334 TYPE_MODE (TREE_TYPE (new_temp
)),
5335 TYPE_MODE (vectype1
))
5336 != CODE_FOR_nothing
)
5338 /* Extract sub-vectors directly once vec_extract becomes
5339 a conversion optab. */
5340 dst1
= make_ssa_name (vectype1
);
5342 = gimple_build_assign (dst1
, BIT_FIELD_REF
,
5343 build3 (BIT_FIELD_REF
, vectype1
,
5344 new_temp
, TYPE_SIZE (vectype1
),
5346 gsi_insert_before (&exit_gsi
, epilog_stmt
, GSI_SAME_STMT
);
5347 dst2
= make_ssa_name (vectype1
);
5349 = gimple_build_assign (dst2
, BIT_FIELD_REF
,
5350 build3 (BIT_FIELD_REF
, vectype1
,
5351 new_temp
, TYPE_SIZE (vectype1
),
5352 bitsize_int (sz
* BITS_PER_UNIT
)));
5353 gsi_insert_before (&exit_gsi
, epilog_stmt
, GSI_SAME_STMT
);
5357 /* Extract via punning to appropriately sized integer mode
5359 tree eltype
= build_nonstandard_integer_type (sz
* BITS_PER_UNIT
,
5361 tree etype
= build_vector_type (eltype
, 2);
5362 gcc_assert (convert_optab_handler (vec_extract_optab
,
5365 != CODE_FOR_nothing
);
5366 tree tem
= make_ssa_name (etype
);
5367 epilog_stmt
= gimple_build_assign (tem
, VIEW_CONVERT_EXPR
,
5368 build1 (VIEW_CONVERT_EXPR
,
5370 gsi_insert_before (&exit_gsi
, epilog_stmt
, GSI_SAME_STMT
);
5372 tem
= make_ssa_name (eltype
);
5374 = gimple_build_assign (tem
, BIT_FIELD_REF
,
5375 build3 (BIT_FIELD_REF
, eltype
,
5376 new_temp
, TYPE_SIZE (eltype
),
5378 gsi_insert_before (&exit_gsi
, epilog_stmt
, GSI_SAME_STMT
);
5379 dst1
= make_ssa_name (vectype1
);
5380 epilog_stmt
= gimple_build_assign (dst1
, VIEW_CONVERT_EXPR
,
5381 build1 (VIEW_CONVERT_EXPR
,
5383 gsi_insert_before (&exit_gsi
, epilog_stmt
, GSI_SAME_STMT
);
5384 tem
= make_ssa_name (eltype
);
5386 = gimple_build_assign (tem
, BIT_FIELD_REF
,
5387 build3 (BIT_FIELD_REF
, eltype
,
5388 new_temp
, TYPE_SIZE (eltype
),
5389 bitsize_int (sz
* BITS_PER_UNIT
)));
5390 gsi_insert_before (&exit_gsi
, epilog_stmt
, GSI_SAME_STMT
);
5391 dst2
= make_ssa_name (vectype1
);
5392 epilog_stmt
= gimple_build_assign (dst2
, VIEW_CONVERT_EXPR
,
5393 build1 (VIEW_CONVERT_EXPR
,
5395 gsi_insert_before (&exit_gsi
, epilog_stmt
, GSI_SAME_STMT
);
5398 new_temp
= make_ssa_name (vectype1
);
5399 epilog_stmt
= gimple_build_assign (new_temp
, code
, dst1
, dst2
);
5400 gsi_insert_before (&exit_gsi
, epilog_stmt
, GSI_SAME_STMT
);
5403 if (reduce_with_shift
&& !slp_reduc
)
5405 int element_bitsize
= tree_to_uhwi (bitsize
);
5406 /* Enforced by vectorizable_reduction, which disallows SLP reductions
5407 for variable-length vectors and also requires direct target support
5408 for loop reductions. */
5409 int vec_size_in_bits
= tree_to_uhwi (TYPE_SIZE (vectype1
));
5410 int nelements
= vec_size_in_bits
/ element_bitsize
;
5411 vec_perm_builder sel
;
5412 vec_perm_indices indices
;
5416 tree zero_vec
= build_zero_cst (vectype1
);
5418 for (offset = nelements/2; offset >= 1; offset/=2)
5420 Create: va' = vec_shift <va, offset>
5421 Create: va = vop <va, va'>
5426 if (dump_enabled_p ())
5427 dump_printf_loc (MSG_NOTE
, vect_location
,
5428 "Reduce using vector shifts\n");
5430 vec_dest
= vect_create_destination_var (scalar_dest
, vectype1
);
5431 for (elt_offset
= nelements
/ 2;
5435 calc_vec_perm_mask_for_shift (elt_offset
, nelements
, &sel
);
5436 indices
.new_vector (sel
, 2, nelements
);
5437 tree mask
= vect_gen_perm_mask_any (vectype1
, indices
);
5438 epilog_stmt
= gimple_build_assign (vec_dest
, VEC_PERM_EXPR
,
5439 new_temp
, zero_vec
, mask
);
5440 new_name
= make_ssa_name (vec_dest
, epilog_stmt
);
5441 gimple_assign_set_lhs (epilog_stmt
, new_name
);
5442 gsi_insert_before (&exit_gsi
, epilog_stmt
, GSI_SAME_STMT
);
5444 epilog_stmt
= gimple_build_assign (vec_dest
, code
, new_name
,
5446 new_temp
= make_ssa_name (vec_dest
, epilog_stmt
);
5447 gimple_assign_set_lhs (epilog_stmt
, new_temp
);
5448 gsi_insert_before (&exit_gsi
, epilog_stmt
, GSI_SAME_STMT
);
5451 /* 2.4 Extract the final scalar result. Create:
5452 s_out3 = extract_field <v_out2, bitpos> */
5454 if (dump_enabled_p ())
5455 dump_printf_loc (MSG_NOTE
, vect_location
,
5456 "extract scalar result\n");
5458 rhs
= build3 (BIT_FIELD_REF
, scalar_type
, new_temp
,
5459 bitsize
, bitsize_zero_node
);
5460 epilog_stmt
= gimple_build_assign (new_scalar_dest
, rhs
);
5461 new_temp
= make_ssa_name (new_scalar_dest
, epilog_stmt
);
5462 gimple_assign_set_lhs (epilog_stmt
, new_temp
);
5463 gsi_insert_before (&exit_gsi
, epilog_stmt
, GSI_SAME_STMT
);
5464 scalar_results
.safe_push (new_temp
);
5469 s = extract_field <v_out2, 0>
5470 for (offset = element_size;
5471 offset < vector_size;
5472 offset += element_size;)
5474 Create: s' = extract_field <v_out2, offset>
5475 Create: s = op <s, s'> // For non SLP cases
5478 if (dump_enabled_p ())
5479 dump_printf_loc (MSG_NOTE
, vect_location
,
5480 "Reduce using scalar code.\n");
5482 int vec_size_in_bits
= tree_to_uhwi (TYPE_SIZE (vectype1
));
5483 int element_bitsize
= tree_to_uhwi (bitsize
);
5484 FOR_EACH_VEC_ELT (new_phis
, i
, new_phi
)
5487 if (gimple_code (new_phi
) == GIMPLE_PHI
)
5488 vec_temp
= PHI_RESULT (new_phi
);
5490 vec_temp
= gimple_assign_lhs (new_phi
);
5491 tree rhs
= build3 (BIT_FIELD_REF
, scalar_type
, vec_temp
, bitsize
,
5493 epilog_stmt
= gimple_build_assign (new_scalar_dest
, rhs
);
5494 new_temp
= make_ssa_name (new_scalar_dest
, epilog_stmt
);
5495 gimple_assign_set_lhs (epilog_stmt
, new_temp
);
5496 gsi_insert_before (&exit_gsi
, epilog_stmt
, GSI_SAME_STMT
);
5498 /* In SLP we don't need to apply reduction operation, so we just
5499 collect s' values in SCALAR_RESULTS. */
5501 scalar_results
.safe_push (new_temp
);
5503 for (bit_offset
= element_bitsize
;
5504 bit_offset
< vec_size_in_bits
;
5505 bit_offset
+= element_bitsize
)
5507 tree bitpos
= bitsize_int (bit_offset
);
5508 tree rhs
= build3 (BIT_FIELD_REF
, scalar_type
, vec_temp
,
5511 epilog_stmt
= gimple_build_assign (new_scalar_dest
, rhs
);
5512 new_name
= make_ssa_name (new_scalar_dest
, epilog_stmt
);
5513 gimple_assign_set_lhs (epilog_stmt
, new_name
);
5514 gsi_insert_before (&exit_gsi
, epilog_stmt
, GSI_SAME_STMT
);
5518 /* In SLP we don't need to apply reduction operation, so
5519 we just collect s' values in SCALAR_RESULTS. */
5520 new_temp
= new_name
;
5521 scalar_results
.safe_push (new_name
);
5525 epilog_stmt
= gimple_build_assign (new_scalar_dest
, code
,
5526 new_name
, new_temp
);
5527 new_temp
= make_ssa_name (new_scalar_dest
, epilog_stmt
);
5528 gimple_assign_set_lhs (epilog_stmt
, new_temp
);
5529 gsi_insert_before (&exit_gsi
, epilog_stmt
, GSI_SAME_STMT
);
5534 /* The only case where we need to reduce scalar results in SLP is
5535 unrolling. If the size of SCALAR_RESULTS is greater than
5536 REDUC_GROUP_SIZE, we reduce them combining elements modulo
5537 REDUC_GROUP_SIZE. */
5540 tree res
, first_res
, new_res
;
5543 /* Reduce multiple scalar results in case of SLP unrolling. */
5544 for (j
= group_size
; scalar_results
.iterate (j
, &res
);
5547 first_res
= scalar_results
[j
% group_size
];
5548 new_stmt
= gimple_build_assign (new_scalar_dest
, code
,
5550 new_res
= make_ssa_name (new_scalar_dest
, new_stmt
);
5551 gimple_assign_set_lhs (new_stmt
, new_res
);
5552 gsi_insert_before (&exit_gsi
, new_stmt
, GSI_SAME_STMT
);
5553 scalar_results
[j
% group_size
] = new_res
;
5557 /* Not SLP - we have one scalar to keep in SCALAR_RESULTS. */
5558 scalar_results
.safe_push (new_temp
);
5561 if ((STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info
)
5562 == INTEGER_INDUC_COND_REDUCTION
)
5563 && !operand_equal_p (initial_def
, induc_val
, 0))
5565 /* Earlier we set the initial value to be a vector of induc_val
5566 values. Check the result and if it is induc_val then replace
5567 with the original initial value, unless induc_val is
5568 the same as initial_def already. */
5569 tree zcompare
= build2 (EQ_EXPR
, boolean_type_node
, new_temp
,
5572 tree tmp
= make_ssa_name (new_scalar_dest
);
5573 epilog_stmt
= gimple_build_assign (tmp
, COND_EXPR
, zcompare
,
5574 initial_def
, new_temp
);
5575 gsi_insert_before (&exit_gsi
, epilog_stmt
, GSI_SAME_STMT
);
5576 scalar_results
[0] = tmp
;
5580 vect_finalize_reduction
:
5585 /* 2.5 Adjust the final result by the initial value of the reduction
5586 variable. (When such adjustment is not needed, then
5587 'adjustment_def' is zero). For example, if code is PLUS we create:
5588 new_temp = loop_exit_def + adjustment_def */
      gcc_assert (!slp_reduc);
      if (nested_in_vect_loop)
        {
          new_phi = new_phis[0];
          gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) == VECTOR_TYPE);
          expr = build2 (code, vectype, PHI_RESULT (new_phi), adjustment_def);
          new_dest = vect_create_destination_var (scalar_dest, vectype);
        }
      else
        {
          new_temp = scalar_results[0];
          gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) != VECTOR_TYPE);
          expr = build2 (code, scalar_type, new_temp, adjustment_def);
          new_dest = vect_create_destination_var (scalar_dest, scalar_type);
        }

      epilog_stmt = gimple_build_assign (new_dest, expr);
      new_temp = make_ssa_name (new_dest, epilog_stmt);
      gimple_assign_set_lhs (epilog_stmt, new_temp);
      gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
      if (nested_in_vect_loop)
        {
          stmt_vec_info epilog_stmt_info = loop_vinfo->add_stmt (epilog_stmt);
          STMT_VINFO_RELATED_STMT (epilog_stmt_info)
            = STMT_VINFO_RELATED_STMT (loop_vinfo->lookup_stmt (new_phi));

          if (!double_reduc)
            scalar_results.quick_push (new_temp);
          else
            scalar_results[0] = new_temp;
        }
      else
        scalar_results[0] = new_temp;

      new_phis[0] = epilog_stmt;
  /* 2.6  Handle the loop-exit phis.  Replace the uses of scalar loop-exit
          phis with new adjusted scalar results, i.e., replace use <s_out0>
          with use <s_out4>.

     Transform:
        loop_exit:
          s_out0 = phi <s_loop>                 # (scalar) EXIT_PHI
          v_out1 = phi <VECT_DEF>               # NEW_EXIT_PHI
          v_out2 = reduce <v_out1>
          s_out3 = extract_field <v_out2, 0>
          s_out4 = adjust_result <s_out3>
          use <s_out0>
          use <s_out0>

     into:

        loop_exit:
          s_out0 = phi <s_loop>                 # (scalar) EXIT_PHI
          v_out1 = phi <VECT_DEF>               # NEW_EXIT_PHI
          v_out2 = reduce <v_out1>
          s_out3 = extract_field <v_out2, 0>
          s_out4 = adjust_result <s_out3>
          use <s_out4>
          use <s_out4>  */

  /* In SLP reduction chain we reduce vector results into one vector if
     necessary, hence we set here REDUC_GROUP_SIZE to 1.  SCALAR_DEST is the
     LHS of the last stmt in the reduction chain, since we are looking for
     the loop exit phi node.  */
  if (REDUC_GROUP_FIRST_ELEMENT (stmt_info))
    {
      stmt_vec_info dest_stmt_info
        = vect_orig_stmt (SLP_TREE_SCALAR_STMTS (slp_node)[group_size - 1]);
      scalar_dest = gimple_assign_lhs (dest_stmt_info->stmt);
      group_size = 1;
    }

  /* In SLP we may have several statements in NEW_PHIS and REDUCTION_PHIS (in
     case that REDUC_GROUP_SIZE is greater than vectorization factor).
     Therefore, we need to match SCALAR_RESULTS with corresponding statements.
     The first (REDUC_GROUP_SIZE / number of new vector stmts) scalar results
     correspond to the first vector stmt, etc.
     (RATIO is equal to (REDUC_GROUP_SIZE / number of new vector stmts)).  */
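  /* A hedged worked example (illustrative only): with REDUC_GROUP_SIZE == 8
     and two new vector stmts, RATIO is 8 / 2 == 4, so scalar results 0-3 are
     matched with the first vector stmt and results 4-7 with the second.  */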
5673 if (group_size
> new_phis
.length ())
5675 ratio
= group_size
/ new_phis
.length ();
5676 gcc_assert (!(group_size
% new_phis
.length ()));
5681 stmt_vec_info epilog_stmt_info
= NULL
;
5682 for (k
= 0; k
< group_size
; k
++)
5686 epilog_stmt_info
= loop_vinfo
->lookup_stmt (new_phis
[k
/ ratio
]);
5687 reduction_phi_info
= reduction_phis
[k
/ ratio
];
5689 inner_phi
= inner_phis
[k
/ ratio
];
5694 stmt_vec_info scalar_stmt_info
= SLP_TREE_SCALAR_STMTS (slp_node
)[k
];
5696 orig_stmt_info
= STMT_VINFO_RELATED_STMT (scalar_stmt_info
);
5697 /* SLP statements can't participate in patterns. */
5698 gcc_assert (!orig_stmt_info
);
5699 scalar_dest
= gimple_assign_lhs (scalar_stmt_info
->stmt
);
5703 /* Find the loop-closed-use at the loop exit of the original scalar
5704 result. (The reduction result is expected to have two immediate uses -
5705 one at the latch block, and one at the loop exit). */
5706 FOR_EACH_IMM_USE_FAST (use_p
, imm_iter
, scalar_dest
)
5707 if (!flow_bb_inside_loop_p (loop
, gimple_bb (USE_STMT (use_p
)))
5708 && !is_gimple_debug (USE_STMT (use_p
)))
5709 phis
.safe_push (USE_STMT (use_p
));
      /* While we expect to have found an exit_phi because of loop-closed-ssa
         form we can end up without one if the scalar cycle is dead.  */

      FOR_EACH_VEC_ELT (phis, i, exit_phi)
        {
          stmt_vec_info exit_phi_vinfo
            = loop_vinfo->lookup_stmt (exit_phi);

          if (double_reduc)
            STMT_VINFO_VEC_STMT (exit_phi_vinfo) = inner_phi;
          else
            STMT_VINFO_VEC_STMT (exit_phi_vinfo) = epilog_stmt_info;
          if (!double_reduc
              || STMT_VINFO_DEF_TYPE (exit_phi_vinfo)
                   != vect_double_reduction_def)
            continue;

          /* Handle double reduction:

             stmt1: s1 = phi <s0, s2>  - double reduction phi (outer loop)
             stmt2: s3 = phi <s1, s4>  - (regular) reduc phi (inner loop)
             stmt3: s4 = use (s3)      - (regular) reduc stmt (inner loop)
             stmt4: s2 = phi <s4>      - double reduction stmt (outer loop)

             At that point the regular reduction (stmt2 and stmt3) is
             already vectorized, as well as the exit phi node, stmt4.
             Here we vectorize the phi node of double reduction, stmt1, and
             update all relevant statements.  */
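          /* A hedged source-level illustration of such a double reduction
             (not taken from the sources):

               int s = 0;
               for (int i = 0; i < n; i++)      // outer loop: stmt1 / stmt4
                 for (int j = 0; j < m; j++)    // inner loop: stmt2 / stmt3
                   s += a[i][j];
          */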
5743 /* Go through all the uses of s2 to find double reduction phi
5744 node, i.e., stmt1 above. */
5745 orig_name
= PHI_RESULT (exit_phi
);
5746 FOR_EACH_IMM_USE_STMT (use_stmt
, imm_iter
, orig_name
)
5748 stmt_vec_info use_stmt_vinfo
;
5749 tree vect_phi_init
, preheader_arg
, vect_phi_res
;
5750 basic_block bb
= gimple_bb (use_stmt
);
5752 /* Check that USE_STMT is really double reduction phi
5754 if (gimple_code (use_stmt
) != GIMPLE_PHI
5755 || gimple_phi_num_args (use_stmt
) != 2
5756 || bb
->loop_father
!= outer_loop
)
5758 use_stmt_vinfo
= loop_vinfo
->lookup_stmt (use_stmt
);
5760 || STMT_VINFO_DEF_TYPE (use_stmt_vinfo
)
5761 != vect_double_reduction_def
)
              /* Create vector phi node for double reduction:
                 vs1 = phi <vs0, vs2>
                 vs1 was created previously in this function by a call to
                 vect_get_vec_def_for_operand and is stored in
                 vec_initial_def;
                 vs2 is defined by INNER_PHI, the vectorized EXIT_PHI;
                 vs0 is created here.  */
5772 /* Create vector phi node. */
5773 vect_phi
= create_phi_node (vec_initial_def
, bb
);
5774 loop_vec_info_for_loop (outer_loop
)->add_stmt (vect_phi
);
5776 /* Create vs0 - initial def of the double reduction phi. */
5777 preheader_arg
= PHI_ARG_DEF_FROM_EDGE (use_stmt
,
5778 loop_preheader_edge (outer_loop
));
5779 vect_phi_init
= get_initial_def_for_reduction
5780 (stmt_info
, preheader_arg
, NULL
);
5782 /* Update phi node arguments with vs0 and vs2. */
5783 add_phi_arg (vect_phi
, vect_phi_init
,
5784 loop_preheader_edge (outer_loop
),
5786 add_phi_arg (vect_phi
, PHI_RESULT (inner_phi
->stmt
),
5787 loop_latch_edge (outer_loop
), UNKNOWN_LOCATION
);
5788 if (dump_enabled_p ())
5789 dump_printf_loc (MSG_NOTE
, vect_location
,
5790 "created double reduction phi node: %G",
5793 vect_phi_res
= PHI_RESULT (vect_phi
);
              /* Replace the use, i.e., set the correct vs1 in the regular
                 reduction phi node.  FORNOW, NCOPIES is always 1, so the
                 loop is redundant.  */
5798 stmt_vec_info use_info
= reduction_phi_info
;
5799 for (j
= 0; j
< ncopies
; j
++)
5801 edge pr_edge
= loop_preheader_edge (loop
);
5802 SET_PHI_ARG_DEF (as_a
<gphi
*> (use_info
->stmt
),
5803 pr_edge
->dest_idx
, vect_phi_res
);
5804 use_info
= STMT_VINFO_RELATED_STMT (use_info
);
5811 if (nested_in_vect_loop
)
      /* Find the loop-closed-use at the loop exit of the original scalar
         result.  (The reduction result is expected to have two immediate uses,
         one at the latch block, and one at the loop exit).  For double
         reductions we are looking for exit phis of the outer loop.  */
5824 FOR_EACH_IMM_USE_FAST (use_p
, imm_iter
, scalar_dest
)
5826 if (!flow_bb_inside_loop_p (loop
, gimple_bb (USE_STMT (use_p
))))
5828 if (!is_gimple_debug (USE_STMT (use_p
)))
5829 phis
.safe_push (USE_STMT (use_p
));
5833 if (double_reduc
&& gimple_code (USE_STMT (use_p
)) == GIMPLE_PHI
)
5835 tree phi_res
= PHI_RESULT (USE_STMT (use_p
));
5837 FOR_EACH_IMM_USE_FAST (phi_use_p
, phi_imm_iter
, phi_res
)
5839 if (!flow_bb_inside_loop_p (loop
,
5840 gimple_bb (USE_STMT (phi_use_p
)))
5841 && !is_gimple_debug (USE_STMT (phi_use_p
)))
5842 phis
.safe_push (USE_STMT (phi_use_p
));
5848 FOR_EACH_VEC_ELT (phis
, i
, exit_phi
)
5850 /* Replace the uses: */
5851 orig_name
= PHI_RESULT (exit_phi
);
5852 scalar_result
= scalar_results
[k
];
5853 FOR_EACH_IMM_USE_STMT (use_stmt
, imm_iter
, orig_name
)
5854 FOR_EACH_IMM_USE_ON_STMT (use_p
, imm_iter
)
5855 SET_USE (use_p
, scalar_result
);
/* Return a vector of type VECTYPE that is equal to the vector select
   operation "MASK ? VEC : IDENTITY".  Insert the select statements
   before GSI.  */

static tree
merge_with_identity (gimple_stmt_iterator *gsi, tree mask, tree vectype,
                     tree vec, tree identity)
{
  tree cond = make_temp_ssa_name (vectype, NULL, "cond");
  gimple *new_stmt = gimple_build_assign (cond, VEC_COND_EXPR,
                                          mask, vec, identity);
  gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
  return cond;
}
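/* A hedged illustration of what the helper above produces (not from the
   sources): for a masked reduction each inactive lane is replaced by the
   identity value, e.g. elementwise in plain C:

     for (int i = 0; i < nunits; i++)
       cond[i] = mask[i] ? vec[i] : identity[i];

   so that reducing COND gives the same result as reducing only the active
   lanes of VEC.  */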
/* Successively apply CODE to each element of VECTOR_RHS, in left-to-right
   order, starting with LHS.  Insert the extraction statements before GSI and
   associate the new scalar SSA names with variable SCALAR_DEST.
   Return the SSA name for the result.  */

static tree
vect_expand_fold_left (gimple_stmt_iterator *gsi, tree scalar_dest,
                       tree_code code, tree lhs, tree vector_rhs)
{
  tree vectype = TREE_TYPE (vector_rhs);
  tree scalar_type = TREE_TYPE (vectype);
  tree bitsize = TYPE_SIZE (scalar_type);
  unsigned HOST_WIDE_INT vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype));
  unsigned HOST_WIDE_INT element_bitsize = tree_to_uhwi (bitsize);

  for (unsigned HOST_WIDE_INT bit_offset = 0;
       bit_offset < vec_size_in_bits;
       bit_offset += element_bitsize)
    {
      tree bitpos = bitsize_int (bit_offset);
      tree rhs = build3 (BIT_FIELD_REF, scalar_type, vector_rhs,
                         bitsize, bitpos);

      gassign *stmt = gimple_build_assign (scalar_dest, rhs);
      rhs = make_ssa_name (scalar_dest, stmt);
      gimple_assign_set_lhs (stmt, rhs);
      gsi_insert_before (gsi, stmt, GSI_SAME_STMT);

      stmt = gimple_build_assign (scalar_dest, code, lhs, rhs);
      tree new_name = make_ssa_name (scalar_dest, stmt);
      gimple_assign_set_lhs (stmt, new_name);
      gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
      lhs = new_name;
    }
  return lhs;
}
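/* A hedged sketch of the expansion above (illustrative, not generated
   verbatim): for CODE == PLUS_EXPR and a 4-element vector v it emits the
   equivalent of

     s0 = lhs + v[0];
     s1 = s0  + v[1];
     s2 = s1  + v[2];
     s3 = s2  + v[3];

   where each v[i] is really a BIT_FIELD_REF extraction, preserving the
   strict left-to-right evaluation order required for in-order reductions.  */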
/* Get a masked internal function equivalent to REDUC_FN.  VECTYPE_IN is the
   type of the vector input.  */

static internal_fn
get_masked_reduction_fn (internal_fn reduc_fn, tree vectype_in)
{
  internal_fn mask_reduc_fn;

  switch (reduc_fn)
    {
    case IFN_FOLD_LEFT_PLUS:
      mask_reduc_fn = IFN_MASK_FOLD_LEFT_PLUS;
      break;

    default:
      return IFN_LAST;
    }

  if (direct_internal_fn_supported_p (mask_reduc_fn, vectype_in,
                                      OPTIMIZE_FOR_SPEED))
    return mask_reduc_fn;
  return IFN_LAST;
}
/* Perform an in-order reduction (FOLD_LEFT_REDUCTION).  STMT_INFO is the
   statement that sets the live-out value.  REDUC_DEF_STMT is the phi
   statement.  CODE is the operation performed by STMT_INFO and OPS are
   its scalar operands.  REDUC_INDEX is the index of the operand in
   OPS that is set by REDUC_DEF_STMT.  REDUC_FN is the function that
   implements in-order reduction, or IFN_LAST if we should open-code it.
   VECTYPE_IN is the type of the vector input.  MASKS specifies the masks
   that should be used to control the operation in a fully-masked loop.  */
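/* A hedged source-level example of the kind of loop the function below
   handles (not taken from the sources): a floating-point accumulation that
   must not be reassociated,

     double s = 0.0;
     for (int i = 0; i < n; i++)
       s += a[i];

   is vectorized by folding each vector of loads into the accumulator in
   element order, either via a target reduction function when available or
   via the open-coded expansion in vect_expand_fold_left above.  */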
5948 vectorize_fold_left_reduction (stmt_vec_info stmt_info
,
5949 gimple_stmt_iterator
*gsi
,
5950 stmt_vec_info
*vec_stmt
, slp_tree slp_node
,
5951 gimple
*reduc_def_stmt
,
5952 tree_code code
, internal_fn reduc_fn
,
5953 tree ops
[3], tree vectype_in
,
5954 int reduc_index
, vec_loop_masks
*masks
)
5956 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
5957 struct loop
*loop
= LOOP_VINFO_LOOP (loop_vinfo
);
5958 tree vectype_out
= STMT_VINFO_VECTYPE (stmt_info
);
5959 stmt_vec_info new_stmt_info
= NULL
;
5960 internal_fn mask_reduc_fn
= get_masked_reduction_fn (reduc_fn
, vectype_in
);
5966 ncopies
= vect_get_num_copies (loop_vinfo
, vectype_in
);
5968 gcc_assert (!nested_in_vect_loop_p (loop
, stmt_info
));
5969 gcc_assert (ncopies
== 1);
5970 gcc_assert (TREE_CODE_LENGTH (code
) == binary_op
);
5971 gcc_assert (reduc_index
== (code
== MINUS_EXPR
? 0 : 1));
5972 gcc_assert (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info
)
5973 == FOLD_LEFT_REDUCTION
);
5976 gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (vectype_out
),
5977 TYPE_VECTOR_SUBPARTS (vectype_in
)));
5979 tree op0
= ops
[1 - reduc_index
];
5982 stmt_vec_info scalar_dest_def_info
;
5983 auto_vec
<tree
> vec_oprnds0
;
5986 auto_vec
<vec
<tree
> > vec_defs (2);
5987 auto_vec
<tree
> sops(2);
5988 sops
.quick_push (ops
[0]);
5989 sops
.quick_push (ops
[1]);
5990 vect_get_slp_defs (sops
, slp_node
, &vec_defs
);
5991 vec_oprnds0
.safe_splice (vec_defs
[1 - reduc_index
]);
5992 vec_defs
[0].release ();
5993 vec_defs
[1].release ();
5994 group_size
= SLP_TREE_SCALAR_STMTS (slp_node
).length ();
5995 scalar_dest_def_info
= SLP_TREE_SCALAR_STMTS (slp_node
)[group_size
- 1];
5999 tree loop_vec_def0
= vect_get_vec_def_for_operand (op0
, stmt_info
);
6000 vec_oprnds0
.create (1);
6001 vec_oprnds0
.quick_push (loop_vec_def0
);
6002 scalar_dest_def_info
= stmt_info
;
6005 tree scalar_dest
= gimple_assign_lhs (scalar_dest_def_info
->stmt
);
6006 tree scalar_type
= TREE_TYPE (scalar_dest
);
6007 tree reduc_var
= gimple_phi_result (reduc_def_stmt
);
6009 int vec_num
= vec_oprnds0
.length ();
6010 gcc_assert (vec_num
== 1 || slp_node
);
6011 tree vec_elem_type
= TREE_TYPE (vectype_out
);
6012 gcc_checking_assert (useless_type_conversion_p (scalar_type
, vec_elem_type
));
6014 tree vector_identity
= NULL_TREE
;
6015 if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo
))
6016 vector_identity
= build_zero_cst (vectype_out
);
6018 tree scalar_dest_var
= vect_create_destination_var (scalar_dest
, NULL
);
6021 FOR_EACH_VEC_ELT (vec_oprnds0
, i
, def0
)
6024 tree mask
= NULL_TREE
;
6025 if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo
))
6026 mask
= vect_get_loop_mask (gsi
, masks
, vec_num
, vectype_in
, i
);
6028 /* Handle MINUS by adding the negative. */
6029 if (reduc_fn
!= IFN_LAST
&& code
== MINUS_EXPR
)
6031 tree negated
= make_ssa_name (vectype_out
);
6032 new_stmt
= gimple_build_assign (negated
, NEGATE_EXPR
, def0
);
6033 gsi_insert_before (gsi
, new_stmt
, GSI_SAME_STMT
);
6037 if (mask
&& mask_reduc_fn
== IFN_LAST
)
6038 def0
= merge_with_identity (gsi
, mask
, vectype_out
, def0
,
6041 /* On the first iteration the input is simply the scalar phi
6042 result, and for subsequent iterations it is the output of
6043 the preceding operation. */
6044 if (reduc_fn
!= IFN_LAST
|| (mask
&& mask_reduc_fn
!= IFN_LAST
))
6046 if (mask
&& mask_reduc_fn
!= IFN_LAST
)
6047 new_stmt
= gimple_build_call_internal (mask_reduc_fn
, 3, reduc_var
,
6050 new_stmt
= gimple_build_call_internal (reduc_fn
, 2, reduc_var
,
6052 /* For chained SLP reductions the output of the previous reduction
6053 operation serves as the input of the next. For the final statement
6054 the output cannot be a temporary - we reuse the original
6055 scalar destination of the last statement. */
6056 if (i
!= vec_num
- 1)
6058 gimple_set_lhs (new_stmt
, scalar_dest_var
);
6059 reduc_var
= make_ssa_name (scalar_dest_var
, new_stmt
);
6060 gimple_set_lhs (new_stmt
, reduc_var
);
6065 reduc_var
= vect_expand_fold_left (gsi
, scalar_dest_var
, code
,
6067 new_stmt
= SSA_NAME_DEF_STMT (reduc_var
);
6068 /* Remove the statement, so that we can use the same code paths
6069 as for statements that we've just created. */
6070 gimple_stmt_iterator tmp_gsi
= gsi_for_stmt (new_stmt
);
6071 gsi_remove (&tmp_gsi
, true);
6074 if (i
== vec_num
- 1)
6076 gimple_set_lhs (new_stmt
, scalar_dest
);
6077 new_stmt_info
= vect_finish_replace_stmt (scalar_dest_def_info
,
6081 new_stmt_info
= vect_finish_stmt_generation (scalar_dest_def_info
,
6085 SLP_TREE_VEC_STMTS (slp_node
).quick_push (new_stmt_info
);
6089 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt_info
;
/* Function is_nonwrapping_integer_induction.

   Check if STMT_VINFO (which is part of loop LOOP) both increments and
   does not cause overflow.  */

static bool
is_nonwrapping_integer_induction (stmt_vec_info stmt_vinfo, struct loop *loop)
{
  gphi *phi = as_a <gphi *> (stmt_vinfo->stmt);
  tree base = STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (stmt_vinfo);
  tree step = STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo);
  tree lhs_type = TREE_TYPE (gimple_phi_result (phi));
  widest_int ni, max_loop_value, lhs_max;
  wi::overflow_type overflow = wi::OVF_NONE;

  /* Make sure the loop is integer based.  */
  if (TREE_CODE (base) != INTEGER_CST
      || TREE_CODE (step) != INTEGER_CST)
    return false;

  /* Check that the max size of the loop will not wrap.  */

  if (TYPE_OVERFLOW_UNDEFINED (lhs_type))
    return true;

  if (! max_stmt_executions (loop, &ni))
    return false;

  max_loop_value = wi::mul (wi::to_widest (step), ni, TYPE_SIGN (lhs_type),
                            &overflow);
  if (overflow)
    return false;

  max_loop_value = wi::add (wi::to_widest (base), max_loop_value,
                            TYPE_SIGN (lhs_type), &overflow);
  if (overflow)
    return false;

  return (wi::min_precision (max_loop_value, TYPE_SIGN (lhs_type))
          <= TYPE_PRECISION (lhs_type));
}
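/* A hedged worked example of the check above (illustrative only): for an
   8-bit induction with base 10, step 3 and at most 20 iterations, the
   largest value reached is 10 + 3 * 20 = 70, which needs fewer than 8 bits
   of precision, so the induction provably does not wrap.  */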
/* Check if masking can be supported by inserting a conditional expression.
   CODE is the code for the operation.  COND_FN is the conditional internal
   function, if it exists.  VECTYPE_IN is the type of the vector input.  */

static bool
use_mask_by_cond_expr_p (enum tree_code code, internal_fn cond_fn,
                         tree vectype_in)
{
  if (cond_fn != IFN_LAST
      && direct_internal_fn_supported_p (cond_fn, vectype_in,
                                         OPTIMIZE_FOR_SPEED))
    return false;
/* Insert a conditional expression to enable masked vectorization.  CODE is the
   code for the operation.  VOP is the array of operands.  MASK is the loop
   mask.  GSI is a statement iterator used to place the new conditional
   expression.  */

static void
build_vect_cond_expr (enum tree_code code, tree vop[3], tree mask,
                      gimple_stmt_iterator *gsi)
{
  switch (code)
    {
    case DOT_PROD_EXPR:
      {
        tree vectype = TREE_TYPE (vop[1]);
        tree zero = build_zero_cst (vectype);
        tree masked_op1 = make_temp_ssa_name (vectype, NULL, "masked_op1");
        gassign *select = gimple_build_assign (masked_op1, VEC_COND_EXPR,
                                               mask, vop[1], zero);
        gsi_insert_before (gsi, select, GSI_SAME_STMT);
        vop[1] = masked_op1;
        break;
      }

    case SAD_EXPR:
      {
        tree vectype = TREE_TYPE (vop[1]);
        tree masked_op1 = make_temp_ssa_name (vectype, NULL, "masked_op1");
        gassign *select = gimple_build_assign (masked_op1, VEC_COND_EXPR,
                                               mask, vop[1], vop[0]);
        gsi_insert_before (gsi, select, GSI_SAME_STMT);
        vop[1] = masked_op1;
        break;
      }

    default:
      gcc_unreachable ();
    }
}
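/* A hedged illustration of the trick above (not generated verbatim): the
   selected operand makes inactive lanes contribute nothing, roughly

     masked_op1 = mask ? op1 : op0;      // inactive lanes: |op0 - op0| == 0
     acc        = SAD (op0, masked_op1, acc);

   while the other case selects zero, so the inactive products of the
   dot-product vanish.  */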
/* Function vectorizable_reduction.

   Check if STMT_INFO performs a reduction operation that can be vectorized.
   If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at GSI.
   Return true if STMT_INFO is vectorizable in this way.

   This function also handles reduction idioms (patterns) that have been
   recognized in advance during vect_pattern_recog.  In this case, STMT_INFO
   may be of this form:
     X = pattern_expr (arg0, arg1, ..., X)
   and its STMT_VINFO_RELATED_STMT points to the last stmt in the original
   sequence that had been detected and replaced by the pattern-stmt
   (STMT_INFO).

   This function also handles reduction of condition expressions, for example:
     for (int i = 0; i < N; i++)
       if (a[i] < value)
         last = i;
   This is handled by vectorising the loop and creating an additional vector
   containing the loop indexes for which "a[i] < value" was true.  In the
   function epilogue this is reduced to a single max value and then used to
   index into the vector of results.

   In some cases of reduction patterns, the type of the reduction variable X is
   different than the type of the other arguments of STMT_INFO.
   In such cases, the vectype that is used when transforming STMT_INFO into
   a vector stmt is different than the vectype that is used to determine the
   vectorization factor, because it consists of a different number of elements
   than the actual number of elements that are being operated upon in parallel.

   For example, consider an accumulation of shorts into an int accumulator.
   On some targets it's possible to vectorize this pattern operating on 8
   shorts at a time (hence, the vectype for purposes of determining the
   vectorization factor should be V8HI); on the other hand, the vectype that
   is used to create the vector form is actually V4SI (the type of the result).

   Upon entry to this function, STMT_VINFO_VECTYPE records the vectype that
   indicates what is the actual level of parallelism (V8HI in the example), so
   that the right vectorization factor would be derived.  This vectype
   corresponds to the type of arguments to the reduction stmt, and should *NOT*
   be used to create the vectorized stmt.  The right vectype for the vectorized
   stmt is obtained from the type of the result X:
     get_vectype_for_scalar_type (TREE_TYPE (X))

   This means that, contrary to "regular" reductions (or "regular" stmts in
   general), the following equation:
     STMT_VINFO_VECTYPE == get_vectype_for_scalar_type (TREE_TYPE (X))
   does *NOT* necessarily hold for reduction patterns.  */
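/* A hedged example of the vectype distinction described above (not from the
   sources): accumulating shorts into an int,

     short a[N]; int sum = 0;
     for (int i = 0; i < N; i++)
       sum += a[i];          // may be recognized as a widen_sum pattern

   uses V8HI as STMT_VINFO_VECTYPE to derive the vectorization factor, while
   the vectorized statement handled by the function below produces a V4SI
   accumulator obtained from get_vectype_for_scalar_type (TREE_TYPE (sum)).  */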
6248 vectorizable_reduction (stmt_vec_info stmt_info
, gimple_stmt_iterator
*gsi
,
6249 stmt_vec_info
*vec_stmt
, slp_tree slp_node
,
6250 slp_instance slp_node_instance
,
6251 stmt_vector_for_cost
*cost_vec
)
6255 tree vectype_out
= STMT_VINFO_VECTYPE (stmt_info
);
6256 tree vectype_in
= NULL_TREE
;
6257 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
6258 struct loop
*loop
= LOOP_VINFO_LOOP (loop_vinfo
);
6259 enum tree_code code
, orig_code
;
6260 internal_fn reduc_fn
;
6261 machine_mode vec_mode
;
6264 tree new_temp
= NULL_TREE
;
6265 enum vect_def_type dt
, cond_reduc_dt
= vect_unknown_def_type
;
6266 stmt_vec_info cond_stmt_vinfo
= NULL
;
6267 enum tree_code cond_reduc_op_code
= ERROR_MARK
;
6273 stmt_vec_info prev_stmt_info
, prev_phi_info
;
6274 bool single_defuse_cycle
= false;
6275 stmt_vec_info new_stmt_info
= NULL
;
6278 enum vect_def_type dts
[3];
6279 bool nested_cycle
= false, found_nested_cycle_def
= false;
6280 bool double_reduc
= false;
6282 struct loop
* def_stmt_loop
;
6284 auto_vec
<tree
> vec_oprnds0
;
6285 auto_vec
<tree
> vec_oprnds1
;
6286 auto_vec
<tree
> vec_oprnds2
;
6287 auto_vec
<tree
> vect_defs
;
6288 auto_vec
<stmt_vec_info
> phis
;
6291 tree cr_index_scalar_type
= NULL_TREE
, cr_index_vector_type
= NULL_TREE
;
6292 tree cond_reduc_val
= NULL_TREE
;
6294 /* Make sure it was already recognized as a reduction computation. */
6295 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_reduction_def
6296 && STMT_VINFO_DEF_TYPE (stmt_info
) != vect_nested_cycle
)
6299 if (nested_in_vect_loop_p (loop
, stmt_info
))
6302 nested_cycle
= true;
6305 if (REDUC_GROUP_FIRST_ELEMENT (stmt_info
))
6306 gcc_assert (slp_node
6307 && REDUC_GROUP_FIRST_ELEMENT (stmt_info
) == stmt_info
);
6309 if (gphi
*phi
= dyn_cast
<gphi
*> (stmt_info
->stmt
))
6311 tree phi_result
= gimple_phi_result (phi
);
6312 /* Analysis is fully done on the reduction stmt invocation. */
6316 slp_node_instance
->reduc_phis
= slp_node
;
6318 STMT_VINFO_TYPE (stmt_info
) = reduc_vec_info_type
;
6322 if (STMT_VINFO_REDUC_TYPE (stmt_info
) == FOLD_LEFT_REDUCTION
)
6323 /* Leave the scalar phi in place. Note that checking
6324 STMT_VINFO_VEC_REDUCTION_TYPE (as below) only works
6325 for reductions involving a single statement. */
6328 stmt_vec_info reduc_stmt_info
= STMT_VINFO_REDUC_DEF (stmt_info
);
6329 reduc_stmt_info
= vect_stmt_to_vectorize (reduc_stmt_info
);
6331 if (STMT_VINFO_VEC_REDUCTION_TYPE (reduc_stmt_info
)
6332 == EXTRACT_LAST_REDUCTION
)
6333 /* Leave the scalar phi in place. */
6336 gassign
*reduc_stmt
= as_a
<gassign
*> (reduc_stmt_info
->stmt
);
6337 code
= gimple_assign_rhs_code (reduc_stmt
);
6338 for (unsigned k
= 1; k
< gimple_num_ops (reduc_stmt
); ++k
)
6340 tree op
= gimple_op (reduc_stmt
, k
);
6341 if (op
== phi_result
)
6343 if (k
== 1 && code
== COND_EXPR
)
6345 bool is_simple_use
= vect_is_simple_use (op
, loop_vinfo
, &dt
);
6346 gcc_assert (is_simple_use
);
6347 if (dt
== vect_constant_def
|| dt
== vect_external_def
)
6350 || (GET_MODE_SIZE (SCALAR_TYPE_MODE (TREE_TYPE (vectype_in
)))
6351 < GET_MODE_SIZE (SCALAR_TYPE_MODE (TREE_TYPE (op
)))))
6352 vectype_in
= get_vectype_for_scalar_type (TREE_TYPE (op
));
6355 /* For a nested cycle we might end up with an operation like
6356 phi_result * phi_result. */
6358 vectype_in
= STMT_VINFO_VECTYPE (stmt_info
);
6359 gcc_assert (vectype_in
);
6364 ncopies
= vect_get_num_copies (loop_vinfo
, vectype_in
);
6366 stmt_vec_info use_stmt_info
;
6368 && STMT_VINFO_RELEVANT (reduc_stmt_info
) <= vect_used_only_live
6369 && (use_stmt_info
= loop_vinfo
->lookup_single_use (phi_result
))
6370 && vect_stmt_to_vectorize (use_stmt_info
) == reduc_stmt_info
)
6371 single_defuse_cycle
= true;
6373 /* Create the destination vector */
6374 scalar_dest
= gimple_assign_lhs (reduc_stmt
);
6375 vec_dest
= vect_create_destination_var (scalar_dest
, vectype_out
);
6378 /* The size vect_schedule_slp_instance computes is off for us. */
6379 vec_num
= vect_get_num_vectors
6380 (LOOP_VINFO_VECT_FACTOR (loop_vinfo
)
6381 * SLP_TREE_SCALAR_STMTS (slp_node
).length (),
6386 /* Generate the reduction PHIs upfront. */
6387 prev_phi_info
= NULL
;
6388 for (j
= 0; j
< ncopies
; j
++)
6390 if (j
== 0 || !single_defuse_cycle
)
6392 for (i
= 0; i
< vec_num
; i
++)
6394 /* Create the reduction-phi that defines the reduction
6396 gimple
*new_phi
= create_phi_node (vec_dest
, loop
->header
);
6397 stmt_vec_info new_phi_info
= loop_vinfo
->add_stmt (new_phi
);
6400 SLP_TREE_VEC_STMTS (slp_node
).quick_push (new_phi_info
);
6404 STMT_VINFO_VEC_STMT (stmt_info
)
6405 = *vec_stmt
= new_phi_info
;
6407 STMT_VINFO_RELATED_STMT (prev_phi_info
) = new_phi_info
;
6408 prev_phi_info
= new_phi_info
;
6417 /* 1. Is vectorizable reduction? */
6418 /* Not supportable if the reduction variable is used in the loop, unless
6419 it's a reduction chain. */
6420 if (STMT_VINFO_RELEVANT (stmt_info
) > vect_used_in_outer
6421 && !REDUC_GROUP_FIRST_ELEMENT (stmt_info
))
6424 /* Reductions that are not used even in an enclosing outer-loop,
6425 are expected to be "live" (used out of the loop). */
6426 if (STMT_VINFO_RELEVANT (stmt_info
) == vect_unused_in_scope
6427 && !STMT_VINFO_LIVE_P (stmt_info
))
6430 /* 2. Has this been recognized as a reduction pattern?
6432 Check if STMT represents a pattern that has been recognized
6433 in earlier analysis stages. For stmts that represent a pattern,
6434 the STMT_VINFO_RELATED_STMT field records the last stmt in
6435 the original sequence that constitutes the pattern. */
6437 stmt_vec_info orig_stmt_info
= STMT_VINFO_RELATED_STMT (stmt_info
);
6440 gcc_assert (STMT_VINFO_IN_PATTERN_P (orig_stmt_info
));
6441 gcc_assert (!STMT_VINFO_IN_PATTERN_P (stmt_info
));
6444 /* 3. Check the operands of the operation. The first operands are defined
6445 inside the loop body. The last operand is the reduction variable,
6446 which is defined by the loop-header-phi. */
6448 gassign
*stmt
= as_a
<gassign
*> (stmt_info
->stmt
);
6451 switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt
)))
6453 case GIMPLE_BINARY_RHS
:
6454 code
= gimple_assign_rhs_code (stmt
);
6455 op_type
= TREE_CODE_LENGTH (code
);
6456 gcc_assert (op_type
== binary_op
);
6457 ops
[0] = gimple_assign_rhs1 (stmt
);
6458 ops
[1] = gimple_assign_rhs2 (stmt
);
6461 case GIMPLE_TERNARY_RHS
:
6462 code
= gimple_assign_rhs_code (stmt
);
6463 op_type
= TREE_CODE_LENGTH (code
);
6464 gcc_assert (op_type
== ternary_op
);
6465 ops
[0] = gimple_assign_rhs1 (stmt
);
6466 ops
[1] = gimple_assign_rhs2 (stmt
);
6467 ops
[2] = gimple_assign_rhs3 (stmt
);
6470 case GIMPLE_UNARY_RHS
:
6477 if (code
== COND_EXPR
&& slp_node
)
6480 scalar_dest
= gimple_assign_lhs (stmt
);
6481 scalar_type
= TREE_TYPE (scalar_dest
);
6482 if (!POINTER_TYPE_P (scalar_type
) && !INTEGRAL_TYPE_P (scalar_type
)
6483 && !SCALAR_FLOAT_TYPE_P (scalar_type
))
6486 /* Do not try to vectorize bit-precision reductions. */
6487 if (!type_has_mode_precision_p (scalar_type
))
6490 /* All uses but the last are expected to be defined in the loop.
6491 The last use is the reduction variable. In case of nested cycle this
6492 assumption is not true: we use reduc_index to record the index of the
6493 reduction variable. */
6494 stmt_vec_info reduc_def_info
;
6496 reduc_def_info
= STMT_VINFO_REDUC_DEF (orig_stmt_info
);
6498 reduc_def_info
= STMT_VINFO_REDUC_DEF (stmt_info
);
6499 gcc_assert (reduc_def_info
);
6500 gphi
*reduc_def_phi
= as_a
<gphi
*> (reduc_def_info
->stmt
);
6501 tree reduc_def
= PHI_RESULT (reduc_def_phi
);
6502 int reduc_index
= -1;
6503 for (i
= 0; i
< op_type
; i
++)
6505 /* The condition of COND_EXPR is checked in vectorizable_condition(). */
6506 if (i
== 0 && code
== COND_EXPR
)
6509 stmt_vec_info def_stmt_info
;
6510 is_simple_use
= vect_is_simple_use (ops
[i
], loop_vinfo
, &dts
[i
], &tem
,
6513 gcc_assert (is_simple_use
);
6514 if (dt
== vect_reduction_def
6515 && ops
[i
] == reduc_def
)
6522 /* To properly compute ncopies we are interested in the widest
6523 input type in case we're looking at a widening accumulation. */
6525 || (GET_MODE_SIZE (SCALAR_TYPE_MODE (TREE_TYPE (vectype_in
)))
6526 < GET_MODE_SIZE (SCALAR_TYPE_MODE (TREE_TYPE (tem
)))))
6530 if (dt
!= vect_internal_def
6531 && dt
!= vect_external_def
6532 && dt
!= vect_constant_def
6533 && dt
!= vect_induction_def
6534 && !(dt
== vect_nested_cycle
&& nested_cycle
))
6537 if (dt
== vect_nested_cycle
6538 && ops
[i
] == reduc_def
)
6540 found_nested_cycle_def
= true;
6544 if (i
== 1 && code
== COND_EXPR
)
6546 /* Record how value of COND_EXPR is defined. */
6547 if (dt
== vect_constant_def
)
6550 cond_reduc_val
= ops
[i
];
6552 if (dt
== vect_induction_def
6554 && is_nonwrapping_integer_induction (def_stmt_info
, loop
))
6557 cond_stmt_vinfo
= def_stmt_info
;
6563 vectype_in
= vectype_out
;
  /* When vectorizing a reduction chain w/o SLP the reduction PHI is not
     directly used in stmt.  */
6567 if (reduc_index
== -1)
6569 if (STMT_VINFO_REDUC_TYPE (stmt_info
) == FOLD_LEFT_REDUCTION
)
6571 if (dump_enabled_p ())
6572 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
6573 "in-order reduction chain without SLP.\n");
6578 if (!(reduc_index
== -1
6579 || dts
[reduc_index
] == vect_reduction_def
6580 || dts
[reduc_index
] == vect_nested_cycle
6581 || ((dts
[reduc_index
] == vect_internal_def
6582 || dts
[reduc_index
] == vect_external_def
6583 || dts
[reduc_index
] == vect_constant_def
6584 || dts
[reduc_index
] == vect_induction_def
)
6585 && nested_cycle
&& found_nested_cycle_def
)))
6587 /* For pattern recognized stmts, orig_stmt might be a reduction,
6588 but some helper statements for the pattern might not, or
6589 might be COND_EXPRs with reduction uses in the condition. */
6590 gcc_assert (orig_stmt_info
);
6594 /* PHIs should not participate in patterns. */
6595 gcc_assert (!STMT_VINFO_RELATED_STMT (reduc_def_info
));
6596 enum vect_reduction_type v_reduc_type
6597 = STMT_VINFO_REDUC_TYPE (reduc_def_info
);
6598 stmt_vec_info tmp
= STMT_VINFO_REDUC_DEF (reduc_def_info
);
6600 STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info
) = v_reduc_type
;
6601 /* If we have a condition reduction, see if we can simplify it further. */
6602 if (v_reduc_type
== COND_REDUCTION
)
6604 /* TODO: We can't yet handle reduction chains, since we need to treat
6605 each COND_EXPR in the chain specially, not just the last one.
6608 x_1 = PHI <x_3, ...>
6609 x_2 = a_2 ? ... : x_1;
6610 x_3 = a_3 ? ... : x_2;
6612 we're interested in the last element in x_3 for which a_2 || a_3
6613 is true, whereas the current reduction chain handling would
6614 vectorize x_2 as a normal VEC_COND_EXPR and only treat x_3
6615 as a reduction operation. */
6616 if (reduc_index
== -1)
6618 if (dump_enabled_p ())
6619 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
6620 "conditional reduction chains not supported\n");
6624 /* vect_is_simple_reduction ensured that operand 2 is the
6625 loop-carried operand. */
6626 gcc_assert (reduc_index
== 2);
6628 /* Loop peeling modifies initial value of reduction PHI, which
6629 makes the reduction stmt to be transformed different to the
6630 original stmt analyzed. We need to record reduction code for
6631 CONST_COND_REDUCTION type reduction at analyzing stage, thus
6632 it can be used directly at transform stage. */
6633 if (STMT_VINFO_VEC_CONST_COND_REDUC_CODE (stmt_info
) == MAX_EXPR
6634 || STMT_VINFO_VEC_CONST_COND_REDUC_CODE (stmt_info
) == MIN_EXPR
)
6636 /* Also set the reduction type to CONST_COND_REDUCTION. */
6637 gcc_assert (cond_reduc_dt
== vect_constant_def
);
6638 STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info
) = CONST_COND_REDUCTION
;
6640 else if (direct_internal_fn_supported_p (IFN_FOLD_EXTRACT_LAST
,
6641 vectype_in
, OPTIMIZE_FOR_SPEED
))
6643 if (dump_enabled_p ())
6644 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
6645 "optimizing condition reduction with"
6646 " FOLD_EXTRACT_LAST.\n");
6647 STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info
) = EXTRACT_LAST_REDUCTION
;
6649 else if (cond_reduc_dt
== vect_induction_def
)
6652 = STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (cond_stmt_vinfo
);
6653 tree step
= STMT_VINFO_LOOP_PHI_EVOLUTION_PART (cond_stmt_vinfo
);
6655 gcc_assert (TREE_CODE (base
) == INTEGER_CST
6656 && TREE_CODE (step
) == INTEGER_CST
);
6657 cond_reduc_val
= NULL_TREE
;
6658 /* Find a suitable value, for MAX_EXPR below base, for MIN_EXPR
6659 above base; punt if base is the minimum value of the type for
6660 MAX_EXPR or maximum value of the type for MIN_EXPR for now. */
6661 if (tree_int_cst_sgn (step
) == -1)
6663 cond_reduc_op_code
= MIN_EXPR
;
6664 if (tree_int_cst_sgn (base
) == -1)
6665 cond_reduc_val
= build_int_cst (TREE_TYPE (base
), 0);
6666 else if (tree_int_cst_lt (base
,
6667 TYPE_MAX_VALUE (TREE_TYPE (base
))))
6669 = int_const_binop (PLUS_EXPR
, base
, integer_one_node
);
6673 cond_reduc_op_code
= MAX_EXPR
;
6674 if (tree_int_cst_sgn (base
) == 1)
6675 cond_reduc_val
= build_int_cst (TREE_TYPE (base
), 0);
6676 else if (tree_int_cst_lt (TYPE_MIN_VALUE (TREE_TYPE (base
)),
6679 = int_const_binop (MINUS_EXPR
, base
, integer_one_node
);
6683 if (dump_enabled_p ())
6684 dump_printf_loc (MSG_NOTE
, vect_location
,
6685 "condition expression based on "
6686 "integer induction.\n");
6687 STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info
)
6688 = INTEGER_INDUC_COND_REDUCTION
;
6691 else if (cond_reduc_dt
== vect_constant_def
)
6693 enum vect_def_type cond_initial_dt
;
6694 gimple
*def_stmt
= SSA_NAME_DEF_STMT (ops
[reduc_index
]);
6695 tree cond_initial_val
6696 = PHI_ARG_DEF_FROM_EDGE (def_stmt
, loop_preheader_edge (loop
));
6698 gcc_assert (cond_reduc_val
!= NULL_TREE
);
6699 vect_is_simple_use (cond_initial_val
, loop_vinfo
, &cond_initial_dt
);
6700 if (cond_initial_dt
== vect_constant_def
6701 && types_compatible_p (TREE_TYPE (cond_initial_val
),
6702 TREE_TYPE (cond_reduc_val
)))
6704 tree e
= fold_binary (LE_EXPR
, boolean_type_node
,
6705 cond_initial_val
, cond_reduc_val
);
6706 if (e
&& (integer_onep (e
) || integer_zerop (e
)))
6708 if (dump_enabled_p ())
6709 dump_printf_loc (MSG_NOTE
, vect_location
,
6710 "condition expression based on "
6711 "compile time constant.\n");
6712 /* Record reduction code at analysis stage. */
6713 STMT_VINFO_VEC_CONST_COND_REDUC_CODE (stmt_info
)
6714 = integer_onep (e
) ? MAX_EXPR
: MIN_EXPR
;
6715 STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info
)
6716 = CONST_COND_REDUCTION
;
6723 gcc_assert (tmp
== orig_stmt_info
6724 || REDUC_GROUP_FIRST_ELEMENT (tmp
) == orig_stmt_info
);
6726 /* We changed STMT to be the first stmt in reduction chain, hence we
6727 check that in this case the first element in the chain is STMT. */
6728 gcc_assert (tmp
== stmt_info
6729 || REDUC_GROUP_FIRST_ELEMENT (tmp
) == stmt_info
);
6731 if (STMT_VINFO_LIVE_P (reduc_def_info
))
6737 ncopies
= vect_get_num_copies (loop_vinfo
, vectype_in
);
6739 gcc_assert (ncopies
>= 1);
6741 vec_mode
= TYPE_MODE (vectype_in
);
6742 poly_uint64 nunits_out
= TYPE_VECTOR_SUBPARTS (vectype_out
);
6746 def_bb
= gimple_bb (reduc_def_phi
);
6747 def_stmt_loop
= def_bb
->loop_father
;
6748 def_arg
= PHI_ARG_DEF_FROM_EDGE (reduc_def_phi
,
6749 loop_preheader_edge (def_stmt_loop
));
6750 stmt_vec_info def_arg_stmt_info
= loop_vinfo
->lookup_def (def_arg
);
6751 if (def_arg_stmt_info
6752 && (STMT_VINFO_DEF_TYPE (def_arg_stmt_info
)
6753 == vect_double_reduction_def
))
6754 double_reduc
= true;
6757 vect_reduction_type reduction_type
6758 = STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info
);
6759 if ((double_reduc
|| reduction_type
!= TREE_CODE_REDUCTION
)
6762 if (dump_enabled_p ())
6763 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
6764 "multiple types in double reduction or condition "
6769 if (code
== COND_EXPR
)
6771 /* Only call during the analysis stage, otherwise we'll lose
6773 if (!vec_stmt
&& !vectorizable_condition (stmt_info
, gsi
, NULL
,
6774 true, NULL
, cost_vec
))
6776 if (dump_enabled_p ())
6777 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
6778 "unsupported condition in reduction\n");
6782 else if (code
== LSHIFT_EXPR
|| code
== RSHIFT_EXPR
6783 || code
== LROTATE_EXPR
|| code
== RROTATE_EXPR
)
6785 /* Only call during the analysis stage, otherwise we'll lose
6786 STMT_VINFO_TYPE. We only support this for nested cycles
6787 without double reductions at the moment. */
6790 || (!vec_stmt
&& !vectorizable_shift (stmt_info
, gsi
, NULL
,
6793 if (dump_enabled_p ())
6794 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
6795 "unsupported shift or rotation in reduction\n");
6801 /* 4. Supportable by target? */
6803 /* 4.1. check support for the operation in the loop */
6804 optab
= optab_for_tree_code (code
, vectype_in
, optab_default
);
6807 if (dump_enabled_p ())
6808 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
6814 if (optab_handler (optab
, vec_mode
) == CODE_FOR_nothing
)
6816 if (dump_enabled_p ())
6817 dump_printf (MSG_NOTE
, "op not supported by target.\n");
6819 if (maybe_ne (GET_MODE_SIZE (vec_mode
), UNITS_PER_WORD
)
6820 || !vect_worthwhile_without_simd_p (loop_vinfo
, code
))
6823 if (dump_enabled_p ())
6824 dump_printf (MSG_NOTE
, "proceeding using word mode.\n");
6827 /* Worthwhile without SIMD support? */
6828 if (!VECTOR_MODE_P (TYPE_MODE (vectype_in
))
6829 && !vect_worthwhile_without_simd_p (loop_vinfo
, code
))
6831 if (dump_enabled_p ())
6832 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
6833 "not worthwhile without SIMD support.\n");
  /* 4.2. Check support for the epilog operation.

          If STMT represents a reduction pattern, then the type of the
          reduction variable may be different than the type of the rest
          of the arguments.  For example, consider the case of accumulation
          of shorts into an int accumulator; The original code:
                        S1: int_a = (int) short_a;
          orig_stmt->   S2: int_acc = plus <int_a ,int_acc>;

          was replaced with:
                        STMT: int_acc = widen_sum <short_a, int_acc>

          This means that:
          1. The tree-code that is used to create the vector operation in the
             epilog code (that reduces the partial results) is not the
             tree-code of STMT, but is rather the tree-code of the original
             stmt from the pattern that STMT is replacing.  I.e, in the example
             above we want to use 'widen_sum' in the loop, but 'plus' in the
             epilog.
          2. The type (mode) we use to check available target support
             for the vector operation to be created in the *epilog*, is
             determined by the type of the reduction variable (in the example
             above we'd check this: optab_handler (plus_optab, vect_int_mode])).
             However the type (mode) we use to check available target support
             for the vector operation to be created *inside the loop*, is
             determined by the type of the other arguments to STMT (in the
             example we'd check this: optab_handler (widen_sum_optab,
             vect_short_mode)).

          This is contrary to "regular" reductions, in which the types of all
          the arguments are the same as the type of the reduction variable.
          For "regular" reductions we can therefore use the same vector type
          (and also the same tree-code) when generating the epilog code and
          when generating the code inside the loop.  */
6875 && (reduction_type
== TREE_CODE_REDUCTION
6876 || reduction_type
== FOLD_LEFT_REDUCTION
))
6878 /* This is a reduction pattern: get the vectype from the type of the
6879 reduction variable, and get the tree-code from orig_stmt. */
6880 orig_code
= gimple_assign_rhs_code (orig_stmt_info
->stmt
);
6881 gcc_assert (vectype_out
);
6882 vec_mode
= TYPE_MODE (vectype_out
);
6886 /* Regular reduction: use the same vectype and tree-code as used for
6887 the vector code inside the loop can be used for the epilog code. */
6890 if (code
== MINUS_EXPR
)
6891 orig_code
= PLUS_EXPR
;
6893 /* For simple condition reductions, replace with the actual expression
6894 we want to base our reduction around. */
6895 if (reduction_type
== CONST_COND_REDUCTION
)
6897 orig_code
= STMT_VINFO_VEC_CONST_COND_REDUC_CODE (stmt_info
);
6898 gcc_assert (orig_code
== MAX_EXPR
|| orig_code
== MIN_EXPR
);
6900 else if (reduction_type
== INTEGER_INDUC_COND_REDUCTION
)
6901 orig_code
= cond_reduc_op_code
;
6904 reduc_fn
= IFN_LAST
;
6906 if (reduction_type
== TREE_CODE_REDUCTION
6907 || reduction_type
== FOLD_LEFT_REDUCTION
6908 || reduction_type
== INTEGER_INDUC_COND_REDUCTION
6909 || reduction_type
== CONST_COND_REDUCTION
)
6911 if (reduction_type
== FOLD_LEFT_REDUCTION
6912 ? fold_left_reduction_fn (orig_code
, &reduc_fn
)
6913 : reduction_fn_for_scalar_code (orig_code
, &reduc_fn
))
6915 if (reduc_fn
!= IFN_LAST
6916 && !direct_internal_fn_supported_p (reduc_fn
, vectype_out
,
6917 OPTIMIZE_FOR_SPEED
))
6919 if (dump_enabled_p ())
6920 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
6921 "reduc op not supported by target.\n");
6923 reduc_fn
= IFN_LAST
;
6928 if (!nested_cycle
|| double_reduc
)
6930 if (dump_enabled_p ())
6931 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
6932 "no reduc code for scalar code.\n");
6938 else if (reduction_type
== COND_REDUCTION
)
6940 int scalar_precision
6941 = GET_MODE_PRECISION (SCALAR_TYPE_MODE (scalar_type
));
6942 cr_index_scalar_type
= make_unsigned_type (scalar_precision
);
6943 cr_index_vector_type
= build_vector_type (cr_index_scalar_type
,
6946 if (direct_internal_fn_supported_p (IFN_REDUC_MAX
, cr_index_vector_type
,
6947 OPTIMIZE_FOR_SPEED
))
6948 reduc_fn
= IFN_REDUC_MAX
;
6951 if (reduction_type
!= EXTRACT_LAST_REDUCTION
6952 && (!nested_cycle
|| double_reduc
)
6953 && reduc_fn
== IFN_LAST
6954 && !nunits_out
.is_constant ())
6956 if (dump_enabled_p ())
6957 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
6958 "missing target support for reduction on"
6959 " variable-length vectors.\n");
6963 /* For SLP reductions, see if there is a neutral value we can use. */
6964 tree neutral_op
= NULL_TREE
;
6966 neutral_op
= neutral_op_for_slp_reduction
6967 (slp_node_instance
->reduc_phis
, code
,
6968 REDUC_GROUP_FIRST_ELEMENT (stmt_info
) != NULL
);
6970 if (double_reduc
&& reduction_type
== FOLD_LEFT_REDUCTION
)
6972 /* We can't support in-order reductions of code such as this:
6974 for (int i = 0; i < n1; ++i)
6975 for (int j = 0; j < n2; ++j)
6978 since GCC effectively transforms the loop when vectorizing:
6980 for (int i = 0; i < n1 / VF; ++i)
6981 for (int j = 0; j < n2; ++j)
6982 for (int k = 0; k < VF; ++k)
6985 which is a reassociation of the original operation. */
6986 if (dump_enabled_p ())
6987 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
6988 "in-order double reduction not supported.\n");
6993 if (reduction_type
== FOLD_LEFT_REDUCTION
6995 && !REDUC_GROUP_FIRST_ELEMENT (stmt_info
))
6997 /* We cannot use in-order reductions in this case because there is
6998 an implicit reassociation of the operations involved. */
6999 if (dump_enabled_p ())
7000 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
7001 "in-order unchained SLP reductions not supported.\n");
7005 /* For double reductions, and for SLP reductions with a neutral value,
7006 we construct a variable-length initial vector by loading a vector
7007 full of the neutral value and then shift-and-inserting the start
7008 values into the low-numbered elements. */
7009 if ((double_reduc
|| neutral_op
)
7010 && !nunits_out
.is_constant ()
7011 && !direct_internal_fn_supported_p (IFN_VEC_SHL_INSERT
,
7012 vectype_out
, OPTIMIZE_FOR_SPEED
))
7014 if (dump_enabled_p ())
7015 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
7016 "reduction on variable-length vectors requires"
7017 " target support for a vector-shift-and-insert"
7022 /* Check extra constraints for variable-length unchained SLP reductions. */
7023 if (STMT_SLP_TYPE (stmt_info
)
7024 && !REDUC_GROUP_FIRST_ELEMENT (stmt_info
)
7025 && !nunits_out
.is_constant ())
7027 /* We checked above that we could build the initial vector when
7028 there's a neutral element value. Check here for the case in
7029 which each SLP statement has its own initial value and in which
7030 that value needs to be repeated for every instance of the
7031 statement within the initial vector. */
7032 unsigned int group_size
= SLP_TREE_SCALAR_STMTS (slp_node
).length ();
7033 scalar_mode elt_mode
= SCALAR_TYPE_MODE (TREE_TYPE (vectype_out
));
7035 && !can_duplicate_and_interleave_p (group_size
, elt_mode
))
7037 if (dump_enabled_p ())
7038 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
7039 "unsupported form of SLP reduction for"
7040 " variable-length vectors: cannot build"
7041 " initial vector.\n");
      /* The epilogue code relies on the number of elements being a multiple
         of the group size.  The duplicate-and-interleave approach to setting
         up the initial vector does too.  */
7047 if (!multiple_p (nunits_out
, group_size
))
7049 if (dump_enabled_p ())
7050 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
7051 "unsupported form of SLP reduction for"
7052 " variable-length vectors: the vector size"
7053 " is not a multiple of the number of results.\n");
  /* In case of widening multiplication by a constant, we update the type
     of the constant to be the type of the other operand.  We check that the
     constant fits the type in the pattern recognition pass.  */
7061 if (code
== DOT_PROD_EXPR
7062 && !types_compatible_p (TREE_TYPE (ops
[0]), TREE_TYPE (ops
[1])))
7064 if (TREE_CODE (ops
[0]) == INTEGER_CST
)
7065 ops
[0] = fold_convert (TREE_TYPE (ops
[1]), ops
[0]);
7066 else if (TREE_CODE (ops
[1]) == INTEGER_CST
)
7067 ops
[1] = fold_convert (TREE_TYPE (ops
[0]), ops
[1]);
7070 if (dump_enabled_p ())
7071 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
7072 "invalid types in dot-prod\n");
7078 if (reduction_type
== COND_REDUCTION
)
7082 if (! max_loop_iterations (loop
, &ni
))
7084 if (dump_enabled_p ())
7085 dump_printf_loc (MSG_NOTE
, vect_location
,
7086 "loop count not known, cannot create cond "
7090 /* Convert backedges to iterations. */
7093 /* The additional index will be the same type as the condition. Check
7094 that the loop can fit into this less one (because we'll use up the
7095 zero slot for when there are no matches). */
7096 tree max_index
= TYPE_MAX_VALUE (cr_index_scalar_type
);
7097 if (wi::geu_p (ni
, wi::to_widest (max_index
)))
7099 if (dump_enabled_p ())
7100 dump_printf_loc (MSG_NOTE
, vect_location
,
7101 "loop size is greater than data size.\n");
  /* In case the vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  For more details see documentation
     in vectorizable_operation.  */

  /* If the reduction is used in an outer loop we need to generate
     VF intermediate results, like so (e.g. for ncopies=2):
        r0 = phi (init, r0)
        r1 = phi (init, r1)
        r0 = x0 + r0;
        r1 = x1 + r1;
    (i.e. we generate VF results in 2 registers).
    In this case we have a separate def-use cycle for each copy, and therefore
    for each copy we get the vector def for the reduction variable from the
    respective phi node created for this copy.

    Otherwise (the reduction is unused in the loop nest), we can combine
    together intermediate results, like so (e.g. for ncopies=2):
        r = phi (init, r)
        r = x0 + r;
        r = x1 + r;
   (i.e. we generate VF/2 results in a single register).
   In this case for each copy we get the vector def for the reduction variable
   from the vectorized reduction operation generated in the previous iteration.

   This only works when we see both the reduction PHI and its only consumer
   in vectorizable_reduction and there are no intermediate stmts
   participating.  */
7135 stmt_vec_info use_stmt_info
;
7136 tree reduc_phi_result
= gimple_phi_result (reduc_def_phi
);
7138 && (STMT_VINFO_RELEVANT (stmt_info
) <= vect_used_only_live
)
7139 && (use_stmt_info
= loop_vinfo
->lookup_single_use (reduc_phi_result
))
7140 && vect_stmt_to_vectorize (use_stmt_info
) == stmt_info
)
7142 single_defuse_cycle
= true;
7146 epilog_copies
= ncopies
;
7148 /* If the reduction stmt is one of the patterns that have lane
7149 reduction embedded we cannot handle the case of ! single_defuse_cycle. */
7151 && ! single_defuse_cycle
)
7152 && (code
== DOT_PROD_EXPR
7153 || code
== WIDEN_SUM_EXPR
7154 || code
== SAD_EXPR
))
7156 if (dump_enabled_p ())
7157 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
7158 "multi def-use cycle not possible for lane-reducing "
7159 "reduction operation\n");
7164 vec_num
= SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node
);
7168 internal_fn cond_fn
= get_conditional_internal_fn (code
);
7169 vec_loop_masks
*masks
= &LOOP_VINFO_MASKS (loop_vinfo
);
7170 bool mask_by_cond_expr
= use_mask_by_cond_expr_p (code
, cond_fn
, vectype_in
);
7172 if (!vec_stmt
) /* transformation not required. */
7174 vect_model_reduction_cost (stmt_info
, reduc_fn
, ncopies
, cost_vec
);
7175 if (loop_vinfo
&& LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo
))
7177 if (reduction_type
!= FOLD_LEFT_REDUCTION
7178 && !mask_by_cond_expr
7179 && (cond_fn
== IFN_LAST
7180 || !direct_internal_fn_supported_p (cond_fn
, vectype_in
,
7181 OPTIMIZE_FOR_SPEED
)))
7183 if (dump_enabled_p ())
7184 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
7185 "can't use a fully-masked loop because no"
7186 " conditional operation is available.\n");
7187 LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo
) = false;
7189 else if (reduc_index
== -1)
7191 if (dump_enabled_p ())
7192 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
7193 "can't use a fully-masked loop for chained"
7195 LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo
) = false;
7198 vect_record_loop_mask (loop_vinfo
, masks
, ncopies
* vec_num
,
7201 if (dump_enabled_p ()
7202 && reduction_type
== FOLD_LEFT_REDUCTION
)
7203 dump_printf_loc (MSG_NOTE
, vect_location
,
7204 "using an in-order (fold-left) reduction.\n");
7205 STMT_VINFO_TYPE (stmt_info
) = reduc_vec_info_type
;
7211 if (dump_enabled_p ())
7212 dump_printf_loc (MSG_NOTE
, vect_location
, "transform reduction.\n");
7214 /* FORNOW: Multiple types are not supported for condition. */
7215 if (code
== COND_EXPR
)
7216 gcc_assert (ncopies
== 1);
7218 bool masked_loop_p
= LOOP_VINFO_FULLY_MASKED_P (loop_vinfo
);
7220 if (reduction_type
== FOLD_LEFT_REDUCTION
)
7221 return vectorize_fold_left_reduction
7222 (stmt_info
, gsi
, vec_stmt
, slp_node
, reduc_def_phi
, code
,
7223 reduc_fn
, ops
, vectype_in
, reduc_index
, masks
);
7225 if (reduction_type
== EXTRACT_LAST_REDUCTION
)
7227 gcc_assert (!slp_node
);
7228 return vectorizable_condition (stmt_info
, gsi
, vec_stmt
,
7232 /* Create the destination vector */
7233 vec_dest
= vect_create_destination_var (scalar_dest
, vectype_out
);
7235 prev_stmt_info
= NULL
;
7236 prev_phi_info
= NULL
;
7239 vec_oprnds0
.create (1);
7240 vec_oprnds1
.create (1);
7241 if (op_type
== ternary_op
)
7242 vec_oprnds2
.create (1);
7245 phis
.create (vec_num
);
7246 vect_defs
.create (vec_num
);
7248 vect_defs
.quick_push (NULL_TREE
);
7251 phis
.splice (SLP_TREE_VEC_STMTS (slp_node_instance
->reduc_phis
));
7253 phis
.quick_push (STMT_VINFO_VEC_STMT (reduc_def_info
));
7255 for (j
= 0; j
< ncopies
; j
++)
7257 if (code
== COND_EXPR
)
7259 gcc_assert (!slp_node
);
7260 vectorizable_condition (stmt_info
, gsi
, vec_stmt
,
7264 if (code
== LSHIFT_EXPR
7265 || code
== RSHIFT_EXPR
)
7267 vectorizable_shift (stmt_info
, gsi
, vec_stmt
, slp_node
, NULL
);
7276 /* Get vec defs for all the operands except the reduction index,
7277 ensuring the ordering of the ops in the vector is kept. */
7278 auto_vec
<tree
, 3> slp_ops
;
7279 auto_vec
<vec
<tree
>, 3> vec_defs
;
7281 slp_ops
.quick_push (ops
[0]);
7282 slp_ops
.quick_push (ops
[1]);
7283 if (op_type
== ternary_op
)
7284 slp_ops
.quick_push (ops
[2]);
7286 vect_get_slp_defs (slp_ops
, slp_node
, &vec_defs
);
7288 vec_oprnds0
.safe_splice (vec_defs
[0]);
7289 vec_defs
[0].release ();
7290 vec_oprnds1
.safe_splice (vec_defs
[1]);
7291 vec_defs
[1].release ();
7292 if (op_type
== ternary_op
)
7294 vec_oprnds2
.safe_splice (vec_defs
[2]);
7295 vec_defs
[2].release ();
7300 vec_oprnds0
.quick_push
7301 (vect_get_vec_def_for_operand (ops
[0], stmt_info
));
7302 vec_oprnds1
.quick_push
7303 (vect_get_vec_def_for_operand (ops
[1], stmt_info
));
7304 if (op_type
== ternary_op
)
7305 vec_oprnds2
.quick_push
7306 (vect_get_vec_def_for_operand (ops
[2], stmt_info
));
7313 gcc_assert (reduc_index
!= -1 || ! single_defuse_cycle
);
7315 if (single_defuse_cycle
&& reduc_index
== 0)
7316 vec_oprnds0
[0] = gimple_get_lhs (new_stmt_info
->stmt
);
7319 = vect_get_vec_def_for_stmt_copy (loop_vinfo
,
7321 if (single_defuse_cycle
&& reduc_index
== 1)
7322 vec_oprnds1
[0] = gimple_get_lhs (new_stmt_info
->stmt
);
7325 = vect_get_vec_def_for_stmt_copy (loop_vinfo
,
7327 if (op_type
== ternary_op
)
7329 if (single_defuse_cycle
&& reduc_index
== 2)
7330 vec_oprnds2
[0] = gimple_get_lhs (new_stmt_info
->stmt
);
7333 = vect_get_vec_def_for_stmt_copy (loop_vinfo
,
7339 FOR_EACH_VEC_ELT (vec_oprnds0
, i
, def0
)
7341 tree vop
[3] = { def0
, vec_oprnds1
[i
], NULL_TREE
};
7342 if (masked_loop_p
&& !mask_by_cond_expr
)
7344 /* Make sure that the reduction accumulator is vop[0]. */
7345 if (reduc_index
== 1)
7347 gcc_assert (commutative_tree_code (code
));
7348 std::swap (vop
[0], vop
[1]);
7350 tree mask
= vect_get_loop_mask (gsi
, masks
, vec_num
* ncopies
,
7351 vectype_in
, i
* ncopies
+ j
);
7352 gcall
*call
= gimple_build_call_internal (cond_fn
, 4, mask
,
7355 new_temp
= make_ssa_name (vec_dest
, call
);
7356 gimple_call_set_lhs (call
, new_temp
);
7357 gimple_call_set_nothrow (call
, true);
7359 = vect_finish_stmt_generation (stmt_info
, call
, gsi
);
7363 if (op_type
== ternary_op
)
7364 vop
[2] = vec_oprnds2
[i
];
7366 if (masked_loop_p
&& mask_by_cond_expr
)
7368 tree mask
= vect_get_loop_mask (gsi
, masks
,
7370 vectype_in
, i
* ncopies
+ j
);
7371 build_vect_cond_expr (code
, vop
, mask
, gsi
);
7374 gassign
*new_stmt
= gimple_build_assign (vec_dest
, code
,
7375 vop
[0], vop
[1], vop
[2]);
7376 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
7377 gimple_assign_set_lhs (new_stmt
, new_temp
);
7379 = vect_finish_stmt_generation (stmt_info
, new_stmt
, gsi
);
7384 SLP_TREE_VEC_STMTS (slp_node
).quick_push (new_stmt_info
);
7385 vect_defs
.quick_push (new_temp
);
7388 vect_defs
[0] = new_temp
;
7395 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt_info
;
7397 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt_info
;
7399 prev_stmt_info
= new_stmt_info
;
7402 /* Finalize the reduction-phi (set its arguments) and create the
7403 epilog reduction code. */
7404 if ((!single_defuse_cycle
|| code
== COND_EXPR
) && !slp_node
)
7405 vect_defs
[0] = gimple_get_lhs ((*vec_stmt
)->stmt
);
7407 vect_create_epilog_for_reduction (vect_defs
, stmt_info
, reduc_def_phi
,
7408 epilog_copies
, reduc_fn
, phis
,
7409 double_reduc
, slp_node
, slp_node_instance
,
7410 cond_reduc_val
, cond_reduc_op_code
,
/* Function vect_min_worthwhile_factor.

   For a loop where we could vectorize the operation indicated by CODE,
   return the minimum vectorization factor that makes it worthwhile
   to use generic vectors.  */
vect_min_worthwhile_factor (enum tree_code code)

/* Return true if VINFO indicates we are doing loop vectorization and if
   it is worth decomposing CODE operations into scalar operations for
   that loop's vectorization factor.  */

bool
vect_worthwhile_without_simd_p (vec_info *vinfo, tree_code code)
{
  loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
  unsigned HOST_WIDE_INT value;
  return (loop_vinfo
          && LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant (&value)
          && value >= vect_min_worthwhile_factor (code));
}
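/* A hedged usage sketch (illustrative only): even without real SIMD support
   the vectorizer may open-code an operation on word-sized "vectors"; the
   check above only allows that when the loop's vectorization factor is a
   known constant at least as large as vect_min_worthwhile_factor (code),
   e.g. if the minimum worthwhile factor for PLUS_EXPR were 4, a constant
   VF of 8 would pass while a VF of 2 would not.  */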
/* Function vectorizable_induction

   Check if STMT_INFO performs an induction computation that can be vectorized.
   If VEC_STMT is also passed, vectorize the induction PHI: create a vectorized
   phi to replace it, put it in VEC_STMT, and add it to the same basic block.
   Return true if STMT_INFO is vectorizable in this way.  */
7464 vectorizable_induction (stmt_vec_info stmt_info
,
7465 gimple_stmt_iterator
*gsi ATTRIBUTE_UNUSED
,
7466 stmt_vec_info
*vec_stmt
, slp_tree slp_node
,
7467 stmt_vector_for_cost
*cost_vec
)
7469 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
7470 struct loop
*loop
= LOOP_VINFO_LOOP (loop_vinfo
);
7472 bool nested_in_vect_loop
= false;
7473 struct loop
*iv_loop
;
7475 edge pe
= loop_preheader_edge (loop
);
7477 tree new_vec
, vec_init
, vec_step
, t
;
7480 gphi
*induction_phi
;
7481 tree induc_def
, vec_dest
;
7482 tree init_expr
, step_expr
;
7483 poly_uint64 vf
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
);
7487 imm_use_iterator imm_iter
;
7488 use_operand_p use_p
;
7492 gimple_stmt_iterator si
;
7494 gphi
*phi
= dyn_cast
<gphi
*> (stmt_info
->stmt
);
7498 if (!STMT_VINFO_RELEVANT_P (stmt_info
))
7501 /* Make sure it was recognized as induction computation. */
7502 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_induction_def
)
7505 tree vectype
= STMT_VINFO_VECTYPE (stmt_info
);
7506 poly_uint64 nunits
= TYPE_VECTOR_SUBPARTS (vectype
);
7511 ncopies
= vect_get_num_copies (loop_vinfo
, vectype
);
7512 gcc_assert (ncopies
>= 1);
7514 /* FORNOW. These restrictions should be relaxed. */
7515 if (nested_in_vect_loop_p (loop
, stmt_info
))
7517 imm_use_iterator imm_iter
;
7518 use_operand_p use_p
;
7525 if (dump_enabled_p ())
7526 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
7527 "multiple types in nested loop.\n");
7531 /* FORNOW: outer loop induction with SLP not supported. */
7532 if (STMT_SLP_TYPE (stmt_info
))
7536 latch_e
= loop_latch_edge (loop
->inner
);
7537 loop_arg
= PHI_ARG_DEF_FROM_EDGE (phi
, latch_e
);
7538 FOR_EACH_IMM_USE_FAST (use_p
, imm_iter
, loop_arg
)
7540 gimple
*use_stmt
= USE_STMT (use_p
);
7541 if (is_gimple_debug (use_stmt
))
7544 if (!flow_bb_inside_loop_p (loop
->inner
, gimple_bb (use_stmt
)))
7546 exit_phi
= use_stmt
;
7552 stmt_vec_info exit_phi_vinfo
= loop_vinfo
->lookup_stmt (exit_phi
);
7553 if (!(STMT_VINFO_RELEVANT_P (exit_phi_vinfo
)
7554 && !STMT_VINFO_LIVE_P (exit_phi_vinfo
)))
7556 if (dump_enabled_p ())
7557 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
7558 "inner-loop induction only used outside "
7559 "of the outer vectorized loop.\n");
7564 nested_in_vect_loop
= true;
7565 iv_loop
= loop
->inner
;
7569 gcc_assert (iv_loop
== (gimple_bb (phi
))->loop_father
);
7571 if (slp_node
&& !nunits
.is_constant ())
7573 /* The current SLP code creates the initial value element-by-element. */
7574 if (dump_enabled_p ())
7575 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
7576 "SLP induction not supported for variable-length"
7581 if (!vec_stmt
) /* transformation not required. */
7583 STMT_VINFO_TYPE (stmt_info
) = induc_vec_info_type
;
7584 DUMP_VECT_SCOPE ("vectorizable_induction");
7585 vect_model_induction_cost (stmt_info
, ncopies
, cost_vec
);
  /* Compute a vector variable, initialized with the first VF values of
     the induction variable.  E.g., for an iv with IV_PHI='X' and
     evolution S, for a vector of 4 units, we want to compute:
     [X, X + S, X + 2*S, X + 3*S].  */
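/* Illustrative sketch (added for exposition, not part of the pass): for the
   scalar induction

       i_1 = PHI <X (preheader), i_2 (latch)>
       i_2 = i_1 + S;

   and a 4-lane vector type, the code below materializes

       vec_iv_1 = PHI <{X, X+S, X+2*S, X+3*S} (preheader), vec_iv_2 (latch)>
       vec_iv_2 = vec_iv_1 + {4*S, 4*S, 4*S, 4*S};

   so every vector iteration advances all lanes by VF*S at once.  */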
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "transform induction phi.\n");

  latch_e = loop_latch_edge (iv_loop);
  loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);

  step_expr = STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_info);
  gcc_assert (step_expr != NULL_TREE);

  pe = loop_preheader_edge (iv_loop);
  init_expr = PHI_ARG_DEF_FROM_EDGE (phi,
				     loop_preheader_edge (iv_loop));

  if (!nested_in_vect_loop)
    {
      /* Convert the initial value to the desired type.  */
      tree new_type = TREE_TYPE (vectype);
      init_expr = gimple_convert (&stmts, new_type, init_expr);

      /* If we are using the loop mask to "peel" for alignment then we need
	 to adjust the start value here.  */
      tree skip_niters = LOOP_VINFO_MASK_SKIP_NITERS (loop_vinfo);
      if (skip_niters != NULL_TREE)
	{
	  if (FLOAT_TYPE_P (vectype))
	    skip_niters = gimple_build (&stmts, FLOAT_EXPR, new_type,
					skip_niters);
	  else
	    skip_niters = gimple_convert (&stmts, new_type, skip_niters);
	  tree skip_step = gimple_build (&stmts, MULT_EXPR, new_type,
					 skip_niters, step_expr);
	  init_expr = gimple_build (&stmts, MINUS_EXPR, new_type,
				    init_expr, skip_step);
	}
    }

  /* Convert the step to the desired type.  */
  step_expr = gimple_convert (&stmts, TREE_TYPE (vectype), step_expr);

  new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
  gcc_assert (!new_bb);

  /* Find the first insertion point in the BB.  */
  basic_block bb = gimple_bb (phi);
  si = gsi_after_labels (bb);

  /* For SLP induction we have to generate several IVs as for example
     with group size 3 we need [i, i, i, i + S] [i + S, i + S, i + 2*S, i + 2*S]
     [i + 2*S, i + 3*S, i + 3*S, i + 3*S].  The step is the same uniform
     [VF*S, VF*S, VF*S, VF*S] for all.  */
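/* Illustrative sketch (added for exposition, not part of the pass): with
   group size 3 and 4-lane vectors the loop below needs

       nivs = least_common_multiple (3, 4) / 4 = 3

   distinct starting vectors; any further vector IVs can reuse an earlier
   one and just add a uniform step, conceptually

       iv[k] = iv[k - nivs] + {vfp*S, vfp*S, vfp*S, vfp*S}

   where vfp = least_common_multiple (3, 4) / 3 = 4, as done in the
   "Re-use IVs" branch further down.  */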
  if (slp_node)
    {
      /* Enforced above.  */
      unsigned int const_nunits = nunits.to_constant ();

      /* Generate [VF*S, VF*S, ... ].  */
      if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)))
	{
	  expr = build_int_cst (integer_type_node, vf);
	  expr = fold_convert (TREE_TYPE (step_expr), expr);
	}
      else
	expr = build_int_cst (TREE_TYPE (step_expr), vf);
      new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
			      expr, step_expr);
      if (! CONSTANT_CLASS_P (new_name))
	new_name = vect_init_vector (stmt_info, new_name,
				     TREE_TYPE (step_expr), NULL);
      new_vec = build_vector_from_val (vectype, new_name);
      vec_step = vect_init_vector (stmt_info, new_vec, vectype, NULL);

      /* Now generate the IVs.  */
      unsigned group_size = SLP_TREE_SCALAR_STMTS (slp_node).length ();
      unsigned nvects = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
      unsigned elts = const_nunits * nvects;
      unsigned nivs = least_common_multiple (group_size,
					     const_nunits) / const_nunits;
      gcc_assert (elts % group_size == 0);
      tree elt = init_expr;
      for (ivn = 0; ivn < nivs; ++ivn)
	{
	  tree_vector_builder elts (vectype, const_nunits, 1);
	  for (unsigned eltn = 0; eltn < const_nunits; ++eltn)
	    {
	      if (ivn*const_nunits + eltn >= group_size
		  && (ivn * const_nunits + eltn) % group_size == 0)
		elt = gimple_build (&stmts, PLUS_EXPR, TREE_TYPE (elt),
				    elt, step_expr);
	      elts.quick_push (elt);
	    }
	  vec_init = gimple_build_vector (&stmts, &elts);

	  new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
	  gcc_assert (!new_bb);

	  /* Create the induction-phi that defines the induction-operand.  */
	  vec_dest = vect_get_new_vect_var (vectype, vect_simple_var, "vec_iv_");
	  induction_phi = create_phi_node (vec_dest, iv_loop->header);
	  stmt_vec_info induction_phi_info
	    = loop_vinfo->add_stmt (induction_phi);
	  induc_def = PHI_RESULT (induction_phi);

	  /* Create the iv update inside the loop  */
	  vec_def = make_ssa_name (vec_dest);
	  new_stmt = gimple_build_assign (vec_def, PLUS_EXPR, induc_def, vec_step);
	  gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
	  loop_vinfo->add_stmt (new_stmt);

	  /* Set the arguments of the phi node:  */
	  add_phi_arg (induction_phi, vec_init, pe, UNKNOWN_LOCATION);
	  add_phi_arg (induction_phi, vec_def, loop_latch_edge (iv_loop),
		       UNKNOWN_LOCATION);

	  SLP_TREE_VEC_STMTS (slp_node).quick_push (induction_phi_info);
	}

      /* Re-use IVs when we can.  */
      if (ivn < nvects)
	{
	  unsigned vfp
	    = least_common_multiple (group_size, const_nunits) / group_size;
	  /* Generate [VF'*S, VF'*S, ... ].  */
	  if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)))
	    {
	      expr = build_int_cst (integer_type_node, vfp);
	      expr = fold_convert (TREE_TYPE (step_expr), expr);
	    }
	  else
	    expr = build_int_cst (TREE_TYPE (step_expr), vfp);
	  new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
				  expr, step_expr);
	  if (! CONSTANT_CLASS_P (new_name))
	    new_name = vect_init_vector (stmt_info, new_name,
					 TREE_TYPE (step_expr), NULL);
	  new_vec = build_vector_from_val (vectype, new_name);
	  vec_step = vect_init_vector (stmt_info, new_vec, vectype, NULL);
	  for (; ivn < nvects; ++ivn)
	    {
	      gimple *iv = SLP_TREE_VEC_STMTS (slp_node)[ivn - nivs]->stmt;
	      tree def;
	      if (gimple_code (iv) == GIMPLE_PHI)
		def = gimple_phi_result (iv);
	      else
		def = gimple_assign_lhs (iv);
	      new_stmt = gimple_build_assign (make_ssa_name (vectype),
					      PLUS_EXPR, def, vec_step);
	      if (gimple_code (iv) == GIMPLE_PHI)
		gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
	      else
		{
		  gimple_stmt_iterator tgsi = gsi_for_stmt (iv);
		  gsi_insert_after (&tgsi, new_stmt, GSI_CONTINUE_LINKING);
		}
	      SLP_TREE_VEC_STMTS (slp_node).quick_push
		(loop_vinfo->add_stmt (new_stmt));
	    }
	}

      return true;
    }
  /* Create the vector that holds the initial_value of the induction.  */
  if (nested_in_vect_loop)
    {
      /* iv_loop is nested in the loop to be vectorized.  init_expr had already
	 been created during vectorization of previous stmts.  We obtain it
	 from the STMT_VINFO_VEC_STMT of the defining stmt.  */
      vec_init = vect_get_vec_def_for_operand (init_expr, stmt_info);
      /* If the initial value is not of proper type, convert it.  */
      if (!useless_type_conversion_p (vectype, TREE_TYPE (vec_init)))
	{
	  new_stmt
	    = gimple_build_assign (vect_get_new_ssa_name (vectype,
							  vect_simple_var,
							  "vec_iv_"),
				   VIEW_CONVERT_EXPR,
				   build1 (VIEW_CONVERT_EXPR, vectype,
					   vec_init));
	  vec_init = gimple_assign_lhs (new_stmt);
	  new_bb = gsi_insert_on_edge_immediate (loop_preheader_edge (iv_loop),
						 new_stmt);
	  gcc_assert (!new_bb);
	  loop_vinfo->add_stmt (new_stmt);
	}
    }
  else
    {
      /* iv_loop is the loop to be vectorized. Create:
	 vec_init = [X, X+S, X+2*S, X+3*S] (S = step_expr, X = init_expr)  */
      new_name = gimple_convert (&stmts, TREE_TYPE (vectype), init_expr);

      unsigned HOST_WIDE_INT const_nunits;
      if (nunits.is_constant (&const_nunits))
	{
	  tree_vector_builder elts (vectype, const_nunits, 1);
	  elts.quick_push (new_name);
	  for (i = 1; i < const_nunits; i++)
	    {
	      /* Create: new_name_i = new_name + step_expr  */
	      new_name = gimple_build (&stmts, PLUS_EXPR, TREE_TYPE (new_name),
				       new_name, step_expr);
	      elts.quick_push (new_name);
	    }
	  /* Create a vector from [new_name_0, new_name_1, ...,
	     new_name_nunits-1]  */
	  vec_init = gimple_build_vector (&stmts, &elts);
	}
      else if (INTEGRAL_TYPE_P (TREE_TYPE (step_expr)))
	/* Build the initial value directly from a VEC_SERIES_EXPR.  */
	vec_init = gimple_build (&stmts, VEC_SERIES_EXPR, vectype,
				 new_name, step_expr);
      else
	{
	  /* Build:
	       [base, base, base, ...]
	       + (vectype) [0, 1, 2, ...] * [step, step, step, ...].  */
	  gcc_assert (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)));
	  gcc_assert (flag_associative_math);
	  tree index = build_index_vector (vectype, 0, 1);
	  tree base_vec = gimple_build_vector_from_val (&stmts, vectype,
							new_name);
	  tree step_vec = gimple_build_vector_from_val (&stmts, vectype,
							step_expr);
	  vec_init = gimple_build (&stmts, FLOAT_EXPR, vectype, index);
	  vec_init = gimple_build (&stmts, MULT_EXPR, vectype,
				   vec_init, step_vec);
	  vec_init = gimple_build (&stmts, PLUS_EXPR, vectype,
				   vec_init, base_vec);
	}

      new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
      gcc_assert (!new_bb);
    }
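/* Illustrative sketch (added for exposition, not part of the pass): for a
   float induction with base B and step S the last branch above builds,
   in effect,

       vec_init = {B, B, B, B}
		  + (float) {0, 1, 2, 3} * {S, S, S, S};

   which is why -fassociative-math is required: it reassociates the
   floating-point additions that the scalar loop would have performed one
   by one.  */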
  /* Create the vector that holds the step of the induction.  */
  if (nested_in_vect_loop)
    /* iv_loop is nested in the loop to be vectorized. Generate:
       vec_step = [S, S, S, S]  */
    new_name = step_expr;
  else
    {
      /* iv_loop is the loop to be vectorized. Generate:
	 vec_step = [VF*S, VF*S, VF*S, VF*S]  */
      gimple_seq seq = NULL;
      if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)))
	{
	  expr = build_int_cst (integer_type_node, vf);
	  expr = gimple_build (&seq, FLOAT_EXPR, TREE_TYPE (step_expr), expr);
	}
      else
	expr = build_int_cst (TREE_TYPE (step_expr), vf);
      new_name = gimple_build (&seq, MULT_EXPR, TREE_TYPE (step_expr),
			       expr, step_expr);
      if (seq)
	{
	  new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
	  gcc_assert (!new_bb);
	}
    }

  t = unshare_expr (new_name);
  gcc_assert (CONSTANT_CLASS_P (new_name)
	      || TREE_CODE (new_name) == SSA_NAME);
  new_vec = build_vector_from_val (vectype, t);
  vec_step = vect_init_vector (stmt_info, new_vec, vectype, NULL);

  /* Create the following def-use cycle:
     loop prolog:
	 vec_init = ...
	 vec_step = ...
     loop:
	 vec_iv = PHI <vec_init, vec_loop>
	 ...
	 STMT
	 ...
	 vec_loop = vec_iv + vec_step;  */
  /* Create the induction-phi that defines the induction-operand.  */
  vec_dest = vect_get_new_vect_var (vectype, vect_simple_var, "vec_iv_");
  induction_phi = create_phi_node (vec_dest, iv_loop->header);
  stmt_vec_info induction_phi_info = loop_vinfo->add_stmt (induction_phi);
  induc_def = PHI_RESULT (induction_phi);

  /* Create the iv update inside the loop  */
  vec_def = make_ssa_name (vec_dest);
  new_stmt = gimple_build_assign (vec_def, PLUS_EXPR, induc_def, vec_step);
  gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
  stmt_vec_info new_stmt_info = loop_vinfo->add_stmt (new_stmt);

  /* Set the arguments of the phi node:  */
  add_phi_arg (induction_phi, vec_init, pe, UNKNOWN_LOCATION);
  add_phi_arg (induction_phi, vec_def, loop_latch_edge (iv_loop),
	       UNKNOWN_LOCATION);

  STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = induction_phi_info;

  /* In case that vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  For more details see documentation
     in vectorizable_operation.  */
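/* Illustrative sketch (added for exposition, not part of the pass): with
   VF = 8 but only 4 lanes per vector, ncopies = 2 and the block below
   chains the copies as

       vec_iv_0 = PHI <{X, X+S, X+2*S, X+3*S}, ...>
       vec_iv_1 = vec_iv_0 + {4*S, 4*S, 4*S, 4*S};

   so the two vector defs together cover the eight scalar iterations
   handled by one vector iteration.  */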
  if (ncopies > 1)
    {
      gimple_seq seq = NULL;
      stmt_vec_info prev_stmt_vinfo;
      /* FORNOW. This restriction should be relaxed.  */
      gcc_assert (!nested_in_vect_loop);

      /* Create the vector that holds the step of the induction.  */
      if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)))
	{
	  expr = build_int_cst (integer_type_node, nunits);
	  expr = gimple_build (&seq, FLOAT_EXPR, TREE_TYPE (step_expr), expr);
	}
      else
	expr = build_int_cst (TREE_TYPE (step_expr), nunits);
      new_name = gimple_build (&seq, MULT_EXPR, TREE_TYPE (step_expr),
			       expr, step_expr);
      if (seq)
	{
	  new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
	  gcc_assert (!new_bb);
	}

      t = unshare_expr (new_name);
      gcc_assert (CONSTANT_CLASS_P (new_name)
		  || TREE_CODE (new_name) == SSA_NAME);
      new_vec = build_vector_from_val (vectype, t);
      vec_step = vect_init_vector (stmt_info, new_vec, vectype, NULL);

      vec_def = induc_def;
      prev_stmt_vinfo = induction_phi_info;
      for (i = 1; i < ncopies; i++)
	{
	  /* vec_i = vec_prev + vec_step  */
	  new_stmt = gimple_build_assign (vec_dest, PLUS_EXPR,
					  vec_def, vec_step);
	  vec_def = make_ssa_name (vec_dest, new_stmt);
	  gimple_assign_set_lhs (new_stmt, vec_def);

	  gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
	  new_stmt_info = loop_vinfo->add_stmt (new_stmt);
	  STMT_VINFO_RELATED_STMT (prev_stmt_vinfo) = new_stmt_info;
	  prev_stmt_vinfo = new_stmt_info;
	}
    }
  if (nested_in_vect_loop)
    {
      /* Find the loop-closed exit-phi of the induction, and record
	 the final vector of induction results:  */
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, loop_arg)
	{
	  gimple *use_stmt = USE_STMT (use_p);
	  if (is_gimple_debug (use_stmt))
	    continue;

	  if (!flow_bb_inside_loop_p (iv_loop, gimple_bb (use_stmt)))
	    {
	      exit_phi = use_stmt;
	      break;
	    }
	}
      if (exit_phi)
	{
	  stmt_vec_info stmt_vinfo = loop_vinfo->lookup_stmt (exit_phi);
	  /* FORNOW. Currently not supporting the case that an inner-loop induction
	     is not used in the outer-loop (i.e. only outside the outer-loop).  */
	  gcc_assert (STMT_VINFO_RELEVANT_P (stmt_vinfo)
		      && !STMT_VINFO_LIVE_P (stmt_vinfo));

	  STMT_VINFO_VEC_STMT (stmt_vinfo) = new_stmt_info;
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_NOTE, vect_location,
			     "vector of inductions after inner-loop:%G",
			     new_stmt);
	}
    }

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "transform induction: created def-use cycle: %G%G",
		     induction_phi, SSA_NAME_DEF_STMT (vec_def));

  return true;
}
/* Function vectorizable_live_operation.

   STMT_INFO computes a value that is used outside the loop.  Check if
   it can be supported.  */

bool
vectorizable_live_operation (stmt_vec_info stmt_info,
			     gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
			     slp_tree slp_node, int slp_index,
			     stmt_vec_info *vec_stmt,
			     stmt_vector_for_cost *)
{
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  imm_use_iterator imm_iter;
  tree lhs, lhs_type, bitsize, vec_bitsize;
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
  auto_vec<tree> vec_oprnds;
  poly_uint64 vec_index = 0;

  gcc_assert (STMT_VINFO_LIVE_P (stmt_info));

  if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def)
    return false;

  /* FORNOW. CHECKME.  */
  if (nested_in_vect_loop_p (loop, stmt_info))
    return false;

  /* If STMT is not relevant and it is a simple assignment and its inputs are
     invariant then it can remain in place, unvectorized.  The original last
     scalar value that it computes will be used.  */
  if (!STMT_VINFO_RELEVANT_P (stmt_info))
    {
      gcc_assert (is_simple_and_all_uses_invariant (stmt_info, loop_vinfo));
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "statement is simple and uses invariant.  Leaving in "
			 "place.\n");
      return true;
    }

  if (slp_node)
    ncopies = 1;
  else
    ncopies = vect_get_num_copies (loop_vinfo, vectype);

  if (slp_node)
    {
      gcc_assert (slp_index >= 0);

      int num_scalar = SLP_TREE_SCALAR_STMTS (slp_node).length ();
      int num_vec = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);

      /* Get the last occurrence of the scalar index from the concatenation of
	 all the slp vectors. Calculate which slp vector it is and the index
	 within.  */
      poly_uint64 pos = (num_vec * nunits) - num_scalar + slp_index;

      /* Calculate which vector contains the result, and which lane of
	 that vector we need.  */
      if (!can_div_trunc_p (pos, nunits, &vec_entry, &vec_index))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "Cannot determine which vector holds the"
			     " final result.\n");
	  return false;
	}
    }

  if (!vec_stmt)
    {
      /* No transformation required.  */
      if (LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo))
	{
	  if (!direct_internal_fn_supported_p (IFN_EXTRACT_LAST, vectype,
					       OPTIMIZE_FOR_SPEED))
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "can't use a fully-masked loop because "
				 "the target doesn't support extract last "
				 "reduction.\n");
	      LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
	    }
	  else if (slp_node)
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "can't use a fully-masked loop because an "
				 "SLP statement is live after the loop.\n");
	      LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
	    }
	  else if (ncopies > 1)
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "can't use a fully-masked loop because"
				 " ncopies is greater than 1.\n");
	      LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
	    }
	  else
	    {
	      gcc_assert (ncopies == 1 && !slp_node);
	      vect_record_loop_mask (loop_vinfo,
				     &LOOP_VINFO_MASKS (loop_vinfo),
				     1, vectype);
	    }
	}
      return true;
    }
  /* Use the lhs of the original scalar statement.  */
  gimple *stmt = vect_orig_stmt (stmt_info)->stmt;

  lhs = (is_a <gphi *> (stmt)) ? gimple_phi_result (stmt)
	: gimple_get_lhs (stmt);
  lhs_type = TREE_TYPE (lhs);

  bitsize = (VECTOR_BOOLEAN_TYPE_P (vectype)
	     ? bitsize_int (TYPE_PRECISION (TREE_TYPE (vectype)))
	     : TYPE_SIZE (TREE_TYPE (vectype)));
  vec_bitsize = TYPE_SIZE (vectype);

  /* Get the vectorized lhs of STMT and the lane to use (counted in bits).  */
  tree vec_lhs, bitstart;
  if (slp_node)
    {
      gcc_assert (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo));

      /* Get the correct slp vectorized stmt.  */
      gimple *vec_stmt = SLP_TREE_VEC_STMTS (slp_node)[vec_entry]->stmt;
      if (gphi *phi = dyn_cast <gphi *> (vec_stmt))
	vec_lhs = gimple_phi_result (phi);
      else
	vec_lhs = gimple_get_lhs (vec_stmt);

      /* Get entry to use.  */
      bitstart = bitsize_int (vec_index);
      bitstart = int_const_binop (MULT_EXPR, bitsize, bitstart);
    }
  else
    {
      enum vect_def_type dt = STMT_VINFO_DEF_TYPE (stmt_info);
      vec_lhs = vect_get_vec_def_for_operand_1 (stmt_info, dt);
      gcc_checking_assert (ncopies == 1
			   || !LOOP_VINFO_FULLY_MASKED_P (loop_vinfo));

      /* For multiple copies, get the last copy.  */
      for (int i = 1; i < ncopies; ++i)
	vec_lhs = vect_get_vec_def_for_stmt_copy (loop_vinfo, vec_lhs);

      /* Get the last lane in the vector.  */
      bitstart = int_const_binop (MINUS_EXPR, vec_bitsize, bitsize);
    }

  gimple_seq stmts = NULL;
  if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
    {
      /* Emit:

	   SCALAR_RES = EXTRACT_LAST <VEC_LHS, MASK>

	 where VEC_LHS is the vectorized live-out result and MASK is
	 the loop mask for the final iteration.  */
      gcc_assert (ncopies == 1 && !slp_node);
      tree scalar_type = TREE_TYPE (STMT_VINFO_VECTYPE (stmt_info));
      tree mask = vect_get_loop_mask (gsi, &LOOP_VINFO_MASKS (loop_vinfo),
				      1, vectype, 0);
      tree scalar_res = gimple_build (&stmts, CFN_EXTRACT_LAST,
				      scalar_type, mask, vec_lhs);

      /* Convert the extracted vector element to the required scalar type.  */
      new_tree = gimple_convert (&stmts, lhs_type, scalar_res);
    }
  else
    {
      tree bftype = TREE_TYPE (vectype);
      if (VECTOR_BOOLEAN_TYPE_P (vectype))
	bftype = build_nonstandard_integer_type (tree_to_uhwi (bitsize), 1);
      new_tree = build3 (BIT_FIELD_REF, bftype, vec_lhs, bitsize, bitstart);
      new_tree = force_gimple_operand (fold_convert (lhs_type, new_tree),
				       &stmts, true, NULL_TREE);
    }

  gsi_insert_seq_on_edge_immediate (single_exit (loop), stmts);

  /* Replace use of lhs with newly computed result.  If the use stmt is a
     single arg PHI, just replace all uses of PHI result.  It's necessary
     because lcssa PHI defining lhs may be before newly inserted stmt.  */
  use_operand_p use_p;
  FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, lhs)
    if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt))
	&& !is_gimple_debug (use_stmt))
      {
	if (gimple_code (use_stmt) == GIMPLE_PHI
	    && gimple_phi_num_args (use_stmt) == 1)
	  replace_uses_by (gimple_phi_result (use_stmt), new_tree);
	else
	  {
	    FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
	      SET_USE (use_p, new_tree);
	  }
	update_stmt (use_stmt);
      }

  return true;
}
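/* Illustrative sketch (added for exposition, not part of the pass): for a
   4-lane vector of 32-bit ints the unmasked path above extracts the last
   lane with

       scalar = BIT_FIELD_REF <vec_lhs, 32, 96>;   (size 32 bits, offset 3*32)

   while a fully-masked loop instead emits

       scalar = .EXTRACT_LAST (loop_mask, vec_lhs);

   which returns the last lane whose mask bit was set in the final,
   possibly partial, iteration.  */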
/* Kill any debug uses outside LOOP of SSA names defined in STMT_INFO.  */

static void
vect_loop_kill_debug_uses (struct loop *loop, stmt_vec_info stmt_info)
{
  ssa_op_iter op_iter;
  imm_use_iterator imm_iter;
  def_operand_p def_p;
  gimple *ustmt;

  FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt_info->stmt, op_iter, SSA_OP_DEF)
    {
      FOR_EACH_IMM_USE_STMT (ustmt, imm_iter, DEF_FROM_PTR (def_p))
	{
	  basic_block bb;

	  if (!is_gimple_debug (ustmt))
	    continue;

	  bb = gimple_bb (ustmt);

	  if (!flow_bb_inside_loop_p (loop, bb))
	    {
	      if (gimple_debug_bind_p (ustmt))
		{
		  if (dump_enabled_p ())
		    dump_printf_loc (MSG_NOTE, vect_location,
				     "killing debug use\n");

		  gimple_debug_bind_reset_value (ustmt);
		  update_stmt (ustmt);
		}
	    }
	}
    }
}
/* Given loop represented by LOOP_VINFO, return true if computation of
   LOOP_VINFO_NITERS (= LOOP_VINFO_NITERSM1 + 1) doesn't overflow, false
   otherwise.  */

static bool
loop_niters_no_overflow (loop_vec_info loop_vinfo)
{
  /* Constant case.  */
  if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
    {
      tree cst_niters = LOOP_VINFO_NITERS (loop_vinfo);
      tree cst_nitersm1 = LOOP_VINFO_NITERSM1 (loop_vinfo);

      gcc_assert (TREE_CODE (cst_niters) == INTEGER_CST);
      gcc_assert (TREE_CODE (cst_nitersm1) == INTEGER_CST);
      if (wi::to_widest (cst_nitersm1) < wi::to_widest (cst_niters))
	return true;
    }

  widest_int max;
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  /* Check the upper bound of loop niters.  */
  if (get_max_loop_iterations (loop, &max))
    {
      tree type = TREE_TYPE (LOOP_VINFO_NITERS (loop_vinfo));
      signop sgn = TYPE_SIGN (type);
      widest_int type_max = widest_int::from (wi::max_value (type), sgn);
      if (max < type_max)
	return true;
    }
  return false;
}
/* Return a mask type with half the number of elements as TYPE.  */

tree
vect_halve_mask_nunits (tree type)
{
  poly_uint64 nunits = exact_div (TYPE_VECTOR_SUBPARTS (type), 2);
  return build_truth_vector_type (nunits, current_vector_size);
}

/* Return a mask type with twice as many elements as TYPE.  */

tree
vect_double_mask_nunits (tree type)
{
  poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (type) * 2;
  return build_truth_vector_type (nunits, current_vector_size);
}
/* Record that a fully-masked version of LOOP_VINFO would need MASKS to
   contain a sequence of NVECTORS masks that each control a vector of type
   VECTYPE.  */

void
vect_record_loop_mask (loop_vec_info loop_vinfo, vec_loop_masks *masks,
		       unsigned int nvectors, tree vectype)
{
  gcc_assert (nvectors != 0);
  if (masks->length () < nvectors)
    masks->safe_grow_cleared (nvectors);
  rgroup_masks *rgm = &(*masks)[nvectors - 1];
  /* The number of scalars per iteration and the number of vectors are
     both compile-time constants.  */
  unsigned int nscalars_per_iter
    = exact_div (nvectors * TYPE_VECTOR_SUBPARTS (vectype),
		 LOOP_VINFO_VECT_FACTOR (loop_vinfo)).to_constant ();
  if (rgm->max_nscalars_per_iter < nscalars_per_iter)
    {
      rgm->max_nscalars_per_iter = nscalars_per_iter;
      rgm->mask_type = build_same_sized_truth_vector_type (vectype);
    }
}
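/* Illustrative sketch (added for exposition, not part of the pass): with a
   vectorization factor of 4, a statement working on 8-element vectors
   needs nvectors = 2 masks per vector iteration, giving

       nscalars_per_iter = (2 * 8) / 4 = 4.

   The rgroup with the largest such value ends up controlling how the
   loop masks are generated.  */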
/* Given a complete set of masks MASKS, extract mask number INDEX
   for an rgroup that operates on NVECTORS vectors of type VECTYPE,
   where 0 <= INDEX < NVECTORS.  Insert any set-up statements before GSI.

   See the comment above vec_loop_masks for more details about the mask
   arrangement.  */

tree
vect_get_loop_mask (gimple_stmt_iterator *gsi, vec_loop_masks *masks,
		    unsigned int nvectors, tree vectype, unsigned int index)
{
  rgroup_masks *rgm = &(*masks)[nvectors - 1];
  tree mask_type = rgm->mask_type;

  /* Populate the rgroup's mask array, if this is the first time we've
     used it.  */
  if (rgm->masks.is_empty ())
    {
      rgm->masks.safe_grow_cleared (nvectors);
      for (unsigned int i = 0; i < nvectors; ++i)
	{
	  tree mask = make_temp_ssa_name (mask_type, NULL, "loop_mask");
	  /* Provide a dummy definition until the real one is available.  */
	  SSA_NAME_DEF_STMT (mask) = gimple_build_nop ();
	  rgm->masks[i] = mask;
	}
    }

  tree mask = rgm->masks[index];
  if (maybe_ne (TYPE_VECTOR_SUBPARTS (mask_type),
		TYPE_VECTOR_SUBPARTS (vectype)))
    {
      /* A loop mask for data type X can be reused for data type Y
	 if X has N times more elements than Y and if Y's elements
	 are N times bigger than X's.  In this case each sequence
	 of N elements in the loop mask will be all-zero or all-one.
	 We can then view-convert the mask so that each sequence of
	 N elements is replaced by a single element.  */
      gcc_assert (multiple_p (TYPE_VECTOR_SUBPARTS (mask_type),
			      TYPE_VECTOR_SUBPARTS (vectype)));
      gimple_seq seq = NULL;
      mask_type = build_same_sized_truth_vector_type (vectype);
      mask = gimple_build (&seq, VIEW_CONVERT_EXPR, mask_type, mask);
      if (seq)
	gsi_insert_seq_before (gsi, seq, GSI_SAME_STMT);
    }
  return mask;
}
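/* Illustrative sketch (added for exposition, not part of the pass): a mask
   computed for 8 x int16 can be reused by a statement operating on
   4 x int32.  Each pair of mask bits is known to be all-zero or all-one,
   so

       mask4 = VIEW_CONVERT_EXPR <vector(4) bool> (mask8);

   collapses every pair into the single lane that controls the wider
   element.  mask4 and mask8 are made-up names for this example.  */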
/* Scale profiling counters by estimation for LOOP which is vectorized
   by factor VF.  */

static void
scale_profile_for_vect_loop (struct loop *loop, unsigned vf)
{
  edge preheader = loop_preheader_edge (loop);
  /* Reduce loop iterations by the vectorization factor.  */
  gcov_type new_est_niter = niter_for_unrolled_loop (loop, vf);
  profile_count freq_h = loop->header->count, freq_e = preheader->count ();

  if (freq_h.nonzero_p ())
    {
      profile_probability p;

      /* Avoid dropping loop body profile counter to 0 because of zero count
	 in loop's preheader.  */
      if (!(freq_e == profile_count::zero ()))
	freq_e = freq_e.force_nonzero ();
      p = freq_e.apply_scale (new_est_niter + 1, 1).probability_in (freq_h);
      scale_loop_frequencies (loop, p);
    }

  edge exit_e = single_exit (loop);
  exit_e->probability = profile_probability::always ()
			  .apply_scale (1, new_est_niter + 1);

  edge exit_l = single_pred_edge (loop->latch);
  profile_probability prob = exit_l->probability;
  exit_l->probability = exit_e->probability.invert ();
  if (prob.initialized_p () && exit_l->probability.initialized_p ())
    scale_bbs_frequencies (&loop->latch, 1, exit_l->probability / prob);
}
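/* Illustrative sketch (added for exposition, not part of the pass): if the
   vector loop is expected to run new_est_niter = 3 times, the exit edge
   gets probability 1/(3+1) = 25% and the latch edge the complementary
   75%, matching a body that executes four times per entry (three latch
   traversals followed by the exit).  */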
/* Vectorize STMT_INFO if relevant, inserting any new instructions before GSI.
   When vectorizing STMT_INFO as a store, set *SEEN_STORE to its
   stmt_vec_info.  */

static void
vect_transform_loop_stmt (loop_vec_info loop_vinfo, stmt_vec_info stmt_info,
			  gimple_stmt_iterator *gsi, stmt_vec_info *seen_store)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "------>vectorizing statement: %G", stmt_info->stmt);

  if (MAY_HAVE_DEBUG_BIND_STMTS && !STMT_VINFO_LIVE_P (stmt_info))
    vect_loop_kill_debug_uses (loop, stmt_info);

  if (!STMT_VINFO_RELEVANT_P (stmt_info)
      && !STMT_VINFO_LIVE_P (stmt_info))
    return;

  if (STMT_VINFO_VECTYPE (stmt_info))
    {
      poly_uint64 nunits
	= TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
      if (!STMT_SLP_TYPE (stmt_info)
	  && maybe_ne (nunits, vf)
	  && dump_enabled_p ())
	/* For SLP VF is set according to unrolling factor, and not
	   to vector size, hence for SLP this print is not valid.  */
	dump_printf_loc (MSG_NOTE, vect_location, "multiple-types.\n");
    }

  /* Pure SLP statements have already been vectorized.  We still need
     to apply loop vectorization to hybrid SLP statements.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "transform statement.\n");

  if (vect_transform_stmt (stmt_info, gsi, NULL, NULL))
    *seen_store = stmt_info;
}
/* Function vect_transform_loop.

   The analysis phase has determined that the loop is vectorizable.
   Vectorize the loop - created vectorized stmts to replace the scalar
   stmts in the loop, and update the loop exit condition.
   Returns scalar epilogue loop if any.  */

struct loop *
vect_transform_loop (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  struct loop *epilogue = NULL;
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  int nbbs = loop->num_nodes;
  tree niters_vector = NULL_TREE;
  tree step_vector = NULL_TREE;
  tree niters_vector_mult_vf = NULL_TREE;
  poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  unsigned int lowest_vf = constant_lower_bound (vf);
  bool check_profitability = false;

  DUMP_VECT_SCOPE ("vec_transform_loop");

  loop_vinfo->shared->check_datarefs ();

  /* Use the more conservative vectorization threshold.  If the number
     of iterations is constant assume the cost check has been performed
     by our caller.  If the threshold makes all loops profitable that
     run at least the (estimated) vectorization factor number of times
     checking is pointless, too.  */
  th = LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo);
  if (th >= vect_vf_for_cost (loop_vinfo)
      && !LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "Profitability threshold is %d loop iterations.\n",
			 th);
      check_profitability = true;
    }

  /* Make sure there exists a single-predecessor exit bb.  Do this before
     versioning.  */
  edge e = single_exit (loop);
  if (! single_pred_p (e->dest))
    {
      split_loop_exit_edge (e, true);
      if (dump_enabled_p ())
	dump_printf (MSG_NOTE, "split exit edge\n");
    }

  /* Version the loop first, if required, so the profitability check
     comes first.  */
  if (LOOP_REQUIRES_VERSIONING (loop_vinfo))
    {
      poly_uint64 versioning_threshold
	= LOOP_VINFO_VERSIONING_THRESHOLD (loop_vinfo);
      if (check_profitability
	  && ordered_p (poly_uint64 (th), versioning_threshold))
	{
	  versioning_threshold = ordered_max (poly_uint64 (th),
					      versioning_threshold);
	  check_profitability = false;
	}
      struct loop *sloop
	= vect_loop_versioning (loop_vinfo, th, check_profitability,
				versioning_threshold);
      sloop->force_vectorize = false;
      check_profitability = false;
    }

  /* Make sure there exists a single-predecessor exit bb also on the
     scalar loop copy.  Do this after versioning but before peeling
     so CFG structure is fine for both scalar and if-converted loop
     to make slpeel_duplicate_current_defs_from_edges face matched
     loop closed PHI nodes on the exit.  */
  if (LOOP_VINFO_SCALAR_LOOP (loop_vinfo))
    {
      e = single_exit (LOOP_VINFO_SCALAR_LOOP (loop_vinfo));
      if (! single_pred_p (e->dest))
	{
	  split_loop_exit_edge (e, true);
	  if (dump_enabled_p ())
	    dump_printf (MSG_NOTE, "split exit edge of scalar loop\n");
	}
    }

  tree niters = vect_build_loop_niters (loop_vinfo);
  LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo) = niters;
  tree nitersm1 = unshare_expr (LOOP_VINFO_NITERSM1 (loop_vinfo));
  bool niters_no_overflow = loop_niters_no_overflow (loop_vinfo);
  epilogue = vect_do_peeling (loop_vinfo, niters, nitersm1, &niters_vector,
			      &step_vector, &niters_vector_mult_vf, th,
			      check_profitability, niters_no_overflow);

  if (niters_vector == NULL_TREE)
    {
      if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
	  && !LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
	  && known_eq (lowest_vf, vf))
	{
	  niters_vector
	    = build_int_cst (TREE_TYPE (LOOP_VINFO_NITERS (loop_vinfo)),
			     LOOP_VINFO_INT_NITERS (loop_vinfo) / lowest_vf);
	  step_vector = build_one_cst (TREE_TYPE (niters));
	}
      else
	vect_gen_vector_loop_niters (loop_vinfo, niters, &niters_vector,
				     &step_vector, niters_no_overflow);
    }

  /* 1) Make sure the loop header has exactly two entries
     2) Make sure we have a preheader basic block.  */

  gcc_assert (EDGE_COUNT (loop->header->preds) == 2);

  split_edge (loop_preheader_edge (loop));

  if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
      && vect_use_loop_mask_for_alignment_p (loop_vinfo))
    /* This will deal with any possible peeling.  */
    vect_prepare_for_masked_peels (loop_vinfo);

  /* Schedule the SLP instances first, then handle loop vectorization
     below.  */
  if (!loop_vinfo->slp_instances.is_empty ())
    {
      DUMP_VECT_SCOPE ("scheduling SLP instances");
      vect_schedule_slp (loop_vinfo);
    }

  /* FORNOW: the vectorizer supports only loops which body consist
     of one basic block (header + empty latch).  When the vectorizer will
     support more involved loop forms, the order by which the BBs are
     traversed need to be reconsidered.  */
  for (i = 0; i < nbbs; i++)
    {
      basic_block bb = bbs[i];
      stmt_vec_info stmt_info;

      for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
	   gsi_next (&si))
	{
	  gphi *phi = si.phi ();
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_NOTE, vect_location,
			     "------>vectorizing phi: %G", phi);
	  stmt_info = loop_vinfo->lookup_stmt (phi);
	  if (!stmt_info)
	    continue;

	  if (MAY_HAVE_DEBUG_BIND_STMTS && !STMT_VINFO_LIVE_P (stmt_info))
	    vect_loop_kill_debug_uses (loop, stmt_info);

	  if (!STMT_VINFO_RELEVANT_P (stmt_info)
	      && !STMT_VINFO_LIVE_P (stmt_info))
	    continue;

	  if (STMT_VINFO_VECTYPE (stmt_info)
	      && (maybe_ne
		  (TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info)), vf))
	      && dump_enabled_p ())
	    dump_printf_loc (MSG_NOTE, vect_location, "multiple-types.\n");

	  if ((STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def
	       || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def
	       || STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle)
	      && ! PURE_SLP_STMT (stmt_info))
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_NOTE, vect_location, "transform phi.\n");
	      vect_transform_stmt (stmt_info, NULL, NULL, NULL);
	    }
	}

      for (gimple_stmt_iterator si = gsi_start_bb (bb);
	   !gsi_end_p (si);)
	{
	  stmt = gsi_stmt (si);
	  /* During vectorization remove existing clobber stmts.  */
	  if (gimple_clobber_p (stmt))
	    {
	      unlink_stmt_vdef (stmt);
	      gsi_remove (&si, true);
	      release_defs (stmt);
	    }
	  else
	    {
	      stmt_info = loop_vinfo->lookup_stmt (stmt);

	      /* vector stmts created in the outer-loop during vectorization of
		 stmts in an inner-loop may not have a stmt_info, and do not
		 need to be vectorized.  */
	      stmt_vec_info seen_store = NULL;
	      if (stmt_info)
		{
		  if (STMT_VINFO_IN_PATTERN_P (stmt_info))
		    {
		      gimple *def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info);
		      for (gimple_stmt_iterator subsi = gsi_start (def_seq);
			   !gsi_end_p (subsi); gsi_next (&subsi))
			{
			  stmt_vec_info pat_stmt_info
			    = loop_vinfo->lookup_stmt (gsi_stmt (subsi));
			  vect_transform_loop_stmt (loop_vinfo, pat_stmt_info,
						    &si, &seen_store);
			}
		      stmt_vec_info pat_stmt_info
			= STMT_VINFO_RELATED_STMT (stmt_info);
		      vect_transform_loop_stmt (loop_vinfo, pat_stmt_info, &si,
						&seen_store);
		    }
		  vect_transform_loop_stmt (loop_vinfo, stmt_info, &si,
					    &seen_store);
		}
	      gsi_next (&si);
	      if (seen_store)
		{
		  if (STMT_VINFO_GROUPED_ACCESS (seen_store))
		    /* Interleaving.  If IS_STORE is TRUE, the
		       vectorization of the interleaving chain was
		       completed - free all the stores in the chain.  */
		    vect_remove_stores (DR_GROUP_FIRST_ELEMENT (seen_store));
		  else
		    /* Free the attached stmt_vec_info and remove the stmt.  */
		    loop_vinfo->remove_stmt (stmt_info);
		}
	    }
	}

      /* Stub out scalar statements that must not survive vectorization.
	 Doing this here helps with grouped statements, or statements that
	 are involved in patterns.  */
      for (gimple_stmt_iterator gsi = gsi_start_bb (bb);
	   !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gcall *call = dyn_cast <gcall *> (gsi_stmt (gsi));
	  if (call && gimple_call_internal_p (call, IFN_MASK_LOAD))
	    {
	      tree lhs = gimple_get_lhs (call);
	      if (!VECTOR_TYPE_P (TREE_TYPE (lhs)))
		{
		  tree zero = build_zero_cst (TREE_TYPE (lhs));
		  gimple *new_stmt = gimple_build_assign (lhs, zero);
		  gsi_replace (&gsi, new_stmt, true);
		}
	    }
	}
    }

  /* The vectorization factor is always > 1, so if we use an IV increment of 1.
     a zero NITERS becomes a nonzero NITERS_VECTOR.  */
  if (integer_onep (step_vector))
    niters_no_overflow = true;
  vect_set_loop_condition (loop, loop_vinfo, niters_vector, step_vector,
			   niters_vector_mult_vf, !niters_no_overflow);

  unsigned int assumed_vf = vect_vf_for_cost (loop_vinfo);
  scale_profile_for_vect_loop (loop, assumed_vf);
  /* True if the final iteration might not handle a full vector's
     worth of scalar iterations.  */
  bool final_iter_may_be_partial = LOOP_VINFO_FULLY_MASKED_P (loop_vinfo);
  /* The minimum number of iterations performed by the epilogue.  This
     is 1 when peeling for gaps because we always need a final scalar
     iteration.  */
  int min_epilogue_iters = LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) ? 1 : 0;
  /* +1 to convert latch counts to loop iteration counts,
     -min_epilogue_iters to remove iterations that cannot be performed
     by the vector code.  */
  int bias_for_lowest = 1 - min_epilogue_iters;
  int bias_for_assumed = bias_for_lowest;
  int alignment_npeels = LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo);
  if (alignment_npeels && LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
    {
      /* When the amount of peeling is known at compile time, the first
	 iteration will have exactly alignment_npeels active elements.
	 In the worst case it will have at least one.  */
      int min_first_active = (alignment_npeels > 0 ? alignment_npeels : 1);
      bias_for_lowest += lowest_vf - min_first_active;
      bias_for_assumed += assumed_vf - min_first_active;
    }
  /* In these calculations the "- 1" converts loop iteration counts
     back to latch counts.  */
  if (loop->any_upper_bound)
    loop->nb_iterations_upper_bound
      = (final_iter_may_be_partial
	 ? wi::udiv_ceil (loop->nb_iterations_upper_bound + bias_for_lowest,
			  lowest_vf) - 1
	 : wi::udiv_floor (loop->nb_iterations_upper_bound + bias_for_lowest,
			   lowest_vf) - 1);
  if (loop->any_likely_upper_bound)
    loop->nb_iterations_likely_upper_bound
      = (final_iter_may_be_partial
	 ? wi::udiv_ceil (loop->nb_iterations_likely_upper_bound
			  + bias_for_lowest, lowest_vf) - 1
	 : wi::udiv_floor (loop->nb_iterations_likely_upper_bound
			   + bias_for_lowest, lowest_vf) - 1);
  if (loop->any_estimate)
    loop->nb_iterations_estimate
      = (final_iter_may_be_partial
	 ? wi::udiv_ceil (loop->nb_iterations_estimate + bias_for_assumed,
			  assumed_vf) - 1
	 : wi::udiv_floor (loop->nb_iterations_estimate + bias_for_assumed,
			   assumed_vf) - 1);
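/* Illustrative sketch (added for exposition, not part of the pass): a
   scalar loop with a latch upper bound of 101 (102 iterations), no
   peeling for gaps and a constant VF of 4, not fully masked, gets

       bias_for_lowest = 1
       new bound = udiv_floor (101 + 1, 4) - 1 = 24

   i.e. at most 25 vector iterations, with the two leftover scalar
   iterations handled by the epilogue loop.  */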
  if (dump_enabled_p ())
    {
      if (!LOOP_VINFO_EPILOGUE_P (loop_vinfo))
	{
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "LOOP VECTORIZED\n");
	  if (loop->inner)
	    dump_printf_loc (MSG_NOTE, vect_location,
			     "OUTER LOOP VECTORIZED\n");
	  dump_printf (MSG_NOTE, "\n");
	}
      else
	{
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "LOOP EPILOGUE VECTORIZED (VS=");
	  dump_dec (MSG_NOTE, current_vector_size);
	  dump_printf (MSG_NOTE, ")\n");
	}
    }

  /* Loops vectorized with a variable factor won't benefit from
     unrolling/peeling.  */
  if (!vf.is_constant ())
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location, "Disabling unrolling due to"
			 " variable-length vectorization factor\n");
    }
  /* Free SLP instances here because otherwise stmt reference counting
     won't work.  */
  slp_instance instance;
  FOR_EACH_VEC_ELT (LOOP_VINFO_SLP_INSTANCES (loop_vinfo), i, instance)
    vect_free_slp_instance (instance, true);
  LOOP_VINFO_SLP_INSTANCES (loop_vinfo).release ();
  /* Clear-up safelen field since its value is invalid after vectorization
     since vectorized loop can have loop-carried dependencies.  */
  loop->safelen = 0;

  /* Don't vectorize epilogue for epilogue.  */
  if (LOOP_VINFO_EPILOGUE_P (loop_vinfo))
    epilogue = NULL;

  if (!PARAM_VALUE (PARAM_VECT_EPILOGUES_NOMASK))
    epilogue = NULL;

  if (epilogue)
    {
      auto_vector_sizes vector_sizes;
      targetm.vectorize.autovectorize_vector_sizes (&vector_sizes, false);
      unsigned int next_size = 0;

      /* Note LOOP_VINFO_NITERS_KNOWN_P and LOOP_VINFO_INT_NITERS work
	 on niters already adjusted for the iterations of the prologue.  */
      if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
	  && known_eq (vf, lowest_vf))
	{
	  unsigned HOST_WIDE_INT eiters
	    = (LOOP_VINFO_INT_NITERS (loop_vinfo)
	       - LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo));
	  eiters
	    = eiters % lowest_vf + LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo);
	  epilogue->nb_iterations_upper_bound = eiters - 1;
	  epilogue->any_upper_bound = true;

	  unsigned int ratio;
	  while (next_size < vector_sizes.length ()
		 && !(constant_multiple_p (current_vector_size,
					   vector_sizes[next_size], &ratio)
		      && eiters >= lowest_vf / ratio))
	    next_size += 1;
	}
      else
	while (next_size < vector_sizes.length ()
	       && maybe_lt (current_vector_size, vector_sizes[next_size]))
	  next_size += 1;

      if (next_size == vector_sizes.length ())
	epilogue = NULL;
    }

  if (epilogue)
    {
      epilogue->force_vectorize = loop->force_vectorize;
      epilogue->safelen = loop->safelen;
      epilogue->dont_vectorize = false;

      /* We may need to if-convert epilogue to vectorize it.  */
      if (LOOP_VINFO_SCALAR_LOOP (loop_vinfo))
	tree_if_conversion (epilogue);
    }

  return epilogue;
}
/* The code below is trying to perform simple optimization - revert
   if-conversion for masked stores, i.e. if the mask of a store is zero
   do not perform it and all stored value producers also if possible.
   For example,
     for (i=0; i<n; i++)
       if (c[i])
	{
	  p1[i] += 1;
	  p2[i] = p3[i] +2;
	}
   this transformation will produce the following semi-hammock:

   if (!mask__ifc__42.18_165 == { 0, 0, 0, 0, 0, 0, 0, 0 })
     {
       vect__11.19_170 = MASK_LOAD (vectp_p1.20_168, 0B, mask__ifc__42.18_165);
       vect__12.22_172 = vect__11.19_170 + vect_cst__171;
       MASK_STORE (vectp_p1.23_175, 0B, mask__ifc__42.18_165, vect__12.22_172);
       vect__18.25_182 = MASK_LOAD (vectp_p3.26_180, 0B, mask__ifc__42.18_165);
       vect__19.28_184 = vect__18.25_182 + vect_cst__183;
       MASK_STORE (vectp_p2.29_187, 0B, mask__ifc__42.18_165, vect__19.28_184);
     }
*/

void
optimize_mask_stores (struct loop *loop)
{
  basic_block *bbs = get_loop_body (loop);
  unsigned nbbs = loop->num_nodes;
  unsigned i;
  basic_block bb;
  struct loop *bb_loop;
  gimple_stmt_iterator gsi;
  gimple *stmt;
  auto_vec<gimple *> worklist;
  auto_purge_vect_location sentinel;

  vect_location = find_loop_location (loop);
  /* Pick up all masked stores in loop if any.  */
  for (i = 0; i < nbbs; i++)
    {
      bb = bbs[i];
      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi);
	   gsi_next (&gsi))
	{
	  stmt = gsi_stmt (gsi);
	  if (gimple_call_internal_p (stmt, IFN_MASK_STORE))
	    worklist.safe_push (stmt);
	}
    }

  free (bbs);
  if (worklist.is_empty ())
    return;

  /* Loop has masked stores.  */
  while (!worklist.is_empty ())
    {
      gimple *last, *last_store;
      edge e, efalse;
      tree mask;
      basic_block store_bb, join_bb;
      gimple_stmt_iterator gsi_to;
      tree vdef, new_vdef;
      gphi *phi;
      tree vectype;
      tree zero;

      last = worklist.pop ();
      mask = gimple_call_arg (last, 2);
      bb = gimple_bb (last);
      /* Create then_bb and if-then structure in CFG, then_bb belongs to
	 the same loop as if_bb.  It could be different to LOOP when two
	 level loop-nest is vectorized and mask_store belongs to the inner
	 one.  */
      e = split_block (bb, last);
      bb_loop = bb->loop_father;
      gcc_assert (loop == bb_loop || flow_loop_nested_p (loop, bb_loop));
      join_bb = e->dest;
      store_bb = create_empty_bb (bb);
      add_bb_to_loop (store_bb, bb_loop);
      e->flags = EDGE_TRUE_VALUE;
      efalse = make_edge (bb, store_bb, EDGE_FALSE_VALUE);
      /* Put STORE_BB to likely part.  */
      efalse->probability = profile_probability::unlikely ();
      store_bb->count = efalse->count ();
      make_single_succ_edge (store_bb, join_bb, EDGE_FALLTHRU);
      if (dom_info_available_p (CDI_DOMINATORS))
	set_immediate_dominator (CDI_DOMINATORS, store_bb, bb);
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "Create new block %d to sink mask stores.",
			 store_bb->index);
      /* Create vector comparison with boolean result.  */
      vectype = TREE_TYPE (mask);
      zero = build_zero_cst (vectype);
      stmt = gimple_build_cond (EQ_EXPR, mask, zero, NULL_TREE, NULL_TREE);
      gsi = gsi_last_bb (bb);
      gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
      /* Create new PHI node for vdef of the last masked store:
	 .MEM_2 = VDEF <.MEM_1>
	 will be converted to
	 .MEM.3 = VDEF <.MEM_1>
	 and new PHI node will be created in join bb
	 .MEM_2 = PHI <.MEM_1, .MEM_3>
      */
      vdef = gimple_vdef (last);
      new_vdef = make_ssa_name (gimple_vop (cfun), last);
      gimple_set_vdef (last, new_vdef);
      phi = create_phi_node (vdef, join_bb);
      add_phi_arg (phi, new_vdef, EDGE_SUCC (store_bb, 0), UNKNOWN_LOCATION);

      /* Put all masked stores with the same mask to STORE_BB if possible.  */
      while (true)
	{
	  gimple_stmt_iterator gsi_from;
	  gimple *stmt1 = NULL;

	  /* Move masked store to STORE_BB.  */
	  last_store = last;
	  gsi = gsi_for_stmt (last);
	  gsi_from = gsi;
	  /* Shift GSI to the previous stmt for further traversal.  */
	  gsi_prev (&gsi);
	  gsi_to = gsi_start_bb (store_bb);
	  gsi_move_before (&gsi_from, &gsi_to);
	  /* Setup GSI_TO to the non-empty block start.  */
	  gsi_to = gsi_start_bb (store_bb);
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_NOTE, vect_location,
			     "Move stmt to created bb\n%G", last);
	  /* Move all stored value producers if possible.  */
	  while (!gsi_end_p (gsi))
	    {
	      tree lhs;
	      imm_use_iterator imm_iter;
	      use_operand_p use_p;
	      bool res;

	      /* Skip debug statements.  */
	      if (is_gimple_debug (gsi_stmt (gsi)))
		{
		  gsi_prev (&gsi);
		  continue;
		}
	      stmt1 = gsi_stmt (gsi);
	      /* Do not consider statements writing to memory or having
		 volatile operand.  */
	      if (gimple_vdef (stmt1)
		  || gimple_has_volatile_ops (stmt1))
		break;
	      gsi_from = gsi;
	      gsi_prev (&gsi);
	      lhs = gimple_get_lhs (stmt1);
	      if (!lhs)
		break;

	      /* LHS of vectorized stmt must be SSA_NAME.  */
	      if (TREE_CODE (lhs) != SSA_NAME)
		break;

	      if (!VECTOR_TYPE_P (TREE_TYPE (lhs)))
		{
		  /* Remove dead scalar statement.  */
		  if (has_zero_uses (lhs))
		    {
		      gsi_remove (&gsi_from, true);
		      continue;
		    }
		  break;
		}

	      /* Check that LHS does not have uses outside of STORE_BB.  */
	      res = true;
	      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
		{
		  gimple *use_stmt;
		  use_stmt = USE_STMT (use_p);
		  if (is_gimple_debug (use_stmt))
		    continue;
		  if (gimple_bb (use_stmt) != store_bb)
		    {
		      res = false;
		      break;
		    }
		}
	      if (!res)
		break;

	      if (gimple_vuse (stmt1)
		  && gimple_vuse (stmt1) != gimple_vuse (last_store))
		break;

	      /* Can move STMT1 to STORE_BB.  */
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_NOTE, vect_location,
				 "Move stmt to created bb\n%G", stmt1);
	      gsi_move_before (&gsi_from, &gsi_to);
	      /* Shift GSI_TO for further insertion.  */
	      gsi_prev (&gsi_to);
	    }
	  /* Put other masked stores with the same mask to STORE_BB.  */
	  if (worklist.is_empty ()
	      || gimple_call_arg (worklist.last (), 2) != mask
	      || worklist.last () != stmt1)
	    break;
	  last = worklist.pop ();
	}
      add_phi_arg (phi, gimple_vuse (last_store), e, UNKNOWN_LOCATION);
    }
}
/* Decide whether it is possible to use a zero-based induction variable
   when vectorizing LOOP_VINFO with a fully-masked loop.  If it is,
   return the value that the induction variable must be able to hold
   in order to ensure that the loop ends with an all-false mask.
   Return -1 otherwise.  */

widest_int
vect_iv_limit_for_full_masking (loop_vec_info loop_vinfo)
{
  tree niters_skip = LOOP_VINFO_MASK_SKIP_NITERS (loop_vinfo);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  unsigned HOST_WIDE_INT max_vf = vect_max_vf (loop_vinfo);

  /* Calculate the value that the induction variable must be able
     to hit in order to ensure that we end the loop with an all-false mask.
     This involves adding the maximum number of inactive trailing scalar
     iterations.  */
  widest_int iv_limit = -1;
  if (max_loop_iterations (loop, &iv_limit))
    {
      if (niters_skip != NULL_TREE)
	{
	  /* Add the maximum number of skipped iterations to the
	     maximum iteration count.  */
	  if (TREE_CODE (niters_skip) == INTEGER_CST)
	    iv_limit += wi::to_widest (niters_skip);
	  else
	    iv_limit += max_vf - 1;
	}
      else if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo))
	/* Make a conservatively-correct assumption.  */
	iv_limit += max_vf - 1;

      /* IV_LIMIT is the maximum number of latch iterations, which is also
	 the maximum in-range IV value.  Round this value down to the previous
	 vector alignment boundary and then add an extra full iteration.  */
      poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
      iv_limit = (iv_limit & -(int) known_alignment (vf)) + max_vf;
    }
  return iv_limit;
}
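/* Illustrative sketch (added for exposition, not part of the pass): with a
   maximum of 1000 latch iterations, no skipped start iterations and
   max_vf = 16, the computation above gives

       iv_limit = (1000 & -16) + 16 = 992 + 16 = 1008

   so a zero-based IV only has to be able to represent 1008 for the loop
   to be guaranteed to finish with an all-false mask.  */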