/* Loop Vectorization
   Copyright (C) 2003-2018 Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com> and
   Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
24 #include "coretypes.h"
31 #include "tree-pass.h"
33 #include "optabs-tree.h"
34 #include "diagnostic-core.h"
35 #include "fold-const.h"
36 #include "stor-layout.h"
39 #include "gimple-iterator.h"
40 #include "gimplify-me.h"
41 #include "tree-ssa-loop-ivopts.h"
42 #include "tree-ssa-loop-manip.h"
43 #include "tree-ssa-loop-niter.h"
44 #include "tree-ssa-loop.h"
47 #include "tree-scalar-evolution.h"
48 #include "tree-vectorizer.h"
49 #include "gimple-fold.h"
52 #include "tree-if-conv.h"
53 #include "internal-fn.h"
54 #include "tree-vector-builder.h"
55 #include "vec-perm-indices.h"
/* Loop Vectorization Pass.

   This pass tries to vectorize loops.

   For example, the vectorizer transforms the following simple loop:

	short a[N]; short b[N]; short c[N]; int i;

	for (i=0; i<N; i++){
	  a[i] = b[i] + c[i];
	}

   as if it were manually vectorized by rewriting the source code into:

	typedef int __attribute__((mode(V8HI))) v8hi;
	short a[N]; short b[N]; short c[N]; int i;
	v8hi *pa = (v8hi*)a, *pb = (v8hi*)b, *pc = (v8hi*)c;

	for (i=0; i<N/8; i++){
	  pa[i] = pb[i] + pc[i];
	}

   The main entry to this pass is vectorize_loops(), in which
   the vectorizer applies a set of analyses on a given set of loops,
   followed by the actual vectorization transformation for the loops that
   had successfully passed the analysis phase.

   Throughout this pass we make a distinction between two types of
   data: scalars (which are represented by SSA_NAMEs), and memory references
   ("data-refs").  These two types of data require different handling both
   during analysis and transformation.  The types of data-refs that the
   vectorizer currently supports are ARRAY_REFs whose base is an array DECL
   (not a pointer), and INDIRECT_REFs through pointers; both array and pointer
   accesses are required to have a simple (consecutive) access pattern.
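
   (Editorial example, not from the original text: a reference such as
    'a[i]' with a unit step is a simple consecutive access in this sense;
    a strided reference like 'a[2*i]' or an indirect access 'a[b[i]]'
    does not have this pattern and is outside the forms described here.)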

   Analysis phase:
   ===============
   The driver for the analysis phase is vect_analyze_loop().
   It applies a set of analyses, some of which rely on the scalar evolution
   analyzer (scev) developed by Sebastian Pop.

   During the analysis phase the vectorizer records some information
   per stmt in a "stmt_vec_info" struct which is attached to each stmt in the
   loop, as well as general information about the loop as a whole, which is
   recorded in a "loop_vec_info" struct attached to each loop.

   Transformation phase:
   =====================
   The loop transformation phase scans all the stmts in the loop, and
   creates a vector stmt (or a sequence of stmts) for each scalar stmt S in
   the loop that needs to be vectorized.  It inserts the vector code sequence
   just before the scalar stmt S, and records a pointer to the vector code
   in STMT_VINFO_VEC_STMT (stmt_info) (stmt_info is the stmt_vec_info struct
   attached to S).  This pointer will be used for the vectorization of
   following stmts which use the def of stmt S.  Stmt S is removed if it
   writes to memory; otherwise, we rely on dead code elimination for
   removing it.

   For example, say stmt S1 was vectorized into stmt VS1:

		  VS1: vb = px[i];
		  S1:  b = x[i];       STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
		  S2:  a = b;

   To vectorize stmt S2, the vectorizer first finds the stmt that defines
   the operand 'b' (S1), and gets the relevant vector def 'vb' from the
   vector stmt VS1 pointed to by STMT_VINFO_VEC_STMT (stmt_info (S1)).  The
   resulting sequence would be:

		  VS1: vb = px[i];
		  S1:  b = x[i];       STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
		  VS2: va = vb;
		  S2:  a = b;          STMT_VINFO_VEC_STMT (stmt_info (S2)) = VS2

   Operands that are not SSA_NAMEs, are data-refs that appear in
   load/store operations (like 'x[i]' in S1), and are handled differently.

   Target modeling:
   ================
   Currently the only target specific information that is used is the
   size of the vector (in bytes) - "TARGET_VECTORIZE_UNITS_PER_SIMD_WORD".
   Targets that can support different sizes of vectors, for now will need
   to specify one value for "TARGET_VECTORIZE_UNITS_PER_SIMD_WORD".  More
   flexibility will be added in the future.

   Since we only vectorize operations whose vector form can be
   expressed using existing tree codes, to verify that an operation is
   supported, the vectorizer checks the relevant optab at the relevant
   machine_mode (e.g, optab_handler (add_optab, V8HImode)).  If
   the value found is CODE_FOR_nothing, then there's no target support, and
   we can't vectorize the stmt.

   For additional information on this project see:
   http://gcc.gnu.org/projects/tree-ssa/vectorization.html
*/
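
/* Editorial sketch (not from the original sources): the kind of optab
   query described above, in the form the vectorizer typically uses it.
   The tree code and vector type are illustrative assumptions only.

     optab op = optab_for_tree_code (PLUS_EXPR, vectype, optab_default);
     if (!op
	 || optab_handler (op, TYPE_MODE (vectype)) == CODE_FOR_nothing)
       return false;	// No target support - cannot vectorize this stmt.
*/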
static void vect_estimate_min_profitable_iters (loop_vec_info, int *, int *);
/* Subroutine of vect_determine_vf_for_stmt that handles only one
   statement.  VECTYPE_MAYBE_SET_P is true if STMT_VINFO_VECTYPE
   may already be set for general statements (not just data refs).  */
static bool
vect_determine_vf_for_stmt_1 (stmt_vec_info stmt_info,
			      bool vectype_maybe_set_p,
			      poly_uint64 *vf,
			      vec<stmt_vec_info> *mask_producers)
{
  gimple *stmt = stmt_info->stmt;

  if ((!STMT_VINFO_RELEVANT_P (stmt_info)
       && !STMT_VINFO_LIVE_P (stmt_info))
      || gimple_clobber_p (stmt))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location, "skip.\n");
      return true;
    }

  tree stmt_vectype, nunits_vectype;
  if (!vect_get_vector_types_for_stmt (stmt_info, &stmt_vectype,
				       &nunits_vectype))
    return false;

  if (stmt_vectype)
    {
      if (STMT_VINFO_VECTYPE (stmt_info))
	/* The only case when a vectype had been already set is for stmts
	   that contain a data ref, or for "pattern-stmts" (stmts generated
	   by the vectorizer to represent/replace a certain idiom).  */
	gcc_assert ((STMT_VINFO_DATA_REF (stmt_info)
		     || vectype_maybe_set_p)
		    && STMT_VINFO_VECTYPE (stmt_info) == stmt_vectype);
      else if (stmt_vectype == boolean_type_node)
	mask_producers->safe_push (stmt_info);
      else
	STMT_VINFO_VECTYPE (stmt_info) = stmt_vectype;
    }

  if (nunits_vectype)
    vect_update_max_nunits (vf, nunits_vectype);

  return true;
}
/* Subroutine of vect_determine_vectorization_factor.  Set the vector
   types of STMT_INFO and all attached pattern statements and update
   the vectorization factor VF accordingly.  If some of the statements
   produce a mask result whose vector type can only be calculated later,
   add them to MASK_PRODUCERS.  Return true on success or false if
   something prevented vectorization.  */
static bool
vect_determine_vf_for_stmt (stmt_vec_info stmt_info, poly_uint64 *vf,
			    vec<stmt_vec_info> *mask_producers)
{
  vec_info *vinfo = stmt_info->vinfo;
  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "==> examining statement: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_info->stmt, 0);
    }
  if (!vect_determine_vf_for_stmt_1 (stmt_info, false, vf, mask_producers))
    return false;

  if (STMT_VINFO_IN_PATTERN_P (stmt_info)
      && STMT_VINFO_RELATED_STMT (stmt_info))
    {
      gimple *pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info);
      stmt_info = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));

      /* If a pattern statement has def stmts, analyze them too.  */
      for (gimple_stmt_iterator si = gsi_start (pattern_def_seq);
	   !gsi_end_p (si); gsi_next (&si))
	{
	  stmt_vec_info def_stmt_info = vinfo->lookup_stmt (gsi_stmt (si));
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location,
			       "==> examining pattern def stmt: ");
	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM,
				def_stmt_info->stmt, 0);
	    }
	  if (!vect_determine_vf_for_stmt_1 (def_stmt_info, true,
					     vf, mask_producers))
	    return false;
	}

      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "==> examining pattern statement: ");
	  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_info->stmt, 0);
	}
      if (!vect_determine_vf_for_stmt_1 (stmt_info, true, vf, mask_producers))
	return false;
    }

  return true;
}
/* Function vect_determine_vectorization_factor

   Determine the vectorization factor (VF).  VF is the number of data elements
   that are operated upon in parallel in a single iteration of the vectorized
   loop.  For example, when vectorizing a loop that operates on 4-byte
   elements, on a target with vector size (VS) of 16 bytes, the VF is set to
   4, since 4 elements can fit in a single vector register.

   We currently support vectorization of loops in which all types operated
   upon are of the same size.  Therefore this function currently sets VF
   according to the size of the types operated upon, and fails if there are
   multiple sizes in the loop.

   VF is also the factor by which the loop iterations are strip-mined, e.g.:

	for (i=0; i<N; i+=VF){
	  a[i:VF] = b[i:VF] + c[i:VF];
	}
*/
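
/* Editorial sketch (not part of the pass, illustrative only): with VF = 4
   the strip-mined form above corresponds roughly to the following scalar
   code, where the trailing loop handles the N % VF leftover iterations
   that a non-masked vector loop cannot cover:

	for (i = 0; i + 4 <= N; i += 4)
	  a[i:4] = b[i:4] + c[i:4];	// one vector statement
	for (; i < N; i++)
	  a[i] = b[i] + c[i];		// scalar epilogue
*/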
static bool
vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  unsigned nbbs = loop->num_nodes;
  poly_uint64 vectorization_factor = 1;
  tree scalar_type = NULL_TREE;
  tree vectype;
  stmt_vec_info stmt_info;
  unsigned i;
  auto_vec<stmt_vec_info> mask_producers;

  DUMP_VECT_SCOPE ("vect_determine_vectorization_factor");

  for (i = 0; i < nbbs; i++)
    {
      basic_block bb = bbs[i];

      for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
	   gsi_next (&si))
	{
	  gphi *phi = si.phi ();
	  stmt_info = loop_vinfo->lookup_stmt (phi);
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location,
			       "==> examining phi: ");
	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
	    }

	  gcc_assert (stmt_info);

	  if (STMT_VINFO_RELEVANT_P (stmt_info)
	      || STMT_VINFO_LIVE_P (stmt_info))
	    {
	      gcc_assert (!STMT_VINFO_VECTYPE (stmt_info));
	      scalar_type = TREE_TYPE (PHI_RESULT (phi));

	      if (dump_enabled_p ())
		{
		  dump_printf_loc (MSG_NOTE, vect_location,
				   "get vectype for scalar type: ");
		  dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
		  dump_printf (MSG_NOTE, "\n");
		}

	      vectype = get_vectype_for_scalar_type (scalar_type);
	      if (!vectype)
		{
		  if (dump_enabled_p ())
		    {
		      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				       "not vectorized: unsupported "
				       "data-type ");
		      dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
					 scalar_type);
		      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
		    }
		  return false;
		}
	      STMT_VINFO_VECTYPE (stmt_info) = vectype;

	      if (dump_enabled_p ())
		{
		  dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
		  dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype);
		  dump_printf (MSG_NOTE, "\n");
		}

	      if (dump_enabled_p ())
		{
		  dump_printf_loc (MSG_NOTE, vect_location, "nunits = ");
		  dump_dec (MSG_NOTE, TYPE_VECTOR_SUBPARTS (vectype));
		  dump_printf (MSG_NOTE, "\n");
		}

	      vect_update_max_nunits (&vectorization_factor, vectype);
	    }
	}

      for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
	   gsi_next (&si))
	{
	  stmt_info = loop_vinfo->lookup_stmt (gsi_stmt (si));
	  if (!vect_determine_vf_for_stmt (stmt_info, &vectorization_factor,
					   &mask_producers))
	    return false;
	}
    }

  /* TODO: Analyze cost.  Decide if worth while to vectorize.  */
  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "vectorization factor = ");
      dump_dec (MSG_NOTE, vectorization_factor);
      dump_printf (MSG_NOTE, "\n");
    }

  if (known_le (vectorization_factor, 1U))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not vectorized: unsupported data-type\n");
      return false;
    }
  LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;

  for (i = 0; i < mask_producers.length (); i++)
    {
      stmt_info = mask_producers[i];
      tree mask_type = vect_get_mask_type_for_stmt (stmt_info);
      if (!mask_type)
	return false;
      STMT_VINFO_VECTYPE (stmt_info) = mask_type;
    }

  return true;
}
/* Function vect_is_simple_iv_evolution.

   FORNOW: A simple evolution of an induction variable in the loop is
   considered a polynomial evolution.  */
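
/* Editorial example (illustrative assumption, not from the original
   sources): for a counter such as

	i_1 = PHI <0 (preheader), i_2 (latch)>;
	i_2 = i_1 + 4;

   scev reports the access function {0, +, 4}_loop, so INIT is 0 and STEP
   is 4.  A step that itself varies inside the loop would not be "simple"
   in the sense used here.  */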
static bool
vect_is_simple_iv_evolution (unsigned loop_nb, tree access_fn, tree * init,
			     tree * step)
{
  tree init_expr;
  tree step_expr;
  tree evolution_part = evolution_part_in_loop_num (access_fn, loop_nb);
  basic_block bb;

  /* When there is no evolution in this loop, the evolution function
     is not "simple".  */
  if (evolution_part == NULL_TREE)
    return false;

  /* When the evolution is a polynomial of degree >= 2
     the evolution function is not "simple".  */
  if (tree_is_chrec (evolution_part))
    return false;

  step_expr = evolution_part;
  init_expr = unshare_expr (initial_condition_in_loop_num (access_fn, loop_nb));

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "step: ");
      dump_generic_expr (MSG_NOTE, TDF_SLIM, step_expr);
      dump_printf (MSG_NOTE, ",  init: ");
      dump_generic_expr (MSG_NOTE, TDF_SLIM, init_expr);
      dump_printf (MSG_NOTE, "\n");
    }

  *init = init_expr;
  *step = step_expr;

  if (TREE_CODE (step_expr) != INTEGER_CST
      && (TREE_CODE (step_expr) != SSA_NAME
	  || ((bb = gimple_bb (SSA_NAME_DEF_STMT (step_expr)))
	      && flow_bb_inside_loop_p (get_loop (cfun, loop_nb), bb))
	  || (!INTEGRAL_TYPE_P (TREE_TYPE (step_expr))
	      && (!SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr))
		  || !flag_associative_math)))
      && (TREE_CODE (step_expr) != REAL_CST
	  || !flag_associative_math))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "step unknown.\n");
      return false;
    }

  return true;
}
/* Function vect_analyze_scalar_cycles_1.

   Examine the cross iteration def-use cycles of scalar variables
   in LOOP.  LOOP_VINFO represents the loop that is now being
   considered for vectorization (can be LOOP, or an outer-loop
   enclosing LOOP).  */
static void
vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, struct loop *loop)
{
  basic_block bb = loop->header;
  tree init, step;
  auto_vec<gimple *, 64> worklist;
  gphi_iterator gsi;
  bool double_reduc;

  DUMP_VECT_SCOPE ("vect_analyze_scalar_cycles");

  /* First - identify all inductions.  Reduction detection assumes that all
     the inductions have been identified, therefore, this order must not be
     changed.  */
  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gphi *phi = gsi.phi ();
      tree access_fn = NULL;
      tree def = PHI_RESULT (phi);
      stmt_vec_info stmt_vinfo = loop_vinfo->lookup_stmt (phi);

      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: ");
	  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
	}

      /* Skip virtual phi's.  The data dependences that are associated with
	 virtual defs/uses (i.e., memory accesses) are analyzed elsewhere.  */
      if (virtual_operand_p (def))
	continue;

      STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_unknown_def_type;

      /* Analyze the evolution function.  */
      access_fn = analyze_scalar_evolution (loop, def);
      if (access_fn)
	{
	  STRIP_NOPS (access_fn);
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location,
			       "Access function of PHI: ");
	      dump_generic_expr (MSG_NOTE, TDF_SLIM, access_fn);
	      dump_printf (MSG_NOTE, "\n");
	    }
	  STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (stmt_vinfo)
	    = initial_condition_in_loop_num (access_fn, loop->num);
	  STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo)
	    = evolution_part_in_loop_num (access_fn, loop->num);
	}

      if (!access_fn
	  || !vect_is_simple_iv_evolution (loop->num, access_fn, &init, &step)
	  || (LOOP_VINFO_LOOP (loop_vinfo) != loop
	      && TREE_CODE (step) != INTEGER_CST))
	{
	  worklist.safe_push (phi);
	  continue;
	}

      gcc_assert (STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (stmt_vinfo)
		  != NULL_TREE);
      gcc_assert (STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo) != NULL_TREE);

      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location, "Detected induction.\n");
      STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_induction_def;
    }

  /* Second - identify all reductions and nested cycles.  */
  while (worklist.length () > 0)
    {
      gimple *phi = worklist.pop ();
      tree def = PHI_RESULT (phi);
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (phi);
      gimple *reduc_stmt;

      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: ");
	  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
	}

      gcc_assert (!virtual_operand_p (def)
		  && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_unknown_def_type);

      reduc_stmt = vect_force_simple_reduction (loop_vinfo, phi,
						&double_reduc, false);
      if (reduc_stmt)
	{
	  if (double_reduc)
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_NOTE, vect_location,
				 "Detected double reduction.\n");

	      STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_double_reduction_def;
	      STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
						vect_double_reduction_def;
	    }
	  else
	    {
	      if (loop != LOOP_VINFO_LOOP (loop_vinfo))
		{
		  if (dump_enabled_p ())
		    dump_printf_loc (MSG_NOTE, vect_location,
				     "Detected vectorizable nested cycle.\n");

		  STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_nested_cycle;
		  STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
							vect_nested_cycle;
		}
	      else
		{
		  if (dump_enabled_p ())
		    dump_printf_loc (MSG_NOTE, vect_location,
				     "Detected reduction.\n");

		  STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_reduction_def;
		  STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
							vect_reduction_def;
		  /* Store the reduction cycles for possible vectorization in
		     loop-aware SLP if it was not detected as reduction
		     chain.  */
		  if (! REDUC_GROUP_FIRST_ELEMENT (vinfo_for_stmt (reduc_stmt)))
		    LOOP_VINFO_REDUCTIONS (loop_vinfo).safe_push (reduc_stmt);
		}
	    }
	}
      else
	if (dump_enabled_p ())
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "Unknown def-use cycle pattern.\n");
    }
}
/* Function vect_analyze_scalar_cycles.

   Examine the cross iteration def-use cycles of scalar variables, by
   analyzing the loop-header PHIs of scalar variables.  Classify each
   cycle as one of the following: invariant, induction, reduction, unknown.
   We do that for the loop represented by LOOP_VINFO, and also to its
   inner-loop, if exists.

   Examples for scalar cycles:
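   (The two examples below are editorial reconstructions, kept only to
    illustrate the common cases.)

   Example1: reduction:

	      loop1:
	      for (i=0; i<N; i++)
		 sum += a[i];

   Example2: induction:

	      loop2:
	      for (i=0; i<N; i++)
		 a[i] = i;  */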
static void
vect_analyze_scalar_cycles (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);

  vect_analyze_scalar_cycles_1 (loop_vinfo, loop);

  /* When vectorizing an outer-loop, the inner-loop is executed sequentially.
     Reductions in such an inner-loop therefore have different properties
     than the reductions in the nest that gets vectorized:
     1. When vectorized, they are executed in the same order as in the
	original scalar loop, so we can't change the order of computation
	when vectorizing them.
     2. FIXME: Inner-loop reductions can be used in the inner-loop, so the
	current checks are too strict.  */

  if (loop->inner)
    vect_analyze_scalar_cycles_1 (loop_vinfo, loop->inner);
}
/* Transfer group and reduction information from STMT to its pattern stmt.  */

static void
vect_fixup_reduc_chain (gimple *stmt)
{
  gimple *firstp = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (stmt));
  gimple *stmtp;
  gcc_assert (!REDUC_GROUP_FIRST_ELEMENT (vinfo_for_stmt (firstp))
	      && REDUC_GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)));
  REDUC_GROUP_SIZE (vinfo_for_stmt (firstp))
    = REDUC_GROUP_SIZE (vinfo_for_stmt (stmt));
  do
    {
      stmtp = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (stmt));
      REDUC_GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmtp)) = firstp;
      stmt = REDUC_GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmt));
      if (stmt)
	REDUC_GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmtp))
	  = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (stmt));
    }
  while (stmt);
  STMT_VINFO_DEF_TYPE (vinfo_for_stmt (stmtp)) = vect_reduction_def;
}
/* Fixup scalar cycles that now have their stmts detected as patterns.  */

static void
vect_fixup_scalar_cycles_with_patterns (loop_vec_info loop_vinfo)
{
  gimple *first;
  unsigned i;

  FOR_EACH_VEC_ELT (LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo), i, first)
    if (STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (first)))
      {
	gimple *next = REDUC_GROUP_NEXT_ELEMENT (vinfo_for_stmt (first));
	while (next)
	  {
	    if (! STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (next)))
	      break;
	    next = REDUC_GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
	  }
	/* If not all stmts in the chain are patterns try to handle
	   the chain without patterns.  */
	if (! next)
	  {
	    vect_fixup_reduc_chain (first);
	    LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo)[i]
	      = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (first));
	  }
      }
}
/* Function vect_get_loop_niters.

   Determine how many iterations the loop is executed and place it
   in NUMBER_OF_ITERATIONS.  Place the number of latch iterations
   in NUMBER_OF_ITERATIONSM1.  Place the condition under which the
   niter information holds in ASSUMPTIONS.

   Return the loop exit condition.  */
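
/* Editorial worked example (not from the original sources): for

	i = 0;
	do { a[i] = 0; i++; } while (i < n);

   with n = 100 the latch runs 99 times and the header 100 times, so
   NUMBER_OF_ITERATIONSM1 is 99 while NUMBER_OF_ITERATIONS is 100; the
   "+ 1" near the end of this function converts the former into the
   latter, and can wrap to 0 for loops whose latch runs UINT_MAX times.  */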
static gcond *
vect_get_loop_niters (struct loop *loop, tree *assumptions,
		      tree *number_of_iterations, tree *number_of_iterationsm1)
{
  edge exit = single_exit (loop);
  struct tree_niter_desc niter_desc;
  tree niter_assumptions, niter, may_be_zero;
  gcond *cond = get_loop_exit_condition (loop);

  *assumptions = boolean_true_node;
  *number_of_iterationsm1 = chrec_dont_know;
  *number_of_iterations = chrec_dont_know;
  DUMP_VECT_SCOPE ("get_loop_niters");

  niter = chrec_dont_know;
  may_be_zero = NULL_TREE;
  niter_assumptions = boolean_true_node;
  if (!number_of_iterations_exit_assumptions (loop, exit, &niter_desc, NULL)
      || chrec_contains_undetermined (niter_desc.niter))
    return cond;

  niter_assumptions = niter_desc.assumptions;
  may_be_zero = niter_desc.may_be_zero;
  niter = niter_desc.niter;

  if (may_be_zero && integer_zerop (may_be_zero))
    may_be_zero = NULL_TREE;

  if (may_be_zero)
    {
      if (COMPARISON_CLASS_P (may_be_zero))
	{
	  /* Try to combine may_be_zero with assumptions, this can simplify
	     computation of niter expression.  */
	  if (niter_assumptions && !integer_nonzerop (niter_assumptions))
	    niter_assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
					     niter_assumptions,
					     fold_build1 (TRUTH_NOT_EXPR,
							  boolean_type_node,
							  may_be_zero));
	  else
	    niter = fold_build3 (COND_EXPR, TREE_TYPE (niter), may_be_zero,
				 build_int_cst (TREE_TYPE (niter), 0),
				 rewrite_to_non_trapping_overflow (niter));

	  may_be_zero = NULL_TREE;
	}
      else if (integer_nonzerop (may_be_zero))
	{
	  *number_of_iterationsm1 = build_int_cst (TREE_TYPE (niter), 0);
	  *number_of_iterations = build_int_cst (TREE_TYPE (niter), 1);
	  return cond;
	}
      else
	return cond;
    }

  *assumptions = niter_assumptions;
  *number_of_iterationsm1 = niter;

  /* We want the number of loop header executions which is the number
     of latch executions plus one.
     ??? For UINT_MAX latch executions this number overflows to zero
     for loops like do { n++; } while (n != 0);  */
  if (niter && !chrec_contains_undetermined (niter))
    niter = fold_build2 (PLUS_EXPR, TREE_TYPE (niter), unshare_expr (niter),
			 build_int_cst (TREE_TYPE (niter), 1));
  *number_of_iterations = niter;

  return cond;
}
/* Function bb_in_loop_p

   Used as predicate for dfs order traversal of the loop bbs.  */

static bool
bb_in_loop_p (const_basic_block bb, const void *data)
{
  const struct loop *const loop = (const struct loop *)data;
  if (flow_bb_inside_loop_p (loop, bb))
    return true;
  return false;
}
/* Create and initialize a new loop_vec_info struct for LOOP_IN, as well as
   stmt_vec_info structs for all the stmts in LOOP_IN.  */

_loop_vec_info::_loop_vec_info (struct loop *loop_in, vec_info_shared *shared)
  : vec_info (vec_info::loop, init_cost (loop_in), shared),
    loop (loop_in),
    bbs (XCNEWVEC (basic_block, loop->num_nodes)),
    num_itersm1 (NULL_TREE),
    num_iters (NULL_TREE),
    num_iters_unchanged (NULL_TREE),
    num_iters_assumptions (NULL_TREE),
    versioning_threshold (0),
    vectorization_factor (0),
    max_vectorization_factor (0),
    mask_skip_niters (NULL_TREE),
    mask_compare_type (NULL_TREE),
    peeling_for_alignment (0),
    slp_unrolling_factor (1),
    single_scalar_iteration_cost (0),
    vectorizable (false),
    can_fully_mask_p (true),
    fully_masked_p (false),
    peeling_for_gaps (false),
    peeling_for_niter (false),
    operands_swapped (false),
    no_data_dependencies (false),
    has_mask_store (false),
    orig_loop_info (NULL)
{
  /* Create/Update stmt_info for all stmts in the loop.  */
  basic_block *body = get_loop_body (loop);
  for (unsigned int i = 0; i < loop->num_nodes; i++)
    {
      basic_block bb = body[i];
      gimple_stmt_iterator si;

      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
	{
	  gimple *phi = gsi_stmt (si);
	  gimple_set_uid (phi, 0);
	}

      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
	{
	  gimple *stmt = gsi_stmt (si);
	  gimple_set_uid (stmt, 0);
	}
    }

  /* CHECKME: We want to visit all BBs before their successors (except for
     latch blocks, for which this assertion wouldn't hold).  In the simple
     case of the loop forms we allow, a dfs order of the BBs would be the
     same as reversed postorder traversal, so we are safe.  */

  unsigned int nbbs = dfs_enumerate_from (loop->header, 0, bb_in_loop_p,
					  bbs, loop->num_nodes, loop);
  gcc_assert (nbbs == loop->num_nodes);
}
/* Free all levels of MASKS.  */

void
release_vec_loop_masks (vec_loop_masks *masks)
{
  rgroup_masks *rgm;
  unsigned int i;
  FOR_EACH_VEC_ELT (*masks, i, rgm)
    rgm->masks.release ();
  masks->release ();
}
/* Free all memory used by the _loop_vec_info, as well as all the
   stmt_vec_info structs of all the stmts in the loop.  */

_loop_vec_info::~_loop_vec_info ()
{
  int nbbs;
  gimple_stmt_iterator si;
  int j;

  /* ???  We're releasing loop_vinfos en-block.  */
  set_stmt_vec_info_vec (&stmt_vec_infos);
  nbbs = loop->num_nodes;
  for (j = 0; j < nbbs; j++)
    {
      basic_block bb = bbs[j];
      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
	free_stmt_vec_info (gsi_stmt (si));

      for (si = gsi_start_bb (bb); !gsi_end_p (si); )
	{
	  gimple *stmt = gsi_stmt (si);

	  /* We may have broken canonical form by moving a constant
	     into RHS1 of a commutative op.  Fix such occurrences.  */
	  if (operands_swapped && is_gimple_assign (stmt))
	    {
	      enum tree_code code = gimple_assign_rhs_code (stmt);

	      if ((code == PLUS_EXPR
		   || code == POINTER_PLUS_EXPR
		   || code == MULT_EXPR)
		  && CONSTANT_CLASS_P (gimple_assign_rhs1 (stmt)))
		swap_ssa_operands (stmt,
				   gimple_assign_rhs1_ptr (stmt),
				   gimple_assign_rhs2_ptr (stmt));
	      else if (code == COND_EXPR
		       && CONSTANT_CLASS_P (gimple_assign_rhs2 (stmt)))
		{
		  tree cond_expr = gimple_assign_rhs1 (stmt);
		  enum tree_code cond_code = TREE_CODE (cond_expr);

		  if (TREE_CODE_CLASS (cond_code) == tcc_comparison)
		    {
		      bool honor_nans
			= HONOR_NANS (TREE_OPERAND (cond_expr, 0));
		      cond_code = invert_tree_comparison (cond_code,
							  honor_nans);
		      if (cond_code != ERROR_MARK)
			{
			  TREE_SET_CODE (cond_expr, cond_code);
			  swap_ssa_operands (stmt,
					     gimple_assign_rhs2_ptr (stmt),
					     gimple_assign_rhs3_ptr (stmt));
			}
		    }
		}
	    }

	  /* Free stmt_vec_info.  */
	  free_stmt_vec_info (stmt);
	  gsi_next (&si);
	}
    }

  release_vec_loop_masks (&masks);
}
/* Return an invariant or register for EXPR and emit necessary
   computations in the LOOP_VINFO loop preheader.  */

tree
cse_and_gimplify_to_preheader (loop_vec_info loop_vinfo, tree expr)
{
  if (is_gimple_reg (expr)
      || is_gimple_min_invariant (expr))
    return expr;

  if (! loop_vinfo->ivexpr_map)
    loop_vinfo->ivexpr_map = new hash_map<tree_operand_hash, tree>;
  tree &cached = loop_vinfo->ivexpr_map->get_or_insert (expr);
  if (! cached)
    {
      gimple_seq stmts = NULL;
      cached = force_gimple_operand (unshare_expr (expr),
				     &stmts, true, NULL_TREE);
      if (stmts)
	{
	  edge e = loop_preheader_edge (LOOP_VINFO_LOOP (loop_vinfo));
	  gsi_insert_seq_on_edge_immediate (e, stmts);
	}
    }
  return cached;
}
/* Return true if we can use CMP_TYPE as the comparison type to produce
   all masks required to mask LOOP_VINFO.  */

static bool
can_produce_all_loop_masks_p (loop_vec_info loop_vinfo, tree cmp_type)
{
  rgroup_masks *rgm;
  unsigned int i;
  FOR_EACH_VEC_ELT (LOOP_VINFO_MASKS (loop_vinfo), i, rgm)
    if (rgm->mask_type != NULL_TREE
	&& !direct_internal_fn_supported_p (IFN_WHILE_ULT,
					    cmp_type, rgm->mask_type,
					    OPTIMIZE_FOR_SPEED))
      return false;
  return true;
}
/* Calculate the maximum number of scalars per iteration for every
   rgroup in LOOP_VINFO.  */

static unsigned int
vect_get_max_nscalars_per_iter (loop_vec_info loop_vinfo)
{
  unsigned int res = 1;
  unsigned int i;
  rgroup_masks *rgm;
  FOR_EACH_VEC_ELT (LOOP_VINFO_MASKS (loop_vinfo), i, rgm)
    res = MAX (res, rgm->max_nscalars_per_iter);
  return res;
}
/* Each statement in LOOP_VINFO can be masked where necessary.  Check
   whether we can actually generate the masks required.  Return true if so,
   storing the type of the scalar IV in LOOP_VINFO_MASK_COMPARE_TYPE.  */
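
/* Editorial worked example (illustrative numbers only): if the niter type
   bounds the iteration count at 2^32 but max_loop_iterations proves at
   most 1000 latch executions, and the largest rgroup needs 2 scalars per
   iteration, then max_ni = 1001 * 2 = 2002 and min_ni_width =
   wi::min_precision (2002, UNSIGNED) = 11 bits, so any integer mode of at
   least 11 bits that the target supports for WHILE_ULT can serve as the
   comparison type.  */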
static bool
vect_verify_full_masking (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  unsigned int min_ni_width;

  /* Use a normal loop if there are no statements that need masking.
     This only happens in rare degenerate cases: it means that the loop
     has no loads, no stores, and no live-out values.  */
  if (LOOP_VINFO_MASKS (loop_vinfo).is_empty ())
    return false;

  /* Get the maximum number of iterations that is representable
     in the counter type.  */
  tree ni_type = TREE_TYPE (LOOP_VINFO_NITERSM1 (loop_vinfo));
  widest_int max_ni = wi::to_widest (TYPE_MAX_VALUE (ni_type)) + 1;

  /* Get a more refined estimate for the number of iterations.  */
  widest_int max_back_edges;
  if (max_loop_iterations (loop, &max_back_edges))
    max_ni = wi::smin (max_ni, max_back_edges + 1);

  /* Account for rgroup masks, in which each bit is replicated N times.  */
  max_ni *= vect_get_max_nscalars_per_iter (loop_vinfo);

  /* Work out how many bits we need to represent the limit.  */
  min_ni_width = wi::min_precision (max_ni, UNSIGNED);

  /* Find a scalar mode for which WHILE_ULT is supported.  */
  opt_scalar_int_mode cmp_mode_iter;
  tree cmp_type = NULL_TREE;
  FOR_EACH_MODE_IN_CLASS (cmp_mode_iter, MODE_INT)
    {
      unsigned int cmp_bits = GET_MODE_BITSIZE (cmp_mode_iter.require ());
      if (cmp_bits >= min_ni_width
	  && targetm.scalar_mode_supported_p (cmp_mode_iter.require ()))
	{
	  tree this_type = build_nonstandard_integer_type (cmp_bits, true);
	  if (this_type
	      && can_produce_all_loop_masks_p (loop_vinfo, this_type))
	    {
	      /* Although we could stop as soon as we find a valid mode,
		 it's often better to continue until we hit Pmode, since the
		 operands to the WHILE are more likely to be reusable in
		 address calculations.  */
	      cmp_type = this_type;
	      if (cmp_bits >= GET_MODE_BITSIZE (Pmode))
		break;
	    }
	}
    }

  if (!cmp_type)
    return false;

  LOOP_VINFO_MASK_COMPARE_TYPE (loop_vinfo) = cmp_type;
  return true;
}
/* Calculate the cost of one scalar iteration of the loop.  */
static void
vect_compute_single_scalar_iteration_cost (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  int nbbs = loop->num_nodes, factor;
  int innerloop_iters, i;

  /* Gather costs for statements in the scalar loop.  */

  /* FORNOW.  */
  innerloop_iters = 1;
  if (loop->inner)
    innerloop_iters = 50; /* FIXME */

  for (i = 0; i < nbbs; i++)
    {
      gimple_stmt_iterator si;
      basic_block bb = bbs[i];

      if (bb->loop_father == loop->inner)
	factor = innerloop_iters;
      else
	factor = 1;

      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
	{
	  gimple *stmt = gsi_stmt (si);
	  stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (stmt);

	  if (!is_gimple_assign (stmt) && !is_gimple_call (stmt))
	    continue;

	  /* Skip stmts that are not vectorized inside the loop.  */
	  if (stmt_info
	      && !STMT_VINFO_RELEVANT_P (stmt_info)
	      && (!STMT_VINFO_LIVE_P (stmt_info)
		  || !VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_info)))
	      && !STMT_VINFO_IN_PATTERN_P (stmt_info))
	    continue;

	  vect_cost_for_stmt kind;
	  if (STMT_VINFO_DATA_REF (stmt_info))
	    {
	      if (DR_IS_READ (STMT_VINFO_DATA_REF (stmt_info)))
		kind = scalar_load;
	      else
		kind = scalar_store;
	    }
	  else
	    kind = scalar_stmt;

	  record_stmt_cost (&LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo),
			    factor, kind, stmt_info, 0, vect_prologue);
	}
    }

  /* Now accumulate cost.  */
  void *target_cost_data = init_cost (loop);
  stmt_info_for_cost *si;
  int j;
  FOR_EACH_VEC_ELT (LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo),
		    j, si)
    {
      struct _stmt_vec_info *stmt_info
	= si->stmt ? vinfo_for_stmt (si->stmt) : NULL_STMT_VEC_INFO;
      (void) add_stmt_cost (target_cost_data, si->count,
			    si->kind, stmt_info, si->misalign,
			    vect_body);
    }
  unsigned dummy, body_cost = 0;
  finish_cost (target_cost_data, &dummy, &body_cost, &dummy);
  destroy_cost_data (target_cost_data);
  LOOP_VINFO_SINGLE_SCALAR_ITERATION_COST (loop_vinfo) = body_cost;
}
/* Function vect_analyze_loop_form_1.

   Verify that certain CFG restrictions hold, including:
   - the loop has a pre-header
   - the loop has a single entry and exit
   - the loop exit condition is simple enough
   - the number of iterations can be analyzed, i.e., a countable loop.  The
     niter could be analyzed under some assumptions.  */
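
/* Editorial sketch (reconstruction, for orientation only): the inner-most
   form accepted below is essentially a two-block do-while loop,

	preheader:
	  ...
	header:		   all executable statements live here
	  ...
	  if (cond) goto header; else goto exit;	(via the empty latch)
	latch:		   must be empty
	exit:

   i.e. one header block with the exit test at its end plus an empty
   latch, which is what the num_nodes == 2 and "latch block not empty"
   checks in this function enforce.  */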
static bool
vect_analyze_loop_form_1 (struct loop *loop, gcond **loop_cond,
			  tree *assumptions, tree *number_of_iterationsm1,
			  tree *number_of_iterations, gcond **inner_loop_cond)
{
  DUMP_VECT_SCOPE ("vect_analyze_loop_form");

  /* Different restrictions apply when we are considering an inner-most loop,
     vs. an outer (nested) loop.
     (FORNOW. May want to relax some of these restrictions in the future).  */

  if (!loop->inner)
    {
      /* Inner-most loop.  We currently require that the number of BBs is
	 exactly 2 (the header and latch).  Vectorizable inner-most loops
	 therefore consist of a header block that holds the exit test at
	 its end, plus an empty latch.  */

      if (loop->num_nodes != 2)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "not vectorized: control flow in loop.\n");
	  return false;
	}

      if (empty_block_p (loop->header))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "not vectorized: empty loop.\n");
	  return false;
	}
    }
  else
    {
      struct loop *innerloop = loop->inner;
      edge entryedge;

      /* Nested loop.  We currently require that the loop is doubly-nested,
	 contains a single inner loop, and the number of BBs is exactly 5.
	 Vectorizable outer-loops look like this: a header block, the single
	 inner loop, and a tail block that branches back to the header
	 (five blocks in all, counting the inner loop's two).
	 The inner-loop has the properties expected of inner-most loops
	 as described above.  */
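
      /* Editorial sketch of that shape (reconstruction, not from the
	 original sources):

		(pre-header)
		     |
		   header <----------+
		     |               |
		  inner-loop         |
		     |               |
		   tail -------------+
		     |
		  (exit-bb)  */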
      if ((loop->inner)->inner || (loop->inner)->next)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "not vectorized: multiple nested loops.\n");
	  return false;
	}

      if (loop->num_nodes != 5)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "not vectorized: control flow in loop.\n");
	  return false;
	}

      entryedge = loop_preheader_edge (innerloop);
      if (entryedge->src != loop->header
	  || !single_exit (innerloop)
	  || single_exit (innerloop)->dest != EDGE_PRED (loop->latch, 0)->src)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "not vectorized: unsupported outerloop form.\n");
	  return false;
	}

      /* Analyze the inner-loop.  */
      tree inner_niterm1, inner_niter, inner_assumptions;
      if (! vect_analyze_loop_form_1 (loop->inner, inner_loop_cond,
				      &inner_assumptions, &inner_niterm1,
				      &inner_niter, NULL)
	  /* Don't support analyzing niter under assumptions for the inner
	     loop.  */
	  || !integer_onep (inner_assumptions))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "not vectorized: Bad inner loop.\n");
	  return false;
	}

      if (!expr_invariant_in_loop_p (loop, inner_niter))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "not vectorized: inner-loop count not"
			     " invariant.\n");
	  return false;
	}

      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "Considering outer-loop vectorization.\n");
    }

  if (!single_exit (loop)
      || EDGE_COUNT (loop->header->preds) != 2)
    {
      if (dump_enabled_p ())
	{
	  if (!single_exit (loop))
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "not vectorized: multiple exits.\n");
	  else if (EDGE_COUNT (loop->header->preds) != 2)
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "not vectorized: too many incoming edges.\n");
	}
      return false;
    }

  /* We assume that the loop exit condition is at the end of the loop, i.e.,
     that the loop is represented as a do-while (with a proper if-guard
     before the loop if needed), where the loop header contains all the
     executable statements, and the latch is empty.  */
  if (!empty_block_p (loop->latch)
      || !gimple_seq_empty_p (phi_nodes (loop->latch)))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not vectorized: latch block not empty.\n");
      return false;
    }

  /* Make sure the exit is not abnormal.  */
  edge e = single_exit (loop);
  if (e->flags & EDGE_ABNORMAL)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not vectorized: abnormal loop exit edge.\n");
      return false;
    }

  *loop_cond = vect_get_loop_niters (loop, assumptions, number_of_iterations,
				     number_of_iterationsm1);
  if (!*loop_cond)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not vectorized: complicated exit condition.\n");
      return false;
    }

  if (integer_zerop (*assumptions)
      || !*number_of_iterations
      || chrec_contains_undetermined (*number_of_iterations))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not vectorized: number of iterations cannot be "
			 "computed.\n");
      return false;
    }

  if (integer_zerop (*number_of_iterations))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not vectorized: number of iterations = 0.\n");
      return false;
    }

  return true;
}
/* Analyze LOOP form and return a loop_vec_info if it is of suitable form.  */

loop_vec_info
vect_analyze_loop_form (struct loop *loop, vec_info_shared *shared)
{
  tree assumptions, number_of_iterations, number_of_iterationsm1;
  gcond *loop_cond, *inner_loop_cond = NULL;

  if (! vect_analyze_loop_form_1 (loop, &loop_cond,
				  &assumptions, &number_of_iterationsm1,
				  &number_of_iterations, &inner_loop_cond))
    return NULL;

  loop_vec_info loop_vinfo = new _loop_vec_info (loop, shared);
  LOOP_VINFO_NITERSM1 (loop_vinfo) = number_of_iterationsm1;
  LOOP_VINFO_NITERS (loop_vinfo) = number_of_iterations;
  LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo) = number_of_iterations;
  if (!integer_onep (assumptions))
    {
      /* We consider vectorizing this loop by versioning it under
	 some assumptions.  In order to do this, we need to clear
	 existing information computed by scev and niter analyzer.  */
      free_numbers_of_iterations_estimates (loop);
      /* Also set flag for this loop so that following scev and niter
	 analysis are done under the assumptions.  */
      loop_constraint_set (loop, LOOP_C_FINITE);
      /* Also record the assumptions for versioning.  */
      LOOP_VINFO_NITERS_ASSUMPTIONS (loop_vinfo) = assumptions;
    }

  if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
    {
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "Symbolic number of iterations is ");
	  dump_generic_expr (MSG_NOTE, TDF_DETAILS, number_of_iterations);
	  dump_printf (MSG_NOTE, "\n");
	}
    }

  stmt_vec_info loop_cond_info = loop_vinfo->lookup_stmt (loop_cond);
  STMT_VINFO_TYPE (loop_cond_info) = loop_exit_ctrl_vec_info_type;
  if (inner_loop_cond)
    {
      stmt_vec_info inner_loop_cond_info
	= loop_vinfo->lookup_stmt (inner_loop_cond);
      STMT_VINFO_TYPE (inner_loop_cond_info) = loop_exit_ctrl_vec_info_type;
    }

  gcc_assert (!loop->aux);
  loop->aux = loop_vinfo;
  return loop_vinfo;
}
/* Scan the loop stmts and, depending on whether there are any (non-)SLP
   statements, update the vectorization factor.  */

static void
vect_update_vf_for_slp (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  int nbbs = loop->num_nodes;
  poly_uint64 vectorization_factor;
  int i;

  DUMP_VECT_SCOPE ("vect_update_vf_for_slp");

  vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  gcc_assert (known_ne (vectorization_factor, 0U));

  /* If all the stmts in the loop can be SLPed, we perform only SLP, and
     the vectorization factor of the loop is the unrolling factor required
     by the SLP instances.  If that unrolling factor is 1, we say, that we
     perform pure SLP on loop - cross iteration parallelism is not
     exploited.  */
  bool only_slp_in_loop = true;
  for (i = 0; i < nbbs; i++)
    {
      basic_block bb = bbs[i];
      for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
	   gsi_next (&si))
	{
	  gimple *stmt = gsi_stmt (si);
	  stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (gsi_stmt (si));
	  if (STMT_VINFO_IN_PATTERN_P (stmt_info)
	      && STMT_VINFO_RELATED_STMT (stmt_info))
	    {
	      stmt = STMT_VINFO_RELATED_STMT (stmt_info);
	      stmt_info = vinfo_for_stmt (stmt);
	    }
	  if ((STMT_VINFO_RELEVANT_P (stmt_info)
	       || VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_info)))
	      && !PURE_SLP_STMT (stmt_info))
	    /* STMT needs both SLP and loop-based vectorization.  */
	    only_slp_in_loop = false;
	}
    }

  if (only_slp_in_loop)
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "Loop contains only SLP stmts\n");
      vectorization_factor = LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo);
    }
  else
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "Loop contains SLP and non-SLP stmts\n");
      /* Both the vectorization factor and unroll factor have the form
	 current_vector_size * X for some rational X, so they must have
	 a common multiple.  */
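      /* Editorial example (illustrative numbers only): with a 16-byte
	 vector size, a loop-based VF of 4 (4-byte elements) and an SLP
	 unrolling factor of 8 (2-byte elements) combine to
	 force_common_multiple (4, 8) = 8.  */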
      vectorization_factor
	= force_common_multiple (vectorization_factor,
				 LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo));
    }

  LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;
  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "Updating vectorization factor to ");
      dump_dec (MSG_NOTE, vectorization_factor);
      dump_printf (MSG_NOTE, ".\n");
    }
}
/* Return true if STMT_INFO describes a double reduction phi and if
   the other phi in the reduction is also relevant for vectorization.
   This rejects cases such as:

      outer1:
	x_1 = PHI <x_3(outer2), ...>;
	...

      inner:
	x_2 = ...;
	...

      outer2:
	x_3 = PHI <x_2(inner)>;

   if nothing in x_2 or elsewhere makes x_1 relevant.  */

static bool
vect_active_double_reduction_p (stmt_vec_info stmt_info)
{
  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_double_reduction_def)
    return false;

  gimple *other_phi = STMT_VINFO_REDUC_DEF (stmt_info);
  return STMT_VINFO_RELEVANT_P (vinfo_for_stmt (other_phi));
}
/* Function vect_analyze_loop_operations.

   Scan the loop stmts and make sure they are all vectorizable.  */

static bool
vect_analyze_loop_operations (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  int nbbs = loop->num_nodes;
  int i;
  stmt_vec_info stmt_info;
  bool need_to_vectorize = false;
  bool ok;

  DUMP_VECT_SCOPE ("vect_analyze_loop_operations");

  stmt_vector_for_cost cost_vec;
  cost_vec.create (2);

  for (i = 0; i < nbbs; i++)
    {
      basic_block bb = bbs[i];

      for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
	   gsi_next (&si))
	{
	  gphi *phi = si.phi ();
	  ok = true;

	  stmt_info = loop_vinfo->lookup_stmt (phi);
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location, "examining phi: ");
	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
	    }
	  if (virtual_operand_p (gimple_phi_result (phi)))
	    continue;

	  /* Inner-loop loop-closed exit phi in outer-loop vectorization
	     (i.e., a phi in the tail of the outer-loop).  */
	  if (! is_loop_header_bb_p (bb))
	    {
	      /* FORNOW: we currently don't support the case that these phis
		 are not used in the outerloop (unless it is double reduction,
		 i.e., this phi is vect_reduction_def), because this case
		 requires to actually do something here.  */
	      if (STMT_VINFO_LIVE_P (stmt_info)
		  && !vect_active_double_reduction_p (stmt_info))
		{
		  if (dump_enabled_p ())
		    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				     "Unsupported loop-closed phi in "
				     "outer-loop.\n");
		  return false;
		}

	      /* If PHI is used in the outer loop, we check that its operand
		 is defined in the inner loop.  */
	      if (STMT_VINFO_RELEVANT_P (stmt_info))
		{
		  tree phi_op;

		  if (gimple_phi_num_args (phi) != 1)
		    return false;

		  phi_op = PHI_ARG_DEF (phi, 0);
		  stmt_vec_info op_def_info = loop_vinfo->lookup_def (phi_op);
		  if (!op_def_info)
		    return false;

		  if (STMT_VINFO_RELEVANT (op_def_info) != vect_used_in_outer
		      && (STMT_VINFO_RELEVANT (op_def_info)
			  != vect_used_in_outer_by_reduction))
		    return false;
		}

	      continue;
	    }

	  gcc_assert (stmt_info);

	  if ((STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_scope
	       || STMT_VINFO_LIVE_P (stmt_info))
	      && STMT_VINFO_DEF_TYPE (stmt_info) != vect_induction_def)
	    {
	      /* A scalar-dependence cycle that we don't support.  */
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "not vectorized: scalar dependence cycle.\n");
	      return false;
	    }

	  if (STMT_VINFO_RELEVANT_P (stmt_info))
	    {
	      need_to_vectorize = true;
	      if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def
		  && ! PURE_SLP_STMT (stmt_info))
		ok = vectorizable_induction (phi, NULL, NULL, NULL, &cost_vec);
	      else if ((STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def
			|| STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle)
		       && ! PURE_SLP_STMT (stmt_info))
		ok = vectorizable_reduction (phi, NULL, NULL, NULL, NULL,
					     &cost_vec);
	    }

	  /* SLP PHIs are tested by vect_slp_analyze_node_operations.  */
	  if (ok
	      && STMT_VINFO_LIVE_P (stmt_info)
	      && !PURE_SLP_STMT (stmt_info))
	    ok = vectorizable_live_operation (phi, NULL, NULL, -1, NULL,
					      &cost_vec);

	  if (!ok)
	    {
	      if (dump_enabled_p ())
		{
		  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				   "not vectorized: relevant phi not "
				   "supported: ");
		  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, phi, 0);
		}
	      return false;
	    }
	}

      for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
	   gsi_next (&si))
	{
	  gimple *stmt = gsi_stmt (si);
	  if (!gimple_clobber_p (stmt)
	      && !vect_analyze_stmt (stmt, &need_to_vectorize, NULL, NULL,
				     &cost_vec))
	    return false;
	}
    }

  add_stmt_costs (loop_vinfo->target_cost_data, &cost_vec);
  cost_vec.release ();

  /* All operations in the loop are either irrelevant (deal with loop
     control, or dead), or only used outside the loop and can be moved
     out of the loop (e.g. invariants, inductions).  The loop can be
     optimized away by scalar optimizations.  We're better off not
     touching this loop.  */
  if (!need_to_vectorize)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "All the computation can be taken out of the loop.\n");
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not vectorized: redundant loop. no profit to "
			 "vectorize.\n");
      return false;
    }

  return true;
}
/* Analyze the cost of the loop described by LOOP_VINFO.  Decide if it
   is worthwhile to vectorize.  Return 1 if definitely yes, 0 if
   definitely no, or -1 if it's worth retrying.  */
static int
vect_analyze_loop_costing (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  unsigned int assumed_vf = vect_vf_for_cost (loop_vinfo);

  /* Only fully-masked loops can have iteration counts less than the
     vectorization factor.  */
  if (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
    {
      HOST_WIDE_INT max_niter;

      if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
	max_niter = LOOP_VINFO_INT_NITERS (loop_vinfo);
      else
	max_niter = max_stmt_executions_int (loop);

      if (max_niter != -1
	  && (unsigned HOST_WIDE_INT) max_niter < assumed_vf)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "not vectorized: iteration count smaller than "
			     "vectorization factor.\n");
	  return 0;
	}
    }

  int min_profitable_iters, min_profitable_estimate;
  vect_estimate_min_profitable_iters (loop_vinfo, &min_profitable_iters,
				      &min_profitable_estimate);

  if (min_profitable_iters < 0)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not vectorized: vectorization not profitable.\n");
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not vectorized: vector version will never be "
			 "profitable.\n");
      return -1;
    }

  int min_scalar_loop_bound = (PARAM_VALUE (PARAM_MIN_VECT_LOOP_BOUND)
			       * assumed_vf);

  /* Use the cost model only if it is more conservative than the user
     specified threshold.  */
  unsigned int th = (unsigned) MAX (min_scalar_loop_bound,
				    min_profitable_iters);

  LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo) = th;

  if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
      && LOOP_VINFO_INT_NITERS (loop_vinfo) < th)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not vectorized: vectorization not profitable.\n");
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "not vectorized: iteration count smaller than user "
			 "specified loop bound parameter or minimum profitable "
			 "iterations (whichever is more conservative).\n");
      return 0;
    }

  HOST_WIDE_INT estimated_niter = estimated_stmt_executions_int (loop);
  if (estimated_niter == -1)
    estimated_niter = likely_max_stmt_executions_int (loop);
  if (estimated_niter != -1
      && ((unsigned HOST_WIDE_INT) estimated_niter
	  < MAX (th, (unsigned) min_profitable_estimate)))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not vectorized: estimated iteration count too "
			 "small.\n");
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "not vectorized: estimated iteration count smaller "
			 "than specified loop bound parameter or minimum "
			 "profitable iterations (whichever is more "
			 "conservative).\n");
      return -1;
    }

  return 1;
}
static bool
vect_get_datarefs_in_loop (loop_p loop, basic_block *bbs,
			   vec<data_reference_p> *datarefs,
			   unsigned int *n_stmts)
{
  *n_stmts = 0;
  for (unsigned i = 0; i < loop->num_nodes; i++)
    for (gimple_stmt_iterator gsi = gsi_start_bb (bbs[i]);
	 !gsi_end_p (gsi); gsi_next (&gsi))
      {
	gimple *stmt = gsi_stmt (gsi);
	if (is_gimple_debug (stmt))
	  continue;
	++(*n_stmts);
	if (!vect_find_stmt_data_reference (loop, stmt, datarefs))
	  {
	    if (is_gimple_call (stmt) && loop->safelen)
	      {
		tree fndecl = gimple_call_fndecl (stmt), op;
		if (fndecl != NULL_TREE)
		  {
		    cgraph_node *node = cgraph_node::get (fndecl);
		    if (node != NULL && node->simd_clones != NULL)
		      {
			unsigned int j, n = gimple_call_num_args (stmt);
			for (j = 0; j < n; j++)
			  {
			    op = gimple_call_arg (stmt, j);
			    if (DECL_P (op)
				|| (REFERENCE_CLASS_P (op)
				    && get_base_address (op)))
			      break;
			  }
			op = gimple_call_lhs (stmt);
			/* Ignore #pragma omp declare simd functions
			   if they don't have data references in the
			   call stmt itself.  */
			if (j == n
			    && !(op
				 && (DECL_P (op)
				     || (REFERENCE_CLASS_P (op)
					 && get_base_address (op)))))
			  continue;
		      }
		  }
	      }
	    return false;
	  }
	/* If dependence analysis will give up due to the limit on the
	   number of datarefs stop here and fail fatally.  */
	if (datarefs->length ()
	    > (unsigned)PARAM_VALUE (PARAM_LOOP_MAX_DATAREFS_FOR_DATADEPS))
	  return false;
      }
  return true;
}
/* Function vect_analyze_loop_2.

   Apply a set of analyses on LOOP, and create a loop_vec_info struct
   for it.  The different analyses will record information in the
   loop_vec_info struct.  */
1828 vect_analyze_loop_2 (loop_vec_info loop_vinfo
, bool &fatal
, unsigned *n_stmts
)
1832 unsigned int max_vf
= MAX_VECTORIZATION_FACTOR
;
1833 poly_uint64 min_vf
= 2;
1835 /* The first group of checks is independent of the vector size. */
1838 /* Find all data references in the loop (which correspond to vdefs/vuses)
1839 and analyze their evolution in the loop. */
1841 loop_p loop
= LOOP_VINFO_LOOP (loop_vinfo
);
1843 /* Gather the data references and count stmts in the loop. */
1844 if (!LOOP_VINFO_DATAREFS (loop_vinfo
).exists ())
1846 if (!vect_get_datarefs_in_loop (loop
, LOOP_VINFO_BBS (loop_vinfo
),
1847 &LOOP_VINFO_DATAREFS (loop_vinfo
),
1850 if (dump_enabled_p ())
1851 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
1852 "not vectorized: loop contains function "
1853 "calls or data references that cannot "
1857 loop_vinfo
->shared
->save_datarefs ();
1860 loop_vinfo
->shared
->check_datarefs ();
1862 /* Analyze the data references and also adjust the minimal
1863 vectorization factor according to the loads and stores. */
1865 ok
= vect_analyze_data_refs (loop_vinfo
, &min_vf
);
1868 if (dump_enabled_p ())
1869 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
1870 "bad data references.\n");
1874 /* Classify all cross-iteration scalar data-flow cycles.
1875 Cross-iteration cycles caused by virtual phis are analyzed separately. */
1876 vect_analyze_scalar_cycles (loop_vinfo
);
1878 vect_pattern_recog (loop_vinfo
);
1880 vect_fixup_scalar_cycles_with_patterns (loop_vinfo
);
1882 /* Analyze the access patterns of the data-refs in the loop (consecutive,
1883 complex, etc.). FORNOW: Only handle consecutive access pattern. */
1885 ok
= vect_analyze_data_ref_accesses (loop_vinfo
);
1888 if (dump_enabled_p ())
1889 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
1890 "bad data access.\n");
1894 /* Data-flow analysis to detect stmts that do not need to be vectorized. */
1896 ok
= vect_mark_stmts_to_be_vectorized (loop_vinfo
);
1899 if (dump_enabled_p ())
1900 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
1901 "unexpected pattern.\n");
1905 /* While the rest of the analysis below depends on it in some way. */
1908 /* Analyze data dependences between the data-refs in the loop
1909 and adjust the maximum vectorization factor according to
1911 FORNOW: fail at the first data dependence that we encounter. */
1913 ok
= vect_analyze_data_ref_dependences (loop_vinfo
, &max_vf
);
1915 || (max_vf
!= MAX_VECTORIZATION_FACTOR
1916 && maybe_lt (max_vf
, min_vf
)))
1918 if (dump_enabled_p ())
1919 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
1920 "bad data dependence.\n");
1923 LOOP_VINFO_MAX_VECT_FACTOR (loop_vinfo
) = max_vf
;
1925 ok
= vect_determine_vectorization_factor (loop_vinfo
);
1928 if (dump_enabled_p ())
1929 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
1930 "can't determine vectorization factor.\n");
1933 if (max_vf
!= MAX_VECTORIZATION_FACTOR
1934 && maybe_lt (max_vf
, LOOP_VINFO_VECT_FACTOR (loop_vinfo
)))
1936 if (dump_enabled_p ())
1937 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
1938 "bad data dependence.\n");
1942 /* Compute the scalar iteration cost. */
1943 vect_compute_single_scalar_iteration_cost (loop_vinfo
);
1945 poly_uint64 saved_vectorization_factor
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
);
1948 /* Check the SLP opportunities in the loop, analyze and build SLP trees. */
1949 ok
= vect_analyze_slp (loop_vinfo
, *n_stmts
);
1953 /* If there are any SLP instances mark them as pure_slp. */
1954 bool slp
= vect_make_slp_decision (loop_vinfo
);
1957 /* Find stmts that need to be both vectorized and SLPed. */
1958 vect_detect_hybrid_slp (loop_vinfo
);
1960 /* Update the vectorization factor based on the SLP decision. */
1961 vect_update_vf_for_slp (loop_vinfo
);
1964 bool saved_can_fully_mask_p
= LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo
);
1966 /* We don't expect to have to roll back to anything other than an empty
1968 gcc_assert (LOOP_VINFO_MASKS (loop_vinfo
).is_empty ());
1970 /* This is the point where we can re-start analysis with SLP forced off. */
1973 /* Now the vectorization factor is final. */
1974 poly_uint64 vectorization_factor
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
);
1975 gcc_assert (known_ne (vectorization_factor
, 0U));
1977 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo
) && dump_enabled_p ())
1979 dump_printf_loc (MSG_NOTE
, vect_location
,
1980 "vectorization_factor = ");
1981 dump_dec (MSG_NOTE
, vectorization_factor
);
1982 dump_printf (MSG_NOTE
, ", niters = " HOST_WIDE_INT_PRINT_DEC
"\n",
1983 LOOP_VINFO_INT_NITERS (loop_vinfo
));
1986 HOST_WIDE_INT max_niter
1987 = likely_max_stmt_executions_int (LOOP_VINFO_LOOP (loop_vinfo
));
1989 /* Analyze the alignment of the data-refs in the loop.
1990 Fail if a data reference is found that cannot be vectorized. */
1992 ok
= vect_analyze_data_refs_alignment (loop_vinfo
);
1995 if (dump_enabled_p ())
1996 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
1997 "bad data alignment.\n");
2001 /* Prune the list of ddrs to be tested at run-time by versioning for alias.
2002 It is important to call pruning after vect_analyze_data_ref_accesses,
2003 since we use grouping information gathered by interleaving analysis. */
2004 ok
= vect_prune_runtime_alias_test_list (loop_vinfo
);
2008 /* Do not invoke vect_enhance_data_refs_alignment for eplilogue
2010 if (!LOOP_VINFO_EPILOGUE_P (loop_vinfo
))
2012 /* This pass will decide on using loop versioning and/or loop peeling in
2013 order to enhance the alignment of data references in the loop. */
2014 ok
= vect_enhance_data_refs_alignment (loop_vinfo
);
2017 if (dump_enabled_p ())
2018 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
2019 "bad data alignment.\n");
  if (slp)
    {
      /* Analyze operations in the SLP instances.  Note this may
	 remove unsupported SLP instances which makes the above
	 SLP kind detection invalid.  */
      unsigned old_size = LOOP_VINFO_SLP_INSTANCES (loop_vinfo).length ();
      vect_slp_analyze_operations (loop_vinfo);
      if (LOOP_VINFO_SLP_INSTANCES (loop_vinfo).length () != old_size)
	goto again;
    }

  /* Scan all the remaining operations in the loop that are not subject
     to SLP and make sure they are vectorizable.  */
  ok = vect_analyze_loop_operations (loop_vinfo);
  if (!ok)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "bad operation or unsupported loop bound.\n");
      return false;
    }
  /* Decide whether to use a fully-masked loop for this vectorization
     factor.  */
  LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
    = (LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo)
       && vect_verify_full_masking (loop_vinfo));
  if (dump_enabled_p ())
    {
      if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
	dump_printf_loc (MSG_NOTE, vect_location,
			 "using a fully-masked loop.\n");
      else
	dump_printf_loc (MSG_NOTE, vect_location,
			 "not using a fully-masked loop.\n");
    }
  /* If epilog loop is required because of data accesses with gaps,
     one additional iteration needs to be peeled.  Check if there is
     enough iterations for vectorization.  */
  if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo)
      && LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
      && !LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
    {
      poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
      tree scalar_niters = LOOP_VINFO_NITERSM1 (loop_vinfo);

      if (known_lt (wi::to_widest (scalar_niters), vf))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_NOTE, vect_location,
			     "loop does not have enough iterations to support"
			     " peeling for gaps.\n");
	  return false;
	}
    }
  /* Check the costings of the loop make vectorizing worthwhile.  */
  res = vect_analyze_loop_costing (loop_vinfo);
  if (res < 0)
    goto again;
  if (!res)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "Loop costings not worthwhile.\n");
      return false;
    }
  /* Decide whether we need to create an epilogue loop to handle
     remaining scalar iterations.  */
  th = LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo);

  unsigned HOST_WIDE_INT const_vf;
  if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
    /* The main loop handles all iterations.  */
    LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = false;
  else if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
	   && LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) > 0)
    {
      if (!multiple_p (LOOP_VINFO_INT_NITERS (loop_vinfo)
		       - LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo),
		       LOOP_VINFO_VECT_FACTOR (loop_vinfo)))
	LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = true;
    }
  else if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo)
	   || !LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant (&const_vf)
	   || ((tree_ctz (LOOP_VINFO_NITERS (loop_vinfo))
		< (unsigned) exact_log2 (const_vf))
	       /* In case of versioning, check if the maximum number of
		  iterations is greater than th.  If they are identical,
		  the epilogue is unnecessary.  */
	       && (!LOOP_REQUIRES_VERSIONING (loop_vinfo)
		   || ((unsigned HOST_WIDE_INT) max_niter
		       > (th / const_vf) * const_vf))))
    LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = true;
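  /* For example, with a constant VF of 8 (exact_log2 == 3), a loop whose
     iteration count is only known to be a multiple of 4 (tree_ctz == 2)
     may leave a remainder of scalar iterations, so an epilogue loop is
     requested, subject to the versioning threshold check above.  */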
  /* If an epilogue loop is required make sure we can create one.  */
  if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo)
      || LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location, "epilog loop required\n");
      if (!vect_can_advance_ivs_p (loop_vinfo)
	  || !slpeel_can_duplicate_loop_p (LOOP_VINFO_LOOP (loop_vinfo),
					   single_exit (LOOP_VINFO_LOOP
							(loop_vinfo))))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "not vectorized: can't create required "
			     "epilog loop\n");
	  goto again;
	}
    }
  /* During peeling, we need to check if number of loop iterations is
     enough for both peeled prolog loop and vector loop.  This check
     can be merged along with threshold check of loop versioning, so
     increase threshold for this case if necessary.  */
  if (LOOP_REQUIRES_VERSIONING (loop_vinfo))
    {
      poly_uint64 niters_th = 0;

      if (!vect_use_loop_mask_for_alignment_p (loop_vinfo))
	{
	  /* Niters for peeled prolog loop.  */
	  if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) < 0)
	    {
	      struct data_reference *dr = LOOP_VINFO_UNALIGNED_DR (loop_vinfo);
	      tree vectype
		= STMT_VINFO_VECTYPE (vinfo_for_stmt (vect_dr_stmt (dr)));
	      niters_th += TYPE_VECTOR_SUBPARTS (vectype) - 1;
	    }
	  else
	    niters_th += LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo);
	}

      /* Niters for at least one iteration of vectorized loop.  */
      if (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
	niters_th += LOOP_VINFO_VECT_FACTOR (loop_vinfo);
      /* One additional iteration because of peeling for gap.  */
      if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo))
	niters_th += 1;
      LOOP_VINFO_VERSIONING_THRESHOLD (loop_vinfo) = niters_th;
    }
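  /* For example, with 8-element vectors and an unknown prologue peel, the
     prologue contributes at most 8 - 1 = 7 iterations, one full vector
     iteration adds another 8, and peeling for gaps adds 1 more, giving a
     versioning threshold of 16 iterations.  */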
  gcc_assert (known_eq (vectorization_factor,
			LOOP_VINFO_VECT_FACTOR (loop_vinfo)));

  /* Ok to vectorize!  */
  return true;

again:
  /* Try again with SLP forced off but if we didn't do any SLP there is
     no point in re-trying.  */
  if (!slp)
    return false;
  /* If there are reduction chains re-trying will fail anyway.  */
  if (! LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo).is_empty ())
    return false;

  /* Likewise if the grouped loads or stores in the SLP cannot be handled
     via interleaving or lane instructions.  */
  slp_instance instance;
  slp_tree node;
  unsigned i, j;
  FOR_EACH_VEC_ELT (LOOP_VINFO_SLP_INSTANCES (loop_vinfo), i, instance)
    {
      stmt_vec_info vinfo;
      vinfo = vinfo_for_stmt
	  (SLP_TREE_SCALAR_STMTS (SLP_INSTANCE_TREE (instance))[0]);
      if (! STMT_VINFO_GROUPED_ACCESS (vinfo))
	continue;
      vinfo = vinfo_for_stmt (DR_GROUP_FIRST_ELEMENT (vinfo));
      unsigned int size = DR_GROUP_SIZE (vinfo);
      tree vectype = STMT_VINFO_VECTYPE (vinfo);
      if (! vect_store_lanes_supported (vectype, size, false)
	  && ! known_eq (TYPE_VECTOR_SUBPARTS (vectype), 1U)
	  && ! vect_grouped_store_supported (vectype, size))
	return false;
      FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (instance), j, node)
	{
	  vinfo = vinfo_for_stmt (SLP_TREE_SCALAR_STMTS (node)[0]);
	  vinfo = vinfo_for_stmt (DR_GROUP_FIRST_ELEMENT (vinfo));
	  bool single_element_p = !DR_GROUP_NEXT_ELEMENT (vinfo);
	  size = DR_GROUP_SIZE (vinfo);
	  vectype = STMT_VINFO_VECTYPE (vinfo);
	  if (! vect_load_lanes_supported (vectype, size, false)
	      && ! vect_grouped_load_supported (vectype, single_element_p,
						size))
	    return false;
	}
    }

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "re-trying with SLP disabled\n");
  /* Roll back state appropriately.  No SLP this time.  */
  slp = false;
  /* Restore vectorization factor as it were without SLP.  */
  LOOP_VINFO_VECT_FACTOR (loop_vinfo) = saved_vectorization_factor;
  /* Free the SLP instances.  */
  FOR_EACH_VEC_ELT (LOOP_VINFO_SLP_INSTANCES (loop_vinfo), j, instance)
    vect_free_slp_instance (instance, false);
  LOOP_VINFO_SLP_INSTANCES (loop_vinfo).release ();
  /* Reset SLP type to loop_vect on all stmts.  */
  for (i = 0; i < LOOP_VINFO_LOOP (loop_vinfo)->num_nodes; ++i)
    {
      basic_block bb = LOOP_VINFO_BBS (loop_vinfo)[i];
      for (gimple_stmt_iterator si = gsi_start_phis (bb);
	   !gsi_end_p (si); gsi_next (&si))
	{
	  stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (gsi_stmt (si));
	  STMT_SLP_TYPE (stmt_info) = loop_vect;
	}
      for (gimple_stmt_iterator si = gsi_start_bb (bb);
	   !gsi_end_p (si); gsi_next (&si))
	{
	  stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (gsi_stmt (si));
	  STMT_SLP_TYPE (stmt_info) = loop_vect;
	  if (STMT_VINFO_IN_PATTERN_P (stmt_info))
	    {
	      gimple *pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info);
	      stmt_info = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
	      STMT_SLP_TYPE (stmt_info) = loop_vect;
	      for (gimple_stmt_iterator pi = gsi_start (pattern_def_seq);
		   !gsi_end_p (pi); gsi_next (&pi))
		STMT_SLP_TYPE (loop_vinfo->lookup_stmt (gsi_stmt (pi)))
		  = loop_vect;
	    }
	}
    }
  /* Free optimized alias test DDRS.  */
  LOOP_VINFO_LOWER_BOUNDS (loop_vinfo).truncate (0);
  LOOP_VINFO_COMP_ALIAS_DDRS (loop_vinfo).release ();
  LOOP_VINFO_CHECK_UNEQUAL_ADDRS (loop_vinfo).release ();
  /* Reset target cost data.  */
  destroy_cost_data (LOOP_VINFO_TARGET_COST_DATA (loop_vinfo));
  LOOP_VINFO_TARGET_COST_DATA (loop_vinfo)
    = init_cost (LOOP_VINFO_LOOP (loop_vinfo));
  /* Reset accumulated rgroup information.  */
  release_vec_loop_masks (&LOOP_VINFO_MASKS (loop_vinfo));
  /* Reset assorted flags.  */
  LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = false;
  LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = false;
  LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo) = 0;
  LOOP_VINFO_VERSIONING_THRESHOLD (loop_vinfo) = 0;
  LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = saved_can_fully_mask_p;

  goto start_over;
}
/* Function vect_analyze_loop.

   Apply a set of analyses on LOOP, and create a loop_vec_info struct
   for it.  The different analyses will record information in the
   loop_vec_info struct.  If ORIG_LOOP_VINFO is not NULL epilogue must
   be vectorized.  */

loop_vec_info
vect_analyze_loop (struct loop *loop, loop_vec_info orig_loop_vinfo,
		   vec_info_shared *shared)
{
  loop_vec_info loop_vinfo;
  auto_vector_sizes vector_sizes;

  /* Autodetect first vector size we try.  */
  current_vector_size = 0;
  targetm.vectorize.autovectorize_vector_sizes (&vector_sizes);
  unsigned int next_size = 0;

  DUMP_VECT_SCOPE ("analyze_loop_nest");

  if (loop_outer (loop)
      && loop_vec_info_for_loop (loop_outer (loop))
      && LOOP_VINFO_VECTORIZABLE_P (loop_vec_info_for_loop (loop_outer (loop))))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "outer-loop already vectorized.\n");
      return NULL;
    }

  if (!find_loop_nest (loop, &shared->loop_nest))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not vectorized: loop nest containing two "
			 "or more consecutive inner loops cannot be "
			 "vectorized\n");
      return NULL;
    }

  unsigned n_stmts = 0;
  poly_uint64 autodetected_vector_size = 0;
  while (1)
    {
      /* Check the CFG characteristics of the loop (nesting, entry/exit).  */
      loop_vinfo = vect_analyze_loop_form (loop, shared);
      if (!loop_vinfo)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "bad loop form.\n");
	  return NULL;
	}

      bool fatal = false;

      if (orig_loop_vinfo)
	LOOP_VINFO_ORIG_LOOP_INFO (loop_vinfo) = orig_loop_vinfo;

      if (vect_analyze_loop_2 (loop_vinfo, fatal, &n_stmts))
	{
	  LOOP_VINFO_VECTORIZABLE_P (loop_vinfo) = 1;

	  return loop_vinfo;
	}

      delete loop_vinfo;

      if (next_size == 0)
	autodetected_vector_size = current_vector_size;

      if (next_size < vector_sizes.length ()
	  && known_eq (vector_sizes[next_size], autodetected_vector_size))
	next_size += 1;

      if (fatal
	  || next_size == vector_sizes.length ()
	  || known_eq (current_vector_size, 0U))
	return NULL;

      /* Try the next biggest vector size.  */
      current_vector_size = vector_sizes[next_size++];
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "***** Re-trying analysis with "
			   "vector size ");
	  dump_dec (MSG_NOTE, current_vector_size);
	  dump_printf (MSG_NOTE, "\n");
	}
    }
}
/* Return true if there is an in-order reduction function for CODE, storing
   it in *REDUC_FN if so.  */

static bool
fold_left_reduction_fn (tree_code code, internal_fn *reduc_fn)
{
  switch (code)
    {
    case PLUS_EXPR:
      *reduc_fn = IFN_FOLD_LEFT_PLUS;
      return true;

    default:
      return false;
    }
}
/* Function reduction_fn_for_scalar_code

   Input:
   CODE - tree_code of a reduction operations.

   Output:
   REDUC_FN - the corresponding internal function to be used to reduce the
      vector of partial results into a single scalar result, or IFN_LAST
      if the operation is a supported reduction operation, but does not have
      such an internal function.

   Return FALSE if CODE currently cannot be vectorized as reduction.  */

static bool
reduction_fn_for_scalar_code (enum tree_code code, internal_fn *reduc_fn)
{
  switch (code)
    {
    case MAX_EXPR:
      *reduc_fn = IFN_REDUC_MAX;
      return true;

    case MIN_EXPR:
      *reduc_fn = IFN_REDUC_MIN;
      return true;

    case PLUS_EXPR:
      *reduc_fn = IFN_REDUC_PLUS;
      return true;

    case BIT_AND_EXPR:
      *reduc_fn = IFN_REDUC_AND;
      return true;

    case BIT_IOR_EXPR:
      *reduc_fn = IFN_REDUC_IOR;
      return true;

    case BIT_XOR_EXPR:
      *reduc_fn = IFN_REDUC_XOR;
      return true;

    case MULT_EXPR:
    case MINUS_EXPR:
      *reduc_fn = IFN_LAST;
      return true;

    default:
      return false;
    }
}
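/* For example, a loop computing "m = MAX (m, a[i])" maps MAX_EXPR to
   IFN_REDUC_MAX, so the vector of partial maxima can be collapsed with a
   single internal-function call in the epilogue; an operation that is
   supported as a reduction but has no such internal function comes back
   as IFN_LAST, and the epilogue falls back to whole-vector shifts or
   element extracts (see vect_model_reduction_cost below).  */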
/* If there is a neutral value X such that SLP reduction NODE would not
   be affected by the introduction of additional X elements, return that X,
   otherwise return null.  CODE is the code of the reduction.  REDUC_CHAIN
   is true if the SLP statements perform a single reduction, false if each
   statement performs an independent reduction.  */

static tree
neutral_op_for_slp_reduction (slp_tree slp_node, tree_code code,
			      bool reduc_chain)
{
  vec<gimple *> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
  gimple *stmt = stmts[0];
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  tree vector_type = STMT_VINFO_VECTYPE (stmt_vinfo);
  tree scalar_type = TREE_TYPE (vector_type);
  struct loop *loop = gimple_bb (stmt)->loop_father;

  switch (code)
    {
    case WIDEN_SUM_EXPR:
    case DOT_PROD_EXPR:
    case SAD_EXPR:
    case PLUS_EXPR:
    case MINUS_EXPR:
    case BIT_IOR_EXPR:
    case BIT_XOR_EXPR:
      return build_zero_cst (scalar_type);

    case MULT_EXPR:
      return build_one_cst (scalar_type);

    case BIT_AND_EXPR:
      return build_all_ones_cst (scalar_type);

    case MAX_EXPR:
    case MIN_EXPR:
      /* For MIN/MAX the initial values are neutral.  A reduction chain
	 has only a single initial value, so that value is neutral for
	 the first element.  */
      if (reduc_chain)
	return PHI_ARG_DEF_FROM_EDGE (stmt, loop_preheader_edge (loop));
      return NULL_TREE;

    default:
      return NULL_TREE;
    }
}
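/* For example, padding a PLUS_EXPR SLP reduction with extra zero elements
   leaves the final sum unchanged, and padding a MULT_EXPR reduction with
   ones leaves the product unchanged; that is exactly what makes those
   values "neutral" for the purposes of this function.  */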
/* Error reporting helper for vect_is_simple_reduction below.  GIMPLE statement
   STMT is printed with a message MSG.  */

static void
report_vect_op (dump_flags_t msg_type, gimple *stmt, const char *msg)
{
  dump_printf_loc (msg_type, vect_location, "%s", msg);
  dump_gimple_stmt (msg_type, TDF_SLIM, stmt, 0);
}
/* DEF_STMT_INFO occurs in a loop that contains a potential reduction
   operation.  Return true if the results of DEF_STMT_INFO are something
   that can be accumulated by such a reduction.  */

static bool
vect_valid_reduction_input_p (stmt_vec_info def_stmt_info)
{
  return (is_gimple_assign (def_stmt_info->stmt)
	  || is_gimple_call (def_stmt_info->stmt)
	  || STMT_VINFO_DEF_TYPE (def_stmt_info) == vect_induction_def
	  || (gimple_code (def_stmt_info->stmt) == GIMPLE_PHI
	      && STMT_VINFO_DEF_TYPE (def_stmt_info) == vect_internal_def
	      && !is_loop_header_bb_p (gimple_bb (def_stmt_info->stmt))));
}
/* Detect SLP reduction of the form:

   #a1 = phi <a5, a0>
   a2 = operation (a1)
   a3 = operation (a2)
   a4 = operation (a3)
   a5 = operation (a4)

   #a = phi <a5>

   PHI is the reduction phi node (#a1 = phi <a5, a0> above)
   FIRST_STMT is the first reduction stmt in the chain
   (a2 = operation (a1)).

   Return TRUE if a reduction chain was detected.  */

static bool
vect_is_slp_reduction (loop_vec_info loop_info, gimple *phi,
		       gimple *first_stmt)
{
  struct loop *loop = (gimple_bb (phi))->loop_father;
  struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
  enum tree_code code;
  gimple *current_stmt = NULL, *loop_use_stmt = NULL, *first, *next_stmt;
  stmt_vec_info use_stmt_info, current_stmt_info;
  tree lhs;
  imm_use_iterator imm_iter;
  use_operand_p use_p;
  int nloop_uses, size = 0, n_out_of_loop_uses;
  bool found = false;

  if (loop != vect_loop)
    return false;

  lhs = PHI_RESULT (phi);
  code = gimple_assign_rhs_code (first_stmt);
  while (1)
    {
      nloop_uses = 0;
      n_out_of_loop_uses = 0;
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
	{
	  gimple *use_stmt = USE_STMT (use_p);
	  if (is_gimple_debug (use_stmt))
	    continue;

	  /* Check if we got back to the reduction phi.  */
	  if (use_stmt == phi)
	    {
	      loop_use_stmt = use_stmt;
	      found = true;
	      break;
	    }

	  if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
	    {
	      loop_use_stmt = use_stmt;
	      nloop_uses++;
	    }
	  else
	    n_out_of_loop_uses++;

	  /* There can be either a single use in the loop or two uses in
	     phi nodes.  */
	  if (nloop_uses > 1 || (n_out_of_loop_uses && nloop_uses))
	    return false;
	}

      if (found)
	break;

      /* We reached a statement with no loop uses.  */
      if (nloop_uses == 0)
	return false;

      /* This is a loop exit phi, and we haven't reached the reduction phi.  */
      if (gimple_code (loop_use_stmt) == GIMPLE_PHI)
	return false;

      if (!is_gimple_assign (loop_use_stmt)
	  || code != gimple_assign_rhs_code (loop_use_stmt)
	  || !flow_bb_inside_loop_p (loop, gimple_bb (loop_use_stmt)))
	return false;

      /* Insert USE_STMT into reduction chain.  */
      use_stmt_info = loop_info->lookup_stmt (loop_use_stmt);
      if (current_stmt)
	{
	  current_stmt_info = vinfo_for_stmt (current_stmt);
	  REDUC_GROUP_NEXT_ELEMENT (current_stmt_info) = loop_use_stmt;
	  REDUC_GROUP_FIRST_ELEMENT (use_stmt_info)
	    = REDUC_GROUP_FIRST_ELEMENT (current_stmt_info);
	}
      else
	REDUC_GROUP_FIRST_ELEMENT (use_stmt_info) = loop_use_stmt;

      lhs = gimple_assign_lhs (loop_use_stmt);
      current_stmt = loop_use_stmt;
      size++;
    }

  if (!found || loop_use_stmt != phi || size < 2)
    return false;

  /* Swap the operands, if needed, to make the reduction operand be the second
     operand.  */
  lhs = PHI_RESULT (phi);
  next_stmt = REDUC_GROUP_FIRST_ELEMENT (vinfo_for_stmt (current_stmt));
  while (next_stmt)
    {
      if (gimple_assign_rhs2 (next_stmt) == lhs)
	{
	  tree op = gimple_assign_rhs1 (next_stmt);
	  stmt_vec_info def_stmt_info = loop_info->lookup_def (op);

	  /* Check that the other def is either defined in the loop
	     ("vect_internal_def"), or it's an induction (defined by a
	     loop-header phi-node).  */
	  if (def_stmt_info
	      && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt_info->stmt))
	      && vect_valid_reduction_input_p (def_stmt_info))
	    {
	      lhs = gimple_assign_lhs (next_stmt);
	      next_stmt = REDUC_GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
	      continue;
	    }

	  return false;
	}
      else
	{
	  tree op = gimple_assign_rhs2 (next_stmt);
	  stmt_vec_info def_stmt_info = loop_info->lookup_def (op);

	  /* Check that the other def is either defined in the loop
	     ("vect_internal_def"), or it's an induction (defined by a
	     loop-header phi-node).  */
	  if (def_stmt_info
	      && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt_info->stmt))
	      && vect_valid_reduction_input_p (def_stmt_info))
	    {
	      if (dump_enabled_p ())
		{
		  dump_printf_loc (MSG_NOTE, vect_location,
				   "swapping oprnds: ");
		  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, next_stmt, 0);
		}

	      swap_ssa_operands (next_stmt,
				 gimple_assign_rhs1_ptr (next_stmt),
				 gimple_assign_rhs2_ptr (next_stmt));
	      update_stmt (next_stmt);

	      if (CONSTANT_CLASS_P (gimple_assign_rhs1 (next_stmt)))
		LOOP_VINFO_OPERANDS_SWAPPED (loop_info) = true;
	    }
	  else
	    return false;
	}

      lhs = gimple_assign_lhs (next_stmt);
      next_stmt = REDUC_GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
    }

  /* Save the chain for further analysis in SLP detection.  */
  first = REDUC_GROUP_FIRST_ELEMENT (vinfo_for_stmt (current_stmt));
  LOOP_VINFO_REDUCTION_CHAINS (loop_info).safe_push (first);
  REDUC_GROUP_SIZE (vinfo_for_stmt (first)) = size;

  return true;
}
/* Return true if we need an in-order reduction for operation CODE
   on type TYPE.  NEED_WRAPPING_INTEGRAL_OVERFLOW is true if integer
   overflow must wrap.  */

static bool
needs_fold_left_reduction_p (tree type, tree_code code,
			     bool need_wrapping_integral_overflow)
{
  /* CHECKME: check for !flag_finite_math_only too?  */
  if (SCALAR_FLOAT_TYPE_P (type))
    switch (code)
      {
      case MIN_EXPR:
      case MAX_EXPR:
	return false;

      default:
	return !flag_associative_math;
      }

  if (INTEGRAL_TYPE_P (type))
    {
      if (!operation_no_trapping_overflow (type, code))
	return true;
      if (need_wrapping_integral_overflow
	  && !TYPE_OVERFLOW_WRAPS (type)
	  && operation_can_overflow (code))
	return true;
      return false;
    }

  if (SAT_FIXED_POINT_TYPE_P (type))
    return true;

  return false;
}
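/* For example, a float PLUS_EXPR reduction compiled without
   -funsafe-math-optimizations (so !flag_associative_math) must be kept
   in order, while a float MIN_EXPR/MAX_EXPR reduction never needs that.
   A signed integer addition needs in-order treatment when overflow would
   trap, or when the caller requires wrapping semantics and the type does
   not already wrap.  */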
/* Return true if the reduction PHI in LOOP with latch arg LOOP_ARG and
   reduction operation CODE has a handled computation expression.  */

bool
check_reduction_path (dump_user_location_t loc, loop_p loop, gphi *phi,
		      tree loop_arg, enum tree_code code)
{
  auto_vec<std::pair<ssa_op_iter, use_operand_p> > path;
  auto_bitmap visited;
  tree lookfor = PHI_RESULT (phi);
  ssa_op_iter curri;
  use_operand_p curr = op_iter_init_phiuse (&curri, phi, SSA_OP_USE);
  while (USE_FROM_PTR (curr) != loop_arg)
    curr = op_iter_next_use (&curri);
  curri.i = curri.numops;
  do
    {
      path.safe_push (std::make_pair (curri, curr));
      tree use = USE_FROM_PTR (curr);
      if (use == lookfor)
	break;
      gimple *def = SSA_NAME_DEF_STMT (use);
      if (gimple_nop_p (def)
	  || ! flow_bb_inside_loop_p (loop, gimple_bb (def)))
	{
pop:
	  do
	    {
	      std::pair<ssa_op_iter, use_operand_p> x = path.pop ();
	      curri = x.first;
	      curr = x.second;
	      do
		curr = op_iter_next_use (&curri);
	      /* Skip already visited or non-SSA operands (from iterating
		 over PHI args).  */
	      while (curr != NULL_USE_OPERAND_P
		     && (TREE_CODE (USE_FROM_PTR (curr)) != SSA_NAME
			 || ! bitmap_set_bit (visited,
					      SSA_NAME_VERSION
						(USE_FROM_PTR (curr)))));
	    }
	  while (curr == NULL_USE_OPERAND_P && ! path.is_empty ());
	  if (curr == NULL_USE_OPERAND_P)
	    break;
	}
      else
	{
	  if (gimple_code (def) == GIMPLE_PHI)
	    curr = op_iter_init_phiuse (&curri, as_a <gphi *>(def), SSA_OP_USE);
	  else
	    curr = op_iter_init_use (&curri, def, SSA_OP_USE);
	  while (curr != NULL_USE_OPERAND_P
		 && (TREE_CODE (USE_FROM_PTR (curr)) != SSA_NAME
		     || ! bitmap_set_bit (visited,
					  SSA_NAME_VERSION
					    (USE_FROM_PTR (curr)))))
	    curr = op_iter_next_use (&curri);
	  if (curr == NULL_USE_OPERAND_P)
	    goto pop;
	}
    }
  while (1);
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      dump_printf_loc (MSG_NOTE, loc, "reduction path: ");
      unsigned i;
      std::pair<ssa_op_iter, use_operand_p> *x;
      FOR_EACH_VEC_ELT (path, i, x)
	{
	  dump_generic_expr (MSG_NOTE, TDF_SLIM, USE_FROM_PTR (x->second));
	  dump_printf (MSG_NOTE, " ");
	}
      dump_printf (MSG_NOTE, "\n");
    }

  /* Check whether the reduction path detected is valid.  */
  bool fail = path.length () == 0;
  bool neg = false;
  for (unsigned i = 1; i < path.length (); ++i)
    {
      gimple *use_stmt = USE_STMT (path[i].second);
      tree op = USE_FROM_PTR (path[i].second);
      if (! has_single_use (op)
	  || ! is_gimple_assign (use_stmt))
	{
	  fail = true;
	  break;
	}
      if (gimple_assign_rhs_code (use_stmt) != code)
	{
	  if (code == PLUS_EXPR
	      && gimple_assign_rhs_code (use_stmt) == MINUS_EXPR)
	    {
	      /* Track whether we negate the reduction value each iteration.  */
	      if (gimple_assign_rhs2 (use_stmt) == op)
		neg = ! neg;
	    }
	  else
	    {
	      fail = true;
	      break;
	    }
	}
    }
  return ! fail && ! neg;
}
/* Function vect_is_simple_reduction

   (1) Detect a cross-iteration def-use cycle that represents a simple
   reduction computation.  We look for the following pattern:

   loop_header:
     a1 = phi < a0, a2 >
     a3 = ...
     a2 = operation (a3, a1)

   or

   a3 = ...
   loop_header:
     a1 = phi < a0, a2 >
     a2 = operation (a3, a1)

   such that:
   1. operation is commutative and associative and it is safe to
      change the order of the computation
   2. no uses for a2 in the loop (a2 is used out of the loop)
   3. no uses of a1 in the loop besides the reduction operation
   4. no uses of a1 outside the loop.

   Conditions 1,4 are tested here.
   Conditions 2,3 are tested in vect_mark_stmts_to_be_vectorized.

   (2) Detect a cross-iteration def-use cycle in nested loops, i.e.,
   nested cycles.

   (3) Detect cycles of phi nodes in outer-loop vectorization, i.e., double
   reductions:

     a1 = phi < a0, a2 >
     inner loop (def of a3)
     a2 = phi < a3 >

   (4) Detect condition expressions, ie:
     for (int i = 0; i < N; i++)
       if (a[i] < val)
	 ret_val = a[i];

*/

static gimple *
vect_is_simple_reduction (loop_vec_info loop_info, gimple *phi,
			  bool *double_reduc,
			  bool need_wrapping_integral_overflow,
			  enum vect_reduction_type *v_reduc_type)
{
  struct loop *loop = (gimple_bb (phi))->loop_father;
  struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
  gimple *def_stmt, *phi_use_stmt = NULL;
  enum tree_code orig_code, code;
  tree op1, op2, op3 = NULL_TREE, op4 = NULL_TREE;
  tree type;
  tree name;
  imm_use_iterator imm_iter;
  use_operand_p use_p;
  bool phi_def;
  unsigned nloop_uses = 0;

  *double_reduc = false;
  *v_reduc_type = TREE_CODE_REDUCTION;

  tree phi_name = PHI_RESULT (phi);
  /* ???  If there are no uses of the PHI result the inner loop reduction
     won't be detected as possibly double-reduction by vectorizable_reduction
     because that tries to walk the PHI arg from the preheader edge which
     can be constant.  See PR60382.  */
  if (has_zero_uses (phi_name))
    return NULL;
  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, phi_name)
    {
      gimple *use_stmt = USE_STMT (use_p);
      if (is_gimple_debug (use_stmt))
	continue;

      if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "intermediate value used outside loop.\n");
	  return NULL;
	}

      nloop_uses++;
      if (nloop_uses > 1)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "reduction value used in loop.\n");
	  return NULL;
	}

      phi_use_stmt = use_stmt;
    }

  edge latch_e = loop_latch_edge (loop);
  tree loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);
  if (TREE_CODE (loop_arg) != SSA_NAME)
    {
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "reduction: not ssa_name: ");
	  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, loop_arg);
	  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
	}
      return NULL;
    }

  def_stmt = SSA_NAME_DEF_STMT (loop_arg);
  if (is_gimple_assign (def_stmt))
    {
      name = gimple_assign_lhs (def_stmt);
      phi_def = false;
    }
  else if (gimple_code (def_stmt) == GIMPLE_PHI)
    {
      name = PHI_RESULT (def_stmt);
      phi_def = true;
    }
  else
    {
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "reduction: unhandled reduction operation: ");
	  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, def_stmt, 0);
	}
      return NULL;
    }

  if (! flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
    return NULL;

  nloop_uses = 0;
  auto_vec<gphi *, 3> lcphis;
  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, name)
    {
      gimple *use_stmt = USE_STMT (use_p);
      if (is_gimple_debug (use_stmt))
	continue;
      if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
	nloop_uses++;
      else
	/* We can have more than one loop-closed PHI.  */
	lcphis.safe_push (as_a <gphi *> (use_stmt));
      if (nloop_uses > 1)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "reduction used in loop.\n");
	  return NULL;
	}
    }

  /* If DEF_STMT is a phi node itself, we expect it to have a single argument
     defined in the inner loop.  */
  if (phi_def)
    {
      op1 = PHI_ARG_DEF (def_stmt, 0);

      if (gimple_phi_num_args (def_stmt) != 1
	  || TREE_CODE (op1) != SSA_NAME)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "unsupported phi node definition.\n");
	  return NULL;
	}

      gimple *def1 = SSA_NAME_DEF_STMT (op1);
      if (gimple_bb (def1)
	  && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
	  && loop->inner
	  && flow_bb_inside_loop_p (loop->inner, gimple_bb (def1))
	  && is_gimple_assign (def1)
	  && flow_bb_inside_loop_p (loop->inner, gimple_bb (phi_use_stmt)))
	{
	  if (dump_enabled_p ())
	    report_vect_op (MSG_NOTE, def_stmt,
			    "detected double reduction: ");

	  *double_reduc = true;
	  return def_stmt;
	}

      return NULL;
    }

  /* If we are vectorizing an inner reduction we are executing that
     in the original order only in case we are not dealing with a
     double reduction.  */
  bool check_reduction = true;
  if (flow_loop_nested_p (vect_loop, loop))
    {
      unsigned i;
      gphi *lcphi;
      check_reduction = false;
      FOR_EACH_VEC_ELT (lcphis, i, lcphi)
	FOR_EACH_IMM_USE_FAST (use_p, imm_iter, gimple_phi_result (lcphi))
	  {
	    gimple *use_stmt = USE_STMT (use_p);
	    if (is_gimple_debug (use_stmt))
	      continue;
	    if (! flow_bb_inside_loop_p (vect_loop, gimple_bb (use_stmt)))
	      check_reduction = true;
	  }
    }

  bool nested_in_vect_loop = flow_loop_nested_p (vect_loop, loop);
  code = orig_code = gimple_assign_rhs_code (def_stmt);

  /* We can handle "res -= x[i]", which is non-associative by
     simply rewriting this into "res += -x[i]".  Avoid changing
     gimple instruction for the first simple tests and only do this
     if we're allowed to change code at all.  */
  if (code == MINUS_EXPR && gimple_assign_rhs2 (def_stmt) != phi_name)
    code = PLUS_EXPR;

  if (code == COND_EXPR)
    {
      if (! nested_in_vect_loop)
	*v_reduc_type = COND_REDUCTION;

      op3 = gimple_assign_rhs1 (def_stmt);
      if (COMPARISON_CLASS_P (op3))
	{
	  op4 = TREE_OPERAND (op3, 1);
	  op3 = TREE_OPERAND (op3, 0);
	}
      if (op3 == phi_name || op4 == phi_name)
	{
	  if (dump_enabled_p ())
	    report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
			    "reduction: condition depends on previous"
			    " iteration: ");
	  return NULL;
	}

      op1 = gimple_assign_rhs2 (def_stmt);
      op2 = gimple_assign_rhs3 (def_stmt);
    }
  else if (!commutative_tree_code (code) || !associative_tree_code (code))
    {
      if (dump_enabled_p ())
	report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
			"reduction: not commutative/associative: ");
      return NULL;
    }
  else if (get_gimple_rhs_class (code) == GIMPLE_BINARY_RHS)
    {
      op1 = gimple_assign_rhs1 (def_stmt);
      op2 = gimple_assign_rhs2 (def_stmt);
    }
  else
    {
      if (dump_enabled_p ())
	report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
			"reduction: not handled operation: ");
      return NULL;
    }

  if (TREE_CODE (op1) != SSA_NAME && TREE_CODE (op2) != SSA_NAME)
    {
      if (dump_enabled_p ())
	report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
			"reduction: both uses not ssa_names: ");
      return NULL;
    }

  type = TREE_TYPE (gimple_assign_lhs (def_stmt));
  if ((TREE_CODE (op1) == SSA_NAME
       && !types_compatible_p (type, TREE_TYPE (op1)))
      || (TREE_CODE (op2) == SSA_NAME
	  && !types_compatible_p (type, TREE_TYPE (op2)))
      || (op3 && TREE_CODE (op3) == SSA_NAME
	  && !types_compatible_p (type, TREE_TYPE (op3)))
      || (op4 && TREE_CODE (op4) == SSA_NAME
	  && !types_compatible_p (type, TREE_TYPE (op4))))
    {
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "reduction: multiple types: operation type: ");
	  dump_generic_expr (MSG_NOTE, TDF_SLIM, type);
	  dump_printf (MSG_NOTE, ", operands types: ");
	  dump_generic_expr (MSG_NOTE, TDF_SLIM, TREE_TYPE (op1));
	  dump_printf (MSG_NOTE, ",");
	  dump_generic_expr (MSG_NOTE, TDF_SLIM, TREE_TYPE (op2));
	  if (op3)
	    {
	      dump_printf (MSG_NOTE, ",");
	      dump_generic_expr (MSG_NOTE, TDF_SLIM, TREE_TYPE (op3));
	    }
	  if (op4)
	    {
	      dump_printf (MSG_NOTE, ",");
	      dump_generic_expr (MSG_NOTE, TDF_SLIM, TREE_TYPE (op4));
	    }
	  dump_printf (MSG_NOTE, "\n");
	}
      return NULL;
    }

  /* Check whether it's ok to change the order of the computation.
     Generally, when vectorizing a reduction we change the order of the
     computation.  This may change the behavior of the program in some
     cases, so we need to check that this is ok.  One exception is when
     vectorizing an outer-loop: the inner-loop is executed sequentially,
     and therefore vectorizing reductions in the inner-loop during
     outer-loop vectorization is safe.  */
  if (check_reduction
      && *v_reduc_type == TREE_CODE_REDUCTION
      && needs_fold_left_reduction_p (type, code,
				      need_wrapping_integral_overflow))
    *v_reduc_type = FOLD_LEFT_REDUCTION;

  /* Reduction is safe.  We're dealing with one of the following:
     1) integer arithmetic and no trapv
     2) floating point arithmetic, and special flags permit this optimization
     3) nested cycle (i.e., outer loop vectorization).  */
  stmt_vec_info def1_info = loop_info->lookup_def (op1);
  stmt_vec_info def2_info = loop_info->lookup_def (op2);
  if (code != COND_EXPR && !def1_info && !def2_info)
    {
      if (dump_enabled_p ())
	report_vect_op (MSG_NOTE, def_stmt, "reduction: no defs for operands: ");
      return NULL;
    }

  /* Check that one def is the reduction def, defined by PHI,
     the other def is either defined in the loop ("vect_internal_def"),
     or it's an induction (defined by a loop-header phi-node).  */

  if (def2_info
      && def2_info->stmt == phi
      && (code == COND_EXPR
	  || !def1_info
	  || vect_valid_reduction_input_p (def1_info)))
    {
      if (dump_enabled_p ())
	report_vect_op (MSG_NOTE, def_stmt, "detected reduction: ");
      return def_stmt;
    }

  if (def1_info
      && def1_info->stmt == phi
      && (code == COND_EXPR
	  || !def2_info
	  || vect_valid_reduction_input_p (def2_info)))
    {
      if (! nested_in_vect_loop && orig_code != MINUS_EXPR)
	{
	  /* Check if we can swap operands (just for simplicity - so that
	     the rest of the code can assume that the reduction variable
	     is always the last (second) argument).  */
	  if (code == COND_EXPR)
	    {
	      /* Swap cond_expr by inverting the condition.  */
	      tree cond_expr = gimple_assign_rhs1 (def_stmt);
	      enum tree_code invert_code = ERROR_MARK;
	      enum tree_code cond_code = TREE_CODE (cond_expr);

	      if (TREE_CODE_CLASS (cond_code) == tcc_comparison)
		{
		  bool honor_nans = HONOR_NANS (TREE_OPERAND (cond_expr, 0));
		  invert_code = invert_tree_comparison (cond_code, honor_nans);
		}
	      if (invert_code != ERROR_MARK)
		{
		  TREE_SET_CODE (cond_expr, invert_code);
		  swap_ssa_operands (def_stmt,
				     gimple_assign_rhs2_ptr (def_stmt),
				     gimple_assign_rhs3_ptr (def_stmt));
		}
	      else
		{
		  if (dump_enabled_p ())
		    report_vect_op (MSG_NOTE, def_stmt,
				    "detected reduction: cannot swap operands "
				    "for cond_expr");
		  return NULL;
		}
	    }
	  else
	    swap_ssa_operands (def_stmt, gimple_assign_rhs1_ptr (def_stmt),
			       gimple_assign_rhs2_ptr (def_stmt));

	  if (dump_enabled_p ())
	    report_vect_op (MSG_NOTE, def_stmt,
			    "detected reduction: need to swap operands: ");

	  if (CONSTANT_CLASS_P (gimple_assign_rhs1 (def_stmt)))
	    LOOP_VINFO_OPERANDS_SWAPPED (loop_info) = true;
	}
      else
	{
	  if (dump_enabled_p ())
	    report_vect_op (MSG_NOTE, def_stmt, "detected reduction: ");
	}

      return def_stmt;
    }

  /* Try to find SLP reduction chain.  */
  if (! nested_in_vect_loop
      && code != COND_EXPR
      && orig_code != MINUS_EXPR
      && vect_is_slp_reduction (loop_info, phi, def_stmt))
    {
      if (dump_enabled_p ())
	report_vect_op (MSG_NOTE, def_stmt,
			"reduction: detected reduction chain: ");

      return def_stmt;
    }

  /* Dissolve group eventually half-built by vect_is_slp_reduction.  */
  gimple *first = REDUC_GROUP_FIRST_ELEMENT (vinfo_for_stmt (def_stmt));
  while (first)
    {
      gimple *next = REDUC_GROUP_NEXT_ELEMENT (vinfo_for_stmt (first));
      REDUC_GROUP_FIRST_ELEMENT (vinfo_for_stmt (first)) = NULL;
      REDUC_GROUP_NEXT_ELEMENT (vinfo_for_stmt (first)) = NULL;
      first = next;
    }

  /* Look for the expression computing loop_arg from loop PHI result.  */
  if (check_reduction_path (vect_location, loop, as_a <gphi *> (phi), loop_arg,
			    code))
    return def_stmt;

  if (dump_enabled_p ())
    report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
		    "reduction: unknown pattern: ");

  return NULL;
}
/* Wrapper around vect_is_simple_reduction, which will modify code
   in-place if it enables detection of more reductions.  Arguments
   as there.  */

gimple *
vect_force_simple_reduction (loop_vec_info loop_info, gimple *phi,
			     bool *double_reduc,
			     bool need_wrapping_integral_overflow)
{
  enum vect_reduction_type v_reduc_type;
  gimple *def = vect_is_simple_reduction (loop_info, phi, double_reduc,
					  need_wrapping_integral_overflow,
					  &v_reduc_type);
  if (def)
    {
      stmt_vec_info reduc_def_info = vinfo_for_stmt (phi);
      STMT_VINFO_REDUC_TYPE (reduc_def_info) = v_reduc_type;
      STMT_VINFO_REDUC_DEF (reduc_def_info) = def;
      reduc_def_info = vinfo_for_stmt (def);
      STMT_VINFO_REDUC_TYPE (reduc_def_info) = v_reduc_type;
      STMT_VINFO_REDUC_DEF (reduc_def_info) = phi;
    }
  return def;
}
/* Calculate cost of peeling the loop PEEL_ITERS_PROLOGUE times.  */
int
vect_get_known_peeling_cost (loop_vec_info loop_vinfo, int peel_iters_prologue,
			     int *peel_iters_epilogue,
			     stmt_vector_for_cost *scalar_cost_vec,
			     stmt_vector_for_cost *prologue_cost_vec,
			     stmt_vector_for_cost *epilogue_cost_vec)
{
  int retval = 0;
  int assumed_vf = vect_vf_for_cost (loop_vinfo);

  if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
    {
      *peel_iters_epilogue = assumed_vf / 2;
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "cost model: epilogue peel iters set to vf/2 "
			 "because loop iterations are unknown .\n");

      /* If peeled iterations are known but number of scalar loop
	 iterations are unknown, count a taken branch per peeled loop.  */
      retval = record_stmt_cost (prologue_cost_vec, 1, cond_branch_taken,
				 NULL, 0, vect_prologue);
      retval = record_stmt_cost (prologue_cost_vec, 1, cond_branch_taken,
				 NULL, 0, vect_epilogue);
    }
  else
    {
      int niters = LOOP_VINFO_INT_NITERS (loop_vinfo);
      peel_iters_prologue = niters < peel_iters_prologue ?
			    niters : peel_iters_prologue;
      *peel_iters_epilogue = (niters - peel_iters_prologue) % assumed_vf;
      /* If we need to peel for gaps, but no peeling is required, we have to
	 peel VF iterations.  */
      if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) && !*peel_iters_epilogue)
	*peel_iters_epilogue = assumed_vf;
    }

  stmt_info_for_cost *si;
  int j;
  if (peel_iters_prologue)
    FOR_EACH_VEC_ELT (*scalar_cost_vec, j, si)
      {
	stmt_vec_info stmt_info
	  = si->stmt ? vinfo_for_stmt (si->stmt) : NULL_STMT_VEC_INFO;
	retval += record_stmt_cost (prologue_cost_vec,
				    si->count * peel_iters_prologue,
				    si->kind, stmt_info, si->misalign,
				    vect_prologue);
      }
  if (*peel_iters_epilogue)
    FOR_EACH_VEC_ELT (*scalar_cost_vec, j, si)
      {
	stmt_vec_info stmt_info
	  = si->stmt ? vinfo_for_stmt (si->stmt) : NULL_STMT_VEC_INFO;
	retval += record_stmt_cost (epilogue_cost_vec,
				    si->count * *peel_iters_epilogue,
				    si->kind, stmt_info, si->misalign,
				    vect_epilogue);
      }

  return retval;
}
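/* For example, with NITERS == 100, an assumed VF of 8 and a prologue peel
   of 3 iterations, the epilogue gets (100 - 3) % 8 == 1 iteration; each
   scalar statement cost is then charged 3 times into the prologue vector
   and once into the epilogue vector.  If peeling for gaps is required and
   the remainder had been 0, the epilogue would be charged a full VF of 8
   iterations instead.  */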
/* Function vect_estimate_min_profitable_iters

   Return the number of iterations required for the vector version of the
   loop to be profitable relative to the cost of the scalar version of the
   loop.

   *RET_MIN_PROFITABLE_NITERS is a cost model profitability threshold
   of iterations for vectorization.  -1 value means loop vectorization
   is not profitable.  This returned value may be used for dynamic
   profitability check.

   *RET_MIN_PROFITABLE_ESTIMATE is a profitability threshold to be used
   for static check against estimated number of iterations.  */

static void
vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo,
				    int *ret_min_profitable_niters,
				    int *ret_min_profitable_estimate)
{
  int min_profitable_iters;
  int min_profitable_estimate;
  int peel_iters_prologue;
  int peel_iters_epilogue;
  unsigned vec_inside_cost = 0;
  int vec_outside_cost = 0;
  unsigned vec_prologue_cost = 0;
  unsigned vec_epilogue_cost = 0;
  int scalar_single_iter_cost = 0;
  int scalar_outside_cost = 0;
  int assumed_vf = vect_vf_for_cost (loop_vinfo);
  int npeel = LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo);
  void *target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);

  /* Cost model disabled.  */
  if (unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
    {
      dump_printf_loc (MSG_NOTE, vect_location, "cost model disabled.\n");
      *ret_min_profitable_niters = 0;
      *ret_min_profitable_estimate = 0;
      return;
    }

  /* Requires loop versioning tests to handle misalignment.  */
  if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo))
    {
      /*  FIXME: Make cost depend on complexity of individual check.  */
      unsigned len = LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).length ();
      (void) add_stmt_cost (target_cost_data, len, vector_stmt, NULL, 0,
			    vect_prologue);
      dump_printf (MSG_NOTE,
		   "cost model: Adding cost of checks for loop "
		   "versioning to treat misalignment.\n");
    }

  /* Requires loop versioning with alias checks.  */
  if (LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
    {
      /*  FIXME: Make cost depend on complexity of individual check.  */
      unsigned len = LOOP_VINFO_COMP_ALIAS_DDRS (loop_vinfo).length ();
      (void) add_stmt_cost (target_cost_data, len, vector_stmt, NULL, 0,
			    vect_prologue);
      len = LOOP_VINFO_CHECK_UNEQUAL_ADDRS (loop_vinfo).length ();
      if (len)
	/* Count LEN - 1 ANDs and LEN comparisons.  */
	(void) add_stmt_cost (target_cost_data, len * 2 - 1, scalar_stmt,
			      NULL, 0, vect_prologue);
      len = LOOP_VINFO_LOWER_BOUNDS (loop_vinfo).length ();
      if (len)
	{
	  /* Count LEN - 1 ANDs and LEN comparisons.  */
	  unsigned int nstmts = len * 2 - 1;
	  /* +1 for each bias that needs adding.  */
	  for (unsigned int i = 0; i < len; ++i)
	    if (!LOOP_VINFO_LOWER_BOUNDS (loop_vinfo)[i].unsigned_p)
	      nstmts += 1;
	  (void) add_stmt_cost (target_cost_data, nstmts, scalar_stmt,
				NULL, 0, vect_prologue);
	}
      dump_printf (MSG_NOTE,
		   "cost model: Adding cost of checks for loop "
		   "versioning aliasing.\n");
    }

  /* Requires loop versioning with niter checks.  */
  if (LOOP_REQUIRES_VERSIONING_FOR_NITERS (loop_vinfo))
    {
      /*  FIXME: Make cost depend on complexity of individual check.  */
      (void) add_stmt_cost (target_cost_data, 1, vector_stmt, NULL, 0,
			    vect_prologue);
      dump_printf (MSG_NOTE,
		   "cost model: Adding cost of checks for loop "
		   "versioning niters.\n");
    }

  if (LOOP_REQUIRES_VERSIONING (loop_vinfo))
    (void) add_stmt_cost (target_cost_data, 1, cond_branch_taken, NULL, 0,
			  vect_prologue);

  /* Count statements in scalar loop.  Using this as scalar cost for a single
     iteration for now.

     TODO: Add outer loop support.

     TODO: Consider assigning different costs to different scalar
     statements.  */

  scalar_single_iter_cost
    = LOOP_VINFO_SINGLE_SCALAR_ITERATION_COST (loop_vinfo);

  /* Add additional cost for the peeled instructions in prologue and epilogue
     loop.  (For fully-masked loops there will be no peeling.)

     FORNOW: If we don't know the value of peel_iters for prologue or epilogue
     at compile-time - we assume it's vf/2 (the worst would be vf-1).

     TODO: Build an expression that represents peel_iters for prologue and
     epilogue to be used in a run-time test.  */

  if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
    {
      peel_iters_prologue = 0;
      peel_iters_epilogue = 0;

      if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo))
	{
	  /* We need to peel exactly one iteration.  */
	  peel_iters_epilogue += 1;
	  stmt_info_for_cost *si;
	  int j;
	  FOR_EACH_VEC_ELT (LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo),
			    j, si)
	    {
	      struct _stmt_vec_info *stmt_info
		= si->stmt ? vinfo_for_stmt (si->stmt) : NULL_STMT_VEC_INFO;
	      (void) add_stmt_cost (target_cost_data, si->count,
				    si->kind, stmt_info, si->misalign,
				    vect_epilogue);
	    }
	}
    }
  else if (npeel < 0)
    {
      peel_iters_prologue = assumed_vf / 2;
      dump_printf (MSG_NOTE, "cost model: "
		   "prologue peel iters set to vf/2.\n");

      /* If peeling for alignment is unknown, loop bound of main loop becomes
	 unknown.  */
      peel_iters_epilogue = assumed_vf / 2;
      dump_printf (MSG_NOTE, "cost model: "
		   "epilogue peel iters set to vf/2 because "
		   "peeling for alignment is unknown.\n");

      /* If peeled iterations are unknown, count a taken branch and a not taken
	 branch per peeled loop.  Even if scalar loop iterations are known,
	 vector iterations are not known since peeled prologue iterations are
	 not known.  Hence guards remain the same.  */
      (void) add_stmt_cost (target_cost_data, 1, cond_branch_taken,
			    NULL, 0, vect_prologue);
      (void) add_stmt_cost (target_cost_data, 1, cond_branch_not_taken,
			    NULL, 0, vect_prologue);
      (void) add_stmt_cost (target_cost_data, 1, cond_branch_taken,
			    NULL, 0, vect_epilogue);
      (void) add_stmt_cost (target_cost_data, 1, cond_branch_not_taken,
			    NULL, 0, vect_epilogue);
      stmt_info_for_cost *si;
      int j;
      FOR_EACH_VEC_ELT (LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo), j, si)
	{
	  struct _stmt_vec_info *stmt_info
	    = si->stmt ? vinfo_for_stmt (si->stmt) : NULL_STMT_VEC_INFO;
	  (void) add_stmt_cost (target_cost_data,
				si->count * peel_iters_prologue,
				si->kind, stmt_info, si->misalign,
				vect_prologue);
	  (void) add_stmt_cost (target_cost_data,
				si->count * peel_iters_epilogue,
				si->kind, stmt_info, si->misalign,
				vect_epilogue);
	}
    }
  else
    {
      stmt_vector_for_cost prologue_cost_vec, epilogue_cost_vec;
      stmt_info_for_cost *si;
      int j;
      void *data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);

      prologue_cost_vec.create (2);
      epilogue_cost_vec.create (2);
      peel_iters_prologue = npeel;

      (void) vect_get_known_peeling_cost (loop_vinfo, peel_iters_prologue,
					  &peel_iters_epilogue,
					  &LOOP_VINFO_SCALAR_ITERATION_COST
					    (loop_vinfo),
					  &prologue_cost_vec,
					  &epilogue_cost_vec);

      FOR_EACH_VEC_ELT (prologue_cost_vec, j, si)
	{
	  struct _stmt_vec_info *stmt_info
	    = si->stmt ? vinfo_for_stmt (si->stmt) : NULL_STMT_VEC_INFO;
	  (void) add_stmt_cost (data, si->count, si->kind, stmt_info,
				si->misalign, vect_prologue);
	}

      FOR_EACH_VEC_ELT (epilogue_cost_vec, j, si)
	{
	  struct _stmt_vec_info *stmt_info
	    = si->stmt ? vinfo_for_stmt (si->stmt) : NULL_STMT_VEC_INFO;
	  (void) add_stmt_cost (data, si->count, si->kind, stmt_info,
				si->misalign, vect_epilogue);
	}

      prologue_cost_vec.release ();
      epilogue_cost_vec.release ();
    }

  /* FORNOW: The scalar outside cost is incremented in one of the
     following ways:

     1. The vectorizer checks for alignment and aliasing and generates
     a condition that allows dynamic vectorization.  A cost model
     check is ANDED with the versioning condition.  Hence scalar code
     path now has the added cost of the versioning check.

       if (cost > th & versioning_check)
	 jmp to vector code

     Hence run-time scalar is incremented by not-taken branch cost.

     2. The vectorizer then checks if a prologue is required.  If the
     cost model check was not done before during versioning, it has to
     be done before the prologue check.

       if (cost <= th)
	 prologue = scalar_iters
       if (prologue == 0)
	 jmp to vector code
       else
	 execute prologue
       if (prologue == num_iters)
	 go to exit

     Hence the run-time scalar cost is incremented by a taken branch,
     plus a not-taken branch, plus a taken branch cost.

     3. The vectorizer then checks if an epilogue is required.  If the
     cost model check was not done before during prologue check, it
     has to be done with the epilogue check.

       if (prologue == 0)
	 jmp to vector code
       else
	 execute prologue
       if (prologue == num_iters)
	 go to exit
       vector code:
	 if ((cost <= th) | (scalar_iters-prologue-epilogue == 0))
	   jmp to epilogue

     Hence the run-time scalar cost should be incremented by 2 taken
     branches.

     TODO: The back end may reorder the BBS's differently and reverse
     conditions/branch directions.  Change the estimates below to
     something more reasonable.  */

  /* If the number of iterations is known and we do not do versioning, we can
     decide whether to vectorize at compile time.  Hence the scalar version
     do not carry cost model guard costs.  */
  if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
      || LOOP_REQUIRES_VERSIONING (loop_vinfo))
    {
      /* Cost model check occurs at versioning.  */
      if (LOOP_REQUIRES_VERSIONING (loop_vinfo))
	scalar_outside_cost += vect_get_stmt_cost (cond_branch_not_taken);
      else
	{
	  /* Cost model check occurs at prologue generation.  */
	  if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) < 0)
	    scalar_outside_cost += 2 * vect_get_stmt_cost (cond_branch_taken)
	      + vect_get_stmt_cost (cond_branch_not_taken);
	  /* Cost model check occurs at epilogue generation.  */
	  else
	    scalar_outside_cost += 2 * vect_get_stmt_cost (cond_branch_taken);
	}
    }

  /* Complete the target-specific cost calculations.  */
  finish_cost (LOOP_VINFO_TARGET_COST_DATA (loop_vinfo), &vec_prologue_cost,
	       &vec_inside_cost, &vec_epilogue_cost);

  vec_outside_cost = (int)(vec_prologue_cost + vec_epilogue_cost);

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "Cost model analysis: \n");
      dump_printf (MSG_NOTE, "  Vector inside of loop cost: %d\n",
		   vec_inside_cost);
      dump_printf (MSG_NOTE, "  Vector prologue cost: %d\n",
		   vec_prologue_cost);
      dump_printf (MSG_NOTE, "  Vector epilogue cost: %d\n",
		   vec_epilogue_cost);
      dump_printf (MSG_NOTE, "  Scalar iteration cost: %d\n",
		   scalar_single_iter_cost);
      dump_printf (MSG_NOTE, "  Scalar outside cost: %d\n",
		   scalar_outside_cost);
      dump_printf (MSG_NOTE, "  Vector outside cost: %d\n",
		   vec_outside_cost);
      dump_printf (MSG_NOTE, "  prologue iterations: %d\n",
		   peel_iters_prologue);
      dump_printf (MSG_NOTE, "  epilogue iterations: %d\n",
		   peel_iters_epilogue);
    }

  /* Calculate number of iterations required to make the vector version
     profitable, relative to the loop bodies only.  The following condition
     must hold true:
     SIC * niters + SOC > VIC * ((niters-PL_ITERS-EP_ITERS)/VF) + VOC
     where
     SIC = scalar iteration cost, VIC = vector iteration cost,
     VOC = vector outside cost, VF = vectorization factor,
     PL_ITERS = prologue iterations, EP_ITERS= epilogue iterations
     SOC = scalar outside cost for run time cost model check.  */
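  /* For example, ignoring prologue/epilogue peeling and with SOC == 0,
     SIC == 4, VIC == 8, VF == 8 and VOC == 40, the condition becomes
     4 * niters > 8 * (niters / 8) + 40, i.e. 3 * niters > 40, so at least
     14 iterations are needed before the vector loop wins.  */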
  if ((scalar_single_iter_cost * assumed_vf) > (int) vec_inside_cost)
    {
      min_profitable_iters = ((vec_outside_cost - scalar_outside_cost)
			      * assumed_vf
			      - vec_inside_cost * peel_iters_prologue
			      - vec_inside_cost * peel_iters_epilogue);
      if (min_profitable_iters <= 0)
	min_profitable_iters = 0;
      else
	{
	  min_profitable_iters /= ((scalar_single_iter_cost * assumed_vf)
				   - vec_inside_cost);

	  if ((scalar_single_iter_cost * assumed_vf * min_profitable_iters)
	      <= (((int) vec_inside_cost * min_profitable_iters)
		  + (((int) vec_outside_cost - scalar_outside_cost)
		     * assumed_vf)))
	    min_profitable_iters++;
	}
    }
  /* vector version will never be profitable.  */
  else
    {
      if (LOOP_VINFO_LOOP (loop_vinfo)->force_vectorize)
	warning_at (vect_location.get_location_t (), OPT_Wopenmp_simd,
		    "vectorization did not happen for a simd loop");

      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "cost model: the vector iteration cost = %d "
			 "divided by the scalar iteration cost = %d "
			 "is greater or equal to the vectorization factor = %d"
			 ".\n",
			 vec_inside_cost, scalar_single_iter_cost, assumed_vf);
      *ret_min_profitable_niters = -1;
      *ret_min_profitable_estimate = -1;
      return;
    }

  if (dump_enabled_p ())
    dump_printf (MSG_NOTE,
		 "  Calculated minimum iters for profitability: %d\n",
		 min_profitable_iters);

  if (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
      && min_profitable_iters < (assumed_vf + peel_iters_prologue))
    /* We want the vectorized loop to execute at least once.  */
    min_profitable_iters = assumed_vf + peel_iters_prologue;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "  Runtime profitability threshold = %d\n",
		     min_profitable_iters);

  *ret_min_profitable_niters = min_profitable_iters;

  /* Calculate number of iterations required to make the vector version
     profitable, relative to the loop bodies only.

     Non-vectorized variant is SIC * niters and it must win over vector
     variant on the expected loop trip count.  The following condition
     must hold true:
     SIC * niters > VIC * ((niters-PL_ITERS-EP_ITERS)/VF) + VOC + SOC  */

  if (vec_outside_cost <= 0)
    min_profitable_estimate = 0;
  else
    {
      min_profitable_estimate = ((vec_outside_cost + scalar_outside_cost)
				 * assumed_vf
				 - vec_inside_cost * peel_iters_prologue
				 - vec_inside_cost * peel_iters_epilogue)
				 / ((scalar_single_iter_cost * assumed_vf)
				    - vec_inside_cost);
    }
  min_profitable_estimate = MAX (min_profitable_estimate, min_profitable_iters);
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "  Static estimate profitability threshold = %d\n",
		     min_profitable_estimate);

  *ret_min_profitable_estimate = min_profitable_estimate;
}
/* Writes into SEL a mask for a vec_perm, equivalent to a vec_shr by OFFSET
   vector elements (not bits) for a vector with NELT elements.  */
static void
calc_vec_perm_mask_for_shift (unsigned int offset, unsigned int nelt,
			      vec_perm_builder *sel)
{
  /* The encoding is a single stepped pattern.  Any wrap-around is handled
     by vec_perm_indices.  */
  sel->new_vector (nelt, 1, 3);
  for (unsigned int i = 0; i < 3; i++)
    sel->quick_push (i + offset);
}
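/* For example, OFFSET == 2 with NELT == 8 pushes the series 2, 3, 4, which
   the vec_perm_indices machinery extends to { 2, 3, 4, 5, 6, 7, 8, 9 }:
   every element moves down by two positions, with indices 8 and 9 selecting
   from the second vector of a two-operand permute.  */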
/* Checks whether the target supports whole-vector shifts for vectors of mode
   MODE.  This is the case if _either_ the platform handles vec_shr_optab, _or_
   it supports vec_perm_const with masks for all necessary shift amounts.  */
static bool
have_whole_vector_shift (machine_mode mode)
{
  if (optab_handler (vec_shr_optab, mode) != CODE_FOR_nothing)
    return true;

  /* Variable-length vectors should be handled via the optab.  */
  unsigned int nelt;
  if (!GET_MODE_NUNITS (mode).is_constant (&nelt))
    return false;

  vec_perm_builder sel;
  vec_perm_indices indices;
  for (unsigned int i = nelt / 2; i >= 1; i /= 2)
    {
      calc_vec_perm_mask_for_shift (i, nelt, &sel);
      indices.new_vector (sel, 2, nelt);
      if (!can_vec_perm_const_p (mode, indices, false))
	return false;
    }
  return true;
}
/* TODO: Close dependency between vect_model_*_cost and vectorizable_*
   functions.  Design better to avoid maintenance issues.  */

/* Function vect_model_reduction_cost.

   Models cost for a reduction operation, including the vector ops
   generated within the strip-mine loop, the initial definition before
   the loop, and the epilogue code that must be generated.  */

static void
vect_model_reduction_cost (stmt_vec_info stmt_info, internal_fn reduc_fn,
			   int ncopies, stmt_vector_for_cost *cost_vec)
{
  int prologue_cost = 0, epilogue_cost = 0, inside_cost;
  enum tree_code code;
  optab optab;
  tree vectype;
  gimple *orig_stmt;
  machine_mode mode;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = NULL;

  if (loop_vinfo)
    loop = LOOP_VINFO_LOOP (loop_vinfo);

  /* Condition reductions generate two reductions in the loop.  */
  vect_reduction_type reduction_type
    = STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info);
  if (reduction_type == COND_REDUCTION)
    ncopies *= 2;

  vectype = STMT_VINFO_VECTYPE (stmt_info);
  mode = TYPE_MODE (vectype);
  orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);

  if (!orig_stmt)
    orig_stmt = STMT_VINFO_STMT (stmt_info);

  code = gimple_assign_rhs_code (orig_stmt);

  if (reduction_type == EXTRACT_LAST_REDUCTION
      || reduction_type == FOLD_LEFT_REDUCTION)
    {
      /* No extra instructions needed in the prologue.  */
      prologue_cost = 0;

      if (reduction_type == EXTRACT_LAST_REDUCTION || reduc_fn != IFN_LAST)
	/* Count one reduction-like operation per vector.  */
	inside_cost = record_stmt_cost (cost_vec, ncopies, vec_to_scalar,
					stmt_info, 0, vect_body);
      else
	{
	  /* Use NELEMENTS extracts and NELEMENTS scalar ops.  */
	  unsigned int nelements = ncopies * vect_nunits_for_cost (vectype);
	  inside_cost = record_stmt_cost (cost_vec, nelements,
					  vec_to_scalar, stmt_info, 0,
					  vect_body);
	  inside_cost += record_stmt_cost (cost_vec, nelements,
					   scalar_stmt, stmt_info, 0,
					   vect_body);
	}
    }
  else
    {
      /* Add in cost for initial definition.
	 For cond reduction we have four vectors: initial index, step,
	 initial result of the data reduction, initial value of the index
	 reduction.  */
      int prologue_stmts = reduction_type == COND_REDUCTION ? 4 : 1;
      prologue_cost += record_stmt_cost (cost_vec, prologue_stmts,
					 scalar_to_vec, stmt_info, 0,
					 vect_prologue);

      /* Cost of reduction op inside loop.  */
      inside_cost = record_stmt_cost (cost_vec, ncopies, vector_stmt,
				      stmt_info, 0, vect_body);
    }

  /* Determine cost of epilogue code.

     We have a reduction operator that will reduce the vector in one statement.
     Also requires scalar extract.  */

  if (!loop || !nested_in_vect_loop_p (loop, orig_stmt))
    {
      if (reduc_fn != IFN_LAST)
	{
	  if (reduction_type == COND_REDUCTION)
	    {
	      /* An EQ stmt and an COND_EXPR stmt.  */
	      epilogue_cost += record_stmt_cost (cost_vec, 2,
						 vector_stmt, stmt_info, 0,
						 vect_epilogue);
	      /* Reduction of the max index and a reduction of the found
		 values.  */
	      epilogue_cost += record_stmt_cost (cost_vec, 2,
						 vec_to_scalar, stmt_info, 0,
						 vect_epilogue);
	      /* A broadcast of the max value.  */
	      epilogue_cost += record_stmt_cost (cost_vec, 1,
						 scalar_to_vec, stmt_info, 0,
						 vect_epilogue);
	    }
	  else
	    {
	      epilogue_cost += record_stmt_cost (cost_vec, 1, vector_stmt,
						 stmt_info, 0, vect_epilogue);
	      epilogue_cost += record_stmt_cost (cost_vec, 1,
						 vec_to_scalar, stmt_info, 0,
						 vect_epilogue);
	    }
	}
      else if (reduction_type == COND_REDUCTION)
	{
	  unsigned estimated_nunits = vect_nunits_for_cost (vectype);
	  /* Extraction of scalar elements.  */
	  epilogue_cost += record_stmt_cost (cost_vec,
					     2 * estimated_nunits,
					     vec_to_scalar, stmt_info, 0,
					     vect_epilogue);
	  /* Scalar max reductions via COND_EXPR / MAX_EXPR.  */
	  epilogue_cost += record_stmt_cost (cost_vec,
					     2 * estimated_nunits - 3,
					     scalar_stmt, stmt_info, 0,
					     vect_epilogue);
	}
      else if (reduction_type == EXTRACT_LAST_REDUCTION
	       || reduction_type == FOLD_LEFT_REDUCTION)
	/* No extra instructions needed in the epilogue.  */
	;
      else
	{
	  int vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype));
	  tree bitsize =
	    TYPE_SIZE (TREE_TYPE (gimple_assign_lhs (orig_stmt)));
	  int element_bitsize = tree_to_uhwi (bitsize);
	  int nelements = vec_size_in_bits / element_bitsize;

	  if (code == COND_EXPR)
	    code = MAX_EXPR;

	  optab = optab_for_tree_code (code, vectype, optab_default);

	  /* We have a whole vector shift available.  */
	  if (optab != unknown_optab
	      && VECTOR_MODE_P (mode)
	      && optab_handler (optab, mode) != CODE_FOR_nothing
	      && have_whole_vector_shift (mode))
	    {
	      /* Final reduction via vector shifts and the reduction operator.
		 Also requires scalar extract.  */
	      epilogue_cost += record_stmt_cost (cost_vec,
						 exact_log2 (nelements) * 2,
						 vector_stmt, stmt_info, 0,
						 vect_epilogue);
	      epilogue_cost += record_stmt_cost (cost_vec, 1,
						 vec_to_scalar, stmt_info, 0,
						 vect_epilogue);
	    }
	  else
	    /* Use extracts and reduction op for final reduction.  For N
	       elements, we have N extracts and N-1 reduction ops.  */
	    epilogue_cost += record_stmt_cost (cost_vec,
					       nelements + nelements - 1,
					       vector_stmt, stmt_info, 0,
					       vect_epilogue);
	}
    }

  if (dump_enabled_p ())
    dump_printf (MSG_NOTE,
		 "vect_model_reduction_cost: inside_cost = %d, "
		 "prologue_cost = %d, epilogue_cost = %d .\n", inside_cost,
		 prologue_cost, epilogue_cost);
}
/* Function vect_model_induction_cost.

   Models cost for induction operations.  */

static void
vect_model_induction_cost (stmt_vec_info stmt_info, int ncopies,
			   stmt_vector_for_cost *cost_vec)
{
  unsigned inside_cost, prologue_cost;

  if (PURE_SLP_STMT (stmt_info))
    return;

  /* loop cost for vec_loop.  */
  inside_cost = record_stmt_cost (cost_vec, ncopies, vector_stmt,
				  stmt_info, 0, vect_body);

  /* prologue cost for vec_init and vec_step.  */
  prologue_cost = record_stmt_cost (cost_vec, 2, scalar_to_vec,
				    stmt_info, 0, vect_prologue);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_model_induction_cost: inside_cost = %d, "
		     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}
/* Function get_initial_def_for_reduction

   Input:
   STMT - a stmt that performs a reduction operation in the loop.
   INIT_VAL - the initial value of the reduction variable

   Output:
   ADJUSTMENT_DEF - a tree that holds a value to be added to the final result
	of the reduction (used for adjusting the epilog - see below).
   Return a vector variable, initialized according to the operation that STMT
	performs.  This vector will be used as the initial value of the
	vector of partial results.

   Option1 (adjust in epilog): Initialize the vector as follows:
     add/bit or/xor:    [0,0,...,0,0]
     mult/bit and:      [1,1,...,1,1]
     min/max/cond_expr: [init_val,init_val,..,init_val,init_val]
   and when necessary (e.g. add/mult case) let the caller know
   that it needs to adjust the result by init_val.

   Option2: Initialize the vector as follows:
     add/bit or/xor:    [init_val,0,0,...,0]
     mult/bit and:      [init_val,1,1,...,1]
     min/max/cond_expr: [init_val,init_val,...,init_val]
   and no adjustments are needed.

   For example, for the following code:

     s = init_val;
     for (i = 0; i < n; i++)
       s = s + a[i];

   STMT is 's = s + a[i]', and the reduction variable is 's'.
   For a vector of 4 units, we want to return either [0,0,0,init_val],
   or [0,0,0,0] and let the caller know that it needs to adjust
   the result at the end by 'init_val'.

   FORNOW, we are using the 'adjust in epilog' scheme, because this way the
   initialization vector is simpler (same element in all entries), if
   ADJUSTMENT_DEF is not NULL, and Option2 otherwise.

   A cost model should help decide between these two schemes.  */
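/* As a concrete illustration of the two options above (the numbers are
   purely illustrative): for a PLUS_EXPR reduction over a four-element
   integer vector with init_val = 5, Option1 returns {0,0,0,0} and reports
   ADJUSTMENT_DEF = 5, while Option2 returns {5,0,0,0} with no adjustment;
   for MIN_EXPR/MAX_EXPR both options splat the initial value: {5,5,5,5}.  */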
static tree
get_initial_def_for_reduction (gimple *stmt, tree init_val,
			       tree *adjustment_def)
{
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  tree scalar_type = TREE_TYPE (init_val);
  tree vectype = get_vectype_for_scalar_type (scalar_type);
  enum tree_code code = gimple_assign_rhs_code (stmt);
  tree def_for_init;
  tree init_def;
  REAL_VALUE_TYPE real_init_val = dconst0;
  int int_init_val = 0;
  gimple_seq stmts = NULL;

  gcc_assert (vectype);

  gcc_assert (POINTER_TYPE_P (scalar_type) || INTEGRAL_TYPE_P (scalar_type)
	      || SCALAR_FLOAT_TYPE_P (scalar_type));

  gcc_assert (nested_in_vect_loop_p (loop, stmt)
	      || loop == (gimple_bb (stmt))->loop_father);

  vect_reduction_type reduction_type
    = STMT_VINFO_VEC_REDUCTION_TYPE (stmt_vinfo);

  switch (code)
    {
    case WIDEN_SUM_EXPR:
    case DOT_PROD_EXPR:
    case SAD_EXPR:
    case PLUS_EXPR:
    case MINUS_EXPR:
    case BIT_IOR_EXPR:
    case BIT_XOR_EXPR:
    case MULT_EXPR:
    case BIT_AND_EXPR:
      {
	/* ADJUSTMENT_DEF is NULL when called from
	   vect_create_epilog_for_reduction to vectorize double reduction.  */
	if (adjustment_def)
	  *adjustment_def = init_val;

	if (code == MULT_EXPR)
	  {
	    real_init_val = dconst1;
	    int_init_val = 1;
	  }

	if (code == BIT_AND_EXPR)
	  int_init_val = 1;

	if (SCALAR_FLOAT_TYPE_P (scalar_type))
	  def_for_init = build_real (scalar_type, real_init_val);
	else
	  def_for_init = build_int_cst (scalar_type, int_init_val);

	if (adjustment_def)
	  /* Option1: the first element is '0' or '1' as well.  */
	  init_def = gimple_build_vector_from_val (&stmts, vectype,
						   def_for_init);
	else if (!TYPE_VECTOR_SUBPARTS (vectype).is_constant ())
	  {
	    /* Option2 (variable length): the first element is INIT_VAL.  */
	    init_def = gimple_build_vector_from_val (&stmts, vectype,
						     def_for_init);
	    init_def = gimple_build (&stmts, CFN_VEC_SHL_INSERT,
				     vectype, init_def, init_val);
	  }
	else
	  {
	    /* Option2: the first element is INIT_VAL.  */
	    tree_vector_builder elts (vectype, 1, 2);
	    elts.quick_push (init_val);
	    elts.quick_push (def_for_init);
	    init_def = gimple_build_vector (&stmts, &elts);
	  }
      }
      break;

    case MIN_EXPR:
    case MAX_EXPR:
    case COND_EXPR:
      {
	if (adjustment_def)
	  {
	    *adjustment_def = NULL_TREE;
	    if (reduction_type != COND_REDUCTION
		&& reduction_type != EXTRACT_LAST_REDUCTION)
	      {
		init_def = vect_get_vec_def_for_operand (init_val, stmt);
		break;
	      }
	  }
	init_val = gimple_convert (&stmts, TREE_TYPE (vectype), init_val);
	init_def = gimple_build_vector_from_val (&stmts, vectype, init_val);
      }
      break;

    default:
      gcc_unreachable ();
    }

  if (stmts)
    gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);

  return init_def;
}
/* Get at the initial defs for the reduction PHIs in SLP_NODE.
   NUMBER_OF_VECTORS is the number of vector defs to create.
   If NEUTRAL_OP is nonnull, introducing extra elements of that
   value will not change the result.  */

static void
get_initial_defs_for_reduction (slp_tree slp_node,
				vec<tree> *vec_oprnds,
				unsigned int number_of_vectors,
				bool reduc_chain, tree neutral_op)
{
  vec<gimple *> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
  gimple *stmt = stmts[0];
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  unsigned HOST_WIDE_INT nunits;
  unsigned j, number_of_places_left_in_vector;
  tree vector_type;
  tree vop;
  int group_size = stmts.length ();
  unsigned int vec_num, i;
  unsigned number_of_copies = 1;
  vec<tree> voprnds;
  voprnds.create (number_of_vectors);
  struct loop *loop;
  auto_vec<tree, 16> permute_results;

  vector_type = STMT_VINFO_VECTYPE (stmt_vinfo);

  gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def);

  loop = (gimple_bb (stmt))->loop_father;
  gcc_assert (loop);
  edge pe = loop_preheader_edge (loop);

  gcc_assert (!reduc_chain || neutral_op);

  /* NUMBER_OF_COPIES is the number of times we need to use the same values in
     created vectors.  It is greater than 1 if unrolling is performed.

     For example, we have two scalar operands, s1 and s2 (e.g., group of
     strided accesses of size two), while NUNITS is four (i.e., four scalars
     of this type can be packed in a vector).  The output vector will contain
     two copies of each scalar operand: {s1, s2, s1, s2}.  (NUMBER_OF_COPIES
     will be 2).

     If REDUC_GROUP_SIZE > NUNITS, the scalars will be split into several
     vectors containing the operands.

     For example, NUNITS is four as before, and the group size is 8
     (s1, s2, ..., s8).  We will create two vectors {s1, s2, s3, s4} and
     {s5, s6, s7, s8}.  */
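  /* Illustrative tie-in to the computation below: NUMBER_OF_COPIES is
     NUNITS * NUMBER_OF_VECTORS / REDUC_GROUP_SIZE, so the first example
     (group of 2, NUNITS 4, one vector) gives 4 * 1 / 2 == 2 copies, and the
     second (group of 8, NUNITS 4, two vectors) gives 4 * 2 / 8 == 1 copy.  */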
  if (!TYPE_VECTOR_SUBPARTS (vector_type).is_constant (&nunits))
    nunits = group_size;

  number_of_copies = nunits * number_of_vectors / group_size;

  number_of_places_left_in_vector = nunits;
  bool constant_p = true;
  tree_vector_builder elts (vector_type, nunits, 1);
  elts.quick_grow (nunits);
  for (j = 0; j < number_of_copies; j++)
    {
      for (i = group_size - 1; stmts.iterate (i, &stmt); i--)
	{
	  tree op;
	  /* Get the def before the loop.  In reduction chain we have only
	     one initial value.  */
	  if ((j != (number_of_copies - 1)
	       || (reduc_chain && i != 0))
	      && neutral_op)
	    op = neutral_op;
	  else
	    op = PHI_ARG_DEF_FROM_EDGE (stmt, pe);

	  /* Create 'vect_ = {op0,op1,...,opn}'.  */
	  number_of_places_left_in_vector--;
	  elts[number_of_places_left_in_vector] = op;
	  if (!CONSTANT_CLASS_P (op))
	    constant_p = false;

	  if (number_of_places_left_in_vector == 0)
	    {
	      gimple_seq ctor_seq = NULL;
	      tree init;
	      if (constant_p && !neutral_op
		  ? multiple_p (TYPE_VECTOR_SUBPARTS (vector_type), nunits)
		  : known_eq (TYPE_VECTOR_SUBPARTS (vector_type), nunits))
		/* Build the vector directly from ELTS.  */
		init = gimple_build_vector (&ctor_seq, &elts);
	      else if (neutral_op)
		{
		  /* Build a vector of the neutral value and shift the
		     other elements into place.  */
		  init = gimple_build_vector_from_val (&ctor_seq, vector_type,
						       neutral_op);
		  int k = nunits;
		  while (k > 0 && elts[k - 1] == neutral_op)
		    k -= 1;
		  while (k > 0)
		    {
		      k -= 1;
		      init = gimple_build (&ctor_seq, CFN_VEC_SHL_INSERT,
					   vector_type, init, elts[k]);
		    }
		}
	      else
		{
		  /* First time round, duplicate ELTS to fill the
		     required number of vectors, then cherry pick the
		     appropriate result for each iteration.  */
		  if (vec_oprnds->is_empty ())
		    duplicate_and_interleave (&ctor_seq, vector_type, elts,
					      number_of_vectors,
					      permute_results);
		  init = permute_results[number_of_vectors - j - 1];
		}
	      if (ctor_seq != NULL)
		gsi_insert_seq_on_edge_immediate (pe, ctor_seq);
	      voprnds.quick_push (init);

	      number_of_places_left_in_vector = nunits;
	      elts.new_vector (vector_type, nunits, 1);
	      elts.quick_grow (nunits);
	      constant_p = true;
	    }
	}
    }

  /* Since the vectors are created in the reverse order, we should invert
     them.  */
  vec_num = voprnds.length ();
  for (j = vec_num; j != 0; j--)
    {
      vop = voprnds[j - 1];
      vec_oprnds->quick_push (vop);
    }

  voprnds.release ();

  /* In case that VF is greater than the unrolling factor needed for the SLP
     group of stmts, NUMBER_OF_VECTORS to be created is greater than
     NUMBER_OF_SCALARS/NUNITS or NUNITS/NUMBER_OF_SCALARS, and hence we have
     to replicate the vectors.  */
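  /* For instance (illustrative numbers only): if the loop above built two
     vectors but NUMBER_OF_VECTORS is four, the loop below pushes two more
     entries - splats of NEUTRAL_OP when one exists, otherwise repeats of
     the vectors already created.  */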
  tree neutral_vec = NULL;
  while (number_of_vectors > vec_oprnds->length ())
    {
      if (neutral_op)
	{
	  if (!neutral_vec)
	    {
	      gimple_seq ctor_seq = NULL;
	      neutral_vec = gimple_build_vector_from_val
		(&ctor_seq, vector_type, neutral_op);
	      if (ctor_seq != NULL)
		gsi_insert_seq_on_edge_immediate (pe, ctor_seq);
	    }
	  vec_oprnds->quick_push (neutral_vec);
	}
      else
	{
	  for (i = 0; vec_oprnds->iterate (i, &vop) && i < vec_num; i++)
	    vec_oprnds->quick_push (vop);
	}
    }
}
/* Function vect_create_epilog_for_reduction

   Create code at the loop-epilog to finalize the result of a reduction
   computation.

   VECT_DEFS is list of vector of partial results, i.e., the lhs's of vector
     reduction statements.
   STMT is the scalar reduction stmt that is being vectorized.
   NCOPIES is > 1 in case the vectorization factor (VF) is bigger than the
     number of elements that we can fit in a vectype (nunits).  In this case
     we have to generate more than one vector stmt - i.e - we need to "unroll"
     the vector stmt by a factor VF/nunits.  For more details see documentation
     in vectorizable_operation.
   REDUC_FN is the internal function for the epilog reduction.
   REDUCTION_PHIS is a list of the phi-nodes that carry the reduction
     computation.
   REDUC_INDEX is the index of the operand in the right hand side of the
     statement that is defined by REDUCTION_PHI.
   DOUBLE_REDUC is TRUE if double reduction phi nodes should be handled.
   SLP_NODE is an SLP node containing a group of reduction statements.  The
     first one in this group is STMT.
   INDUC_VAL is for INTEGER_INDUC_COND_REDUCTION the value to use for the case
     when the COND_EXPR is never true in the loop.  For MAX_EXPR, it needs to
     be smaller than any value of the IV in the loop, for MIN_EXPR larger than
     any value of the IV in the loop.
   INDUC_CODE is the code for epilog reduction if INTEGER_INDUC_COND_REDUCTION.
   NEUTRAL_OP is the value given by neutral_op_for_slp_reduction; it is
     null if this is not an SLP reduction

   This function:
   1. Creates the reduction def-use cycles: sets the arguments for
      REDUCTION_PHIS:
      The loop-entry argument is the vectorized initial-value of the reduction.
      The loop-latch argument is taken from VECT_DEFS - the vector of partial
      sums.
   2. "Reduces" each vector of partial results VECT_DEFS into a single result,
      by calling the function specified by REDUC_FN if available, or by
      other means (whole-vector shifts or a scalar loop).
      The function also creates a new phi node at the loop exit to preserve
      loop-closed form, as illustrated below.

     The flow at the entry to this function:

        loop:
          vec_def = phi <null, null>            # REDUCTION_PHI
          VECT_DEF = vector_stmt                # vectorized form of STMT
          s_loop = scalar_stmt                  # (scalar) STMT
        loop_exit:
          s_out0 = phi <s_loop>                 # (scalar) EXIT_PHI
          use <s_out0>
          use <s_out0>

     The above is transformed by this function into:

        loop:
          vec_def = phi <vec_init, VECT_DEF>    # REDUCTION_PHI
          VECT_DEF = vector_stmt                # vectorized form of STMT
          s_loop = scalar_stmt                  # (scalar) STMT
        loop_exit:
          s_out0 = phi <s_loop>                 # (scalar) EXIT_PHI
          v_out1 = phi <VECT_DEF>               # NEW_EXIT_PHI
          v_out2 = reduce <v_out1>
          s_out3 = extract_field <v_out2, 0>
          s_out4 = adjust_result <s_out3>
          use <s_out4>
          use <s_out4>  */
static void
vect_create_epilog_for_reduction (vec<tree> vect_defs, gimple *stmt,
				  gimple *reduc_def_stmt,
				  int ncopies, internal_fn reduc_fn,
				  vec<gimple *> reduction_phis,
				  bool double_reduc,
				  slp_tree slp_node,
				  slp_instance slp_node_instance,
				  tree induc_val, enum tree_code induc_code,
				  tree neutral_op)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  stmt_vec_info prev_phi_info;
  tree vectype;
  machine_mode mode;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo), *outer_loop = NULL;
  basic_block exit_bb;
  tree scalar_dest;
  tree scalar_type;
  gimple *new_phi = NULL, *phi;
  gimple_stmt_iterator exit_gsi;
  tree vec_dest;
  tree new_temp = NULL_TREE, new_dest, new_name, new_scalar_dest;
  gimple *epilog_stmt = NULL;
  enum tree_code code = gimple_assign_rhs_code (stmt);
  gimple *exit_phi;
  tree bitsize;
  tree adjustment_def = NULL;
  tree vec_initial_def = NULL;
  tree expr, def, initial_def = NULL;
  tree orig_name, scalar_result;
  imm_use_iterator imm_iter, phi_imm_iter;
  use_operand_p use_p, phi_use_p;
  gimple *use_stmt, *orig_stmt, *reduction_phi = NULL;
  bool nested_in_vect_loop = false;
  auto_vec<gimple *> new_phis;
  auto_vec<gimple *> inner_phis;
  enum vect_def_type dt = vect_unknown_def_type;
  int j, i;
  auto_vec<tree> scalar_results;
  unsigned int group_size = 1, k, ratio;
  auto_vec<tree> vec_initial_defs;
  auto_vec<gimple *> phis;
  bool slp_reduc = false;
  bool direct_slp_reduc;
  tree new_phi_result;
  gimple *inner_phi = NULL;
  tree induction_index = NULL_TREE;

  if (slp_node)
    group_size = SLP_TREE_SCALAR_STMTS (slp_node).length ();

  if (nested_in_vect_loop_p (loop, stmt))
    {
      outer_loop = loop;
      loop = loop->inner;
      nested_in_vect_loop = true;
      gcc_assert (!slp_node);
    }

  vectype = STMT_VINFO_VECTYPE (stmt_info);
  gcc_assert (vectype);
  mode = TYPE_MODE (vectype);
  /* 1. Create the reduction def-use cycle:
     Set the arguments of REDUCTION_PHIS, i.e., transform

        loop:
          vec_def = phi <null, null>            # REDUCTION_PHI
          VECT_DEF = vector_stmt                # vectorized form of STMT
          ...

     into:

        loop:
          vec_def = phi <vec_init, VECT_DEF>    # REDUCTION_PHI
          VECT_DEF = vector_stmt                # vectorized form of STMT
          ...

     (in case of SLP, do it for all the phis).  */

  /* Get the loop-entry arguments.  */
  enum vect_def_type initial_def_dt = vect_unknown_def_type;
  if (slp_node)
    {
      unsigned vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
      vec_initial_defs.reserve (vec_num);
      get_initial_defs_for_reduction (slp_node_instance->reduc_phis,
				      &vec_initial_defs, vec_num,
				      REDUC_GROUP_FIRST_ELEMENT (stmt_info),
				      neutral_op);
    }
  else
    {
      /* Get at the scalar def before the loop, that defines the initial value
	 of the reduction variable.  */
      initial_def = PHI_ARG_DEF_FROM_EDGE (reduc_def_stmt,
					   loop_preheader_edge (loop));
      /* Optimize: if initial_def is for REDUC_MAX smaller than the base
	 and we can't use zero for induc_val, use initial_def.  Similarly
	 for REDUC_MIN and initial_def larger than the base.  */
      if (TREE_CODE (initial_def) == INTEGER_CST
	  && (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
	      == INTEGER_INDUC_COND_REDUCTION)
	  && !integer_zerop (induc_val)
	  && ((induc_code == MAX_EXPR
	       && tree_int_cst_lt (initial_def, induc_val))
	      || (induc_code == MIN_EXPR
		  && tree_int_cst_lt (induc_val, initial_def))))
	induc_val = initial_def;

      if (double_reduc)
	/* In case of double reduction we only create a vector variable
	   to be put in the reduction phi node.  The actual statement
	   creation is done later in this function.  */
	vec_initial_def = vect_create_destination_var (initial_def, vectype);
      else if (nested_in_vect_loop)
	{
	  /* Do not use an adjustment def as that case is not supported
	     correctly if ncopies is not one.  */
	  vect_is_simple_use (initial_def, loop_vinfo, &initial_def_dt);
	  vec_initial_def = vect_get_vec_def_for_operand (initial_def, stmt);
	}
      else
	vec_initial_def = get_initial_def_for_reduction (stmt, initial_def,
							 &adjustment_def);
      vec_initial_defs.create (1);
      vec_initial_defs.quick_push (vec_initial_def);
    }

  /* Set phi nodes arguments.  */
  FOR_EACH_VEC_ELT (reduction_phis, i, phi)
    {
      tree vec_init_def = vec_initial_defs[i];
      tree def = vect_defs[i];
      for (j = 0; j < ncopies; j++)
	{
	  if (j != 0)
	    {
	      phi = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (phi));
	      if (nested_in_vect_loop)
		vec_init_def
		  = vect_get_vec_def_for_stmt_copy (initial_def_dt,
						    vec_init_def);
	    }

	  /* Set the loop-entry arg of the reduction-phi.  */

	  if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
	      == INTEGER_INDUC_COND_REDUCTION)
	    {
	      /* Initialise the reduction phi to zero.  This prevents initial
		 values of non-zero interfering with the reduction op.  */
	      gcc_assert (ncopies == 1);
	      gcc_assert (i == 0);

	      tree vec_init_def_type = TREE_TYPE (vec_init_def);
	      tree induc_val_vec
		= build_vector_from_val (vec_init_def_type, induc_val);

	      add_phi_arg (as_a <gphi *> (phi), induc_val_vec,
			   loop_preheader_edge (loop), UNKNOWN_LOCATION);
	    }
	  else
	    add_phi_arg (as_a <gphi *> (phi), vec_init_def,
			 loop_preheader_edge (loop), UNKNOWN_LOCATION);

	  /* Set the loop-latch arg for the reduction-phi.  */
	  if (j > 0)
	    def = vect_get_vec_def_for_stmt_copy (vect_unknown_def_type, def);

	  add_phi_arg (as_a <gphi *> (phi), def, loop_latch_edge (loop),
		       UNKNOWN_LOCATION);

	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location,
			       "transform reduction: created def-use cycle: ");
	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, SSA_NAME_DEF_STMT (def), 0);
	    }
	}
    }
  /* For cond reductions we want to create a new vector (INDEX_COND_EXPR)
     which is updated with the current index of the loop for every match of
     the original loop's cond_expr (VEC_STMT).  This results in a vector
     containing the last time the condition passed for that vector lane.
     The first match will be a 1 to allow 0 to be used for non-matching
     indexes.  If there are no matches at all then the vector will be all
     zeroes.  */
  if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == COND_REDUCTION)
    {
      tree indx_before_incr, indx_after_incr;
      poly_uint64 nunits_out = TYPE_VECTOR_SUBPARTS (vectype);

      gimple *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
      gcc_assert (gimple_assign_rhs_code (vec_stmt) == VEC_COND_EXPR);

      int scalar_precision
	= GET_MODE_PRECISION (SCALAR_TYPE_MODE (TREE_TYPE (vectype)));
      tree cr_index_scalar_type = make_unsigned_type (scalar_precision);
      tree cr_index_vector_type = build_vector_type
	(cr_index_scalar_type, TYPE_VECTOR_SUBPARTS (vectype));

      /* First we create a simple vector induction variable which starts
	 with the values {1,2,3,...} (SERIES_VECT) and increments by the
	 vector size (STEP).  */

      /* Create a {1,2,3,...} vector.  */
      tree series_vect = build_index_vector (cr_index_vector_type, 1, 1);

      /* Create a vector of the step value.  */
      tree step = build_int_cst (cr_index_scalar_type, nunits_out);
      tree vec_step = build_vector_from_val (cr_index_vector_type, step);

      /* Create an induction variable.  */
      gimple_stmt_iterator incr_gsi;
      bool insert_after;
      standard_iv_increment_position (loop, &incr_gsi, &insert_after);
      create_iv (series_vect, vec_step, NULL_TREE, loop, &incr_gsi,
		 insert_after, &indx_before_incr, &indx_after_incr);

      /* Next create a new phi node vector (NEW_PHI_TREE) which starts
	 filled with zeros (VEC_ZERO).  */

      /* Create a vector of 0s.  */
      tree zero = build_zero_cst (cr_index_scalar_type);
      tree vec_zero = build_vector_from_val (cr_index_vector_type, zero);

      /* Create a vector phi node.  */
      tree new_phi_tree = make_ssa_name (cr_index_vector_type);
      new_phi = create_phi_node (new_phi_tree, loop->header);
      loop_vinfo->add_stmt (new_phi);
      add_phi_arg (as_a <gphi *> (new_phi), vec_zero,
		   loop_preheader_edge (loop), UNKNOWN_LOCATION);

      /* Now take the condition from the loop's original cond_expr
	 (VEC_STMT) and produce a new cond_expr (INDEX_COND_EXPR) which for
	 every match uses values from the induction variable
	 (INDEX_BEFORE_INCR) otherwise uses values from the phi node
	 (NEW_PHI_TREE).
	 Finally, we update the phi (NEW_PHI_TREE) to take the value of
	 the new cond_expr (INDEX_COND_EXPR).  */

      /* Duplicate the condition from vec_stmt.  */
      tree ccompare = unshare_expr (gimple_assign_rhs1 (vec_stmt));

      /* Create a conditional, where the condition is taken from vec_stmt
	 (CCOMPARE), then is the induction index (INDEX_BEFORE_INCR) and
	 else is the phi (NEW_PHI_TREE).  */
      tree index_cond_expr = build3 (VEC_COND_EXPR, cr_index_vector_type,
				     ccompare, indx_before_incr,
				     new_phi_tree);
      induction_index = make_ssa_name (cr_index_vector_type);
      gimple *index_condition = gimple_build_assign (induction_index,
						     index_cond_expr);
      gsi_insert_before (&incr_gsi, index_condition, GSI_SAME_STMT);
      stmt_vec_info index_vec_info = loop_vinfo->add_stmt (index_condition);
      STMT_VINFO_VECTYPE (index_vec_info) = cr_index_vector_type;

      /* Update the phi with the vec cond.  */
      add_phi_arg (as_a <gphi *> (new_phi), induction_index,
		   loop_latch_edge (loop), UNKNOWN_LOCATION);
    }
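  /* Concretely (an illustration only): for a COND_REDUCTION with four lanes,
     the IV above takes the values {1,2,3,4}, {5,6,7,8}, ... so after the
     loop each lane of INDUCTION_INDEX holds the 1-based position of the last
     scalar iteration whose condition matched in that lane, or 0 if the
     condition never matched there.  */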
  /* 2. Create epilog code.
	The reduction epilog code operates across the elements of the vector
	of partial results computed by the vectorized loop.
	The reduction epilog code consists of:

	step 1: compute the scalar result in a vector (v_out2)
	step 2: extract the scalar result (s_out3) from the vector (v_out2)
	step 3: adjust the scalar result (s_out3) if needed.

	Step 1 can be accomplished using one the following three schemes:
	  (scheme 1) using reduc_fn, if available.
	  (scheme 2) using whole-vector shifts, if available.
	  (scheme 3) using a scalar loop. In this case steps 1+2 above are
		     combined.

	The overall epilog code looks like this:

	  s_out0 = phi <s_loop>                 # original EXIT_PHI
	  v_out1 = phi <VECT_DEF>               # NEW_EXIT_PHI
	  v_out2 = reduce <v_out1>              # step 1
	  s_out3 = extract_field <v_out2, 0>    # step 2
	  s_out4 = adjust_result <s_out3>       # step 3

	  (step 3 is optional, and steps 1 and 2 may be combined).
	  Lastly, the uses of s_out0 are replaced by s_out4.  */
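  /* In the code below, the REDUC_FN cases implement scheme 1, the
     "reduce_with_shift" path implements scheme 2, and the final
     scalar-extraction loop implements scheme 3.  */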
  /* 2.1 Create new loop-exit-phis to preserve loop-closed form:
         v_out1 = phi <VECT_DEF>
         Store them in NEW_PHIS.  */

  exit_bb = single_exit (loop)->dest;
  prev_phi_info = NULL;
  new_phis.create (vect_defs.length ());
  FOR_EACH_VEC_ELT (vect_defs, i, def)
    {
      for (j = 0; j < ncopies; j++)
	{
	  tree new_def = copy_ssa_name (def);
	  phi = create_phi_node (new_def, exit_bb);
	  stmt_vec_info phi_info = loop_vinfo->add_stmt (phi);
	  if (j == 0)
	    new_phis.quick_push (phi);
	  else
	    {
	      def = vect_get_vec_def_for_stmt_copy (dt, def);
	      STMT_VINFO_RELATED_STMT (prev_phi_info) = phi;
	    }

	  SET_PHI_ARG_DEF (phi, single_exit (loop)->dest_idx, def);
	  prev_phi_info = phi_info;
	}
    }

  /* The epilogue is created for the outer-loop, i.e., for the loop being
     vectorized.  Create exit phis for the outer loop.  */
  if (double_reduc)
    {
      loop = outer_loop;
      exit_bb = single_exit (loop)->dest;
      inner_phis.create (vect_defs.length ());
      FOR_EACH_VEC_ELT (new_phis, i, phi)
	{
	  tree new_result = copy_ssa_name (PHI_RESULT (phi));
	  gphi *outer_phi = create_phi_node (new_result, exit_bb);
	  SET_PHI_ARG_DEF (outer_phi, single_exit (loop)->dest_idx,
			   PHI_RESULT (phi));
	  prev_phi_info = loop_vinfo->add_stmt (outer_phi);
	  inner_phis.quick_push (phi);
	  new_phis[i] = outer_phi;
	  while (STMT_VINFO_RELATED_STMT (vinfo_for_stmt (phi)))
	    {
	      phi = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (phi));
	      new_result = copy_ssa_name (PHI_RESULT (phi));
	      outer_phi = create_phi_node (new_result, exit_bb);
	      SET_PHI_ARG_DEF (outer_phi, single_exit (loop)->dest_idx,
			       PHI_RESULT (phi));
	      stmt_vec_info outer_phi_info = loop_vinfo->add_stmt (outer_phi);
	      STMT_VINFO_RELATED_STMT (prev_phi_info) = outer_phi;
	      prev_phi_info = outer_phi_info;
	    }
	}
    }

  exit_gsi = gsi_after_labels (exit_bb);
  /* 2.2 Get the relevant tree-code to use in the epilog for schemes 2,3
         (i.e. when reduc_fn is not available) and in the final adjustment
	 code (if needed).  Also get the original scalar reduction variable as
         defined in the loop.  In case STMT is a "pattern-stmt" (i.e. - it
         represents a reduction pattern), the tree-code and scalar-def are
         taken from the original stmt that the pattern-stmt (STMT) replaces.
         Otherwise (it is a regular reduction) - the tree-code and scalar-def
         are taken from STMT.  */

  orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
  if (!orig_stmt)
    {
      /* Regular reduction  */
      orig_stmt = stmt;
    }
  else
    {
      /* Reduction pattern  */
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (orig_stmt);
      gcc_assert (STMT_VINFO_IN_PATTERN_P (stmt_vinfo));
      gcc_assert (STMT_VINFO_RELATED_STMT (stmt_vinfo) == stmt);
    }

  code = gimple_assign_rhs_code (orig_stmt);
  /* For MINUS_EXPR the initial vector is [init_val,0,...,0], therefore,
     partial results are added and not subtracted.  */
  if (code == MINUS_EXPR)
    code = PLUS_EXPR;

  scalar_dest = gimple_assign_lhs (orig_stmt);
  scalar_type = TREE_TYPE (scalar_dest);
  scalar_results.create (group_size);
  new_scalar_dest = vect_create_destination_var (scalar_dest, NULL);
  bitsize = TYPE_SIZE (scalar_type);
  /* In case this is a reduction in an inner-loop while vectorizing an outer
     loop - we don't need to extract a single scalar result at the end of the
     inner-loop (unless it is double reduction, i.e., the use of reduction is
     outside the outer-loop).  The final vector of partial results will be used
     in the vectorized outer-loop, or reduced to a scalar result at the end of
     the outer-loop.  */
  if (nested_in_vect_loop && !double_reduc)
    goto vect_finalize_reduction;

  /* SLP reduction without reduction chain, e.g.,
     # a1 = phi <a2, a0>
     # b1 = phi <b2, b0>
     a2 = operation (a1)
     b2 = operation (b1)  */
  slp_reduc = (slp_node && !REDUC_GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)));

  /* True if we should implement SLP_REDUC using native reduction operations
     instead of scalar operations.  */
  direct_slp_reduc = (reduc_fn != IFN_LAST
		      && slp_reduc
		      && !TYPE_VECTOR_SUBPARTS (vectype).is_constant ());

  /* In case of reduction chain, e.g.,
     # a1 = phi <a3, a0>
     a2 = operation (a1)
     a3 = operation (a2),

     we may end up with more than one vector result.  Here we reduce them to
     one vector.  */
  if (REDUC_GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) || direct_slp_reduc)
    {
      tree first_vect = PHI_RESULT (new_phis[0]);
      gassign *new_vec_stmt = NULL;
      vec_dest = vect_create_destination_var (scalar_dest, vectype);
      for (k = 1; k < new_phis.length (); k++)
	{
	  gimple *next_phi = new_phis[k];
	  tree second_vect = PHI_RESULT (next_phi);
	  tree tem = make_ssa_name (vec_dest, new_vec_stmt);
	  new_vec_stmt = gimple_build_assign (tem, code,
					      first_vect, second_vect);
	  gsi_insert_before (&exit_gsi, new_vec_stmt, GSI_SAME_STMT);
	  first_vect = tem;
	}

      new_phi_result = first_vect;
      if (new_vec_stmt)
	{
	  new_phis.truncate (0);
	  new_phis.safe_push (new_vec_stmt);
	}
    }
  /* Likewise if we couldn't use a single def-use cycle.  */
  else if (ncopies > 1)
    {
      gcc_assert (new_phis.length () == 1);
      tree first_vect = PHI_RESULT (new_phis[0]);
      gassign *new_vec_stmt = NULL;
      vec_dest = vect_create_destination_var (scalar_dest, vectype);
      gimple *next_phi = new_phis[0];
      for (int k = 1; k < ncopies; ++k)
	{
	  next_phi = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (next_phi));
	  tree second_vect = PHI_RESULT (next_phi);
	  tree tem = make_ssa_name (vec_dest, new_vec_stmt);
	  new_vec_stmt = gimple_build_assign (tem, code,
					      first_vect, second_vect);
	  gsi_insert_before (&exit_gsi, new_vec_stmt, GSI_SAME_STMT);
	  first_vect = tem;
	}
      new_phi_result = first_vect;
      new_phis.truncate (0);
      new_phis.safe_push (new_vec_stmt);
    }
  else
    new_phi_result = PHI_RESULT (new_phis[0]);
  if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == COND_REDUCTION
      && reduc_fn != IFN_LAST)
    {
      /* For condition reductions, we have a vector (NEW_PHI_RESULT) containing
	 various data values where the condition matched and another vector
	 (INDUCTION_INDEX) containing all the indexes of those matches.  We
	 need to extract the last matching index (which will be the index with
	 highest value) and use this to index into the data vector.
	 For the case where there were no matches, the data vector will contain
	 all default values and the index vector will be all zeros.  */
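      /* A small worked example (values invented for illustration): with
	 NEW_PHI_RESULT = {d0, d1, d2, d3} and INDUCTION_INDEX = {0, 9, 0, 7},
	 the maximum index is 9, so the code below selects d1; had there been
	 no match at all, every index would be 0 and all lanes would hold the
	 (identical) default value.  */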
      /* Get various versions of the type of the vector of indexes.  */
      tree index_vec_type = TREE_TYPE (induction_index);
      gcc_checking_assert (TYPE_UNSIGNED (index_vec_type));
      tree index_scalar_type = TREE_TYPE (index_vec_type);
      tree index_vec_cmp_type = build_same_sized_truth_vector_type
	(index_vec_type);

      /* Get an unsigned integer version of the type of the data vector.  */
      int scalar_precision
	= GET_MODE_PRECISION (SCALAR_TYPE_MODE (scalar_type));
      tree scalar_type_unsigned = make_unsigned_type (scalar_precision);
      tree vectype_unsigned = build_vector_type
	(scalar_type_unsigned, TYPE_VECTOR_SUBPARTS (vectype));

      /* First we need to create a vector (ZERO_VEC) of zeros and another
	 vector (MAX_INDEX_VEC) filled with the last matching index, which we
	 can create using a MAX reduction and then expanding.
	 In the case where the loop never made any matches, the max index will
	 be zero.  */

      /* Vector of {0, 0, 0,...}.  */
      tree zero_vec = make_ssa_name (vectype);
      tree zero_vec_rhs = build_zero_cst (vectype);
      gimple *zero_vec_stmt = gimple_build_assign (zero_vec, zero_vec_rhs);
      gsi_insert_before (&exit_gsi, zero_vec_stmt, GSI_SAME_STMT);

      /* Find maximum value from the vector of found indexes.  */
      tree max_index = make_ssa_name (index_scalar_type);
      gcall *max_index_stmt = gimple_build_call_internal (IFN_REDUC_MAX,
							  1, induction_index);
      gimple_call_set_lhs (max_index_stmt, max_index);
      gsi_insert_before (&exit_gsi, max_index_stmt, GSI_SAME_STMT);

      /* Vector of {max_index, max_index, max_index,...}.  */
      tree max_index_vec = make_ssa_name (index_vec_type);
      tree max_index_vec_rhs = build_vector_from_val (index_vec_type,
						      max_index);
      gimple *max_index_vec_stmt = gimple_build_assign (max_index_vec,
							max_index_vec_rhs);
      gsi_insert_before (&exit_gsi, max_index_vec_stmt, GSI_SAME_STMT);

      /* Next we compare the new vector (MAX_INDEX_VEC) full of max indexes
	 with the vector (INDUCTION_INDEX) of found indexes, choosing values
	 from the data vector (NEW_PHI_RESULT) for matches, 0 (ZERO_VEC)
	 otherwise.  Only one value should match, resulting in a vector
	 (VEC_COND) with one data value and the rest zeros.
	 In the case where the loop never made any matches, every index will
	 match, resulting in a vector with all data values (which will all be
	 the default value).  */

      /* Compare the max index vector to the vector of found indexes to find
	 the position of the max value.  */
      tree vec_compare = make_ssa_name (index_vec_cmp_type);
      gimple *vec_compare_stmt = gimple_build_assign (vec_compare, EQ_EXPR,
						      induction_index,
						      max_index_vec);
      gsi_insert_before (&exit_gsi, vec_compare_stmt, GSI_SAME_STMT);

      /* Use the compare to choose either values from the data vector or
	 zeros.  */
      tree vec_cond = make_ssa_name (vectype);
      gimple *vec_cond_stmt = gimple_build_assign (vec_cond, VEC_COND_EXPR,
						   vec_compare, new_phi_result,
						   zero_vec);
      gsi_insert_before (&exit_gsi, vec_cond_stmt, GSI_SAME_STMT);

      /* Finally we need to extract the data value from the vector (VEC_COND)
	 into a scalar (MATCHED_DATA_REDUC).  Logically we want to do an OR
	 reduction, but because this doesn't exist, we can use a MAX reduction
	 instead.  The data value might be signed or a float so we need to cast
	 it first.
	 In the case where the loop never made any matches, the data values are
	 all identical, and so will reduce down correctly.  */

      /* Make the matched data values unsigned.  */
      tree vec_cond_cast = make_ssa_name (vectype_unsigned);
      tree vec_cond_cast_rhs = build1 (VIEW_CONVERT_EXPR, vectype_unsigned,
				       vec_cond);
      gimple *vec_cond_cast_stmt = gimple_build_assign (vec_cond_cast,
							vec_cond_cast_rhs);
      gsi_insert_before (&exit_gsi, vec_cond_cast_stmt, GSI_SAME_STMT);

      /* Reduce down to a scalar value.  */
      tree data_reduc = make_ssa_name (scalar_type_unsigned);
      gcall *data_reduc_stmt = gimple_build_call_internal (IFN_REDUC_MAX,
							   1, vec_cond_cast);
      gimple_call_set_lhs (data_reduc_stmt, data_reduc);
      gsi_insert_before (&exit_gsi, data_reduc_stmt, GSI_SAME_STMT);

      /* Convert the reduced value back to the result type and set as the
	 result.  */
      gimple_seq stmts = NULL;
      new_temp = gimple_build (&stmts, VIEW_CONVERT_EXPR, scalar_type,
			       data_reduc);
      gsi_insert_seq_before (&exit_gsi, stmts, GSI_SAME_STMT);
      scalar_results.safe_push (new_temp);
    }
  else if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == COND_REDUCTION
	   && reduc_fn == IFN_LAST)
    {
      /* Condition reduction without supported IFN_REDUC_MAX.  Generate
	 idx = 0;
	 idx_val = induction_index[0];
	 val = data_reduc[0];
	 for (idx = 0, val = init, i = 0; i < nelts; ++i)
	   if (induction_index[i] > idx_val)
	     val = data_reduc[i], idx_val = induction_index[i];
	 return val;  */

      tree data_eltype = TREE_TYPE (TREE_TYPE (new_phi_result));
      tree idx_eltype = TREE_TYPE (TREE_TYPE (induction_index));
      unsigned HOST_WIDE_INT el_size = tree_to_uhwi (TYPE_SIZE (idx_eltype));
      poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (TREE_TYPE (induction_index));
      /* Enforced by vectorizable_reduction, which ensures we have target
	 support before allowing a conditional reduction on variable-length
	 vectors.  */
      unsigned HOST_WIDE_INT v_size = el_size * nunits.to_constant ();
      tree idx_val = NULL_TREE, val = NULL_TREE;
      for (unsigned HOST_WIDE_INT off = 0; off < v_size; off += el_size)
	{
	  tree old_idx_val = idx_val;
	  tree old_val = val;
	  idx_val = make_ssa_name (idx_eltype);
	  epilog_stmt = gimple_build_assign (idx_val, BIT_FIELD_REF,
					     build3 (BIT_FIELD_REF, idx_eltype,
						     induction_index,
						     bitsize_int (el_size),
						     bitsize_int (off)));
	  gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
	  val = make_ssa_name (data_eltype);
	  epilog_stmt = gimple_build_assign (val, BIT_FIELD_REF,
					     build3 (BIT_FIELD_REF,
						     data_eltype,
						     new_phi_result,
						     bitsize_int (el_size),
						     bitsize_int (off)));
	  gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
	  if (off != 0)
	    {
	      tree new_idx_val = idx_val;
	      tree new_val = val;
	      if (off != v_size - el_size)
		{
		  new_idx_val = make_ssa_name (idx_eltype);
		  epilog_stmt = gimple_build_assign (new_idx_val,
						     MAX_EXPR, idx_val,
						     old_idx_val);
		  gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
		}
	      new_val = make_ssa_name (data_eltype);
	      epilog_stmt = gimple_build_assign (new_val,
						 COND_EXPR,
						 build2 (GT_EXPR,
							 boolean_type_node,
							 idx_val,
							 old_idx_val),
						 val, old_val);
	      gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
	      idx_val = new_idx_val;
	      val = new_val;
	    }
	}
      /* Convert the reduced value back to the result type and set as the
	 result.  */
      gimple_seq stmts = NULL;
      val = gimple_convert (&stmts, scalar_type, val);
      gsi_insert_seq_before (&exit_gsi, stmts, GSI_SAME_STMT);
      scalar_results.safe_push (val);
    }
  /* 2.3 Create the reduction code, using one of the three schemes described
         above.  In SLP we simply need to extract all the elements from the
         vector (without reducing them), so we use scalar shifts.  */
  else if (reduc_fn != IFN_LAST && !slp_reduc)
    {
      tree tmp;
      tree vec_elem_type;

      /* Case 1:  Create:
	 v_out2 = reduc_expr <v_out1>  */

      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "Reduce using direct vector reduction.\n");

      vec_elem_type = TREE_TYPE (TREE_TYPE (new_phi_result));
      if (!useless_type_conversion_p (scalar_type, vec_elem_type))
	{
	  tree tmp_dest
	    = vect_create_destination_var (scalar_dest, vec_elem_type);
	  epilog_stmt = gimple_build_call_internal (reduc_fn, 1,
						    new_phi_result);
	  gimple_set_lhs (epilog_stmt, tmp_dest);
	  new_temp = make_ssa_name (tmp_dest, epilog_stmt);
	  gimple_set_lhs (epilog_stmt, new_temp);
	  gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);

	  epilog_stmt = gimple_build_assign (new_scalar_dest, NOP_EXPR,
					     new_temp);
	}
      else
	{
	  epilog_stmt = gimple_build_call_internal (reduc_fn, 1,
						    new_phi_result);
	  gimple_set_lhs (epilog_stmt, new_scalar_dest);
	}

      new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
      gimple_set_lhs (epilog_stmt, new_temp);
      gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);

      if ((STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
	   == INTEGER_INDUC_COND_REDUCTION)
	  && !operand_equal_p (initial_def, induc_val, 0))
	{
	  /* Earlier we set the initial value to be a vector of induc_val
	     values.  Check the result and if it is induc_val then replace
	     with the original initial value, unless induc_val is
	     the same as initial_def already.  */
	  tree zcompare = build2 (EQ_EXPR, boolean_type_node, new_temp,
				  induc_val);

	  tmp = make_ssa_name (new_scalar_dest);
	  epilog_stmt = gimple_build_assign (tmp, COND_EXPR, zcompare,
					     initial_def, new_temp);
	  gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
	  new_temp = tmp;
	}

      scalar_results.safe_push (new_temp);
    }
  else if (direct_slp_reduc)
    {
      /* Here we create one vector for each of the REDUC_GROUP_SIZE results,
	 with the elements for other SLP statements replaced with the
	 neutral value.  We can then do a normal reduction on each vector.  */

      /* Enforced by vectorizable_reduction.  */
      gcc_assert (new_phis.length () == 1);
      gcc_assert (pow2p_hwi (group_size));

      slp_tree orig_phis_slp_node = slp_node_instance->reduc_phis;
      vec<gimple *> orig_phis = SLP_TREE_SCALAR_STMTS (orig_phis_slp_node);
      gimple_seq seq = NULL;

      /* Build a vector {0, 1, 2, ...}, with the same number of elements
	 and the same element size as VECTYPE.  */
      tree index = build_index_vector (vectype, 0, 1);
      tree index_type = TREE_TYPE (index);
      tree index_elt_type = TREE_TYPE (index_type);
      tree mask_type = build_same_sized_truth_vector_type (index_type);

      /* Create a vector that, for each element, identifies which of
	 the REDUC_GROUP_SIZE results should use it.  */
      tree index_mask = build_int_cst (index_elt_type, group_size - 1);
      index = gimple_build (&seq, BIT_AND_EXPR, index_type, index,
			    build_vector_from_val (index_type, index_mask));

      /* Get a neutral vector value.  This is simply a splat of the neutral
	 scalar value if we have one, otherwise the initial scalar value
	 is itself a neutral value.  */
      tree vector_identity = NULL_TREE;
      if (neutral_op)
	vector_identity = gimple_build_vector_from_val (&seq, vectype,
							neutral_op);
      for (unsigned int i = 0; i < group_size; ++i)
	{
	  /* If there's no universal neutral value, we can use the
	     initial scalar value from the original PHI.  This is used
	     for MIN and MAX reduction, for example.  */
	  if (!neutral_op)
	    {
	      tree scalar_value
		= PHI_ARG_DEF_FROM_EDGE (orig_phis[i],
					 loop_preheader_edge (loop));
	      vector_identity = gimple_build_vector_from_val (&seq, vectype,
							      scalar_value);
	    }

	  /* Calculate the equivalent of:

	     sel[j] = (index[j] == i);

	     which selects the elements of NEW_PHI_RESULT that should
	     be included in the result.  */
	  tree compare_val = build_int_cst (index_elt_type, i);
	  compare_val = build_vector_from_val (index_type, compare_val);
	  tree sel = gimple_build (&seq, EQ_EXPR, mask_type,
				   index, compare_val);

	  /* Calculate the equivalent of:

	     vec = seq ? new_phi_result : vector_identity;

	     VEC is now suitable for a full vector reduction.  */
	  tree vec = gimple_build (&seq, VEC_COND_EXPR, vectype,
				   sel, new_phi_result, vector_identity);

	  /* Do the reduction and convert it to the appropriate type.  */
	  tree scalar = gimple_build (&seq, as_combined_fn (reduc_fn),
				      TREE_TYPE (vectype), vec);
	  scalar = gimple_convert (&seq, scalar_type, scalar);
	  scalar_results.safe_push (scalar);
	}
      gsi_insert_seq_before (&exit_gsi, seq, GSI_SAME_STMT);
    }
  else
    {
      bool reduce_with_shift;
      tree vec_temp;

      /* COND reductions all do the final reduction with MAX_EXPR
	 or MIN_EXPR.  */
      if (code == COND_EXPR)
	{
	  if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
	      == INTEGER_INDUC_COND_REDUCTION)
	    code = induc_code;
	  else
	    code = MAX_EXPR;
	}

      /* See if the target wants to do the final (shift) reduction
	 in a vector mode of smaller size and first reduce upper/lower
	 halves against each other.  */
      enum machine_mode mode1 = mode;
      tree vectype1 = vectype;
      unsigned sz = tree_to_uhwi (TYPE_SIZE_UNIT (vectype));
      unsigned sz1 = sz;
      if (!slp_reduc
	  && (mode1 = targetm.vectorize.split_reduction (mode)) != mode)
	sz1 = GET_MODE_SIZE (mode1).to_constant ();

      vectype1 = get_vectype_for_scalar_type_and_size (scalar_type, sz1);
      reduce_with_shift = have_whole_vector_shift (mode1);
      if (!VECTOR_MODE_P (mode1))
	reduce_with_shift = false;
      else
	{
	  optab optab = optab_for_tree_code (code, vectype1, optab_default);
	  if (optab_handler (optab, mode1) == CODE_FOR_nothing)
	    reduce_with_shift = false;
	}

      /* First reduce the vector to the desired vector size we should
	 do shift reduction on by combining upper and lower halves.  */
      new_temp = new_phi_result;
      while (sz > sz1)
	{
	  gcc_assert (!slp_reduc);
	  sz /= 2;
	  vectype1 = get_vectype_for_scalar_type_and_size (scalar_type, sz);

	  /* The target has to make sure we support lowpart/highpart
	     extraction, either via direct vector extract or through
	     an integer mode punning.  */
	  tree dst1, dst2;
	  if (convert_optab_handler (vec_extract_optab,
				     TYPE_MODE (TREE_TYPE (new_temp)),
				     TYPE_MODE (vectype1))
	      != CODE_FOR_nothing)
	    {
	      /* Extract sub-vectors directly once vec_extract becomes
		 a conversion optab.  */
	      dst1 = make_ssa_name (vectype1);
	      epilog_stmt
		= gimple_build_assign (dst1, BIT_FIELD_REF,
				       build3 (BIT_FIELD_REF, vectype1,
					       new_temp, TYPE_SIZE (vectype1),
					       bitsize_int (0)));
	      gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
	      dst2 = make_ssa_name (vectype1);
	      epilog_stmt
		= gimple_build_assign (dst2, BIT_FIELD_REF,
				       build3 (BIT_FIELD_REF, vectype1,
					       new_temp, TYPE_SIZE (vectype1),
					       bitsize_int (sz * BITS_PER_UNIT)));
	      gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
	    }
	  else
	    {
	      /* Extract via punning to appropriately sized integer mode
		 vector.  */
	      tree eltype = build_nonstandard_integer_type (sz * BITS_PER_UNIT,
							    1);
	      tree etype = build_vector_type (eltype, 2);
	      gcc_assert (convert_optab_handler (vec_extract_optab,
						 TYPE_MODE (etype),
						 TYPE_MODE (eltype))
			  != CODE_FOR_nothing);
	      tree tem = make_ssa_name (etype);
	      epilog_stmt = gimple_build_assign (tem, VIEW_CONVERT_EXPR,
						 build1 (VIEW_CONVERT_EXPR,
							 etype, new_temp));
	      gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
	      new_temp = tem;
	      tem = make_ssa_name (eltype);
	      epilog_stmt
		= gimple_build_assign (tem, BIT_FIELD_REF,
				       build3 (BIT_FIELD_REF, eltype,
					       new_temp, TYPE_SIZE (eltype),
					       bitsize_int (0)));
	      gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
	      dst1 = make_ssa_name (vectype1);
	      epilog_stmt = gimple_build_assign (dst1, VIEW_CONVERT_EXPR,
						 build1 (VIEW_CONVERT_EXPR,
							 vectype1, tem));
	      gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
	      tem = make_ssa_name (eltype);
	      epilog_stmt
		= gimple_build_assign (tem, BIT_FIELD_REF,
				       build3 (BIT_FIELD_REF, eltype,
					       new_temp, TYPE_SIZE (eltype),
					       bitsize_int (sz * BITS_PER_UNIT)));
	      gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
	      dst2 = make_ssa_name (vectype1);
	      epilog_stmt = gimple_build_assign (dst2, VIEW_CONVERT_EXPR,
						 build1 (VIEW_CONVERT_EXPR,
							 vectype1, tem));
	      gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
	    }

	  new_temp = make_ssa_name (vectype1);
	  epilog_stmt = gimple_build_assign (new_temp, code, dst1, dst2);
	  gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
	}

      if (reduce_with_shift && !slp_reduc)
	{
	  int element_bitsize = tree_to_uhwi (bitsize);
	  /* Enforced by vectorizable_reduction, which disallows SLP reductions
	     for variable-length vectors and also requires direct target
	     support for loop reductions.  */
	  int vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype1));
	  int nelements = vec_size_in_bits / element_bitsize;
	  vec_perm_builder sel;
	  vec_perm_indices indices;

	  int elt_offset;

	  tree zero_vec = build_zero_cst (vectype1);
	  /* Case 2: Create:
	     for (offset = nelements/2; offset >= 1; offset/=2)
	       {
		 Create:  va' = vec_shift <va, offset>
		 Create:  va = vop <va, va'>
	       }  */
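	  /* For example (sizes for illustration only): with eight elements the
	     loop below runs for offsets 4, 2 and 1, i.e. log2(nelements)
	     shift-and-reduce pairs, which is what the cost model charged as
	     exact_log2 (nelements) * 2 vector statements.  */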
	  tree rhs;

	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_NOTE, vect_location,
			     "Reduce using vector shifts\n");

	  mode1 = TYPE_MODE (vectype1);
	  vec_dest = vect_create_destination_var (scalar_dest, vectype1);
	  for (elt_offset = nelements / 2;
	       elt_offset >= 1;
	       elt_offset /= 2)
	    {
	      calc_vec_perm_mask_for_shift (elt_offset, nelements, &sel);
	      indices.new_vector (sel, 2, nelements);
	      tree mask = vect_gen_perm_mask_any (vectype1, indices);
	      epilog_stmt = gimple_build_assign (vec_dest, VEC_PERM_EXPR,
						 new_temp, zero_vec, mask);
	      new_name = make_ssa_name (vec_dest, epilog_stmt);
	      gimple_assign_set_lhs (epilog_stmt, new_name);
	      gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);

	      epilog_stmt = gimple_build_assign (vec_dest, code, new_name,
						 new_temp);
	      new_temp = make_ssa_name (vec_dest, epilog_stmt);
	      gimple_assign_set_lhs (epilog_stmt, new_temp);
	      gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
	    }

	  /* 2.4  Extract the final scalar result.  Create:
	     s_out3 = extract_field <v_out2, bitpos>  */

	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_NOTE, vect_location,
			     "extract scalar result\n");

	  rhs = build3 (BIT_FIELD_REF, scalar_type, new_temp,
			bitsize, bitsize_zero_node);
	  epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
	  new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
	  gimple_assign_set_lhs (epilog_stmt, new_temp);
	  gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
	  scalar_results.safe_push (new_temp);
	}
      else
	{
	  /* Case 3: Create:
	     s = extract_field <v_out2, 0>
	     for (offset = element_size;
		  offset < vector_size;
		  offset += element_size;)
	       {
		 Create:  s' = extract_field <v_out2, offset>
		 Create:  s = op <s, s'>  // For non SLP cases
	       }  */

	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_NOTE, vect_location,
			     "Reduce using scalar code.\n");

	  int vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype1));
	  int element_bitsize = tree_to_uhwi (bitsize);
	  FOR_EACH_VEC_ELT (new_phis, i, new_phi)
	    {
	      int bit_offset;
	      if (gimple_code (new_phi) == GIMPLE_PHI)
		vec_temp = PHI_RESULT (new_phi);
	      else
		vec_temp = gimple_assign_lhs (new_phi);
	      tree rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp, bitsize,
				 bitsize_zero_node);
	      epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
	      new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
	      gimple_assign_set_lhs (epilog_stmt, new_temp);
	      gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);

	      /* In SLP we don't need to apply reduction operation, so we just
		 collect s' values in SCALAR_RESULTS.  */
	      if (slp_reduc)
		scalar_results.safe_push (new_temp);

	      for (bit_offset = element_bitsize;
		   bit_offset < vec_size_in_bits;
		   bit_offset += element_bitsize)
		{
		  tree bitpos = bitsize_int (bit_offset);
		  tree rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp,
				     bitsize, bitpos);

		  epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
		  new_name = make_ssa_name (new_scalar_dest, epilog_stmt);
		  gimple_assign_set_lhs (epilog_stmt, new_name);
		  gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);

		  if (slp_reduc)
		    {
		      /* In SLP we don't need to apply reduction operation, so
			 we just collect s' values in SCALAR_RESULTS.  */
		      new_temp = new_name;
		      scalar_results.safe_push (new_name);
		    }
		  else
		    {
		      epilog_stmt = gimple_build_assign (new_scalar_dest, code,
							 new_name, new_temp);
		      new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
		      gimple_assign_set_lhs (epilog_stmt, new_temp);
		      gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
		    }
		}
	    }

	  /* The only case where we need to reduce scalar results in SLP, is
	     unrolling.  If the size of SCALAR_RESULTS is greater than
	     REDUC_GROUP_SIZE, we reduce them combining elements modulo
	     REDUC_GROUP_SIZE.  */
	  if (slp_reduc)
	    {
	      tree res, first_res, new_res;
	      gimple *new_stmt;

	      /* Reduce multiple scalar results in case of SLP unrolling.  */
	      for (j = group_size; scalar_results.iterate (j, &res);
		   j++)
		{
		  first_res = scalar_results[j % group_size];
		  new_stmt = gimple_build_assign (new_scalar_dest, code,
						  first_res, res);
		  new_res = make_ssa_name (new_scalar_dest, new_stmt);
		  gimple_assign_set_lhs (new_stmt, new_res);
		  gsi_insert_before (&exit_gsi, new_stmt, GSI_SAME_STMT);
		  scalar_results[j % group_size] = new_res;
		}
	    }
	  else
	    /* Not SLP - we have one scalar to keep in SCALAR_RESULTS.  */
	    scalar_results.safe_push (new_temp);
	}

      if ((STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
	   == INTEGER_INDUC_COND_REDUCTION)
	  && !operand_equal_p (initial_def, induc_val, 0))
	{
	  /* Earlier we set the initial value to be a vector of induc_val
	     values.  Check the result and if it is induc_val then replace
	     with the original initial value, unless induc_val is
	     the same as initial_def already.  */
	  tree zcompare = build2 (EQ_EXPR, boolean_type_node, new_temp,
				  induc_val);

	  tree tmp = make_ssa_name (new_scalar_dest);
	  epilog_stmt = gimple_build_assign (tmp, COND_EXPR, zcompare,
					     initial_def, new_temp);
	  gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
	  scalar_results[0] = tmp;
	}
    }
vect_finalize_reduction:

  if (double_reduc)
    loop = loop->inner;

  /* 2.5 Adjust the final result by the initial value of the reduction
	 variable. (When such adjustment is not needed, then
	 'adjustment_def' is zero).  For example, if code is PLUS we create:
	 new_temp = loop_exit_def + adjustment_def  */

  if (adjustment_def)
    {
      gcc_assert (!slp_reduc);
      if (nested_in_vect_loop)
	{
	  new_phi = new_phis[0];
	  gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) == VECTOR_TYPE);
	  expr = build2 (code, vectype, PHI_RESULT (new_phi), adjustment_def);
	  new_dest = vect_create_destination_var (scalar_dest, vectype);
	}
      else
	{
	  new_temp = scalar_results[0];
	  gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) != VECTOR_TYPE);
	  expr = build2 (code, scalar_type, new_temp, adjustment_def);
	  new_dest = vect_create_destination_var (scalar_dest, scalar_type);
	}

      epilog_stmt = gimple_build_assign (new_dest, expr);
      new_temp = make_ssa_name (new_dest, epilog_stmt);
      gimple_assign_set_lhs (epilog_stmt, new_temp);
      gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
      if (nested_in_vect_loop)
	{
	  stmt_vec_info epilog_stmt_info = loop_vinfo->add_stmt (epilog_stmt);
	  STMT_VINFO_RELATED_STMT (epilog_stmt_info)
	    = STMT_VINFO_RELATED_STMT (loop_vinfo->lookup_stmt (new_phi));

	  if (!double_reduc)
	    scalar_results.quick_push (new_temp);
	  else
	    scalar_results[0] = new_temp;
	}
      else
	scalar_results[0] = new_temp;

      new_phis[0] = epilog_stmt;
    }
  /* 2.6  Handle the loop-exit phis.  Replace the uses of scalar loop-exit
          phis with new adjusted scalar results, i.e., replace use <s_out0>
          with use <s_out4>.

     Transform:
        loop_exit:
          s_out0 = phi <s_loop>                 # (scalar) EXIT_PHI
          v_out1 = phi <VECT_DEF>               # NEW_EXIT_PHI
          v_out2 = reduce <v_out1>
          s_out3 = extract_field <v_out2, 0>
          s_out4 = adjust_result <s_out3>
          use <s_out0>
          use <s_out0>

     into:

        loop_exit:
          s_out0 = phi <s_loop>                 # (scalar) EXIT_PHI
          v_out1 = phi <VECT_DEF>               # NEW_EXIT_PHI
          v_out2 = reduce <v_out1>
          s_out3 = extract_field <v_out2, 0>
          s_out4 = adjust_result <s_out3>
          use <s_out4>
          use <s_out4> */


  /* In SLP reduction chain we reduce vector results into one vector if
     necessary, hence we set here REDUC_GROUP_SIZE to 1.  SCALAR_DEST is the
     LHS of the last stmt in the reduction chain, since we are looking for
     the loop exit phi node.  */
  if (REDUC_GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
    {
      gimple *dest_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[group_size - 1];
      /* Handle reduction patterns.  */
      if (STMT_VINFO_RELATED_STMT (vinfo_for_stmt (dest_stmt)))
	dest_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (dest_stmt));

      scalar_dest = gimple_assign_lhs (dest_stmt);
      group_size = 1;
    }

  /* In SLP we may have several statements in NEW_PHIS and REDUCTION_PHIS (in
     case that REDUC_GROUP_SIZE is greater than vectorization factor).
     Therefore, we need to match SCALAR_RESULTS with corresponding statements.
     The first (REDUC_GROUP_SIZE / number of new vector stmts) scalar results
     correspond to the first vector stmt, etc.
     (RATIO is equal to (REDUC_GROUP_SIZE / number of new vector stmts)).  */
  if (group_size > new_phis.length ())
    {
      ratio = group_size / new_phis.length ();
      gcc_assert (!(group_size % new_phis.length ()));
    }
  else
    ratio = 1;

  for (k = 0; k < group_size; k++)
    {
      if (k % ratio == 0)
	{
	  epilog_stmt = new_phis[k / ratio];
	  reduction_phi = reduction_phis[k / ratio];
	  if (double_reduc)
	    inner_phi = inner_phis[k / ratio];
	}

      if (slp_reduc)
	{
	  gimple *current_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[k];

	  orig_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (current_stmt));
	  /* SLP statements can't participate in patterns.  */
	  gcc_assert (!orig_stmt);
	  scalar_dest = gimple_assign_lhs (current_stmt);
	}

      phis.create (3);
      /* Find the loop-closed-use at the loop exit of the original scalar
	 result.  (The reduction result is expected to have two immediate uses -
	 one at the latch block, and one at the loop exit).  */
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
	if (!flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (use_p)))
	    && !is_gimple_debug (USE_STMT (use_p)))
	  phis.safe_push (USE_STMT (use_p));

      /* While we expect to have found an exit_phi because of loop-closed-ssa
	 form we can end up without one if the scalar cycle is dead.  */

      FOR_EACH_VEC_ELT (phis, i, exit_phi)
	{
	  if (outer_loop)
	    {
	      stmt_vec_info exit_phi_vinfo
		= loop_vinfo->lookup_stmt (exit_phi);
	      gphi *vect_phi;

	      /* FORNOW. Currently not supporting the case that an inner-loop
		 reduction is not used in the outer-loop (but only outside the
		 outer-loop), unless it is double reduction.  */
	      gcc_assert ((STMT_VINFO_RELEVANT_P (exit_phi_vinfo)
			   && !STMT_VINFO_LIVE_P (exit_phi_vinfo))
			  || double_reduc);

	      if (double_reduc)
		STMT_VINFO_VEC_STMT (exit_phi_vinfo) = inner_phi;
	      else
		STMT_VINFO_VEC_STMT (exit_phi_vinfo) = epilog_stmt;
	      if (!double_reduc
		  || STMT_VINFO_DEF_TYPE (exit_phi_vinfo)
		       != vect_double_reduction_def)
		continue;

	      /* Handle double reduction:

		 stmt1: s1 = phi <s0, s2>  - double reduction phi (outer loop)
		 stmt2: s3 = phi <s1, s4>  - (regular) reduc phi (inner loop)
		 stmt3: s4 = use (s3)      - (regular) reduc stmt (inner loop)
		 stmt4: s2 = phi <s4>      - double reduction stmt (outer loop)

		 At that point the regular reduction (stmt2 and stmt3) is
		 already vectorized, as well as the exit phi node, stmt4.
		 Here we vectorize the phi node of double reduction, stmt1, and
		 update all relevant statements.  */

	      /* Go through all the uses of s2 to find double reduction phi
		 node, i.e., stmt1 above.  */
	      orig_name = PHI_RESULT (exit_phi);
	      FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name)
		{
		  stmt_vec_info use_stmt_vinfo;
		  tree vect_phi_init, preheader_arg, vect_phi_res;
		  basic_block bb = gimple_bb (use_stmt);
		  gimple *use;

		  /* Check that USE_STMT is really double reduction phi
		     node.  */
		  if (gimple_code (use_stmt) != GIMPLE_PHI
		      || gimple_phi_num_args (use_stmt) != 2
		      || bb->loop_father != outer_loop)
		    continue;
		  use_stmt_vinfo = loop_vinfo->lookup_stmt (use_stmt);
		  if (!use_stmt_vinfo
		      || STMT_VINFO_DEF_TYPE (use_stmt_vinfo)
			   != vect_double_reduction_def)
		    continue;

		  /* Create vector phi node for double reduction:
		     vs1 = phi <vs0, vs2>
		     vs1 was created previously in this function by a call to
		       vect_get_vec_def_for_operand and is stored in
		       vec_initial_def;
		     vs2 is defined by INNER_PHI, the vectorized EXIT_PHI;
		     vs0 is created here.  */

		  /* Create vector phi node.  */
		  vect_phi = create_phi_node (vec_initial_def, bb);
		  loop_vec_info_for_loop (outer_loop)->add_stmt (vect_phi);

		  /* Create vs0 - initial def of the double reduction phi.  */
		  preheader_arg = PHI_ARG_DEF_FROM_EDGE (use_stmt,
					     loop_preheader_edge (outer_loop));
		  vect_phi_init = get_initial_def_for_reduction
		    (stmt, preheader_arg, NULL);

		  /* Update phi node arguments with vs0 and vs2.  */
		  add_phi_arg (vect_phi, vect_phi_init,
			       loop_preheader_edge (outer_loop),
			       UNKNOWN_LOCATION);
		  add_phi_arg (vect_phi, PHI_RESULT (inner_phi),
			       loop_latch_edge (outer_loop), UNKNOWN_LOCATION);
		  if (dump_enabled_p ())
		    {
		      dump_printf_loc (MSG_NOTE, vect_location,
				       "created double reduction phi node: ");
		      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vect_phi, 0);
		    }

		  vect_phi_res = PHI_RESULT (vect_phi);

		  /* Replace the use, i.e., set the correct vs1 in the regular
		     reduction phi node.  FORNOW, NCOPIES is always 1, so the
		     loop is redundant.  */
		  use = reduction_phi;
		  for (j = 0; j < ncopies; j++)
		    {
		      edge pr_edge = loop_preheader_edge (loop);
		      SET_PHI_ARG_DEF (use, pr_edge->dest_idx, vect_phi_res);
		      use = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (use));
		    }
		}
	    }
	}

      phis.release ();
      if (nested_in_vect_loop)
	{
	  if (double_reduc)
	    loop = outer_loop;
	  else
	    continue;
	}

      phis.create (3);
      /* Find the loop-closed-use at the loop exit of the original scalar
	 result.  (The reduction result is expected to have two immediate uses,
	 one at the latch block, and one at the loop exit).  For double
	 reductions we are looking for exit phis of the outer loop.  */
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
	{
	  if (!flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (use_p))))
	    {
	      if (!is_gimple_debug (USE_STMT (use_p)))
		phis.safe_push (USE_STMT (use_p));
	    }
	  else
	    {
	      if (double_reduc && gimple_code (USE_STMT (use_p)) == GIMPLE_PHI)
		{
		  tree phi_res = PHI_RESULT (USE_STMT (use_p));

		  FOR_EACH_IMM_USE_FAST (phi_use_p, phi_imm_iter, phi_res)
		    {
		      if (!flow_bb_inside_loop_p (loop,
					     gimple_bb (USE_STMT (phi_use_p)))
			  && !is_gimple_debug (USE_STMT (phi_use_p)))
			phis.safe_push (USE_STMT (phi_use_p));
		    }
		}
	    }
	}

      FOR_EACH_VEC_ELT (phis, i, exit_phi)
	{
	  /* Replace the uses:  */
	  orig_name = PHI_RESULT (exit_phi);
	  scalar_result = scalar_results[k];
	  FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name)
	    FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
	      SET_USE (use_p, scalar_result);
	}

      phis.release ();
    }
}
/* Return a vector of type VECTYPE that is equal to the vector select
   operation "MASK ? VEC : IDENTITY".  Insert the select statements

merge_with_identity (gimple_stmt_iterator *gsi, tree mask, tree vectype,
                     tree vec, tree identity)
  tree cond = make_temp_ssa_name (vectype, NULL, "cond");
  gimple *new_stmt = gimple_build_assign (cond, VEC_COND_EXPR,
                                          mask, vec, identity);
  gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
/* Successively apply CODE to each element of VECTOR_RHS, in left-to-right
   order, starting with LHS.  Insert the extraction statements before GSI and
   associate the new scalar SSA names with variable SCALAR_DEST.
   Return the SSA name for the result.  */
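/* Illustrative sketch (not part of the original sources): expanding a
   4-element vector V with CODE == PLUS_EXPR and initial value LHS emits

       t0 = LHS + V[0];
       t1 = t0  + V[1];
       t2 = t1  + V[2];
       t3 = t2  + V[3];

   before GSI and returns t3.  */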
vect_expand_fold_left (gimple_stmt_iterator *gsi, tree scalar_dest,
                       tree_code code, tree lhs, tree vector_rhs)
  tree vectype = TREE_TYPE (vector_rhs);
  tree scalar_type = TREE_TYPE (vectype);
  tree bitsize = TYPE_SIZE (scalar_type);
  unsigned HOST_WIDE_INT vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype));
  unsigned HOST_WIDE_INT element_bitsize = tree_to_uhwi (bitsize);

  for (unsigned HOST_WIDE_INT bit_offset = 0;
       bit_offset < vec_size_in_bits;
       bit_offset += element_bitsize)
      tree bitpos = bitsize_int (bit_offset);
      tree rhs = build3 (BIT_FIELD_REF, scalar_type, vector_rhs,

      gassign *stmt = gimple_build_assign (scalar_dest, rhs);
      rhs = make_ssa_name (scalar_dest, stmt);
      gimple_assign_set_lhs (stmt, rhs);
      gsi_insert_before (gsi, stmt, GSI_SAME_STMT);

      stmt = gimple_build_assign (scalar_dest, code, lhs, rhs);
      tree new_name = make_ssa_name (scalar_dest, stmt);
      gimple_assign_set_lhs (stmt, new_name);
      gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
/* Perform an in-order reduction (FOLD_LEFT_REDUCTION).  STMT is the
   statement that sets the live-out value.  REDUC_DEF_STMT is the phi
   statement.  CODE is the operation performed by STMT and OPS are
   its scalar operands.  REDUC_INDEX is the index of the operand in
   OPS that is set by REDUC_DEF_STMT.  REDUC_FN is the function that
   implements in-order reduction, or IFN_LAST if we should open-code it.
   VECTYPE_IN is the type of the vector input.  MASKS specifies the masks
   that should be used to control the operation in a fully-masked loop.  */
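/* Illustrative example (not part of the original sources): an in-order
   reduction is used for code such as

       double r = init;
       for (i = 0; i < n; i++)
         r += a[i];

   when the additions may not be reassociated (e.g. without -ffast-math),
   so each vector of loaded elements must be folded into R strictly from
   left to right.  */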
vectorize_fold_left_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
                               gimple **vec_stmt, slp_tree slp_node,
                               gimple *reduc_def_stmt,
                               tree_code code, internal_fn reduc_fn,
                               tree ops[3], tree vectype_in,
                               int reduc_index, vec_loop_masks *masks)
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  tree vectype_out = STMT_VINFO_VECTYPE (stmt_info);
  gimple *new_stmt = NULL;

  ncopies = vect_get_num_copies (loop_vinfo, vectype_in);

  gcc_assert (!nested_in_vect_loop_p (loop, stmt));
  gcc_assert (ncopies == 1);
  gcc_assert (TREE_CODE_LENGTH (code) == binary_op);
  gcc_assert (reduc_index == (code == MINUS_EXPR ? 0 : 1));
  gcc_assert (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
              == FOLD_LEFT_REDUCTION);

  gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (vectype_out),
                        TYPE_VECTOR_SUBPARTS (vectype_in)));

  tree op0 = ops[1 - reduc_index];

  gimple *scalar_dest_def;
  auto_vec<tree> vec_oprnds0;
      vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL, slp_node);
      group_size = SLP_TREE_SCALAR_STMTS (slp_node).length ();
      scalar_dest_def = SLP_TREE_SCALAR_STMTS (slp_node)[group_size - 1];

      tree loop_vec_def0 = vect_get_vec_def_for_operand (op0, stmt);
      vec_oprnds0.create (1);
      vec_oprnds0.quick_push (loop_vec_def0);
      scalar_dest_def = stmt;

  tree scalar_dest = gimple_assign_lhs (scalar_dest_def);
  tree scalar_type = TREE_TYPE (scalar_dest);
  tree reduc_var = gimple_phi_result (reduc_def_stmt);

  int vec_num = vec_oprnds0.length ();
  gcc_assert (vec_num == 1 || slp_node);
  tree vec_elem_type = TREE_TYPE (vectype_out);
  gcc_checking_assert (useless_type_conversion_p (scalar_type, vec_elem_type));

  tree vector_identity = NULL_TREE;
  if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
    vector_identity = build_zero_cst (vectype_out);

  tree scalar_dest_var = vect_create_destination_var (scalar_dest, NULL);

  FOR_EACH_VEC_ELT (vec_oprnds0, i, def0)
      tree mask = NULL_TREE;
      if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
        mask = vect_get_loop_mask (gsi, masks, vec_num, vectype_in, i);

      /* Handle MINUS by adding the negative.  */
      if (reduc_fn != IFN_LAST && code == MINUS_EXPR)
          tree negated = make_ssa_name (vectype_out);
          new_stmt = gimple_build_assign (negated, NEGATE_EXPR, def0);
          gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);

        def0 = merge_with_identity (gsi, mask, vectype_out, def0,

      /* On the first iteration the input is simply the scalar phi
         result, and for subsequent iterations it is the output of
         the preceding operation.  */
      if (reduc_fn != IFN_LAST)
          new_stmt = gimple_build_call_internal (reduc_fn, 2, reduc_var, def0);
          /* For chained SLP reductions the output of the previous reduction
             operation serves as the input of the next.  For the final statement
             the output cannot be a temporary - we reuse the original
             scalar destination of the last statement.  */
          if (i != vec_num - 1)
              gimple_set_lhs (new_stmt, scalar_dest_var);
              reduc_var = make_ssa_name (scalar_dest_var, new_stmt);
              gimple_set_lhs (new_stmt, reduc_var);

          reduc_var = vect_expand_fold_left (gsi, scalar_dest_var, code,
          new_stmt = SSA_NAME_DEF_STMT (reduc_var);
          /* Remove the statement, so that we can use the same code paths
             as for statements that we've just created.  */
          gimple_stmt_iterator tmp_gsi = gsi_for_stmt (new_stmt);
          gsi_remove (&tmp_gsi, false);

      if (i == vec_num - 1)
          gimple_set_lhs (new_stmt, scalar_dest);
          vect_finish_replace_stmt (scalar_dest_def, new_stmt);

        vect_finish_stmt_generation (scalar_dest_def, new_stmt, gsi);

        SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);

    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
/* Function is_nonwrapping_integer_induction.

   Check if STMT (which is part of loop LOOP) both increments and
   does not cause overflow.  */
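/* Illustrative worked example (not part of the original sources): for an
   unsigned char IV with base 0 and step 3 in a loop known to execute at
   most 100 times, the largest value is 3 * 100 = 300, which needs 9 bits
   and so may wrap an 8-bit type; with at most 50 iterations the maximum
   150 still fits in 8 bits and the induction is non-wrapping.  */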
is_nonwrapping_integer_induction (gimple *stmt, struct loop *loop)
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  tree base = STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (stmt_vinfo);
  tree step = STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo);
  tree lhs_type = TREE_TYPE (gimple_phi_result (stmt));
  widest_int ni, max_loop_value, lhs_max;
  wi::overflow_type overflow = wi::OVF_NONE;

  /* Make sure the loop is integer based.  */
  if (TREE_CODE (base) != INTEGER_CST
      || TREE_CODE (step) != INTEGER_CST)

  /* Check that the max size of the loop will not wrap.  */

  if (TYPE_OVERFLOW_UNDEFINED (lhs_type))

  if (! max_stmt_executions (loop, &ni))

  max_loop_value = wi::mul (wi::to_widest (step), ni, TYPE_SIGN (lhs_type),

  max_loop_value = wi::add (wi::to_widest (base), max_loop_value,
                            TYPE_SIGN (lhs_type), &overflow);

  return (wi::min_precision (max_loop_value, TYPE_SIGN (lhs_type))
          <= TYPE_PRECISION (lhs_type));
/* Function vectorizable_reduction.

   Check if STMT performs a reduction operation that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at GSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.

   This function also handles reduction idioms (patterns) that have been
   recognized in advance during vect_pattern_recog.  In this case, STMT may be

     X = pattern_expr (arg0, arg1, ..., X)

   and its STMT_VINFO_RELATED_STMT points to the last stmt in the original
   sequence that had been detected and replaced by the pattern-stmt (STMT).

   This function also handles reduction of condition expressions, for example:
     for (int i = 0; i < N; i++)

   This is handled by vectorising the loop and creating an additional vector
   containing the loop indexes for which "a[i] < value" was true.  In the
   function epilogue this is reduced to a single max value and then used to
   index into the vector of results.

   In some cases of reduction patterns, the type of the reduction variable X is
   different than the type of the other arguments of STMT.
   In such cases, the vectype that is used when transforming STMT into a vector
   stmt is different than the vectype that is used to determine the
   vectorization factor, because it consists of a different number of elements
   than the actual number of elements that are being operated upon in parallel.

   For example, consider an accumulation of shorts into an int accumulator.
   On some targets it's possible to vectorize this pattern operating on 8
   shorts at a time (hence, the vectype for purposes of determining the
   vectorization factor should be V8HI); on the other hand, the vectype that
   is used to create the vector form is actually V4SI (the type of the result).

   Upon entry to this function, STMT_VINFO_VECTYPE records the vectype that
   indicates what is the actual level of parallelism (V8HI in the example), so
   that the right vectorization factor would be derived.  This vectype
   corresponds to the type of arguments to the reduction stmt, and should *NOT*
   be used to create the vectorized stmt.  The right vectype for the vectorized
   stmt is obtained from the type of the result X:
      get_vectype_for_scalar_type (TREE_TYPE (X))

   This means that, contrary to "regular" reductions (or "regular" stmts in
   general), the following equation:
      STMT_VINFO_VECTYPE == get_vectype_for_scalar_type (TREE_TYPE (X))
   does *NOT* necessarily hold for reduction patterns.  */
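/* Illustrative example (not part of the original sources): the mismatch
   described above arises for an accumulation such as

       short a[N];
       int sum = 0;
       for (i = 0; i < N; i++)
         sum += a[i];

   where the vectype used for the vectorization factor reflects the V8HI
   inputs while the vectorized statement itself produces a V4SI result.  */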
vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
                        gimple **vec_stmt, slp_tree slp_node,
                        slp_instance slp_node_instance,
                        stmt_vector_for_cost *cost_vec)
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree vectype_out = STMT_VINFO_VECTYPE (stmt_info);
  tree vectype_in = NULL_TREE;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  enum tree_code code, orig_code;
  internal_fn reduc_fn;
  machine_mode vec_mode;
  tree new_temp = NULL_TREE;
  enum vect_def_type dt, cond_reduc_dt = vect_unknown_def_type;
  gimple *cond_reduc_def_stmt = NULL;
  enum tree_code cond_reduc_op_code = ERROR_MARK;
  stmt_vec_info orig_stmt_info = NULL;
  stmt_vec_info prev_stmt_info, prev_phi_info;
  bool single_defuse_cycle = false;
  gimple *new_stmt = NULL;
  enum vect_def_type dts[3];
  bool nested_cycle = false, found_nested_cycle_def = false;
  bool double_reduc = false;
  struct loop * def_stmt_loop;
  auto_vec<tree> vec_oprnds0;
  auto_vec<tree> vec_oprnds1;
  auto_vec<tree> vec_oprnds2;
  auto_vec<tree> vect_defs;
  auto_vec<gimple *> phis;
  tree cr_index_scalar_type = NULL_TREE, cr_index_vector_type = NULL_TREE;
  tree cond_reduc_val = NULL_TREE;
  /* Make sure it was already recognized as a reduction computation.  */
  if (STMT_VINFO_DEF_TYPE (vinfo_for_stmt (stmt)) != vect_reduction_def
      && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (stmt)) != vect_nested_cycle)

  if (nested_in_vect_loop_p (loop, stmt))
      nested_cycle = true;

  if (REDUC_GROUP_FIRST_ELEMENT (stmt_info))
    gcc_assert (slp_node && REDUC_GROUP_FIRST_ELEMENT (stmt_info) == stmt);

  if (gimple_code (stmt) == GIMPLE_PHI)
      tree phi_result = gimple_phi_result (stmt);
      /* Analysis is fully done on the reduction stmt invocation.  */
        slp_node_instance->reduc_phis = slp_node;

      STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type;

      if (STMT_VINFO_REDUC_TYPE (stmt_info) == FOLD_LEFT_REDUCTION)
        /* Leave the scalar phi in place.  Note that checking
           STMT_VINFO_VEC_REDUCTION_TYPE (as below) only works
           for reductions involving a single statement.  */

      gimple *reduc_stmt = STMT_VINFO_REDUC_DEF (stmt_info);
      if (STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (reduc_stmt)))
        reduc_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (reduc_stmt));

      stmt_vec_info reduc_stmt_info = vinfo_for_stmt (reduc_stmt);
      if (STMT_VINFO_VEC_REDUCTION_TYPE (reduc_stmt_info)
          == EXTRACT_LAST_REDUCTION)
        /* Leave the scalar phi in place.  */

      gcc_assert (is_gimple_assign (reduc_stmt));
      for (unsigned k = 1; k < gimple_num_ops (reduc_stmt); ++k)
          tree op = gimple_op (reduc_stmt, k);
          if (op == gimple_phi_result (stmt))
              && gimple_assign_rhs_code (reduc_stmt) == COND_EXPR)
              || (GET_MODE_SIZE (SCALAR_TYPE_MODE (TREE_TYPE (vectype_in)))
                  < GET_MODE_SIZE (SCALAR_TYPE_MODE (TREE_TYPE (op)))))
            vectype_in = get_vectype_for_scalar_type (TREE_TYPE (op));

      gcc_assert (vectype_in);

        ncopies = vect_get_num_copies (loop_vinfo, vectype_in);

      stmt_vec_info use_stmt_info;
          && STMT_VINFO_RELEVANT (reduc_stmt_info) <= vect_used_only_live
          && (use_stmt_info = loop_vinfo->lookup_single_use (phi_result))
          && (use_stmt_info == reduc_stmt_info
              || STMT_VINFO_RELATED_STMT (use_stmt_info) == reduc_stmt))
        single_defuse_cycle = true;

      /* Create the destination vector  */
      scalar_dest = gimple_assign_lhs (reduc_stmt);
      vec_dest = vect_create_destination_var (scalar_dest, vectype_out);

          /* The size vect_schedule_slp_instance computes is off for us.  */
          vec_num = vect_get_num_vectors
                      (LOOP_VINFO_VECT_FACTOR (loop_vinfo)
                       * SLP_TREE_SCALAR_STMTS (slp_node).length (),

      /* Generate the reduction PHIs upfront.  */
      prev_phi_info = NULL;
      for (j = 0; j < ncopies; j++)
          if (j == 0 || !single_defuse_cycle)
              for (i = 0; i < vec_num; i++)
                  /* Create the reduction-phi that defines the reduction ...  */
                  gimple *new_phi = create_phi_node (vec_dest, loop->header);
                  stmt_vec_info new_phi_info = loop_vinfo->add_stmt (new_phi);
                    SLP_TREE_VEC_STMTS (slp_node).quick_push (new_phi);
                        STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_phi;
                        STMT_VINFO_RELATED_STMT (prev_phi_info) = new_phi;
                      prev_phi_info = new_phi_info;
  /* 1. Is vectorizable reduction?  */
  /* Not supportable if the reduction variable is used in the loop, unless
     it's a reduction chain.  */
  if (STMT_VINFO_RELEVANT (stmt_info) > vect_used_in_outer
      && !REDUC_GROUP_FIRST_ELEMENT (stmt_info))

  /* Reductions that are not used even in an enclosing outer-loop,
     are expected to be "live" (used out of the loop).  */
  if (STMT_VINFO_RELEVANT (stmt_info) == vect_unused_in_scope
      && !STMT_VINFO_LIVE_P (stmt_info))

  /* 2. Has this been recognized as a reduction pattern?

     Check if STMT represents a pattern that has been recognized
     in earlier analysis stages.  For stmts that represent a pattern,
     the STMT_VINFO_RELATED_STMT field records the last stmt in
     the original sequence that constitutes the pattern.  */

  orig_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (stmt));
      orig_stmt_info = vinfo_for_stmt (orig_stmt);
      gcc_assert (STMT_VINFO_IN_PATTERN_P (orig_stmt_info));
      gcc_assert (!STMT_VINFO_IN_PATTERN_P (stmt_info));

  /* 3. Check the operands of the operation.  The first operands are defined
        inside the loop body.  The last operand is the reduction variable,
        which is defined by the loop-header-phi.  */

  gcc_assert (is_gimple_assign (stmt));

  switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
    case GIMPLE_BINARY_RHS:
      code = gimple_assign_rhs_code (stmt);
      op_type = TREE_CODE_LENGTH (code);
      gcc_assert (op_type == binary_op);
      ops[0] = gimple_assign_rhs1 (stmt);
      ops[1] = gimple_assign_rhs2 (stmt);

    case GIMPLE_TERNARY_RHS:
      code = gimple_assign_rhs_code (stmt);
      op_type = TREE_CODE_LENGTH (code);
      gcc_assert (op_type == ternary_op);
      ops[0] = gimple_assign_rhs1 (stmt);
      ops[1] = gimple_assign_rhs2 (stmt);
      ops[2] = gimple_assign_rhs3 (stmt);

    case GIMPLE_UNARY_RHS:

  if (code == COND_EXPR && slp_node)

  scalar_dest = gimple_assign_lhs (stmt);
  scalar_type = TREE_TYPE (scalar_dest);
  if (!POINTER_TYPE_P (scalar_type) && !INTEGRAL_TYPE_P (scalar_type)
      && !SCALAR_FLOAT_TYPE_P (scalar_type))

  /* Do not try to vectorize bit-precision reductions.  */
  if (!type_has_mode_precision_p (scalar_type))

  /* All uses but the last are expected to be defined in the loop.
     The last use is the reduction variable.  In case of nested cycle this
     assumption is not true: we use reduc_index to record the index of the
     reduction variable.  */
  gimple *reduc_def_stmt = NULL;
  int reduc_index = -1;
  for (i = 0; i < op_type; i++)
      /* The condition of COND_EXPR is checked in vectorizable_condition().  */
      if (i == 0 && code == COND_EXPR)

      stmt_vec_info def_stmt_info;
      is_simple_use = vect_is_simple_use (ops[i], loop_vinfo, &dts[i], &tem,
      gcc_assert (is_simple_use);
      if (dt == vect_reduction_def)
          reduc_def_stmt = def_stmt_info;
      /* To properly compute ncopies we are interested in the widest
         input type in case we're looking at a widening accumulation.  */
          || (GET_MODE_SIZE (SCALAR_TYPE_MODE (TREE_TYPE (vectype_in)))
              < GET_MODE_SIZE (SCALAR_TYPE_MODE (TREE_TYPE (tem)))))

      if (dt != vect_internal_def
          && dt != vect_external_def
          && dt != vect_constant_def
          && dt != vect_induction_def
          && !(dt == vect_nested_cycle && nested_cycle))

      if (dt == vect_nested_cycle)
          found_nested_cycle_def = true;
          reduc_def_stmt = def_stmt_info;

      if (i == 1 && code == COND_EXPR)
          /* Record how value of COND_EXPR is defined.  */
          if (dt == vect_constant_def)
              cond_reduc_val = ops[i];

          if (dt == vect_induction_def
              && is_nonwrapping_integer_induction (def_stmt_info, loop))
              cond_reduc_def_stmt = def_stmt_info;

    vectype_in = vectype_out;
  /* When vectorizing a reduction chain w/o SLP the reduction PHI is not
     directly used in stmt.  */
  if (reduc_index == -1)
      if (STMT_VINFO_REDUC_TYPE (stmt_info) == FOLD_LEFT_REDUCTION)
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "in-order reduction chain without SLP.\n");

        reduc_def_stmt = STMT_VINFO_REDUC_DEF (orig_stmt_info);
        reduc_def_stmt = STMT_VINFO_REDUC_DEF (stmt_info);

  if (! reduc_def_stmt || gimple_code (reduc_def_stmt) != GIMPLE_PHI)

  if (!(reduc_index == -1
        || dts[reduc_index] == vect_reduction_def
        || dts[reduc_index] == vect_nested_cycle
        || ((dts[reduc_index] == vect_internal_def
             || dts[reduc_index] == vect_external_def
             || dts[reduc_index] == vect_constant_def
             || dts[reduc_index] == vect_induction_def)
            && nested_cycle && found_nested_cycle_def)))
      /* For pattern recognized stmts, orig_stmt might be a reduction,
         but some helper statements for the pattern might not, or
         might be COND_EXPRs with reduction uses in the condition.  */
      gcc_assert (orig_stmt);

  stmt_vec_info reduc_def_info = vinfo_for_stmt (reduc_def_stmt);
  /* PHIs should not participate in patterns.  */
  gcc_assert (!STMT_VINFO_RELATED_STMT (reduc_def_info));
  enum vect_reduction_type v_reduc_type
    = STMT_VINFO_REDUC_TYPE (reduc_def_info);
  gimple *tmp = STMT_VINFO_REDUC_DEF (reduc_def_info);

  STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) = v_reduc_type;
  /* If we have a condition reduction, see if we can simplify it further.  */
  if (v_reduc_type == COND_REDUCTION)
      /* TODO: We can't yet handle reduction chains, since we need to treat
         each COND_EXPR in the chain specially, not just the last one.

            x_1 = PHI <x_3, ...>
            x_2 = a_2 ? ... : x_1;
            x_3 = a_3 ? ... : x_2;

         we're interested in the last element in x_3 for which a_2 || a_3
         is true, whereas the current reduction chain handling would
         vectorize x_2 as a normal VEC_COND_EXPR and only treat x_3
         as a reduction operation.  */
      if (reduc_index == -1)
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "conditional reduction chains not supported\n");

      /* vect_is_simple_reduction ensured that operand 2 is the
         loop-carried operand.  */
      gcc_assert (reduc_index == 2);

      /* Loop peeling modifies initial value of reduction PHI, which
         makes the reduction stmt to be transformed different to the
         original stmt analyzed.  We need to record reduction code for
         CONST_COND_REDUCTION type reduction at analyzing stage, thus
         it can be used directly at transform stage.  */
      if (STMT_VINFO_VEC_CONST_COND_REDUC_CODE (stmt_info) == MAX_EXPR
          || STMT_VINFO_VEC_CONST_COND_REDUC_CODE (stmt_info) == MIN_EXPR)
          /* Also set the reduction type to CONST_COND_REDUCTION.  */
          gcc_assert (cond_reduc_dt == vect_constant_def);
          STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) = CONST_COND_REDUCTION;
      else if (direct_internal_fn_supported_p (IFN_FOLD_EXTRACT_LAST,
                                               vectype_in, OPTIMIZE_FOR_SPEED))
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "optimizing condition reduction with"
                             " FOLD_EXTRACT_LAST.\n");
          STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) = EXTRACT_LAST_REDUCTION;
      else if (cond_reduc_dt == vect_induction_def)
          stmt_vec_info cond_stmt_vinfo = vinfo_for_stmt (cond_reduc_def_stmt);
            = STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (cond_stmt_vinfo);
          tree step = STMT_VINFO_LOOP_PHI_EVOLUTION_PART (cond_stmt_vinfo);

          gcc_assert (TREE_CODE (base) == INTEGER_CST
                      && TREE_CODE (step) == INTEGER_CST);
          cond_reduc_val = NULL_TREE;
          /* Find a suitable value, for MAX_EXPR below base, for MIN_EXPR
             above base; punt if base is the minimum value of the type for
             MAX_EXPR or maximum value of the type for MIN_EXPR for now.  */
          if (tree_int_cst_sgn (step) == -1)
              cond_reduc_op_code = MIN_EXPR;
              if (tree_int_cst_sgn (base) == -1)
                cond_reduc_val = build_int_cst (TREE_TYPE (base), 0);
              else if (tree_int_cst_lt (base,
                                        TYPE_MAX_VALUE (TREE_TYPE (base))))
                  = int_const_binop (PLUS_EXPR, base, integer_one_node);

              cond_reduc_op_code = MAX_EXPR;
              if (tree_int_cst_sgn (base) == 1)
                cond_reduc_val = build_int_cst (TREE_TYPE (base), 0);
              else if (tree_int_cst_lt (TYPE_MIN_VALUE (TREE_TYPE (base)),
                  = int_const_binop (MINUS_EXPR, base, integer_one_node);

              if (dump_enabled_p ())
                dump_printf_loc (MSG_NOTE, vect_location,
                                 "condition expression based on "
                                 "integer induction.\n");
              STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
                = INTEGER_INDUC_COND_REDUCTION;
      else if (cond_reduc_dt == vect_constant_def)
          enum vect_def_type cond_initial_dt;
          gimple *def_stmt = SSA_NAME_DEF_STMT (ops[reduc_index]);
          tree cond_initial_val
            = PHI_ARG_DEF_FROM_EDGE (def_stmt, loop_preheader_edge (loop));

          gcc_assert (cond_reduc_val != NULL_TREE);
          vect_is_simple_use (cond_initial_val, loop_vinfo, &cond_initial_dt);
          if (cond_initial_dt == vect_constant_def
              && types_compatible_p (TREE_TYPE (cond_initial_val),
                                     TREE_TYPE (cond_reduc_val)))
              tree e = fold_binary (LE_EXPR, boolean_type_node,
                                    cond_initial_val, cond_reduc_val);
              if (e && (integer_onep (e) || integer_zerop (e)))
                  if (dump_enabled_p ())
                    dump_printf_loc (MSG_NOTE, vect_location,
                                     "condition expression based on "
                                     "compile time constant.\n");
                  /* Record reduction code at analysis stage.  */
                  STMT_VINFO_VEC_CONST_COND_REDUC_CODE (stmt_info)
                    = integer_onep (e) ? MAX_EXPR : MIN_EXPR;
                  STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
                    = CONST_COND_REDUCTION;

    gcc_assert (tmp == orig_stmt
                || (REDUC_GROUP_FIRST_ELEMENT (vinfo_for_stmt (tmp))
    /* We changed STMT to be the first stmt in reduction chain, hence we
       check that in this case the first element in the chain is STMT.  */
    gcc_assert (stmt == tmp
                || REDUC_GROUP_FIRST_ELEMENT (vinfo_for_stmt (tmp)) == stmt);

  if (STMT_VINFO_LIVE_P (vinfo_for_stmt (reduc_def_stmt)))
    ncopies = vect_get_num_copies (loop_vinfo, vectype_in);

  gcc_assert (ncopies >= 1);

  vec_mode = TYPE_MODE (vectype_in);
  poly_uint64 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);

  if (code == COND_EXPR)
      /* Only call during the analysis stage, otherwise we'll lose ...  */
      if (!vec_stmt && !vectorizable_condition (stmt, gsi, NULL,
                                                ops[reduc_index], 0, NULL,
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "unsupported condition in reduction\n");

  /* 4. Supportable by target?  */

  if (code == LSHIFT_EXPR || code == RSHIFT_EXPR
      || code == LROTATE_EXPR || code == RROTATE_EXPR)
      /* Shifts and rotates are only supported by vectorizable_shifts,
         not vectorizable_reduction.  */
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "unsupported shift or rotation.\n");

  /* 4.1. check support for the operation in the loop  */
  optab = optab_for_tree_code (code, vectype_in, optab_default);
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,

  if (optab_handler (optab, vec_mode) == CODE_FOR_nothing)
      if (dump_enabled_p ())
        dump_printf (MSG_NOTE, "op not supported by target.\n");

      if (maybe_ne (GET_MODE_SIZE (vec_mode), UNITS_PER_WORD)
          || !vect_worthwhile_without_simd_p (loop_vinfo, code))

      if (dump_enabled_p ())
        dump_printf (MSG_NOTE, "proceeding using word mode.\n");

  /* Worthwhile without SIMD support?  */
  if (!VECTOR_MODE_P (TYPE_MODE (vectype_in))
      && !vect_worthwhile_without_simd_p (loop_vinfo, code))
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not worthwhile without SIMD support.\n");

  /* 4.2. Check support for the epilog operation.

          If STMT represents a reduction pattern, then the type of the
          reduction variable may be different than the type of the rest
          of the arguments.  For example, consider the case of accumulation
          of shorts into an int accumulator; The original code:
                        S1: int_a = (int) short_a;
          orig_stmt->   S2: int_acc = plus <int_a ,int_acc>;

          STMT: int_acc = widen_sum <short_a, int_acc>

          1. The tree-code that is used to create the vector operation in the
             epilog code (that reduces the partial results) is not the
             tree-code of STMT, but is rather the tree-code of the original
             stmt from the pattern that STMT is replacing.  I.e, in the example
             above we want to use 'widen_sum' in the loop, but 'plus' in the
          2. The type (mode) we use to check available target support
             for the vector operation to be created in the *epilog*, is
             determined by the type of the reduction variable (in the example
             above we'd check this: optab_handler (plus_optab, vect_int_mode])).
             However the type (mode) we use to check available target support
             for the vector operation to be created *inside the loop*, is
             determined by the type of the other arguments to STMT (in the
             example we'd check this: optab_handler (widen_sum_optab,

          This is contrary to "regular" reductions, in which the types of all
          the arguments are the same as the type of the reduction variable.
          For "regular" reductions we can therefore use the same vector type
          (and also the same tree-code) when generating the epilog code and
          when generating the code inside the loop.  */

  vect_reduction_type reduction_type
    = STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info);
      && (reduction_type == TREE_CODE_REDUCTION
          || reduction_type == FOLD_LEFT_REDUCTION))
      /* This is a reduction pattern: get the vectype from the type of the
         reduction variable, and get the tree-code from orig_stmt.  */
      orig_code = gimple_assign_rhs_code (orig_stmt);
      gcc_assert (vectype_out);
      vec_mode = TYPE_MODE (vectype_out);

      /* Regular reduction: use the same vectype and tree-code as used for
         the vector code inside the loop can be used for the epilog code.  */

      if (code == MINUS_EXPR)
        orig_code = PLUS_EXPR;

      /* For simple condition reductions, replace with the actual expression
         we want to base our reduction around.  */
      if (reduction_type == CONST_COND_REDUCTION)
          orig_code = STMT_VINFO_VEC_CONST_COND_REDUC_CODE (stmt_info);
          gcc_assert (orig_code == MAX_EXPR || orig_code == MIN_EXPR);
      else if (reduction_type == INTEGER_INDUC_COND_REDUCTION)
        orig_code = cond_reduc_op_code;

      def_bb = gimple_bb (reduc_def_stmt);
      def_stmt_loop = def_bb->loop_father;
      def_arg = PHI_ARG_DEF_FROM_EDGE (reduc_def_stmt,
                                       loop_preheader_edge (def_stmt_loop));
      stmt_vec_info def_arg_stmt_info = loop_vinfo->lookup_def (def_arg);
      if (def_arg_stmt_info
          && (STMT_VINFO_DEF_TYPE (def_arg_stmt_info)
              == vect_double_reduction_def))
        double_reduc = true;

  reduc_fn = IFN_LAST;

  if (reduction_type == TREE_CODE_REDUCTION
      || reduction_type == FOLD_LEFT_REDUCTION
      || reduction_type == INTEGER_INDUC_COND_REDUCTION
      || reduction_type == CONST_COND_REDUCTION)
      if (reduction_type == FOLD_LEFT_REDUCTION
          ? fold_left_reduction_fn (orig_code, &reduc_fn)
          : reduction_fn_for_scalar_code (orig_code, &reduc_fn))
          if (reduc_fn != IFN_LAST
              && !direct_internal_fn_supported_p (reduc_fn, vectype_out,
                                                  OPTIMIZE_FOR_SPEED))
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "reduc op not supported by target.\n");

              reduc_fn = IFN_LAST;

          if (!nested_cycle || double_reduc)
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "no reduc code for scalar code.\n");
  else if (reduction_type == COND_REDUCTION)
      int scalar_precision
        = GET_MODE_PRECISION (SCALAR_TYPE_MODE (scalar_type));
      cr_index_scalar_type = make_unsigned_type (scalar_precision);
      cr_index_vector_type = build_vector_type (cr_index_scalar_type,

      if (direct_internal_fn_supported_p (IFN_REDUC_MAX, cr_index_vector_type,
                                          OPTIMIZE_FOR_SPEED))
        reduc_fn = IFN_REDUC_MAX;

  if (reduction_type != EXTRACT_LAST_REDUCTION
      && reduc_fn == IFN_LAST
      && !nunits_out.is_constant ())
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "missing target support for reduction on"
                         " variable-length vectors.\n");

  if ((double_reduc || reduction_type != TREE_CODE_REDUCTION)
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "multiple types in double reduction or condition "
  /* For SLP reductions, see if there is a neutral value we can use.  */
  tree neutral_op = NULL_TREE;
    neutral_op = neutral_op_for_slp_reduction
      (slp_node_instance->reduc_phis, code,
       REDUC_GROUP_FIRST_ELEMENT (stmt_info) != NULL);

  if (double_reduc && reduction_type == FOLD_LEFT_REDUCTION)
      /* We can't support in-order reductions of code such as this:

           for (int i = 0; i < n1; ++i)
             for (int j = 0; j < n2; ++j)

         since GCC effectively transforms the loop when vectorizing:

           for (int i = 0; i < n1 / VF; ++i)
             for (int j = 0; j < n2; ++j)
               for (int k = 0; k < VF; ++k)

         which is a reassociation of the original operation.  */
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "in-order double reduction not supported.\n");

  if (reduction_type == FOLD_LEFT_REDUCTION
      && !REDUC_GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
      /* We cannot use in-order reductions in this case because there is
         an implicit reassociation of the operations involved.  */
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "in-order unchained SLP reductions not supported.\n");

  /* For double reductions, and for SLP reductions with a neutral value,
     we construct a variable-length initial vector by loading a vector
     full of the neutral value and then shift-and-inserting the start
     values into the low-numbered elements.  */
  if ((double_reduc || neutral_op)
      && !nunits_out.is_constant ()
      && !direct_internal_fn_supported_p (IFN_VEC_SHL_INSERT,
                                          vectype_out, OPTIMIZE_FOR_SPEED))
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "reduction on variable-length vectors requires"
                         " target support for a vector-shift-and-insert"

  /* Check extra constraints for variable-length unchained SLP reductions.  */
  if (STMT_SLP_TYPE (stmt_info)
      && !REDUC_GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt))
      && !nunits_out.is_constant ())
      /* We checked above that we could build the initial vector when
         there's a neutral element value.  Check here for the case in
         which each SLP statement has its own initial value and in which
         that value needs to be repeated for every instance of the
         statement within the initial vector.  */
      unsigned int group_size = SLP_TREE_SCALAR_STMTS (slp_node).length ();
      scalar_mode elt_mode = SCALAR_TYPE_MODE (TREE_TYPE (vectype_out));
          && !can_duplicate_and_interleave_p (group_size, elt_mode))
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "unsupported form of SLP reduction for"
                             " variable-length vectors: cannot build"
                             " initial vector.\n");

      /* The epilogue code relies on the number of elements being a multiple
         of the group size.  The duplicate-and-interleave approach to setting
         up the initial vector does too.  */
      if (!multiple_p (nunits_out, group_size))
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "unsupported form of SLP reduction for"
                             " variable-length vectors: the vector size"
                             " is not a multiple of the number of results.\n");

  /* In case of widening multiplication by a constant, we update the type
     of the constant to be the type of the other operand.  We check that the
     constant fits the type in the pattern recognition pass.  */
  if (code == DOT_PROD_EXPR
      && !types_compatible_p (TREE_TYPE (ops[0]), TREE_TYPE (ops[1])))
      if (TREE_CODE (ops[0]) == INTEGER_CST)
        ops[0] = fold_convert (TREE_TYPE (ops[1]), ops[0]);
      else if (TREE_CODE (ops[1]) == INTEGER_CST)
        ops[1] = fold_convert (TREE_TYPE (ops[0]), ops[1]);
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "invalid types in dot-prod\n");

  if (reduction_type == COND_REDUCTION)
      if (! max_loop_iterations (loop, &ni))
          if (dump_enabled_p ())
            dump_printf_loc (MSG_NOTE, vect_location,
                             "loop count not known, cannot create cond "

      /* Convert backedges to iterations.  */

      /* The additional index will be the same type as the condition.  Check
         that the loop can fit into this less one (because we'll use up the
         zero slot for when there are no matches).  */
      tree max_index = TYPE_MAX_VALUE (cr_index_scalar_type);
      if (wi::geu_p (ni, wi::to_widest (max_index)))
          if (dump_enabled_p ())
            dump_printf_loc (MSG_NOTE, vect_location,
                             "loop size is greater than data size.\n");

  /* In case the vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  For more details see documentation
     in vectorizable_operation.  */
  /* If the reduction is used in an outer loop we need to generate
     VF intermediate results, like so (e.g. for ncopies=2):

     (i.e. we generate VF results in 2 registers).
     In this case we have a separate def-use cycle for each copy, and therefore
     for each copy we get the vector def for the reduction variable from the
     respective phi node created for this copy.

     Otherwise (the reduction is unused in the loop nest), we can combine
     together intermediate results, like so (e.g. for ncopies=2):

     (i.e. we generate VF/2 results in a single register).
     In this case for each copy we get the vector def for the reduction variable
     from the vectorized reduction operation generated in the previous iteration.

     This only works when we see both the reduction PHI and its only consumer
     in vectorizable_reduction and there are no intermediate stmts ...  */
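  /* Illustrative sketch (not part of the original sources): for ncopies=2
     the combined form chains both copies through a single PHI,

         vec_r  = PHI <vec_init, vec_r2>
         vec_r1 = vec_x0 + vec_r;
         vec_r2 = vec_x1 + vec_r1;

     whereas the separate form gives each copy its own PHI and accumulator.  */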
  stmt_vec_info use_stmt_info;
  tree reduc_phi_result = gimple_phi_result (reduc_def_stmt);
      && (STMT_VINFO_RELEVANT (stmt_info) <= vect_used_only_live)
      && (use_stmt_info = loop_vinfo->lookup_single_use (reduc_phi_result))
      && (use_stmt_info == stmt_info
          || STMT_VINFO_RELATED_STMT (use_stmt_info) == stmt))
      single_defuse_cycle = true;

    epilog_copies = ncopies;

  /* If the reduction stmt is one of the patterns that have lane
     reduction embedded we cannot handle the case of ! single_defuse_cycle.  */
       && ! single_defuse_cycle)
      && (code == DOT_PROD_EXPR
          || code == WIDEN_SUM_EXPR
          || code == SAD_EXPR))
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "multi def-use cycle not possible for lane-reducing "
                         "reduction operation\n");

    vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);

  internal_fn cond_fn = get_conditional_internal_fn (code);
  vec_loop_masks *masks = &LOOP_VINFO_MASKS (loop_vinfo);

  if (!vec_stmt) /* transformation not required.  */
      vect_model_reduction_cost (stmt_info, reduc_fn, ncopies, cost_vec);
      if (loop_vinfo && LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo))
          if (reduction_type != FOLD_LEFT_REDUCTION
              && (cond_fn == IFN_LAST
                  || !direct_internal_fn_supported_p (cond_fn, vectype_in,
                                                      OPTIMIZE_FOR_SPEED)))
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "can't use a fully-masked loop because no"
                                 " conditional operation is available.\n");
              LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
          else if (reduc_index == -1)
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "can't use a fully-masked loop for chained"
              LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
            vect_record_loop_mask (loop_vinfo, masks, ncopies * vec_num,
      if (dump_enabled_p ()
          && reduction_type == FOLD_LEFT_REDUCTION)
        dump_printf_loc (MSG_NOTE, vect_location,
                         "using an in-order (fold-left) reduction.\n");
      STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "transform reduction.\n");

  /* FORNOW: Multiple types are not supported for condition.  */
  if (code == COND_EXPR)
    gcc_assert (ncopies == 1);

  bool masked_loop_p = LOOP_VINFO_FULLY_MASKED_P (loop_vinfo);

  if (reduction_type == FOLD_LEFT_REDUCTION)
    return vectorize_fold_left_reduction
      (stmt, gsi, vec_stmt, slp_node, reduc_def_stmt, code,
       reduc_fn, ops, vectype_in, reduc_index, masks);

  if (reduction_type == EXTRACT_LAST_REDUCTION)
      gcc_assert (!slp_node);
      return vectorizable_condition (stmt, gsi, vec_stmt,
                                     NULL, reduc_index, NULL, NULL);

  /* Create the destination vector  */
  vec_dest = vect_create_destination_var (scalar_dest, vectype_out);

  prev_stmt_info = NULL;
  prev_phi_info = NULL;
      vec_oprnds0.create (1);
      vec_oprnds1.create (1);
      if (op_type == ternary_op)
        vec_oprnds2.create (1);

  phis.create (vec_num);
  vect_defs.create (vec_num);
    vect_defs.quick_push (NULL_TREE);

    phis.splice (SLP_TREE_VEC_STMTS (slp_node_instance->reduc_phis));
    phis.quick_push (STMT_VINFO_VEC_STMT (vinfo_for_stmt (reduc_def_stmt)));
  for (j = 0; j < ncopies; j++)
      if (code == COND_EXPR)
          gcc_assert (!slp_node);
          vectorizable_condition (stmt, gsi, vec_stmt,
                                  PHI_RESULT (phis[0]),
                                  reduc_index, NULL, NULL);
          /* Multiple types are not supported for condition.  */

          /* Get vec defs for all the operands except the reduction index,
             ensuring the ordering of the ops in the vector is kept.  */
          auto_vec<tree, 3> slp_ops;
          auto_vec<vec<tree>, 3> vec_defs;

          slp_ops.quick_push (ops[0]);
          slp_ops.quick_push (ops[1]);
          if (op_type == ternary_op)
            slp_ops.quick_push (ops[2]);

          vect_get_slp_defs (slp_ops, slp_node, &vec_defs);

          vec_oprnds0.safe_splice (vec_defs[0]);
          vec_defs[0].release ();
          vec_oprnds1.safe_splice (vec_defs[1]);
          vec_defs[1].release ();
          if (op_type == ternary_op)
              vec_oprnds2.safe_splice (vec_defs[2]);
              vec_defs[2].release ();

              vec_oprnds0.quick_push
                (vect_get_vec_def_for_operand (ops[0], stmt));
              vec_oprnds1.quick_push
                (vect_get_vec_def_for_operand (ops[1], stmt));
              if (op_type == ternary_op)
                vec_oprnds2.quick_push
                  (vect_get_vec_def_for_operand (ops[2], stmt));

          gcc_assert (reduc_index != -1 || ! single_defuse_cycle);

          if (single_defuse_cycle && reduc_index == 0)
            vec_oprnds0[0] = gimple_get_lhs (new_stmt);
              = vect_get_vec_def_for_stmt_copy (dts[0], vec_oprnds0[0]);
          if (single_defuse_cycle && reduc_index == 1)
            vec_oprnds1[0] = gimple_get_lhs (new_stmt);
              = vect_get_vec_def_for_stmt_copy (dts[1], vec_oprnds1[0]);
          if (op_type == ternary_op)
              if (single_defuse_cycle && reduc_index == 2)
                vec_oprnds2[0] = gimple_get_lhs (new_stmt);
                  = vect_get_vec_def_for_stmt_copy (dts[2], vec_oprnds2[0]);

      FOR_EACH_VEC_ELT (vec_oprnds0, i, def0)
          tree vop[3] = { def0, vec_oprnds1[i], NULL_TREE };
              /* Make sure that the reduction accumulator is vop[0].  */
              if (reduc_index == 1)
                  gcc_assert (commutative_tree_code (code));
                  std::swap (vop[0], vop[1]);
              tree mask = vect_get_loop_mask (gsi, masks, vec_num * ncopies,
                                              vectype_in, i * ncopies + j);
              gcall *call = gimple_build_call_internal (cond_fn, 4, mask,
              new_temp = make_ssa_name (vec_dest, call);
              gimple_call_set_lhs (call, new_temp);
              gimple_call_set_nothrow (call, true);

              if (op_type == ternary_op)
                vop[2] = vec_oprnds2[i];

              new_stmt = gimple_build_assign (vec_dest, code,
                                              vop[0], vop[1], vop[2]);
              new_temp = make_ssa_name (vec_dest, new_stmt);
              gimple_assign_set_lhs (new_stmt, new_temp);
            vect_finish_stmt_generation (stmt, new_stmt, gsi);

              SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
              vect_defs.quick_push (new_temp);
            vect_defs[0] = new_temp;

        STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
        STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
      prev_stmt_info = vinfo_for_stmt (new_stmt);

  /* Finalize the reduction-phi (set its arguments) and create the
     epilog reduction code.  */
  if ((!single_defuse_cycle || code == COND_EXPR) && !slp_node)
    vect_defs[0] = gimple_get_lhs (*vec_stmt);

  vect_create_epilog_for_reduction (vect_defs, stmt, reduc_def_stmt,
                                    epilog_copies, reduc_fn, phis,
                                    double_reduc, slp_node, slp_node_instance,
                                    cond_reduc_val, cond_reduc_op_code,
/* Function vect_min_worthwhile_factor.

   For a loop where we could vectorize the operation indicated by CODE,
   return the minimum vectorization factor that makes it worthwhile
   to use generic vectors.  */

vect_min_worthwhile_factor (enum tree_code code)

/* Return true if VINFO indicates we are doing loop vectorization and if
   it is worth decomposing CODE operations into scalar operations for
   that loop's vectorization factor.  */

vect_worthwhile_without_simd_p (vec_info *vinfo, tree_code code)
  loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
  unsigned HOST_WIDE_INT value;
          && LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant (&value)
          && value >= vect_min_worthwhile_factor (code));
/* Function vectorizable_induction

   Check if PHI performs an induction computation that can be vectorized.
   If VEC_STMT is also passed, vectorize the induction PHI: create a vectorized
   phi to replace it, put it in VEC_STMT, and add it to the same basic block.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
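/* Illustrative example (not part of the original sources): for a PHI such as

       i_1 = PHI <0, i_2>
       ...
       i_2 = i_1 + 1;

   and a 4-element vector, the transformation below creates a vector IV
   starting at {0, 1, 2, 3} that is incremented by {4, 4, 4, 4} (VF*S) on
   every iteration of the vectorized loop.  */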
vectorizable_induction (gimple *phi,
                        gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
                        gimple **vec_stmt, slp_tree slp_node,
                        stmt_vector_for_cost *cost_vec)
  stmt_vec_info stmt_info = vinfo_for_stmt (phi);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  bool nested_in_vect_loop = false;
  struct loop *iv_loop;
  edge pe = loop_preheader_edge (loop);
  tree new_vec, vec_init, vec_step, t;
  gphi *induction_phi;
  tree induc_def, vec_dest;
  tree init_expr, step_expr;
  poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  imm_use_iterator imm_iter;
  use_operand_p use_p;
  gimple_stmt_iterator si;
  basic_block bb = gimple_bb (phi);

  if (gimple_code (phi) != GIMPLE_PHI)

  if (!STMT_VINFO_RELEVANT_P (stmt_info))

  /* Make sure it was recognized as induction computation.  */
  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_induction_def)

  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);

    ncopies = vect_get_num_copies (loop_vinfo, vectype);
  gcc_assert (ncopies >= 1);

  /* FORNOW. These restrictions should be relaxed.  */
  if (nested_in_vect_loop_p (loop, phi))
      imm_use_iterator imm_iter;
      use_operand_p use_p;

          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "multiple types in nested loop.\n");

      /* FORNOW: outer loop induction with SLP not supported.  */
      if (STMT_SLP_TYPE (stmt_info))

      latch_e = loop_latch_edge (loop->inner);
      loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, loop_arg)
          gimple *use_stmt = USE_STMT (use_p);
          if (is_gimple_debug (use_stmt))

          if (!flow_bb_inside_loop_p (loop->inner, gimple_bb (use_stmt)))
              exit_phi = use_stmt;

          stmt_vec_info exit_phi_vinfo = loop_vinfo->lookup_stmt (exit_phi);
          if (!(STMT_VINFO_RELEVANT_P (exit_phi_vinfo)
                && !STMT_VINFO_LIVE_P (exit_phi_vinfo)))
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "inner-loop induction only used outside "
                                 "of the outer vectorized loop.\n");

      nested_in_vect_loop = true;
      iv_loop = loop->inner;

  gcc_assert (iv_loop == (gimple_bb (phi))->loop_father);

  if (slp_node && !nunits.is_constant ())
      /* The current SLP code creates the initial value element-by-element.  */
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "SLP induction not supported for variable-length"

  if (!vec_stmt) /* transformation not required.  */
      STMT_VINFO_TYPE (stmt_info) = induc_vec_info_type;
      DUMP_VECT_SCOPE ("vectorizable_induction");
      vect_model_induction_cost (stmt_info, ncopies, cost_vec);

  /* Compute a vector variable, initialized with the first VF values of
     the induction variable.  E.g., for an iv with IV_PHI='X' and
     evolution S, for a vector of 4 units, we want to compute:
     [X, X + S, X + 2*S, X + 3*S].  */

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "transform induction phi.\n");
  latch_e = loop_latch_edge (iv_loop);
  loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);

  step_expr = STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_info);
  gcc_assert (step_expr != NULL_TREE);

  pe = loop_preheader_edge (iv_loop);
  init_expr = PHI_ARG_DEF_FROM_EDGE (phi,
                                     loop_preheader_edge (iv_loop));

  if (!nested_in_vect_loop)
      /* Convert the initial value to the desired type.  */
      tree new_type = TREE_TYPE (vectype);
      init_expr = gimple_convert (&stmts, new_type, init_expr);

      /* If we are using the loop mask to "peel" for alignment then we need
         to adjust the start value here.  */
      tree skip_niters = LOOP_VINFO_MASK_SKIP_NITERS (loop_vinfo);
      if (skip_niters != NULL_TREE)
          if (FLOAT_TYPE_P (vectype))
            skip_niters = gimple_build (&stmts, FLOAT_EXPR, new_type,
            skip_niters = gimple_convert (&stmts, new_type, skip_niters);
          tree skip_step = gimple_build (&stmts, MULT_EXPR, new_type,
                                         skip_niters, step_expr);
          init_expr = gimple_build (&stmts, MINUS_EXPR, new_type,
                                    init_expr, skip_step);

  /* Convert the step to the desired type.  */
  step_expr = gimple_convert (&stmts, TREE_TYPE (vectype), step_expr);

      new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
      gcc_assert (!new_bb);

  /* Find the first insertion point in the BB.  */
  si = gsi_after_labels (bb);

  /* For SLP induction we have to generate several IVs as for example
     with group size 3 we need [i, i, i, i + S] [i + S, i + S, i + 2*S, i + 2*S]
     [i + 2*S, i + 3*S, i + 3*S, i + 3*S].  The step is the same uniform
     [VF*S, VF*S, VF*S, VF*S] for all.  */
      /* Enforced above.  */
      unsigned int const_nunits = nunits.to_constant ();

      /* Generate [VF*S, VF*S, ... ].  */
      if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)))
          expr = build_int_cst (integer_type_node, vf);
          expr = fold_convert (TREE_TYPE (step_expr), expr);
        expr = build_int_cst (TREE_TYPE (step_expr), vf);
      new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
      if (! CONSTANT_CLASS_P (new_name))
        new_name = vect_init_vector (phi, new_name,
                                     TREE_TYPE (step_expr), NULL);
      new_vec = build_vector_from_val (vectype, new_name);
      vec_step = vect_init_vector (phi, new_vec, vectype, NULL);

      /* Now generate the IVs.  */
      unsigned group_size = SLP_TREE_SCALAR_STMTS (slp_node).length ();
      unsigned nvects = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
      unsigned elts = const_nunits * nvects;
      unsigned nivs = least_common_multiple (group_size,
                                             const_nunits) / const_nunits;
      gcc_assert (elts % group_size == 0);
      tree elt = init_expr;
      for (ivn = 0; ivn < nivs; ++ivn)
          tree_vector_builder elts (vectype, const_nunits, 1);
          for (unsigned eltn = 0; eltn < const_nunits; ++eltn)
              if (ivn*const_nunits + eltn >= group_size
                  && (ivn * const_nunits + eltn) % group_size == 0)
                elt = gimple_build (&stmts, PLUS_EXPR, TREE_TYPE (elt),
              elts.quick_push (elt);
          vec_init = gimple_build_vector (&stmts, &elts);
              new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
              gcc_assert (!new_bb);

          /* Create the induction-phi that defines the induction-operand.  */
          vec_dest = vect_get_new_vect_var (vectype, vect_simple_var, "vec_iv_");
          induction_phi = create_phi_node (vec_dest, iv_loop->header);
          loop_vinfo->add_stmt (induction_phi);
          induc_def = PHI_RESULT (induction_phi);

          /* Create the iv update inside the loop  */
          vec_def = make_ssa_name (vec_dest);
          new_stmt = gimple_build_assign (vec_def, PLUS_EXPR, induc_def, vec_step);
          gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
          loop_vinfo->add_stmt (new_stmt);

          /* Set the arguments of the phi node:  */
          add_phi_arg (induction_phi, vec_init, pe, UNKNOWN_LOCATION);
          add_phi_arg (induction_phi, vec_def, loop_latch_edge (iv_loop),

          SLP_TREE_VEC_STMTS (slp_node).quick_push (induction_phi);

      /* Re-use IVs when we can.  */
            = least_common_multiple (group_size, const_nunits) / group_size;
          /* Generate [VF'*S, VF'*S, ... ].  */
          if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)))
              expr = build_int_cst (integer_type_node, vfp);
              expr = fold_convert (TREE_TYPE (step_expr), expr);
            expr = build_int_cst (TREE_TYPE (step_expr), vfp);
          new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
          if (! CONSTANT_CLASS_P (new_name))
            new_name = vect_init_vector (phi, new_name,
                                         TREE_TYPE (step_expr), NULL);
          new_vec = build_vector_from_val (vectype, new_name);
          vec_step = vect_init_vector (phi, new_vec, vectype, NULL);
          for (; ivn < nvects; ++ivn)
              gimple *iv = SLP_TREE_VEC_STMTS (slp_node)[ivn - nivs];
              if (gimple_code (iv) == GIMPLE_PHI)
                def = gimple_phi_result (iv);
                def = gimple_assign_lhs (iv);
              new_stmt = gimple_build_assign (make_ssa_name (vectype),
              if (gimple_code (iv) == GIMPLE_PHI)
                gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
                  gimple_stmt_iterator tgsi = gsi_for_stmt (iv);
                  gsi_insert_after (&tgsi, new_stmt, GSI_CONTINUE_LINKING);
              loop_vinfo->add_stmt (new_stmt);
              SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
7569 /* Create the vector that holds the initial_value of the induction. */
7570 if (nested_in_vect_loop
)
7572 /* iv_loop is nested in the loop to be vectorized. init_expr had already
7573 been created during vectorization of previous stmts. We obtain it
7574 from the STMT_VINFO_VEC_STMT of the defining stmt. */
7575 vec_init
= vect_get_vec_def_for_operand (init_expr
, phi
);
7576 /* If the initial value is not of proper type, convert it. */
7577 if (!useless_type_conversion_p (vectype
, TREE_TYPE (vec_init
)))
7580 = gimple_build_assign (vect_get_new_ssa_name (vectype
,
7584 build1 (VIEW_CONVERT_EXPR
, vectype
,
7586 vec_init
= gimple_assign_lhs (new_stmt
);
7587 new_bb
= gsi_insert_on_edge_immediate (loop_preheader_edge (iv_loop
),
7589 gcc_assert (!new_bb
);
7590 loop_vinfo
->add_stmt (new_stmt
);
      /* iv_loop is the loop to be vectorized.  Create:
         vec_init = [X, X+S, X+2*S, X+3*S] (S = step_expr, X = init_expr).  */
      stmts = NULL;
      new_name = gimple_convert (&stmts, TREE_TYPE (vectype), init_expr);

      unsigned HOST_WIDE_INT const_nunits;
      if (nunits.is_constant (&const_nunits))
        {
          tree_vector_builder elts (vectype, const_nunits, 1);
          elts.quick_push (new_name);
          for (i = 1; i < const_nunits; i++)
            {
              /* Create: new_name_i = new_name + step_expr.  */
              new_name = gimple_build (&stmts, PLUS_EXPR, TREE_TYPE (new_name),
                                       new_name, step_expr);
              elts.quick_push (new_name);
            }
          /* Create a vector from [new_name_0, new_name_1, ...,
             new_name_nunits-1].  */
          vec_init = gimple_build_vector (&stmts, &elts);
        }
      else if (INTEGRAL_TYPE_P (TREE_TYPE (step_expr)))
        /* Build the initial value directly from a VEC_SERIES_EXPR.  */
        vec_init = gimple_build (&stmts, VEC_SERIES_EXPR, vectype,
                                 new_name, step_expr);
      else
        {
          /* Build:
               [base, base, base, ...]
               + (vectype) [0, 1, 2, ...] * [step, step, step, ...].  */
          gcc_assert (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)));
          gcc_assert (flag_associative_math);
          tree index = build_index_vector (vectype, 0, 1);
          tree base_vec = gimple_build_vector_from_val (&stmts, vectype,
                                                        new_name);
          tree step_vec = gimple_build_vector_from_val (&stmts, vectype,
                                                        step_expr);
          vec_init = gimple_build (&stmts, FLOAT_EXPR, vectype, index);
          vec_init = gimple_build (&stmts, MULT_EXPR, vectype,
                                   vec_init, step_vec);
          vec_init = gimple_build (&stmts, PLUS_EXPR, vectype,
                                   vec_init, base_vec);
        }

      if (stmts)
        {
          new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
          gcc_assert (!new_bb);
        }
    }
  /* Create the vector that holds the step of the induction.  */
  if (nested_in_vect_loop)
    /* iv_loop is nested in the loop to be vectorized.  Generate:
       vec_step = [S, S, S, S].  */
    new_name = step_expr;
  else
    {
      /* iv_loop is the loop to be vectorized.  Generate:
         vec_step = [VF*S, VF*S, VF*S, VF*S].  */
      gimple_seq seq = NULL;
      if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)))
        {
          expr = build_int_cst (integer_type_node, vf);
          expr = gimple_build (&seq, FLOAT_EXPR, TREE_TYPE (step_expr), expr);
        }
      else
        expr = build_int_cst (TREE_TYPE (step_expr), vf);
      new_name = gimple_build (&seq, MULT_EXPR, TREE_TYPE (step_expr),
                               expr, step_expr);
      if (seq)
        {
          new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
          gcc_assert (!new_bb);
        }
    }

  t = unshare_expr (new_name);
  gcc_assert (CONSTANT_CLASS_P (new_name)
              || TREE_CODE (new_name) == SSA_NAME);
  new_vec = build_vector_from_val (vectype, t);
  vec_step = vect_init_vector (phi, new_vec, vectype, NULL);
  /* Create the following def-use cycle:
     loop prolog:
         vec_init = ...
         vec_step = ...
     loop:
         vec_iv = PHI <vec_init, vec_loop>
         ...
         STMT
         ...
         vec_loop = vec_iv + vec_step;  */
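  /* Illustration only (SSA names below are made up): for a scalar
     induction i = PHI <0, i + 1> vectorized with a four-element integer
     vectype and VF == 4, the cycle built here is roughly

       loop prolog:
         vec_init = { 0, 1, 2, 3 };
         vec_step = { 4, 4, 4, 4 };
       loop:
         vec_iv_1 = PHI <vec_init (preheader), vec_iv_2 (latch)>
         ...
         vec_iv_2 = vec_iv_1 + vec_step;  */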
  /* Create the induction-phi that defines the induction-operand.  */
  vec_dest = vect_get_new_vect_var (vectype, vect_simple_var, "vec_iv_");
  induction_phi = create_phi_node (vec_dest, iv_loop->header);
  stmt_vec_info induction_phi_info = loop_vinfo->add_stmt (induction_phi);
  induc_def = PHI_RESULT (induction_phi);

  /* Create the iv update inside the loop.  */
  vec_def = make_ssa_name (vec_dest);
  new_stmt = gimple_build_assign (vec_def, PLUS_EXPR, induc_def, vec_step);
  gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
  stmt_vec_info new_stmt_info = loop_vinfo->add_stmt (new_stmt);

  /* Set the arguments of the phi node:  */
  add_phi_arg (induction_phi, vec_init, pe, UNKNOWN_LOCATION);
  add_phi_arg (induction_phi, vec_def, loop_latch_edge (iv_loop),
               UNKNOWN_LOCATION);

  STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = induction_phi;
  /* In case the vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e. we need to "unroll" the vector stmt
     by a factor VF/nunits.  For more details see the documentation in
     vectorizable_operation.  */
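  /* Worked example (illustration only): with VF == 8 and a four-element
     vectype (nunits == 4), ncopies == VF / nunits == 2, so the block
     below emits one extra copy of the IV per iteration, offset from the
     previous copy by the step vector [4*S, 4*S, 4*S, 4*S].  */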
  if (ncopies > 1)
    {
      gimple_seq seq = NULL;
      stmt_vec_info prev_stmt_vinfo;
      /* FORNOW.  This restriction should be relaxed.  */
      gcc_assert (!nested_in_vect_loop);

      /* Create the vector that holds the step of the induction.  */
      if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)))
        {
          expr = build_int_cst (integer_type_node, nunits);
          expr = gimple_build (&seq, FLOAT_EXPR, TREE_TYPE (step_expr), expr);
        }
      else
        expr = build_int_cst (TREE_TYPE (step_expr), nunits);
      new_name = gimple_build (&seq, MULT_EXPR, TREE_TYPE (step_expr),
                               expr, step_expr);
      if (seq)
        {
          new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
          gcc_assert (!new_bb);
        }

      t = unshare_expr (new_name);
      gcc_assert (CONSTANT_CLASS_P (new_name)
                  || TREE_CODE (new_name) == SSA_NAME);
      new_vec = build_vector_from_val (vectype, t);
      vec_step = vect_init_vector (phi, new_vec, vectype, NULL);

      vec_def = induc_def;
      prev_stmt_vinfo = induction_phi_info;
      for (i = 1; i < ncopies; i++)
        {
          /* vec_i = vec_prev + vec_step  */
          new_stmt = gimple_build_assign (vec_dest, PLUS_EXPR,
                                          vec_def, vec_step);
          vec_def = make_ssa_name (vec_dest, new_stmt);
          gimple_assign_set_lhs (new_stmt, vec_def);

          gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
          new_stmt_info = loop_vinfo->add_stmt (new_stmt);
          STMT_VINFO_RELATED_STMT (prev_stmt_vinfo) = new_stmt;
          prev_stmt_vinfo = new_stmt_info;
        }
    }
  if (nested_in_vect_loop)
    {
      /* Find the loop-closed exit-phi of the induction, and record
         the final vector of induction results:  */
      exit_phi = NULL;
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, loop_arg)
        {
          gimple *use_stmt = USE_STMT (use_p);
          if (is_gimple_debug (use_stmt))
            continue;

          if (!flow_bb_inside_loop_p (iv_loop, gimple_bb (use_stmt)))
            {
              exit_phi = use_stmt;
              break;
            }
        }
      if (exit_phi)
        {
          stmt_vec_info stmt_vinfo = loop_vinfo->lookup_stmt (exit_phi);
          /* FORNOW.  Currently not supporting the case that an inner-loop
             induction is not used in the outer-loop (i.e. only outside the
             outer-loop).  */
          gcc_assert (STMT_VINFO_RELEVANT_P (stmt_vinfo)
                      && !STMT_VINFO_LIVE_P (stmt_vinfo));

          STMT_VINFO_VEC_STMT (stmt_vinfo) = new_stmt;
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_NOTE, vect_location,
                               "vector of inductions after inner-loop:");
              dump_gimple_stmt (MSG_NOTE, TDF_SLIM, new_stmt, 0);
            }
        }
    }

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
                       "transform induction: created def-use cycle: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, induction_phi, 0);
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM,
                        SSA_NAME_DEF_STMT (vec_def), 0);
    }

  return true;
}
/* Function vectorizable_live_operation.

   STMT computes a value that is used outside the loop.  Check if
   it can be supported.  */

bool
vectorizable_live_operation (gimple *stmt,
                             gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
                             slp_tree slp_node, int slp_index,
                             gimple **vec_stmt,
                             stmt_vector_for_cost *)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  imm_use_iterator imm_iter;
  tree lhs, lhs_type, bitsize, vec_bitsize;
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
  int ncopies;
  gimple *use_stmt;
  auto_vec<tree> vec_oprnds;
  int vec_entry = 0;
  poly_uint64 vec_index = 0;

  gcc_assert (STMT_VINFO_LIVE_P (stmt_info));

  if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def)
    return false;

  /* FORNOW.  CHECKME.  */
  if (nested_in_vect_loop_p (loop, stmt))
    return false;

  /* If STMT is not relevant and it is a simple assignment and its inputs are
     invariant then it can remain in place, unvectorized.  The original last
     scalar value that it computes will be used.  */
  if (!STMT_VINFO_RELEVANT_P (stmt_info))
    {
      gcc_assert (is_simple_and_all_uses_invariant (stmt, loop_vinfo));
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "statement is simple and uses invariant.  Leaving in "
                         "place.\n");
      return true;
    }

  if (slp_node)
    ncopies = 1;
  else
    ncopies = vect_get_num_copies (loop_vinfo, vectype);

  if (slp_node)
    {
      gcc_assert (slp_index >= 0);

      int num_scalar = SLP_TREE_SCALAR_STMTS (slp_node).length ();
      int num_vec = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);

      /* Get the last occurrence of the scalar index from the concatenation of
         all the slp vectors.  Calculate which slp vector it is and the index
         within.  */
      poly_uint64 pos = (num_vec * nunits) - num_scalar + slp_index;

      /* Calculate which vector contains the result, and which lane of
         that vector we need.  */
      if (!can_div_trunc_p (pos, nunits, &vec_entry, &vec_index))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "Cannot determine which vector holds the"
                             " final result.\n");
          return false;
        }
    }
  if (!vec_stmt)
    {
      /* No transformation required.  */
      if (LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo))
        {
          if (!direct_internal_fn_supported_p (IFN_EXTRACT_LAST, vectype,
                                               OPTIMIZE_FOR_SPEED))
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "can't use a fully-masked loop because "
                                 "the target doesn't support extract last "
                                 "reduction.\n");
              LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
            }
          else if (slp_node)
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "can't use a fully-masked loop because an "
                                 "SLP statement is live after the loop.\n");
              LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
            }
          else if (ncopies > 1)
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "can't use a fully-masked loop because"
                                 " ncopies is greater than 1.\n");
              LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
            }
          else
            {
              gcc_assert (ncopies == 1 && !slp_node);
              vect_record_loop_mask (loop_vinfo,
                                     &LOOP_VINFO_MASKS (loop_vinfo),
                                     1, vectype);
            }
        }
      return true;
    }

  /* If stmt has a related stmt, then use that for getting the lhs.  */
  if (is_pattern_stmt_p (stmt_info))
    stmt = STMT_VINFO_RELATED_STMT (stmt_info);

  lhs = (is_a <gphi *> (stmt)) ? gimple_phi_result (stmt)
        : gimple_get_lhs (stmt);
  lhs_type = TREE_TYPE (lhs);

  bitsize = (VECTOR_BOOLEAN_TYPE_P (vectype)
             ? bitsize_int (TYPE_PRECISION (TREE_TYPE (vectype)))
             : TYPE_SIZE (TREE_TYPE (vectype)));
  vec_bitsize = TYPE_SIZE (vectype);

  /* Get the vectorized lhs of STMT and the lane to use (counted in bits).  */
  tree vec_lhs, bitstart;
  if (slp_node)
    {
      gcc_assert (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo));

      /* Get the correct slp vectorized stmt.  */
      gimple *vec_stmt = SLP_TREE_VEC_STMTS (slp_node)[vec_entry];
      if (gphi *phi = dyn_cast <gphi *> (vec_stmt))
        vec_lhs = gimple_phi_result (phi);
      else
        vec_lhs = gimple_get_lhs (vec_stmt);

      /* Get entry to use.  */
      bitstart = bitsize_int (vec_index);
      bitstart = int_const_binop (MULT_EXPR, bitsize, bitstart);
    }
  else
    {
      enum vect_def_type dt = STMT_VINFO_DEF_TYPE (stmt_info);
      vec_lhs = vect_get_vec_def_for_operand_1 (stmt_info, dt);
      gcc_checking_assert (ncopies == 1
                           || !LOOP_VINFO_FULLY_MASKED_P (loop_vinfo));

      /* For multiple copies, get the last copy.  */
      for (int i = 1; i < ncopies; ++i)
        vec_lhs = vect_get_vec_def_for_stmt_copy (vect_unknown_def_type,
                                                  vec_lhs);

      /* Get the last lane in the vector.  */
      bitstart = int_const_binop (MINUS_EXPR, vec_bitsize, bitsize);
    }
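  /* Worked example (illustration only): for a four-element vector of
     32-bit elements, vec_bitsize is 128 and bitsize is 32, so bitstart is
     96 and the BIT_FIELD_REF built below extracts bits 96..127, i.e. the
     last lane.  */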
  gimple_seq stmts = NULL;
  tree new_tree;
  if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
    {
      /* Emit:

           SCALAR_RES = EXTRACT_LAST <VEC_LHS, MASK>

         where VEC_LHS is the vectorized live-out result and MASK is
         the loop mask for the final iteration.  */
      gcc_assert (ncopies == 1 && !slp_node);
      tree scalar_type = TREE_TYPE (STMT_VINFO_VECTYPE (stmt_info));
      tree mask = vect_get_loop_mask (gsi, &LOOP_VINFO_MASKS (loop_vinfo),
                                      1, vectype, 0);
      tree scalar_res = gimple_build (&stmts, CFN_EXTRACT_LAST,
                                      scalar_type, mask, vec_lhs);

      /* Convert the extracted vector element to the required scalar type.  */
      new_tree = gimple_convert (&stmts, lhs_type, scalar_res);
    }
  else
    {
      tree bftype = TREE_TYPE (vectype);
      if (VECTOR_BOOLEAN_TYPE_P (vectype))
        bftype = build_nonstandard_integer_type (tree_to_uhwi (bitsize), 1);
      new_tree = build3 (BIT_FIELD_REF, bftype, vec_lhs, bitsize, bitstart);
      new_tree = force_gimple_operand (fold_convert (lhs_type, new_tree),
                                       &stmts, true, NULL_TREE);
    }

  if (stmts)
    gsi_insert_seq_on_edge_immediate (single_exit (loop), stmts);

  /* Replace use of lhs with newly computed result.  If the use stmt is a
     single arg PHI, just replace all uses of PHI result.  It's necessary
     because lcssa PHI defining lhs may be before newly inserted stmt.  */
  use_operand_p use_p;
  FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, lhs)
    if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt))
        && !is_gimple_debug (use_stmt))
      {
        if (gimple_code (use_stmt) == GIMPLE_PHI
            && gimple_phi_num_args (use_stmt) == 1)
          replace_uses_by (gimple_phi_result (use_stmt), new_tree);
        else
          {
            FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
              SET_USE (use_p, new_tree);
          }
        update_stmt (use_stmt);
      }

  return true;
}
/* Kill any debug uses outside LOOP of SSA names defined in STMT.  */

static void
vect_loop_kill_debug_uses (struct loop *loop, gimple *stmt)
{
  ssa_op_iter op_iter;
  imm_use_iterator imm_iter;
  def_operand_p def_p;
  gimple *ustmt;

  FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
    {
      FOR_EACH_IMM_USE_STMT (ustmt, imm_iter, DEF_FROM_PTR (def_p))
        {
          basic_block bb;

          if (!is_gimple_debug (ustmt))
            continue;

          bb = gimple_bb (ustmt);

          if (!flow_bb_inside_loop_p (loop, bb))
            {
              if (gimple_debug_bind_p (ustmt))
                {
                  if (dump_enabled_p ())
                    dump_printf_loc (MSG_NOTE, vect_location,
                                     "killing debug use\n");

                  gimple_debug_bind_reset_value (ustmt);
                  update_stmt (ustmt);
                }
              else
                gcc_unreachable ();
            }
        }
    }
}

/* Given loop represented by LOOP_VINFO, return true if computation of
   LOOP_VINFO_NITERS (= LOOP_VINFO_NITERSM1 + 1) doesn't overflow, false
   otherwise.  */

static bool
loop_niters_no_overflow (loop_vec_info loop_vinfo)
{
  /* Constant case.  */
  if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
    {
      tree cst_niters = LOOP_VINFO_NITERS (loop_vinfo);
      tree cst_nitersm1 = LOOP_VINFO_NITERSM1 (loop_vinfo);

      gcc_assert (TREE_CODE (cst_niters) == INTEGER_CST);
      gcc_assert (TREE_CODE (cst_nitersm1) == INTEGER_CST);
      if (wi::to_widest (cst_nitersm1) < wi::to_widest (cst_niters))
        return true;
    }

  widest_int max;
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  /* Check the upper bound of loop niters.  */
  if (get_max_loop_iterations (loop, &max))
    {
      tree type = TREE_TYPE (LOOP_VINFO_NITERS (loop_vinfo));
      signop sgn = TYPE_SIGN (type);
      widest_int type_max = widest_int::from (wi::max_value (type), sgn);
      if (max < type_max)
        return true;
    }
  return false;
}

/* Return a mask type with half the number of elements as TYPE.  */

tree
vect_halve_mask_nunits (tree type)
{
  poly_uint64 nunits = exact_div (TYPE_VECTOR_SUBPARTS (type), 2);
  return build_truth_vector_type (nunits, current_vector_size);
}

/* Return a mask type with twice as many elements as TYPE.  */

tree
vect_double_mask_nunits (tree type)
{
  poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (type) * 2;
  return build_truth_vector_type (nunits, current_vector_size);
}
/* Record that a fully-masked version of LOOP_VINFO would need MASKS to
   contain a sequence of NVECTORS masks that each control a vector of type
   VECTYPE.  */

void
vect_record_loop_mask (loop_vec_info loop_vinfo, vec_loop_masks *masks,
                       unsigned int nvectors, tree vectype)
{
  gcc_assert (nvectors != 0);
  if (masks->length () < nvectors)
    masks->safe_grow_cleared (nvectors);
  rgroup_masks *rgm = &(*masks)[nvectors - 1];
  /* The number of scalars per iteration and the number of vectors are
     both compile-time constants.  */
  unsigned int nscalars_per_iter
    = exact_div (nvectors * TYPE_VECTOR_SUBPARTS (vectype),
                 LOOP_VINFO_VECT_FACTOR (loop_vinfo)).to_constant ();
  if (rgm->max_nscalars_per_iter < nscalars_per_iter)
    {
      rgm->max_nscalars_per_iter = nscalars_per_iter;
      rgm->mask_type = build_same_sized_truth_vector_type (vectype);
    }
}
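/* Worked example (illustration only): with a vectorization factor of 16,
   an rgroup of NVECTORS == 2 eight-element vectors controls
   exact_div (2 * 8, 16) == 1 scalar per iteration, so each of its masks
   needs one element per scalar iteration it controls.  */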
/* Given a complete set of masks MASKS, extract mask number INDEX
   for an rgroup that operates on NVECTORS vectors of type VECTYPE,
   where 0 <= INDEX < NVECTORS.  Insert any set-up statements before GSI.

   See the comment above vec_loop_masks for more details about the mask
   arrangement.  */

tree
vect_get_loop_mask (gimple_stmt_iterator *gsi, vec_loop_masks *masks,
                    unsigned int nvectors, tree vectype, unsigned int index)
{
  rgroup_masks *rgm = &(*masks)[nvectors - 1];
  tree mask_type = rgm->mask_type;

  /* Populate the rgroup's mask array, if this is the first time we've
     used it.  */
  if (rgm->masks.is_empty ())
    {
      rgm->masks.safe_grow_cleared (nvectors);
      for (unsigned int i = 0; i < nvectors; ++i)
        {
          tree mask = make_temp_ssa_name (mask_type, NULL, "loop_mask");
          /* Provide a dummy definition until the real one is available.  */
          SSA_NAME_DEF_STMT (mask) = gimple_build_nop ();
          rgm->masks[i] = mask;
        }
    }

  tree mask = rgm->masks[index];
  if (maybe_ne (TYPE_VECTOR_SUBPARTS (mask_type),
                TYPE_VECTOR_SUBPARTS (vectype)))
    {
      /* A loop mask for data type X can be reused for data type Y
         if X has N times more elements than Y and if Y's elements
         are N times bigger than X's.  In this case each sequence
         of N elements in the loop mask will be all-zero or all-one.
         We can then view-convert the mask so that each sequence of
         N elements is replaced by a single element.  */
      gcc_assert (multiple_p (TYPE_VECTOR_SUBPARTS (mask_type),
                              TYPE_VECTOR_SUBPARTS (vectype)));
      gimple_seq seq = NULL;
      mask_type = build_same_sized_truth_vector_type (vectype);
      mask = gimple_build (&seq, VIEW_CONVERT_EXPR, mask_type, mask);
      if (seq)
        gsi_insert_seq_before (gsi, seq, GSI_SAME_STMT);
    }
  return mask;
}
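/* Illustration only: a mask created for sixteen 8-bit data elements can
   serve an rgroup of eight 16-bit elements; each pair of mask elements is
   known to be all-zero or all-one, so the VIEW_CONVERT_EXPR above
   collapses every such pair into a single wider mask element.  */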
/* Scale profiling counters by estimation for LOOP which is vectorized
   by factor VF.  */

static void
scale_profile_for_vect_loop (struct loop *loop, unsigned vf)
{
  edge preheader = loop_preheader_edge (loop);
  /* Reduce loop iterations by the vectorization factor.  */
  gcov_type new_est_niter = niter_for_unrolled_loop (loop, vf);
  profile_count freq_h = loop->header->count, freq_e = preheader->count ();

  if (freq_h.nonzero_p ())
    {
      profile_probability p;

      /* Avoid dropping loop body profile counter to 0 because of zero count
         in loop's preheader.  */
      if (!(freq_e == profile_count::zero ()))
        freq_e = freq_e.force_nonzero ();
      p = freq_e.apply_scale (new_est_niter + 1, 1).probability_in (freq_h);
      scale_loop_frequencies (loop, p);
    }

  edge exit_e = single_exit (loop);
  exit_e->probability = profile_probability::always ()
                         .apply_scale (1, new_est_niter + 1);

  edge exit_l = single_pred_edge (loop->latch);
  profile_probability prob = exit_l->probability;
  exit_l->probability = exit_e->probability.invert ();
  if (prob.initialized_p () && exit_l->probability.initialized_p ())
    scale_bbs_frequencies (&loop->latch, 1, exit_l->probability / prob);
}
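/* Worked example (illustration only): if new_est_niter == 3, the exit
   edge is taken once per four header executions, so its probability is
   set to 1/4 and the latch edge keeps the remaining 3/4.  */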
/* Vectorize STMT if relevant, inserting any new instructions before GSI.
   When vectorizing STMT as a store, set *SEEN_STORE to its stmt_vec_info.
   *SLP_SCHEDULED is a running record of whether we have called
   vect_schedule_slp.  */

static void
vect_transform_loop_stmt (loop_vec_info loop_vinfo, gimple *stmt,
                          gimple_stmt_iterator *gsi,
                          stmt_vec_info *seen_store, bool *slp_scheduled)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (stmt);
  if (!stmt_info)
    return;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
                       "------>vectorizing statement: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
    }

  if (MAY_HAVE_DEBUG_BIND_STMTS && !STMT_VINFO_LIVE_P (stmt_info))
    vect_loop_kill_debug_uses (loop, stmt);

  if (!STMT_VINFO_RELEVANT_P (stmt_info)
      && !STMT_VINFO_LIVE_P (stmt_info))
    return;

  if (STMT_VINFO_VECTYPE (stmt_info))
    {
      poly_uint64 nunits
        = TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
      if (!STMT_SLP_TYPE (stmt_info)
          && maybe_ne (nunits, vf)
          && dump_enabled_p ())
        /* For SLP VF is set according to the unrolling factor, and not
           to the vector size, hence for SLP this print is not valid.  */
        dump_printf_loc (MSG_NOTE, vect_location, "multiple-types.\n");
    }

  /* SLP.  Schedule all the SLP instances when the first SLP stmt is
     reached.  */
  if (slp_vect_type slptype = STMT_SLP_TYPE (stmt_info))
    {
      if (!*slp_scheduled)
        {
          *slp_scheduled = true;

          DUMP_VECT_SCOPE ("scheduling SLP instances");

          vect_schedule_slp (loop_vinfo);
        }

      /* Hybrid SLP stmts must be vectorized in addition to SLP.  */
      if (slptype == pure_slp)
        return;
    }

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "transform statement.\n");

  bool grouped_store = false;
  if (vect_transform_stmt (stmt, gsi, &grouped_store, NULL, NULL))
    *seen_store = stmt_info;
}
/* Function vect_transform_loop.

   The analysis phase has determined that the loop is vectorizable.
   Vectorize the loop - create vectorized stmts to replace the scalar
   stmts in the loop, and update the loop exit condition.
   Returns the scalar epilogue loop if any.  */

struct loop *
vect_transform_loop (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  struct loop *epilogue = NULL;
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  int nbbs = loop->num_nodes;
  int i;
  tree niters_vector = NULL_TREE;
  tree step_vector = NULL_TREE;
  tree niters_vector_mult_vf = NULL_TREE;
  poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  unsigned int lowest_vf = constant_lower_bound (vf);
  bool slp_scheduled = false;
  gimple *stmt;
  bool check_profitability = false;
  unsigned int th;
  DUMP_VECT_SCOPE ("vec_transform_loop");

  loop_vinfo->shared->check_datarefs ();

  /* Use the more conservative vectorization threshold.  If the number
     of iterations is constant assume the cost check has been performed
     by our caller.  If the threshold makes all loops profitable that
     run at least the (estimated) vectorization factor number of times
     checking is pointless, too.  */
  th = LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo);
  if (th >= vect_vf_for_cost (loop_vinfo)
      && !LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "Profitability threshold is %d loop iterations.\n",
                         th);
      check_profitability = true;
    }

  /* Make sure there exists a single-predecessor exit bb.  Do this before
     versioning.  */
  edge e = single_exit (loop);
  if (! single_pred_p (e->dest))
    {
      split_loop_exit_edge (e);
      if (dump_enabled_p ())
        dump_printf (MSG_NOTE, "split exit edge\n");
    }
  /* Version the loop first, if required, so the profitability check
     comes first.  */

  if (LOOP_REQUIRES_VERSIONING (loop_vinfo))
    {
      poly_uint64 versioning_threshold
        = LOOP_VINFO_VERSIONING_THRESHOLD (loop_vinfo);
      if (check_profitability
          && ordered_p (poly_uint64 (th), versioning_threshold))
        {
          versioning_threshold = ordered_max (poly_uint64 (th),
                                              versioning_threshold);
          check_profitability = false;
        }
      vect_loop_versioning (loop_vinfo, th, check_profitability,
                            versioning_threshold);
      check_profitability = false;
    }

  /* Make sure there exists a single-predecessor exit bb also on the
     scalar loop copy.  Do this after versioning but before peeling
     so CFG structure is fine for both scalar and if-converted loop
     to make slpeel_duplicate_current_defs_from_edges face matched
     loop closed PHI nodes on the exit.  */
  if (LOOP_VINFO_SCALAR_LOOP (loop_vinfo))
    {
      e = single_exit (LOOP_VINFO_SCALAR_LOOP (loop_vinfo));
      if (! single_pred_p (e->dest))
        {
          split_loop_exit_edge (e);
          if (dump_enabled_p ())
            dump_printf (MSG_NOTE, "split exit edge of scalar loop\n");
        }
    }
  tree niters = vect_build_loop_niters (loop_vinfo);
  LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo) = niters;
  tree nitersm1 = unshare_expr (LOOP_VINFO_NITERSM1 (loop_vinfo));
  bool niters_no_overflow = loop_niters_no_overflow (loop_vinfo);
  epilogue = vect_do_peeling (loop_vinfo, niters, nitersm1, &niters_vector,
                              &step_vector, &niters_vector_mult_vf, th,
                              check_profitability, niters_no_overflow);

  if (niters_vector == NULL_TREE)
    {
      if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
          && !LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
          && known_eq (lowest_vf, vf))
        {
          niters_vector
            = build_int_cst (TREE_TYPE (LOOP_VINFO_NITERS (loop_vinfo)),
                             LOOP_VINFO_INT_NITERS (loop_vinfo) / lowest_vf);
          step_vector = build_one_cst (TREE_TYPE (niters));
        }
      else
        vect_gen_vector_loop_niters (loop_vinfo, niters, &niters_vector,
                                     &step_vector, niters_no_overflow);
    }

  /* 1) Make sure the loop header has exactly two entries
     2) Make sure we have a preheader basic block.  */

  gcc_assert (EDGE_COUNT (loop->header->preds) == 2);

  split_edge (loop_preheader_edge (loop));

  if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
      && vect_use_loop_mask_for_alignment_p (loop_vinfo))
    /* This will deal with any possible peeling.  */
    vect_prepare_for_masked_peels (loop_vinfo);

  /* FORNOW: the vectorizer supports only loops whose body consists
     of one basic block (header + empty latch).  When the vectorizer will
     support more involved loop forms, the order by which the BBs are
     traversed needs to be reconsidered.  */
  for (i = 0; i < nbbs; i++)
    {
      basic_block bb = bbs[i];
      stmt_vec_info stmt_info;

      for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
           gsi_next (&si))
        {
          gphi *phi = si.phi ();
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_NOTE, vect_location,
                               "------>vectorizing phi: ");
              dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
            }
          stmt_info = loop_vinfo->lookup_stmt (phi);
          if (!stmt_info)
            continue;

          if (MAY_HAVE_DEBUG_BIND_STMTS && !STMT_VINFO_LIVE_P (stmt_info))
            vect_loop_kill_debug_uses (loop, phi);

          if (!STMT_VINFO_RELEVANT_P (stmt_info)
              && !STMT_VINFO_LIVE_P (stmt_info))
            continue;

          if (STMT_VINFO_VECTYPE (stmt_info)
              && (maybe_ne
                  (TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info)), vf))
              && dump_enabled_p ())
            dump_printf_loc (MSG_NOTE, vect_location, "multiple-types.\n");

          if ((STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def
               || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def
               || STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle)
              && ! PURE_SLP_STMT (stmt_info))
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_NOTE, vect_location, "transform phi.\n");
              vect_transform_stmt (phi, NULL, NULL, NULL, NULL);
            }
        }

      for (gimple_stmt_iterator si = gsi_start_bb (bb);
           !gsi_end_p (si);)
        {
          stmt = gsi_stmt (si);
          /* During vectorization remove existing clobber stmts.  */
          if (gimple_clobber_p (stmt))
            {
              unlink_stmt_vdef (stmt);
              gsi_remove (&si, true);
              release_defs (stmt);
            }
          else
            {
              stmt_info = loop_vinfo->lookup_stmt (stmt);

              /* vector stmts created in the outer-loop during vectorization
                 of stmts in an inner-loop may not have a stmt_info, and do
                 not need to be vectorized.  */
              stmt_vec_info seen_store = NULL;
              if (stmt_info)
                {
                  if (STMT_VINFO_IN_PATTERN_P (stmt_info))
                    {
                      gimple *def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info);
                      for (gimple_stmt_iterator subsi = gsi_start (def_seq);
                           !gsi_end_p (subsi); gsi_next (&subsi))
                        vect_transform_loop_stmt (loop_vinfo,
                                                  gsi_stmt (subsi), &si,
                                                  &seen_store,
                                                  &slp_scheduled);
                      gimple *pat_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
                      vect_transform_loop_stmt (loop_vinfo, pat_stmt, &si,
                                                &seen_store, &slp_scheduled);
                    }
                  vect_transform_loop_stmt (loop_vinfo, stmt, &si,
                                            &seen_store, &slp_scheduled);
                }
              if (seen_store)
                {
                  if (STMT_VINFO_GROUPED_ACCESS (seen_store))
                    {
                      /* Interleaving.  If IS_STORE is TRUE, the
                         vectorization of the interleaving chain was
                         completed - free all the stores in the chain.  */
                      gsi_next (&si);
                      vect_remove_stores (DR_GROUP_FIRST_ELEMENT (seen_store));
                    }
                  else
                    {
                      /* Free the attached stmt_vec_info and remove the
                         stmt.  */
                      free_stmt_vec_info (stmt);
                      unlink_stmt_vdef (stmt);
                      gsi_remove (&si, true);
                      release_defs (stmt);
                    }
                }
              else
                gsi_next (&si);
            }
        }

      /* Stub out scalar statements that must not survive vectorization.
         Doing this here helps with grouped statements, or statements that
         are involved in patterns.  */
      for (gimple_stmt_iterator gsi = gsi_start_bb (bb);
           !gsi_end_p (gsi); gsi_next (&gsi))
        {
          gcall *call = dyn_cast <gcall *> (gsi_stmt (gsi));
          if (call && gimple_call_internal_p (call, IFN_MASK_LOAD))
            {
              tree lhs = gimple_get_lhs (call);
              if (!VECTOR_TYPE_P (TREE_TYPE (lhs)))
                {
                  tree zero = build_zero_cst (TREE_TYPE (lhs));
                  gimple *new_stmt = gimple_build_assign (lhs, zero);
                  gsi_replace (&gsi, new_stmt, true);
                }
            }
        }
    }                          /* BBs in loop.  */
  /* The vectorization factor is always > 1, so if we use an IV increment
     of 1, a zero NITERS becomes a nonzero NITERS_VECTOR.  */
  if (integer_onep (step_vector))
    niters_no_overflow = true;
  vect_set_loop_condition (loop, loop_vinfo, niters_vector, step_vector,
                           niters_vector_mult_vf, !niters_no_overflow);

  unsigned int assumed_vf = vect_vf_for_cost (loop_vinfo);
  scale_profile_for_vect_loop (loop, assumed_vf);
  /* True if the final iteration might not handle a full vector's
     worth of scalar iterations.  */
  bool final_iter_may_be_partial = LOOP_VINFO_FULLY_MASKED_P (loop_vinfo);
  /* The minimum number of iterations performed by the epilogue.  This
     is 1 when peeling for gaps because we always need a final scalar
     iteration.  */
  int min_epilogue_iters = LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) ? 1 : 0;
  /* +1 to convert latch counts to loop iteration counts,
     -min_epilogue_iters to remove iterations that cannot be performed
     by the vector code.  */
  int bias_for_lowest = 1 - min_epilogue_iters;
  int bias_for_assumed = bias_for_lowest;
  int alignment_npeels = LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo);
  if (alignment_npeels && LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
    {
      /* When the amount of peeling is known at compile time, the first
         iteration will have exactly alignment_npeels active elements.
         In the worst case it will have at least one.  */
      int min_first_active = (alignment_npeels > 0 ? alignment_npeels : 1);
      bias_for_lowest += lowest_vf - min_first_active;
      bias_for_assumed += assumed_vf - min_first_active;
    }
  /* In these calculations the "- 1" converts loop iteration counts
     back to latch counts.  */
  if (loop->any_upper_bound)
    loop->nb_iterations_upper_bound
      = (final_iter_may_be_partial
         ? wi::udiv_ceil (loop->nb_iterations_upper_bound + bias_for_lowest,
                          lowest_vf) - 1
         : wi::udiv_floor (loop->nb_iterations_upper_bound + bias_for_lowest,
                           lowest_vf) - 1);
  if (loop->any_likely_upper_bound)
    loop->nb_iterations_likely_upper_bound
      = (final_iter_may_be_partial
         ? wi::udiv_ceil (loop->nb_iterations_likely_upper_bound
                          + bias_for_lowest, lowest_vf) - 1
         : wi::udiv_floor (loop->nb_iterations_likely_upper_bound
                           + bias_for_lowest, lowest_vf) - 1);
  if (loop->any_estimate)
    loop->nb_iterations_estimate
      = (final_iter_may_be_partial
         ? wi::udiv_ceil (loop->nb_iterations_estimate + bias_for_assumed,
                          assumed_vf) - 1
         : wi::udiv_floor (loop->nb_iterations_estimate + bias_for_assumed,
                           assumed_vf) - 1);
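  /* Worked example (illustration only): with lowest_vf == 4, no peeling
     for gaps and no full masking, bias_for_lowest == 1; a scalar latch
     bound of 17 (18 iterations) becomes
     wi::udiv_floor (17 + 1, 4) - 1 == 3, i.e. the vector loop runs at
     most 4 times.  */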
  if (dump_enabled_p ())
    {
      if (!LOOP_VINFO_EPILOGUE_P (loop_vinfo))
        {
          dump_printf_loc (MSG_NOTE, vect_location,
                           "LOOP VECTORIZED\n");
          if (loop->inner)
            dump_printf_loc (MSG_NOTE, vect_location,
                             "OUTER LOOP VECTORIZED\n");
          dump_printf (MSG_NOTE, "\n");
        }
      else
        {
          dump_printf_loc (MSG_NOTE, vect_location,
                           "LOOP EPILOGUE VECTORIZED (VS=");
          dump_dec (MSG_NOTE, current_vector_size);
          dump_printf (MSG_NOTE, ")\n");
        }
    }

  /* Free SLP instances here because otherwise stmt reference counting
     won't work.  */
  slp_instance instance;
  FOR_EACH_VEC_ELT (LOOP_VINFO_SLP_INSTANCES (loop_vinfo), i, instance)
    vect_free_slp_instance (instance, true);
  LOOP_VINFO_SLP_INSTANCES (loop_vinfo).release ();
  /* Clear the safelen field since its value is invalid after vectorization,
     because the vectorized loop can have loop-carried dependencies.  */
  loop->safelen = 0;

  /* Don't vectorize an epilogue for an epilogue.  */
  if (LOOP_VINFO_EPILOGUE_P (loop_vinfo))
    epilogue = NULL;

  if (!PARAM_VALUE (PARAM_VECT_EPILOGUES_NOMASK))
    epilogue = NULL;

  if (epilogue)
    {
      auto_vector_sizes vector_sizes;
      targetm.vectorize.autovectorize_vector_sizes (&vector_sizes);
      unsigned int next_size = 0;

      if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
          && LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) >= 0
          && known_eq (vf, lowest_vf))
        {
          unsigned int eiters
            = (LOOP_VINFO_INT_NITERS (loop_vinfo)
               - LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo));
          eiters = eiters % lowest_vf;
          epilogue->nb_iterations_upper_bound = eiters - 1;

          unsigned int ratio;
          while (next_size < vector_sizes.length ()
                 && !(constant_multiple_p (current_vector_size,
                                           vector_sizes[next_size], &ratio)
                      && eiters >= lowest_vf / ratio))
            next_size += 1;
        }
      else
        while (next_size < vector_sizes.length ()
               && maybe_lt (current_vector_size, vector_sizes[next_size]))
          next_size += 1;

      if (next_size == vector_sizes.length ())
        epilogue = NULL;
    }

  if (epilogue)
    {
      epilogue->force_vectorize = loop->force_vectorize;
      epilogue->safelen = loop->safelen;
      epilogue->dont_vectorize = false;

      /* We may need to if-convert the epilogue to vectorize it.  */
      if (LOOP_VINFO_SCALAR_LOOP (loop_vinfo))
        tree_if_conversion (epilogue);
    }

  return epilogue;
}
/* The code below is trying to perform a simple optimization - revert
   if-conversion for masked stores, i.e. if the mask of a store is zero
   do not perform it and all stored value producers also if possible.
   For example,
     for (i=0; i<n; i++)
       if (c[i])
         {
           p1[i] += 1;
           p2[i] = p3[i] + 2;
         }
   this transformation will produce the following semi-hammock:

   if (!mask__ifc__42.18_165 == { 0, 0, 0, 0, 0, 0, 0, 0 })
     {
       vect__11.19_170 = MASK_LOAD (vectp_p1.20_168, 0B, mask__ifc__42.18_165);
       vect__12.22_172 = vect__11.19_170 + vect_cst__171;
       MASK_STORE (vectp_p1.23_175, 0B, mask__ifc__42.18_165, vect__12.22_172);
       vect__18.25_182 = MASK_LOAD (vectp_p3.26_180, 0B, mask__ifc__42.18_165);
       vect__19.28_184 = vect__18.25_182 + vect_cst__183;
       MASK_STORE (vectp_p2.29_187, 0B, mask__ifc__42.18_165, vect__19.28_184);
     }
*/
void
optimize_mask_stores (struct loop *loop)
{
  basic_block *bbs = get_loop_body (loop);
  unsigned nbbs = loop->num_nodes;
  unsigned i;
  basic_block bb;
  struct loop *bb_loop;
  gimple_stmt_iterator gsi;
  gimple *stmt;
  auto_vec<gimple *> worklist;

  vect_location = find_loop_location (loop);
  /* Pick up all masked stores in loop if any.  */
  for (i = 0; i < nbbs; i++)
    {
      bb = bbs[i];
      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi);
           gsi_next (&gsi))
        {
          stmt = gsi_stmt (gsi);
          if (gimple_call_internal_p (stmt, IFN_MASK_STORE))
            worklist.safe_push (stmt);
        }
    }

  free (bbs);
  if (worklist.is_empty ())
    return;
  /* Loop has masked stores.  */
  while (!worklist.is_empty ())
    {
      gimple *last, *last_store;
      edge e, efalse;
      tree mask;
      basic_block store_bb, join_bb;
      gimple_stmt_iterator gsi_to;
      tree vdef, new_vdef;
      gphi *phi;
      tree vectype;
      tree zero;

      last = worklist.pop ();
      mask = gimple_call_arg (last, 2);
      bb = gimple_bb (last);
      /* Create then_bb and if-then structure in CFG, then_bb belongs to
         the same loop as if_bb.  It could be different to LOOP when two
         level loop-nest is vectorized and mask_store belongs to the inner
         one.  */
      e = split_block (bb, last);
      bb_loop = bb->loop_father;
      gcc_assert (loop == bb_loop || flow_loop_nested_p (loop, bb_loop));

      join_bb = e->dest;
      store_bb = create_empty_bb (bb);
      add_bb_to_loop (store_bb, bb_loop);
      e->flags = EDGE_TRUE_VALUE;
      efalse = make_edge (bb, store_bb, EDGE_FALSE_VALUE);
      /* Put STORE_BB to likely part.  */
      efalse->probability = profile_probability::unlikely ();
      store_bb->count = efalse->count ();
      make_single_succ_edge (store_bb, join_bb, EDGE_FALLTHRU);
      if (dom_info_available_p (CDI_DOMINATORS))
        set_immediate_dominator (CDI_DOMINATORS, store_bb, bb);
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "Create new block %d to sink mask stores.",
                         store_bb->index);
      /* Create vector comparison with boolean result.  */
      vectype = TREE_TYPE (mask);
      zero = build_zero_cst (vectype);
      stmt = gimple_build_cond (EQ_EXPR, mask, zero, NULL_TREE, NULL_TREE);
      gsi = gsi_last_bb (bb);
      gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
      /* Create new PHI node for vdef of the last masked store:
         .MEM_2 = VDEF <.MEM_1>
         will be converted to
         .MEM.3 = VDEF <.MEM_1>
         and new PHI node will be created in join bb
         .MEM_2 = PHI <.MEM_1, .MEM_3>  */
      vdef = gimple_vdef (last);
      new_vdef = make_ssa_name (gimple_vop (cfun), last);
      gimple_set_vdef (last, new_vdef);
      phi = create_phi_node (vdef, join_bb);
      add_phi_arg (phi, new_vdef, EDGE_SUCC (store_bb, 0), UNKNOWN_LOCATION);
      /* Put all masked stores with the same mask to STORE_BB if possible.  */
      while (true)
        {
          gimple_stmt_iterator gsi_from;
          gimple *stmt1 = NULL;

          /* Move masked store to STORE_BB.  */
          last_store = last;
          gsi = gsi_for_stmt (last);
          gsi_from = gsi;
          /* Shift GSI to the previous stmt for further traversal.  */
          gsi_prev (&gsi);
          gsi_to = gsi_start_bb (store_bb);
          gsi_move_before (&gsi_from, &gsi_to);
          /* Setup GSI_TO to the non-empty block start.  */
          gsi_to = gsi_start_bb (store_bb);
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_NOTE, vect_location,
                               "Move stmt to created bb\n");
              dump_gimple_stmt (MSG_NOTE, TDF_SLIM, last, 0);
            }
          /* Move all stored value producers if possible.  */
          while (!gsi_end_p (gsi))
            {
              tree lhs;
              imm_use_iterator imm_iter;
              use_operand_p use_p;
              bool res;

              /* Skip debug statements.  */
              if (is_gimple_debug (gsi_stmt (gsi)))
                {
                  gsi_prev (&gsi);
                  continue;
                }
              stmt1 = gsi_stmt (gsi);
              /* Do not consider statements writing to memory or having
                 volatile operand.  */
              if (gimple_vdef (stmt1)
                  || gimple_has_volatile_ops (stmt1))
                break;
              gsi_from = gsi;
              gsi_prev (&gsi);
              lhs = gimple_get_lhs (stmt1);
              if (!lhs)
                break;

              /* LHS of vectorized stmt must be SSA_NAME.  */
              if (TREE_CODE (lhs) != SSA_NAME)
                break;

              if (!VECTOR_TYPE_P (TREE_TYPE (lhs)))
                {
                  /* Remove dead scalar statement.  */
                  if (has_zero_uses (lhs))
                    {
                      gsi_remove (&gsi_from, true);
                      continue;
                    }
                  break;
                }

              /* Check that LHS does not have uses outside of STORE_BB.  */
              res = true;
              FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
                {
                  gimple *use_stmt;
                  use_stmt = USE_STMT (use_p);
                  if (is_gimple_debug (use_stmt))
                    continue;
                  if (gimple_bb (use_stmt) != store_bb)
                    {
                      res = false;
                      break;
                    }
                }
              if (!res)
                break;

              if (gimple_vuse (stmt1)
                  && gimple_vuse (stmt1) != gimple_vuse (last_store))
                break;

              /* Can move STMT1 to STORE_BB.  */
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_NOTE, vect_location,
                                   "Move stmt to created bb\n");
                  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt1, 0);
                }
              gsi_move_before (&gsi_from, &gsi_to);
              /* Shift GSI_TO for further insertion.  */
              gsi_prev (&gsi_to);
            }
          /* Put other masked stores with the same mask to STORE_BB.  */
          if (worklist.is_empty ()
              || gimple_call_arg (worklist.last (), 2) != mask
              || worklist.last () != stmt1)
            break;
          last = worklist.pop ();
        }
      add_phi_arg (phi, gimple_vuse (last_store), e, UNKNOWN_LOCATION);