1 /* Loop Vectorization
2 Copyright (C) 2003-2020 Free Software Foundation, Inc.
3 Contributed by Dorit Naishlos <dorit@il.ibm.com> and
4 Ira Rosen <irar@il.ibm.com>
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "backend.h"
26 #include "target.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "gimple.h"
30 #include "cfghooks.h"
31 #include "tree-pass.h"
32 #include "ssa.h"
33 #include "optabs-tree.h"
34 #include "diagnostic-core.h"
35 #include "fold-const.h"
36 #include "stor-layout.h"
37 #include "cfganal.h"
38 #include "gimplify.h"
39 #include "gimple-iterator.h"
40 #include "gimplify-me.h"
41 #include "tree-ssa-loop-ivopts.h"
42 #include "tree-ssa-loop-manip.h"
43 #include "tree-ssa-loop-niter.h"
44 #include "tree-ssa-loop.h"
45 #include "cfgloop.h"
46 #include "tree-scalar-evolution.h"
47 #include "tree-vectorizer.h"
48 #include "gimple-fold.h"
49 #include "cgraph.h"
50 #include "tree-cfg.h"
51 #include "tree-if-conv.h"
52 #include "internal-fn.h"
53 #include "tree-vector-builder.h"
54 #include "vec-perm-indices.h"
55 #include "tree-eh.h"
56
57 /* Loop Vectorization Pass.
58
59 This pass tries to vectorize loops.
60
61 For example, the vectorizer transforms the following simple loop:
62
63 short a[N]; short b[N]; short c[N]; int i;
64
65 for (i=0; i<N; i++){
66 a[i] = b[i] + c[i];
67 }
68
69 as if it was manually vectorized by rewriting the source code into:
70
71 typedef int __attribute__((mode(V8HI))) v8hi;
72 short a[N]; short b[N]; short c[N]; int i;
73 v8hi *pa = (v8hi*)a, *pb = (v8hi*)b, *pc = (v8hi*)c;
74 v8hi va, vb, vc;
75
76 for (i=0; i<N/8; i++){
77 vb = pb[i];
78 vc = pc[i];
79 va = vb + vc;
80 pa[i] = va;
81 }
82
83 The main entry to this pass is vectorize_loops(), in which
84 the vectorizer applies a set of analyses on a given set of loops,
85 followed by the actual vectorization transformation for the loops that
86 had successfully passed the analysis phase.
87 Throughout this pass we make a distinction between two types of
88 data: scalars (which are represented by SSA_NAMES), and memory references
89 ("data-refs"). These two types of data require different handling both
90 during analysis and transformation. The types of data-refs that the
   vectorizer currently supports are ARRAY_REFS whose base is an array DECL
   (not a pointer), and INDIRECT_REFS through pointers; both array and pointer
   accesses are required to have a simple (consecutive) access pattern.
94
95 Analysis phase:
96 ===============
97 The driver for the analysis phase is vect_analyze_loop().
98 It applies a set of analyses, some of which rely on the scalar evolution
99 analyzer (scev) developed by Sebastian Pop.
100
101 During the analysis phase the vectorizer records some information
102 per stmt in a "stmt_vec_info" struct which is attached to each stmt in the
103 loop, as well as general information about the loop as a whole, which is
104 recorded in a "loop_vec_info" struct attached to each loop.
105
106 Transformation phase:
107 =====================
108 The loop transformation phase scans all the stmts in the loop, and
109 creates a vector stmt (or a sequence of stmts) for each scalar stmt S in
110 the loop that needs to be vectorized. It inserts the vector code sequence
111 just before the scalar stmt S, and records a pointer to the vector code
112 in STMT_VINFO_VEC_STMT (stmt_info) (stmt_info is the stmt_vec_info struct
113 attached to S). This pointer will be used for the vectorization of following
114 stmts which use the def of stmt S. Stmt S is removed if it writes to memory;
115 otherwise, we rely on dead code elimination for removing it.
116
117 For example, say stmt S1 was vectorized into stmt VS1:
118
119 VS1: vb = px[i];
120 S1: b = x[i]; STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
121 S2: a = b;
122
123 To vectorize stmt S2, the vectorizer first finds the stmt that defines
124 the operand 'b' (S1), and gets the relevant vector def 'vb' from the
125 vector stmt VS1 pointed to by STMT_VINFO_VEC_STMT (stmt_info (S1)). The
126 resulting sequence would be:
127
128 VS1: vb = px[i];
129 S1: b = x[i]; STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
130 VS2: va = vb;
131 S2: a = b; STMT_VINFO_VEC_STMT (stmt_info (S2)) = VS2
132
133 Operands that are not SSA_NAMEs, are data-refs that appear in
134 load/store operations (like 'x[i]' in S1), and are handled differently.
135
136 Target modeling:
137 =================
   Currently the only target-specific information that is used is the
   size of the vector (in bytes) - "TARGET_VECTORIZE_UNITS_PER_SIMD_WORD".
   Targets that can support different vector sizes will, for now, need to
   specify one value for "TARGET_VECTORIZE_UNITS_PER_SIMD_WORD".  More
   flexibility will be added in the future.
143
   Since we only vectorize operations whose vector form can be
   expressed using existing tree codes, to verify that an operation is
   supported the vectorizer checks the relevant optab at the relevant
   machine_mode (e.g., optab_handler (add_optab, V8HImode)).  If
   the value found is CODE_FOR_nothing, then there's no target support, and
   we can't vectorize the stmt.  (An illustrative sketch of this check
   follows this comment block.)
150
151 For additional information on this project see:
152 http://gcc.gnu.org/projects/tree-ssa/vectorization.html
153 */
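/* As a concrete illustration of the optab check described above (a sketch
   only; this helper is hypothetical and not part of this file), the question
   "can the target add two V8HI vectors?" boils down to:

     static bool
     example_target_supports_v8hi_add_p (void)
     {
       return optab_handler (add_optab, V8HImode) != CODE_FOR_nothing;
     }

   where CODE_FOR_nothing means the target has no instruction for the
   operation in that mode, so the stmt cannot be vectorized.  */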
154
155 static void vect_estimate_min_profitable_iters (loop_vec_info, int *, int *);
156 static stmt_vec_info vect_is_simple_reduction (loop_vec_info, stmt_vec_info,
157 bool *, bool *);
158
159 /* Subroutine of vect_determine_vf_for_stmt that handles only one
160 statement. VECTYPE_MAYBE_SET_P is true if STMT_VINFO_VECTYPE
161 may already be set for general statements (not just data refs). */
162
163 static opt_result
164 vect_determine_vf_for_stmt_1 (vec_info *vinfo, stmt_vec_info stmt_info,
165 bool vectype_maybe_set_p,
166 poly_uint64 *vf)
167 {
168 gimple *stmt = stmt_info->stmt;
169
170 if ((!STMT_VINFO_RELEVANT_P (stmt_info)
171 && !STMT_VINFO_LIVE_P (stmt_info))
172 || gimple_clobber_p (stmt))
173 {
174 if (dump_enabled_p ())
175 dump_printf_loc (MSG_NOTE, vect_location, "skip.\n");
176 return opt_result::success ();
177 }
178
179 tree stmt_vectype, nunits_vectype;
180 opt_result res = vect_get_vector_types_for_stmt (vinfo, stmt_info,
181 &stmt_vectype,
182 &nunits_vectype);
183 if (!res)
184 return res;
185
186 if (stmt_vectype)
187 {
188 if (STMT_VINFO_VECTYPE (stmt_info))
	/* The only case when a vectype has already been set is for stmts
	   that contain a data ref, or for "pattern-stmts" (stmts generated
	   by the vectorizer to represent/replace a certain idiom).  */
192 gcc_assert ((STMT_VINFO_DATA_REF (stmt_info)
193 || vectype_maybe_set_p)
194 && STMT_VINFO_VECTYPE (stmt_info) == stmt_vectype);
195 else
196 STMT_VINFO_VECTYPE (stmt_info) = stmt_vectype;
197 }
198
199 if (nunits_vectype)
200 vect_update_max_nunits (vf, nunits_vectype);
201
202 return opt_result::success ();
203 }
204
205 /* Subroutine of vect_determine_vectorization_factor. Set the vector
206 types of STMT_INFO and all attached pattern statements and update
207 the vectorization factor VF accordingly. Return true on success
208 or false if something prevented vectorization. */
209
210 static opt_result
211 vect_determine_vf_for_stmt (vec_info *vinfo,
212 stmt_vec_info stmt_info, poly_uint64 *vf)
213 {
214 if (dump_enabled_p ())
215 dump_printf_loc (MSG_NOTE, vect_location, "==> examining statement: %G",
216 stmt_info->stmt);
217 opt_result res = vect_determine_vf_for_stmt_1 (vinfo, stmt_info, false, vf);
218 if (!res)
219 return res;
220
221 if (STMT_VINFO_IN_PATTERN_P (stmt_info)
222 && STMT_VINFO_RELATED_STMT (stmt_info))
223 {
224 gimple *pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info);
225 stmt_info = STMT_VINFO_RELATED_STMT (stmt_info);
226
227 /* If a pattern statement has def stmts, analyze them too. */
228 for (gimple_stmt_iterator si = gsi_start (pattern_def_seq);
229 !gsi_end_p (si); gsi_next (&si))
230 {
231 stmt_vec_info def_stmt_info = vinfo->lookup_stmt (gsi_stmt (si));
232 if (dump_enabled_p ())
233 dump_printf_loc (MSG_NOTE, vect_location,
234 "==> examining pattern def stmt: %G",
235 def_stmt_info->stmt);
236 res = vect_determine_vf_for_stmt_1 (vinfo, def_stmt_info, true, vf);
237 if (!res)
238 return res;
239 }
240
241 if (dump_enabled_p ())
242 dump_printf_loc (MSG_NOTE, vect_location,
243 "==> examining pattern statement: %G",
244 stmt_info->stmt);
245 res = vect_determine_vf_for_stmt_1 (vinfo, stmt_info, true, vf);
246 if (!res)
247 return res;
248 }
249
250 return opt_result::success ();
251 }
252
253 /* Function vect_determine_vectorization_factor
254
255 Determine the vectorization factor (VF). VF is the number of data elements
256 that are operated upon in parallel in a single iteration of the vectorized
   loop.  For example, when vectorizing a loop that operates on 4-byte elements,
   on a target with a vector size (VS) of 16 bytes, the VF is set to 4, since 4
   elements can fit in a single vector register.
260
261 We currently support vectorization of loops in which all types operated upon
262 are of the same size. Therefore this function currently sets VF according to
263 the size of the types operated upon, and fails if there are multiple sizes
264 in the loop.
265
266 VF is also the factor by which the loop iterations are strip-mined, e.g.:
267 original loop:
268 for (i=0; i<N; i++){
269 a[i] = b[i] + c[i];
270 }
271
272 vectorized loop:
273 for (i=0; i<N; i+=VF){
274 a[i:VF] = b[i:VF] + c[i:VF];
275 }
276 */
277
278 static opt_result
279 vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
280 {
281 class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
282 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
283 unsigned nbbs = loop->num_nodes;
284 poly_uint64 vectorization_factor = 1;
285 tree scalar_type = NULL_TREE;
286 gphi *phi;
287 tree vectype;
288 stmt_vec_info stmt_info;
289 unsigned i;
290
291 DUMP_VECT_SCOPE ("vect_determine_vectorization_factor");
292
293 for (i = 0; i < nbbs; i++)
294 {
295 basic_block bb = bbs[i];
296
297 for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
298 gsi_next (&si))
299 {
300 phi = si.phi ();
301 stmt_info = loop_vinfo->lookup_stmt (phi);
302 if (dump_enabled_p ())
303 dump_printf_loc (MSG_NOTE, vect_location, "==> examining phi: %G",
304 phi);
305
306 gcc_assert (stmt_info);
307
308 if (STMT_VINFO_RELEVANT_P (stmt_info)
309 || STMT_VINFO_LIVE_P (stmt_info))
310 {
311 gcc_assert (!STMT_VINFO_VECTYPE (stmt_info));
312 scalar_type = TREE_TYPE (PHI_RESULT (phi));
313
314 if (dump_enabled_p ())
315 dump_printf_loc (MSG_NOTE, vect_location,
316 "get vectype for scalar type: %T\n",
317 scalar_type);
318
319 vectype = get_vectype_for_scalar_type (loop_vinfo, scalar_type);
320 if (!vectype)
321 return opt_result::failure_at (phi,
322 "not vectorized: unsupported "
323 "data-type %T\n",
324 scalar_type);
325 STMT_VINFO_VECTYPE (stmt_info) = vectype;
326
327 if (dump_enabled_p ())
328 dump_printf_loc (MSG_NOTE, vect_location, "vectype: %T\n",
329 vectype);
330
331 if (dump_enabled_p ())
332 {
333 dump_printf_loc (MSG_NOTE, vect_location, "nunits = ");
334 dump_dec (MSG_NOTE, TYPE_VECTOR_SUBPARTS (vectype));
335 dump_printf (MSG_NOTE, "\n");
336 }
337
338 vect_update_max_nunits (&vectorization_factor, vectype);
339 }
340 }
341
342 for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
343 gsi_next (&si))
344 {
345 if (is_gimple_debug (gsi_stmt (si)))
346 continue;
347 stmt_info = loop_vinfo->lookup_stmt (gsi_stmt (si));
348 opt_result res
349 = vect_determine_vf_for_stmt (loop_vinfo,
350 stmt_info, &vectorization_factor);
351 if (!res)
352 return res;
353 }
354 }
355
356 /* TODO: Analyze cost. Decide if worth while to vectorize. */
357 if (dump_enabled_p ())
358 {
359 dump_printf_loc (MSG_NOTE, vect_location, "vectorization factor = ");
360 dump_dec (MSG_NOTE, vectorization_factor);
361 dump_printf (MSG_NOTE, "\n");
362 }
363
364 if (known_le (vectorization_factor, 1U))
365 return opt_result::failure_at (vect_location,
366 "not vectorized: unsupported data-type\n");
367 LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;
368 return opt_result::success ();
369 }
370
371
372 /* Function vect_is_simple_iv_evolution.
373
   FORNOW: A simple evolution of an induction variable in the loop is
   considered a polynomial evolution.  */
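/* For example (an illustrative sketch): for the counted loop
   "for (i = 0; i < n; i += 4)" the scalar evolution of i is the chrec
   {0, +, 4}_1 (for loop number 1), so *INIT becomes 0, *STEP becomes 4,
   and the evolution is considered simple.  A second-order chrec such as
   {0, +, {1, +, 1}_1}_1 is rejected because its evolution part is itself
   a chrec.  */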
376
377 static bool
378 vect_is_simple_iv_evolution (unsigned loop_nb, tree access_fn, tree * init,
379 tree * step)
380 {
381 tree init_expr;
382 tree step_expr;
383 tree evolution_part = evolution_part_in_loop_num (access_fn, loop_nb);
384 basic_block bb;
385
386 /* When there is no evolution in this loop, the evolution function
387 is not "simple". */
388 if (evolution_part == NULL_TREE)
389 return false;
390
391 /* When the evolution is a polynomial of degree >= 2
392 the evolution function is not "simple". */
393 if (tree_is_chrec (evolution_part))
394 return false;
395
396 step_expr = evolution_part;
397 init_expr = unshare_expr (initial_condition_in_loop_num (access_fn, loop_nb));
398
399 if (dump_enabled_p ())
400 dump_printf_loc (MSG_NOTE, vect_location, "step: %T, init: %T\n",
401 step_expr, init_expr);
402
403 *init = init_expr;
404 *step = step_expr;
405
406 if (TREE_CODE (step_expr) != INTEGER_CST
407 && (TREE_CODE (step_expr) != SSA_NAME
408 || ((bb = gimple_bb (SSA_NAME_DEF_STMT (step_expr)))
409 && flow_bb_inside_loop_p (get_loop (cfun, loop_nb), bb))
410 || (!INTEGRAL_TYPE_P (TREE_TYPE (step_expr))
411 && (!SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr))
412 || !flag_associative_math)))
413 && (TREE_CODE (step_expr) != REAL_CST
414 || !flag_associative_math))
415 {
416 if (dump_enabled_p ())
417 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
418 "step unknown.\n");
419 return false;
420 }
421
422 return true;
423 }
424
425 /* Return true if PHI, described by STMT_INFO, is the inner PHI in
426 what we are assuming is a double reduction. For example, given
427 a structure like this:
428
429 outer1:
430 x_1 = PHI <x_4(outer2), ...>;
431 ...
432
433 inner:
434 x_2 = PHI <x_1(outer1), ...>;
435 ...
436 x_3 = ...;
437 ...
438
439 outer2:
440 x_4 = PHI <x_3(inner)>;
441 ...
442
443 outer loop analysis would treat x_1 as a double reduction phi and
444 this function would then return true for x_2. */
445
446 static bool
447 vect_inner_phi_in_double_reduction_p (loop_vec_info loop_vinfo, gphi *phi)
448 {
449 use_operand_p use_p;
450 ssa_op_iter op_iter;
451 FOR_EACH_PHI_ARG (use_p, phi, op_iter, SSA_OP_USE)
452 if (stmt_vec_info def_info = loop_vinfo->lookup_def (USE_FROM_PTR (use_p)))
453 if (STMT_VINFO_DEF_TYPE (def_info) == vect_double_reduction_def)
454 return true;
455 return false;
456 }
457
458 /* Function vect_analyze_scalar_cycles_1.
459
460 Examine the cross iteration def-use cycles of scalar variables
461 in LOOP. LOOP_VINFO represents the loop that is now being
462 considered for vectorization (can be LOOP, or an outer-loop
463 enclosing LOOP). */
464
465 static void
466 vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, class loop *loop)
467 {
468 basic_block bb = loop->header;
469 tree init, step;
470 auto_vec<stmt_vec_info, 64> worklist;
471 gphi_iterator gsi;
472 bool double_reduc, reduc_chain;
473
474 DUMP_VECT_SCOPE ("vect_analyze_scalar_cycles");
475
  /* First - identify all inductions.  Reduction detection assumes that all the
     inductions have been identified; therefore, this order must not be
     changed.  */
479 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
480 {
481 gphi *phi = gsi.phi ();
482 tree access_fn = NULL;
483 tree def = PHI_RESULT (phi);
484 stmt_vec_info stmt_vinfo = loop_vinfo->lookup_stmt (phi);
485
486 if (dump_enabled_p ())
487 dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: %G", phi);
488
      /* Skip virtual PHIs.  The data dependences that are associated with
	 virtual defs/uses (i.e., memory accesses) are analyzed elsewhere.  */
491 if (virtual_operand_p (def))
492 continue;
493
494 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_unknown_def_type;
495
496 /* Analyze the evolution function. */
497 access_fn = analyze_scalar_evolution (loop, def);
498 if (access_fn)
499 {
500 STRIP_NOPS (access_fn);
501 if (dump_enabled_p ())
502 dump_printf_loc (MSG_NOTE, vect_location,
503 "Access function of PHI: %T\n", access_fn);
504 STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (stmt_vinfo)
505 = initial_condition_in_loop_num (access_fn, loop->num);
506 STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo)
507 = evolution_part_in_loop_num (access_fn, loop->num);
508 }
509
510 if (!access_fn
511 || vect_inner_phi_in_double_reduction_p (loop_vinfo, phi)
512 || !vect_is_simple_iv_evolution (loop->num, access_fn, &init, &step)
513 || (LOOP_VINFO_LOOP (loop_vinfo) != loop
514 && TREE_CODE (step) != INTEGER_CST))
515 {
516 worklist.safe_push (stmt_vinfo);
517 continue;
518 }
519
520 gcc_assert (STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (stmt_vinfo)
521 != NULL_TREE);
522 gcc_assert (STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo) != NULL_TREE);
523
524 if (dump_enabled_p ())
525 dump_printf_loc (MSG_NOTE, vect_location, "Detected induction.\n");
526 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_induction_def;
527 }
528
529
530 /* Second - identify all reductions and nested cycles. */
531 while (worklist.length () > 0)
532 {
533 stmt_vec_info stmt_vinfo = worklist.pop ();
534 gphi *phi = as_a <gphi *> (stmt_vinfo->stmt);
535 tree def = PHI_RESULT (phi);
536
537 if (dump_enabled_p ())
538 dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: %G", phi);
539
540 gcc_assert (!virtual_operand_p (def)
541 && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_unknown_def_type);
542
543 stmt_vec_info reduc_stmt_info
544 = vect_is_simple_reduction (loop_vinfo, stmt_vinfo, &double_reduc,
545 &reduc_chain);
546 if (reduc_stmt_info)
547 {
548 STMT_VINFO_REDUC_DEF (stmt_vinfo) = reduc_stmt_info;
549 STMT_VINFO_REDUC_DEF (reduc_stmt_info) = stmt_vinfo;
550 if (double_reduc)
551 {
552 if (dump_enabled_p ())
553 dump_printf_loc (MSG_NOTE, vect_location,
554 "Detected double reduction.\n");
555
556 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_double_reduction_def;
557 STMT_VINFO_DEF_TYPE (reduc_stmt_info) = vect_double_reduction_def;
558 }
559 else
560 {
561 if (loop != LOOP_VINFO_LOOP (loop_vinfo))
562 {
563 if (dump_enabled_p ())
564 dump_printf_loc (MSG_NOTE, vect_location,
565 "Detected vectorizable nested cycle.\n");
566
567 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_nested_cycle;
568 }
569 else
570 {
571 if (dump_enabled_p ())
572 dump_printf_loc (MSG_NOTE, vect_location,
573 "Detected reduction.\n");
574
575 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_reduction_def;
576 STMT_VINFO_DEF_TYPE (reduc_stmt_info) = vect_reduction_def;
577 /* Store the reduction cycles for possible vectorization in
578 loop-aware SLP if it was not detected as reduction
579 chain. */
580 if (! reduc_chain)
581 LOOP_VINFO_REDUCTIONS (loop_vinfo).safe_push
582 (reduc_stmt_info);
583 }
584 }
585 }
586 else
587 if (dump_enabled_p ())
588 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
589 "Unknown def-use cycle pattern.\n");
590 }
591 }
592
593
594 /* Function vect_analyze_scalar_cycles.
595
596 Examine the cross iteration def-use cycles of scalar variables, by
597 analyzing the loop-header PHIs of scalar variables. Classify each
598 cycle as one of the following: invariant, induction, reduction, unknown.
   We do that for the loop represented by LOOP_VINFO, and also for its
   inner-loop, if it exists.
601 Examples for scalar cycles:
602
603 Example1: reduction:
604
605 loop1:
606 for (i=0; i<N; i++)
607 sum += a[i];
608
609 Example2: induction:
610
611 loop2:
612 for (i=0; i<N; i++)
613 a[i] = i; */
614
615 static void
616 vect_analyze_scalar_cycles (loop_vec_info loop_vinfo)
617 {
618 class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
619
620 vect_analyze_scalar_cycles_1 (loop_vinfo, loop);
621
622 /* When vectorizing an outer-loop, the inner-loop is executed sequentially.
623 Reductions in such inner-loop therefore have different properties than
624 the reductions in the nest that gets vectorized:
625 1. When vectorized, they are executed in the same order as in the original
626 scalar loop, so we can't change the order of computation when
627 vectorizing them.
628 2. FIXME: Inner-loop reductions can be used in the inner-loop, so the
629 current checks are too strict. */
630
631 if (loop->inner)
632 vect_analyze_scalar_cycles_1 (loop_vinfo, loop->inner);
633 }
634
635 /* Transfer group and reduction information from STMT_INFO to its
636 pattern stmt. */
637
638 static void
639 vect_fixup_reduc_chain (stmt_vec_info stmt_info)
640 {
641 stmt_vec_info firstp = STMT_VINFO_RELATED_STMT (stmt_info);
642 stmt_vec_info stmtp;
643 gcc_assert (!REDUC_GROUP_FIRST_ELEMENT (firstp)
644 && REDUC_GROUP_FIRST_ELEMENT (stmt_info));
645 REDUC_GROUP_SIZE (firstp) = REDUC_GROUP_SIZE (stmt_info);
646 do
647 {
648 stmtp = STMT_VINFO_RELATED_STMT (stmt_info);
649 gcc_checking_assert (STMT_VINFO_DEF_TYPE (stmtp)
650 == STMT_VINFO_DEF_TYPE (stmt_info));
651 REDUC_GROUP_FIRST_ELEMENT (stmtp) = firstp;
652 stmt_info = REDUC_GROUP_NEXT_ELEMENT (stmt_info);
653 if (stmt_info)
654 REDUC_GROUP_NEXT_ELEMENT (stmtp)
655 = STMT_VINFO_RELATED_STMT (stmt_info);
656 }
657 while (stmt_info);
658 }
659
660 /* Fixup scalar cycles that now have their stmts detected as patterns. */
661
662 static void
663 vect_fixup_scalar_cycles_with_patterns (loop_vec_info loop_vinfo)
664 {
665 stmt_vec_info first;
666 unsigned i;
667
668 FOR_EACH_VEC_ELT (LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo), i, first)
669 if (STMT_VINFO_IN_PATTERN_P (first))
670 {
671 stmt_vec_info next = REDUC_GROUP_NEXT_ELEMENT (first);
672 while (next)
673 {
674 if (! STMT_VINFO_IN_PATTERN_P (next)
675 || STMT_VINFO_REDUC_IDX (STMT_VINFO_RELATED_STMT (next)) == -1)
676 break;
677 next = REDUC_GROUP_NEXT_ELEMENT (next);
678 }
      /* If not all stmts in the chain are patterns, or if we failed
	 to update STMT_VINFO_REDUC_IDX, try to handle the chain
	 without patterns.  */
682 if (! next
683 && STMT_VINFO_REDUC_IDX (STMT_VINFO_RELATED_STMT (first)) != -1)
684 {
685 vect_fixup_reduc_chain (first);
686 LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo)[i]
687 = STMT_VINFO_RELATED_STMT (first);
688 }
689 }
690 }
691
692 /* Function vect_get_loop_niters.
693
   Determine how many iterations the loop executes and place it
695 in NUMBER_OF_ITERATIONS. Place the number of latch iterations
696 in NUMBER_OF_ITERATIONSM1. Place the condition under which the
697 niter information holds in ASSUMPTIONS.
698
699 Return the loop exit condition. */
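/* For instance (a sketch of the intended result, assuming the niter
   analysis succeeds): for a loop whose latch is executed 15 times,
   NUMBER_OF_ITERATIONSM1 is set to 15 and NUMBER_OF_ITERATIONS to 16,
   the number of times the loop header runs.  */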
700
701
702 static gcond *
703 vect_get_loop_niters (class loop *loop, tree *assumptions,
704 tree *number_of_iterations, tree *number_of_iterationsm1)
705 {
706 edge exit = single_exit (loop);
707 class tree_niter_desc niter_desc;
708 tree niter_assumptions, niter, may_be_zero;
709 gcond *cond = get_loop_exit_condition (loop);
710
711 *assumptions = boolean_true_node;
712 *number_of_iterationsm1 = chrec_dont_know;
713 *number_of_iterations = chrec_dont_know;
714 DUMP_VECT_SCOPE ("get_loop_niters");
715
716 if (!exit)
717 return cond;
718
719 may_be_zero = NULL_TREE;
720 if (!number_of_iterations_exit_assumptions (loop, exit, &niter_desc, NULL)
721 || chrec_contains_undetermined (niter_desc.niter))
722 return cond;
723
724 niter_assumptions = niter_desc.assumptions;
725 may_be_zero = niter_desc.may_be_zero;
726 niter = niter_desc.niter;
727
728 if (may_be_zero && integer_zerop (may_be_zero))
729 may_be_zero = NULL_TREE;
730
731 if (may_be_zero)
732 {
733 if (COMPARISON_CLASS_P (may_be_zero))
734 {
735 /* Try to combine may_be_zero with assumptions, this can simplify
736 computation of niter expression. */
737 if (niter_assumptions && !integer_nonzerop (niter_assumptions))
738 niter_assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
739 niter_assumptions,
740 fold_build1 (TRUTH_NOT_EXPR,
741 boolean_type_node,
742 may_be_zero));
743 else
744 niter = fold_build3 (COND_EXPR, TREE_TYPE (niter), may_be_zero,
745 build_int_cst (TREE_TYPE (niter), 0),
746 rewrite_to_non_trapping_overflow (niter));
747
748 may_be_zero = NULL_TREE;
749 }
750 else if (integer_nonzerop (may_be_zero))
751 {
752 *number_of_iterationsm1 = build_int_cst (TREE_TYPE (niter), 0);
753 *number_of_iterations = build_int_cst (TREE_TYPE (niter), 1);
754 return cond;
755 }
756 else
757 return cond;
758 }
759
760 *assumptions = niter_assumptions;
761 *number_of_iterationsm1 = niter;
762
763 /* We want the number of loop header executions which is the number
764 of latch executions plus one.
765 ??? For UINT_MAX latch executions this number overflows to zero
766 for loops like do { n++; } while (n != 0); */
767 if (niter && !chrec_contains_undetermined (niter))
768 niter = fold_build2 (PLUS_EXPR, TREE_TYPE (niter), unshare_expr (niter),
769 build_int_cst (TREE_TYPE (niter), 1));
770 *number_of_iterations = niter;
771
772 return cond;
773 }
774
775 /* Function bb_in_loop_p
776
777 Used as predicate for dfs order traversal of the loop bbs. */
778
779 static bool
780 bb_in_loop_p (const_basic_block bb, const void *data)
781 {
782 const class loop *const loop = (const class loop *)data;
783 if (flow_bb_inside_loop_p (loop, bb))
784 return true;
785 return false;
786 }
787
788
789 /* Create and initialize a new loop_vec_info struct for LOOP_IN, as well as
790 stmt_vec_info structs for all the stmts in LOOP_IN. */
791
792 _loop_vec_info::_loop_vec_info (class loop *loop_in, vec_info_shared *shared)
793 : vec_info (vec_info::loop, init_cost (loop_in), shared),
794 loop (loop_in),
795 bbs (XCNEWVEC (basic_block, loop->num_nodes)),
796 num_itersm1 (NULL_TREE),
797 num_iters (NULL_TREE),
798 num_iters_unchanged (NULL_TREE),
799 num_iters_assumptions (NULL_TREE),
800 th (0),
801 versioning_threshold (0),
802 vectorization_factor (0),
803 max_vectorization_factor (0),
804 mask_skip_niters (NULL_TREE),
805 rgroup_compare_type (NULL_TREE),
806 simd_if_cond (NULL_TREE),
807 unaligned_dr (NULL),
808 peeling_for_alignment (0),
809 ptr_mask (0),
810 ivexpr_map (NULL),
811 scan_map (NULL),
812 slp_unrolling_factor (1),
813 single_scalar_iteration_cost (0),
814 vec_outside_cost (0),
815 vec_inside_cost (0),
816 vectorizable (false),
817 can_use_partial_vectors_p (true),
818 using_partial_vectors_p (false),
819 epil_using_partial_vectors_p (false),
820 peeling_for_gaps (false),
821 peeling_for_niter (false),
822 no_data_dependencies (false),
823 has_mask_store (false),
824 scalar_loop_scaling (profile_probability::uninitialized ()),
825 scalar_loop (NULL),
826 orig_loop_info (NULL)
827 {
  /* CHECKME: We want to visit all BBs before their successors (except for
     latch blocks, for which this assertion wouldn't hold).  In the simple
     case of the loop forms we allow, a dfs order of the BBs would be the same
     as a reversed postorder traversal, so we are safe.  */
832
833 unsigned int nbbs = dfs_enumerate_from (loop->header, 0, bb_in_loop_p,
834 bbs, loop->num_nodes, loop);
835 gcc_assert (nbbs == loop->num_nodes);
836
837 for (unsigned int i = 0; i < nbbs; i++)
838 {
839 basic_block bb = bbs[i];
840 gimple_stmt_iterator si;
841
842 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
843 {
844 gimple *phi = gsi_stmt (si);
845 gimple_set_uid (phi, 0);
846 add_stmt (phi);
847 }
848
849 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
850 {
851 gimple *stmt = gsi_stmt (si);
852 gimple_set_uid (stmt, 0);
853 if (is_gimple_debug (stmt))
854 continue;
855 add_stmt (stmt);
	  /* If the .GOMP_SIMD_LANE call for the current loop has 3 arguments,
	     the third argument is the #pragma omp simd if (x) condition: when
	     it is 0, the loop shouldn't be vectorized; when it is a non-zero
	     constant, it should be vectorized normally; otherwise the loop is
	     versioned, with the vectorized loop used if the condition is
	     non-zero at runtime.  */
861 if (loop_in->simduid
862 && is_gimple_call (stmt)
863 && gimple_call_internal_p (stmt)
864 && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE
865 && gimple_call_num_args (stmt) >= 3
866 && TREE_CODE (gimple_call_arg (stmt, 0)) == SSA_NAME
867 && (loop_in->simduid
868 == SSA_NAME_VAR (gimple_call_arg (stmt, 0))))
869 {
870 tree arg = gimple_call_arg (stmt, 2);
871 if (integer_zerop (arg) || TREE_CODE (arg) == SSA_NAME)
872 simd_if_cond = arg;
873 else
874 gcc_assert (integer_nonzerop (arg));
875 }
876 }
877 }
878
879 epilogue_vinfos.create (6);
880 }
881
882 /* Free all levels of rgroup CONTROLS. */
883
884 void
885 release_vec_loop_controls (vec<rgroup_controls> *controls)
886 {
887 rgroup_controls *rgc;
888 unsigned int i;
889 FOR_EACH_VEC_ELT (*controls, i, rgc)
890 rgc->controls.release ();
891 controls->release ();
892 }
893
894 /* Free all memory used by the _loop_vec_info, as well as all the
895 stmt_vec_info structs of all the stmts in the loop. */
896
897 _loop_vec_info::~_loop_vec_info ()
898 {
899 free (bbs);
900
901 release_vec_loop_controls (&masks);
902 release_vec_loop_controls (&lens);
903 delete ivexpr_map;
904 delete scan_map;
905 epilogue_vinfos.release ();
906
907 loop->aux = NULL;
908 }
909
910 /* Return an invariant or register for EXPR and emit necessary
911 computations in the LOOP_VINFO loop preheader. */
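/* For example (hypothetical SSA names): asking for the expression
   (n_7 + 7) & ~7 the first time gimplifies it on the preheader edge and
   returns the resulting SSA name; asking for the same tree again returns
   the cached name instead of emitting the computation twice.  */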
912
913 tree
914 cse_and_gimplify_to_preheader (loop_vec_info loop_vinfo, tree expr)
915 {
916 if (is_gimple_reg (expr)
917 || is_gimple_min_invariant (expr))
918 return expr;
919
920 if (! loop_vinfo->ivexpr_map)
921 loop_vinfo->ivexpr_map = new hash_map<tree_operand_hash, tree>;
922 tree &cached = loop_vinfo->ivexpr_map->get_or_insert (expr);
923 if (! cached)
924 {
925 gimple_seq stmts = NULL;
926 cached = force_gimple_operand (unshare_expr (expr),
927 &stmts, true, NULL_TREE);
928 if (stmts)
929 {
930 edge e = loop_preheader_edge (LOOP_VINFO_LOOP (loop_vinfo));
931 gsi_insert_seq_on_edge_immediate (e, stmts);
932 }
933 }
934 return cached;
935 }
936
937 /* Return true if we can use CMP_TYPE as the comparison type to produce
938 all masks required to mask LOOP_VINFO. */
939
940 static bool
941 can_produce_all_loop_masks_p (loop_vec_info loop_vinfo, tree cmp_type)
942 {
943 rgroup_controls *rgm;
944 unsigned int i;
945 FOR_EACH_VEC_ELT (LOOP_VINFO_MASKS (loop_vinfo), i, rgm)
946 if (rgm->type != NULL_TREE
947 && !direct_internal_fn_supported_p (IFN_WHILE_ULT,
948 cmp_type, rgm->type,
949 OPTIMIZE_FOR_SPEED))
950 return false;
951 return true;
952 }
953
954 /* Calculate the maximum number of scalars per iteration for every
955 rgroup in LOOP_VINFO. */
956
957 static unsigned int
958 vect_get_max_nscalars_per_iter (loop_vec_info loop_vinfo)
959 {
960 unsigned int res = 1;
961 unsigned int i;
962 rgroup_controls *rgm;
963 FOR_EACH_VEC_ELT (LOOP_VINFO_MASKS (loop_vinfo), i, rgm)
964 res = MAX (res, rgm->max_nscalars_per_iter);
965 return res;
966 }
967
968 /* Calculate the minimum precision necessary to represent:
969
970 MAX_NITERS * FACTOR
971
972 as an unsigned integer, where MAX_NITERS is the maximum number of
973 loop header iterations for the original scalar form of LOOP_VINFO. */
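/* A small worked example (illustrative numbers only): with MAX_NITERS
   bounded by 1000 and FACTOR 4 the product is 4000, and
   wi::min_precision (4000, UNSIGNED) is 12, since 2^11 <= 4000 < 2^12.  */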
974
975 static unsigned
976 vect_min_prec_for_max_niters (loop_vec_info loop_vinfo, unsigned int factor)
977 {
978 class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
979
980 /* Get the maximum number of iterations that is representable
981 in the counter type. */
982 tree ni_type = TREE_TYPE (LOOP_VINFO_NITERSM1 (loop_vinfo));
983 widest_int max_ni = wi::to_widest (TYPE_MAX_VALUE (ni_type)) + 1;
984
985 /* Get a more refined estimate for the number of iterations. */
986 widest_int max_back_edges;
987 if (max_loop_iterations (loop, &max_back_edges))
988 max_ni = wi::smin (max_ni, max_back_edges + 1);
989
990 /* Work out how many bits we need to represent the limit. */
991 return wi::min_precision (max_ni * factor, UNSIGNED);
992 }
993
994 /* Each statement in LOOP_VINFO can be masked where necessary. Check
995 whether we can actually generate the masks required. Return true if so,
996 storing the type of the scalar IV in LOOP_VINFO_RGROUP_COMPARE_TYPE. */
997
998 static bool
999 vect_verify_full_masking (loop_vec_info loop_vinfo)
1000 {
1001 unsigned int min_ni_width;
1002 unsigned int max_nscalars_per_iter
1003 = vect_get_max_nscalars_per_iter (loop_vinfo);
1004
1005 /* Use a normal loop if there are no statements that need masking.
1006 This only happens in rare degenerate cases: it means that the loop
1007 has no loads, no stores, and no live-out values. */
1008 if (LOOP_VINFO_MASKS (loop_vinfo).is_empty ())
1009 return false;
1010
1011 /* Work out how many bits we need to represent the limit. */
1012 min_ni_width
1013 = vect_min_prec_for_max_niters (loop_vinfo, max_nscalars_per_iter);
1014
1015 /* Find a scalar mode for which WHILE_ULT is supported. */
1016 opt_scalar_int_mode cmp_mode_iter;
1017 tree cmp_type = NULL_TREE;
1018 tree iv_type = NULL_TREE;
1019 widest_int iv_limit = vect_iv_limit_for_partial_vectors (loop_vinfo);
1020 unsigned int iv_precision = UINT_MAX;
1021
1022 if (iv_limit != -1)
1023 iv_precision = wi::min_precision (iv_limit * max_nscalars_per_iter,
1024 UNSIGNED);
1025
1026 FOR_EACH_MODE_IN_CLASS (cmp_mode_iter, MODE_INT)
1027 {
1028 unsigned int cmp_bits = GET_MODE_BITSIZE (cmp_mode_iter.require ());
1029 if (cmp_bits >= min_ni_width
1030 && targetm.scalar_mode_supported_p (cmp_mode_iter.require ()))
1031 {
1032 tree this_type = build_nonstandard_integer_type (cmp_bits, true);
1033 if (this_type
1034 && can_produce_all_loop_masks_p (loop_vinfo, this_type))
1035 {
1036 /* Although we could stop as soon as we find a valid mode,
1037 there are at least two reasons why that's not always the
1038 best choice:
1039
1040 - An IV that's Pmode or wider is more likely to be reusable
1041 in address calculations than an IV that's narrower than
1042 Pmode.
1043
1044 - Doing the comparison in IV_PRECISION or wider allows
1045 a natural 0-based IV, whereas using a narrower comparison
1046 type requires mitigations against wrap-around.
1047
1048 Conversely, if the IV limit is variable, doing the comparison
1049 in a wider type than the original type can introduce
1050 unnecessary extensions, so picking the widest valid mode
1051 is not always a good choice either.
1052
1053 Here we prefer the first IV type that's Pmode or wider,
1054 and the first comparison type that's IV_PRECISION or wider.
1055 (The comparison type must be no wider than the IV type,
1056 to avoid extensions in the vector loop.)
1057
1058 ??? We might want to try continuing beyond Pmode for ILP32
1059 targets if CMP_BITS < IV_PRECISION. */
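	     A worked example (a sketch; exact modes depend on the target):
	     with MIN_NI_WIDTH == 16 and IV_PRECISION == 32 on an LP64 target
	     where every candidate type can produce the required masks, this
	     walk sets IV_TYPE to the 16-bit, then the 32-bit, then the 64-bit
	     type; CMP_TYPE is bumped while its precision is still below
	     IV_PRECISION, so it settles on the 32-bit type, and the walk
	     stops once Pmode is reached.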
1060 iv_type = this_type;
1061 if (!cmp_type || iv_precision > TYPE_PRECISION (cmp_type))
1062 cmp_type = this_type;
1063 if (cmp_bits >= GET_MODE_BITSIZE (Pmode))
1064 break;
1065 }
1066 }
1067 }
1068
1069 if (!cmp_type)
1070 return false;
1071
1072 LOOP_VINFO_RGROUP_COMPARE_TYPE (loop_vinfo) = cmp_type;
1073 LOOP_VINFO_RGROUP_IV_TYPE (loop_vinfo) = iv_type;
1074 return true;
1075 }
1076
/* Check whether we can use vector accesses with length based on precision
   comparison.  So far, to keep it simple, we only allow the case where the
   precision of the target-supported length is larger than the precision
   required by the loop niters.  */
1081
1082 static bool
1083 vect_verify_loop_lens (loop_vec_info loop_vinfo)
1084 {
1085 if (LOOP_VINFO_LENS (loop_vinfo).is_empty ())
1086 return false;
1087
1088 unsigned int max_nitems_per_iter = 1;
1089 unsigned int i;
1090 rgroup_controls *rgl;
1091 /* Find the maximum number of items per iteration for every rgroup. */
1092 FOR_EACH_VEC_ELT (LOOP_VINFO_LENS (loop_vinfo), i, rgl)
1093 {
1094 unsigned nitems_per_iter = rgl->max_nscalars_per_iter * rgl->factor;
1095 max_nitems_per_iter = MAX (max_nitems_per_iter, nitems_per_iter);
1096 }
1097
1098 /* Work out how many bits we need to represent the length limit. */
1099 unsigned int min_ni_prec
1100 = vect_min_prec_for_max_niters (loop_vinfo, max_nitems_per_iter);
1101
  /* Now use the maximum of the precisions below for one suitable IV type:
     - the IV's natural precision
     - the precision needed to hold: the maximum number of scalar
       iterations multiplied by the scale factor (min_ni_prec above)
     - the Pmode precision

     If min_ni_prec is less than the precision of the current niters,
     we prefer to still use the niters type.  Prefer to use Pmode and
     a wider IV to avoid narrow conversions.  */
1111
1112 unsigned int ni_prec
1113 = TYPE_PRECISION (TREE_TYPE (LOOP_VINFO_NITERS (loop_vinfo)));
1114 min_ni_prec = MAX (min_ni_prec, ni_prec);
1115 min_ni_prec = MAX (min_ni_prec, GET_MODE_BITSIZE (Pmode));
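  /* For example (illustrative numbers): on a 64-bit target with 32-bit
     niters and a computed MIN_NI_PREC of 20, the two MAX operations above
     raise MIN_NI_PREC to 64, so the search below picks the first standard
     integral mode of at least 64 bits that the target supports.  */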
1116
1117 tree iv_type = NULL_TREE;
1118 opt_scalar_int_mode tmode_iter;
1119 FOR_EACH_MODE_IN_CLASS (tmode_iter, MODE_INT)
1120 {
1121 scalar_mode tmode = tmode_iter.require ();
1122 unsigned int tbits = GET_MODE_BITSIZE (tmode);
1123
1124 /* ??? Do we really want to construct one IV whose precision exceeds
1125 BITS_PER_WORD? */
1126 if (tbits > BITS_PER_WORD)
1127 break;
1128
1129 /* Find the first available standard integral type. */
1130 if (tbits >= min_ni_prec && targetm.scalar_mode_supported_p (tmode))
1131 {
1132 iv_type = build_nonstandard_integer_type (tbits, true);
1133 break;
1134 }
1135 }
1136
1137 if (!iv_type)
1138 {
1139 if (dump_enabled_p ())
1140 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1141 "can't vectorize with length-based partial vectors"
1142 " because there is no suitable iv type.\n");
1143 return false;
1144 }
1145
1146 LOOP_VINFO_RGROUP_COMPARE_TYPE (loop_vinfo) = iv_type;
1147 LOOP_VINFO_RGROUP_IV_TYPE (loop_vinfo) = iv_type;
1148
1149 return true;
1150 }
1151
1152 /* Calculate the cost of one scalar iteration of the loop. */
1153 static void
1154 vect_compute_single_scalar_iteration_cost (loop_vec_info loop_vinfo)
1155 {
1156 class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1157 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
1158 int nbbs = loop->num_nodes, factor;
1159 int innerloop_iters, i;
1160
1161 DUMP_VECT_SCOPE ("vect_compute_single_scalar_iteration_cost");
1162
1163 /* Gather costs for statements in the scalar loop. */
1164
1165 /* FORNOW. */
1166 innerloop_iters = 1;
1167 if (loop->inner)
1168 innerloop_iters = 50; /* FIXME */
1169
1170 for (i = 0; i < nbbs; i++)
1171 {
1172 gimple_stmt_iterator si;
1173 basic_block bb = bbs[i];
1174
1175 if (bb->loop_father == loop->inner)
1176 factor = innerloop_iters;
1177 else
1178 factor = 1;
1179
1180 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
1181 {
1182 gimple *stmt = gsi_stmt (si);
1183 stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (stmt);
1184
1185 if (!is_gimple_assign (stmt) && !is_gimple_call (stmt))
1186 continue;
1187
1188 /* Skip stmts that are not vectorized inside the loop. */
1189 stmt_vec_info vstmt_info = vect_stmt_to_vectorize (stmt_info);
1190 if (!STMT_VINFO_RELEVANT_P (vstmt_info)
1191 && (!STMT_VINFO_LIVE_P (vstmt_info)
1192 || !VECTORIZABLE_CYCLE_DEF
1193 (STMT_VINFO_DEF_TYPE (vstmt_info))))
1194 continue;
1195
1196 vect_cost_for_stmt kind;
1197 if (STMT_VINFO_DATA_REF (stmt_info))
1198 {
1199 if (DR_IS_READ (STMT_VINFO_DATA_REF (stmt_info)))
1200 kind = scalar_load;
1201 else
1202 kind = scalar_store;
1203 }
1204 else if (vect_nop_conversion_p (stmt_info))
1205 continue;
1206 else
1207 kind = scalar_stmt;
1208
1209 record_stmt_cost (&LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo),
1210 factor, kind, stmt_info, 0, vect_prologue);
1211 }
1212 }
1213
1214 /* Now accumulate cost. */
1215 void *target_cost_data = init_cost (loop);
1216 stmt_info_for_cost *si;
1217 int j;
1218 FOR_EACH_VEC_ELT (LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo),
1219 j, si)
1220 (void) add_stmt_cost (loop_vinfo, target_cost_data, si->count,
1221 si->kind, si->stmt_info, si->vectype,
1222 si->misalign, vect_body);
1223 unsigned dummy, body_cost = 0;
1224 finish_cost (target_cost_data, &dummy, &body_cost, &dummy);
1225 destroy_cost_data (target_cost_data);
1226 LOOP_VINFO_SINGLE_SCALAR_ITERATION_COST (loop_vinfo) = body_cost;
1227 }
1228
1229
1230 /* Function vect_analyze_loop_form_1.
1231
1232 Verify that certain CFG restrictions hold, including:
1233 - the loop has a pre-header
1234 - the loop has a single entry and exit
1235 - the loop exit condition is simple enough
   - the number of iterations can be analyzed, i.e., a countable loop.  The
     niter could be analyzed under some assumptions.  */
1238
1239 opt_result
1240 vect_analyze_loop_form_1 (class loop *loop, gcond **loop_cond,
1241 tree *assumptions, tree *number_of_iterationsm1,
1242 tree *number_of_iterations, gcond **inner_loop_cond)
1243 {
1244 DUMP_VECT_SCOPE ("vect_analyze_loop_form");
1245
1246 /* Different restrictions apply when we are considering an inner-most loop,
1247 vs. an outer (nested) loop.
1248 (FORNOW. May want to relax some of these restrictions in the future). */
1249
1250 if (!loop->inner)
1251 {
1252 /* Inner-most loop. We currently require that the number of BBs is
1253 exactly 2 (the header and latch). Vectorizable inner-most loops
1254 look like this:
1255
1256 (pre-header)
1257 |
1258 header <--------+
1259 | | |
1260 | +--> latch --+
1261 |
1262 (exit-bb) */
1263
1264 if (loop->num_nodes != 2)
1265 return opt_result::failure_at (vect_location,
1266 "not vectorized:"
1267 " control flow in loop.\n");
1268
1269 if (empty_block_p (loop->header))
1270 return opt_result::failure_at (vect_location,
1271 "not vectorized: empty loop.\n");
1272 }
1273 else
1274 {
1275 class loop *innerloop = loop->inner;
1276 edge entryedge;
1277
1278 /* Nested loop. We currently require that the loop is doubly-nested,
1279 contains a single inner loop, and the number of BBs is exactly 5.
1280 Vectorizable outer-loops look like this:
1281
1282 (pre-header)
1283 |
1284 header <---+
1285 | |
1286 inner-loop |
1287 | |
1288 tail ------+
1289 |
1290 (exit-bb)
1291
1292 The inner-loop has the properties expected of inner-most loops
1293 as described above. */
1294
1295 if ((loop->inner)->inner || (loop->inner)->next)
1296 return opt_result::failure_at (vect_location,
1297 "not vectorized:"
1298 " multiple nested loops.\n");
1299
1300 if (loop->num_nodes != 5)
1301 return opt_result::failure_at (vect_location,
1302 "not vectorized:"
1303 " control flow in loop.\n");
1304
1305 entryedge = loop_preheader_edge (innerloop);
1306 if (entryedge->src != loop->header
1307 || !single_exit (innerloop)
1308 || single_exit (innerloop)->dest != EDGE_PRED (loop->latch, 0)->src)
1309 return opt_result::failure_at (vect_location,
1310 "not vectorized:"
1311 " unsupported outerloop form.\n");
1312
1313 /* Analyze the inner-loop. */
1314 tree inner_niterm1, inner_niter, inner_assumptions;
1315 opt_result res
1316 = vect_analyze_loop_form_1 (loop->inner, inner_loop_cond,
1317 &inner_assumptions, &inner_niterm1,
1318 &inner_niter, NULL);
1319 if (!res)
1320 {
1321 if (dump_enabled_p ())
1322 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1323 "not vectorized: Bad inner loop.\n");
1324 return res;
1325 }
1326
1327 /* Don't support analyzing niter under assumptions for inner
1328 loop. */
1329 if (!integer_onep (inner_assumptions))
1330 return opt_result::failure_at (vect_location,
1331 "not vectorized: Bad inner loop.\n");
1332
1333 if (!expr_invariant_in_loop_p (loop, inner_niter))
1334 return opt_result::failure_at (vect_location,
1335 "not vectorized: inner-loop count not"
1336 " invariant.\n");
1337
1338 if (dump_enabled_p ())
1339 dump_printf_loc (MSG_NOTE, vect_location,
1340 "Considering outer-loop vectorization.\n");
1341 }
1342
1343 if (!single_exit (loop))
1344 return opt_result::failure_at (vect_location,
1345 "not vectorized: multiple exits.\n");
1346 if (EDGE_COUNT (loop->header->preds) != 2)
1347 return opt_result::failure_at (vect_location,
1348 "not vectorized:"
1349 " too many incoming edges.\n");
1350
  /* We assume that the loop exit condition is at the end of the loop, i.e.,
     that the loop is represented as a do-while (with a proper if-guard
     before the loop if needed), where the loop header contains all the
     executable statements, and the latch is empty.  */
1355 if (!empty_block_p (loop->latch)
1356 || !gimple_seq_empty_p (phi_nodes (loop->latch)))
1357 return opt_result::failure_at (vect_location,
1358 "not vectorized: latch block not empty.\n");
1359
1360 /* Make sure the exit is not abnormal. */
1361 edge e = single_exit (loop);
1362 if (e->flags & EDGE_ABNORMAL)
1363 return opt_result::failure_at (vect_location,
1364 "not vectorized:"
1365 " abnormal loop exit edge.\n");
1366
1367 *loop_cond = vect_get_loop_niters (loop, assumptions, number_of_iterations,
1368 number_of_iterationsm1);
1369 if (!*loop_cond)
1370 return opt_result::failure_at
1371 (vect_location,
1372 "not vectorized: complicated exit condition.\n");
1373
1374 if (integer_zerop (*assumptions)
1375 || !*number_of_iterations
1376 || chrec_contains_undetermined (*number_of_iterations))
1377 return opt_result::failure_at
1378 (*loop_cond,
1379 "not vectorized: number of iterations cannot be computed.\n");
1380
1381 if (integer_zerop (*number_of_iterations))
1382 return opt_result::failure_at
1383 (*loop_cond,
1384 "not vectorized: number of iterations = 0.\n");
1385
1386 return opt_result::success ();
1387 }
1388
1389 /* Analyze LOOP form and return a loop_vec_info if it is of suitable form. */
1390
1391 opt_loop_vec_info
1392 vect_analyze_loop_form (class loop *loop, vec_info_shared *shared)
1393 {
1394 tree assumptions, number_of_iterations, number_of_iterationsm1;
1395 gcond *loop_cond, *inner_loop_cond = NULL;
1396
1397 opt_result res
1398 = vect_analyze_loop_form_1 (loop, &loop_cond,
1399 &assumptions, &number_of_iterationsm1,
1400 &number_of_iterations, &inner_loop_cond);
1401 if (!res)
1402 return opt_loop_vec_info::propagate_failure (res);
1403
1404 loop_vec_info loop_vinfo = new _loop_vec_info (loop, shared);
1405 LOOP_VINFO_NITERSM1 (loop_vinfo) = number_of_iterationsm1;
1406 LOOP_VINFO_NITERS (loop_vinfo) = number_of_iterations;
1407 LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo) = number_of_iterations;
1408 if (!integer_onep (assumptions))
1409 {
      /* We consider vectorizing this loop by versioning it under
	 some assumptions.  In order to do this, we need to clear
	 existing information computed by the scev and niter analyzers.  */
1413 scev_reset_htab ();
1414 free_numbers_of_iterations_estimates (loop);
1415 /* Also set flag for this loop so that following scev and niter
1416 analysis are done under the assumptions. */
1417 loop_constraint_set (loop, LOOP_C_FINITE);
1418 /* Also record the assumptions for versioning. */
1419 LOOP_VINFO_NITERS_ASSUMPTIONS (loop_vinfo) = assumptions;
1420 }
1421
1422 if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
1423 {
1424 if (dump_enabled_p ())
1425 {
1426 dump_printf_loc (MSG_NOTE, vect_location,
1427 "Symbolic number of iterations is ");
1428 dump_generic_expr (MSG_NOTE, TDF_DETAILS, number_of_iterations);
1429 dump_printf (MSG_NOTE, "\n");
1430 }
1431 }
1432
1433 stmt_vec_info loop_cond_info = loop_vinfo->lookup_stmt (loop_cond);
1434 STMT_VINFO_TYPE (loop_cond_info) = loop_exit_ctrl_vec_info_type;
1435 if (inner_loop_cond)
1436 {
1437 stmt_vec_info inner_loop_cond_info
1438 = loop_vinfo->lookup_stmt (inner_loop_cond);
1439 STMT_VINFO_TYPE (inner_loop_cond_info) = loop_exit_ctrl_vec_info_type;
1440 }
1441
1442 gcc_assert (!loop->aux);
1443 loop->aux = loop_vinfo;
1444 return opt_loop_vec_info::success (loop_vinfo);
1445 }
1446
1447
1448
/* Scan the loop stmts and, depending on whether there are any (non-)SLP
   statements, update the vectorization factor.  */
1451
1452 static void
1453 vect_update_vf_for_slp (loop_vec_info loop_vinfo)
1454 {
1455 class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1456 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
1457 int nbbs = loop->num_nodes;
1458 poly_uint64 vectorization_factor;
1459 int i;
1460
1461 DUMP_VECT_SCOPE ("vect_update_vf_for_slp");
1462
1463 vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1464 gcc_assert (known_ne (vectorization_factor, 0U));
1465
  /* If all the stmts in the loop can be SLPed, we perform only SLP, and the
     vectorization factor of the loop is the unrolling factor required by
     the SLP instances.  If that unrolling factor is 1, we say that we
     perform pure SLP on the loop; cross-iteration parallelism is not
     exploited.  */
1471 bool only_slp_in_loop = true;
1472 for (i = 0; i < nbbs; i++)
1473 {
1474 basic_block bb = bbs[i];
1475 for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
1476 gsi_next (&si))
1477 {
1478 stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (si.phi ());
1479 if (!stmt_info)
1480 continue;
1481 if ((STMT_VINFO_RELEVANT_P (stmt_info)
1482 || VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_info)))
1483 && !PURE_SLP_STMT (stmt_info))
1484 /* STMT needs both SLP and loop-based vectorization. */
1485 only_slp_in_loop = false;
1486 }
1487 for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
1488 gsi_next (&si))
1489 {
1490 if (is_gimple_debug (gsi_stmt (si)))
1491 continue;
1492 stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (gsi_stmt (si));
1493 stmt_info = vect_stmt_to_vectorize (stmt_info);
1494 if ((STMT_VINFO_RELEVANT_P (stmt_info)
1495 || VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_info)))
1496 && !PURE_SLP_STMT (stmt_info))
1497 /* STMT needs both SLP and loop-based vectorization. */
1498 only_slp_in_loop = false;
1499 }
1500 }
1501
1502 if (only_slp_in_loop)
1503 {
1504 if (dump_enabled_p ())
1505 dump_printf_loc (MSG_NOTE, vect_location,
1506 "Loop contains only SLP stmts\n");
1507 vectorization_factor = LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo);
1508 }
1509 else
1510 {
1511 if (dump_enabled_p ())
1512 dump_printf_loc (MSG_NOTE, vect_location,
1513 "Loop contains SLP and non-SLP stmts\n");
1514 /* Both the vectorization factor and unroll factor have the form
1515 GET_MODE_SIZE (loop_vinfo->vector_mode) * X for some rational X,
1516 so they must have a common multiple. */
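      /* For instance (illustrative constant values): a loop-based VF of 4
	 combined with an SLP unrolling factor of 6 yields a common multiple
	 of 12.  */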
1517 vectorization_factor
1518 = force_common_multiple (vectorization_factor,
1519 LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo));
1520 }
1521
1522 LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;
1523 if (dump_enabled_p ())
1524 {
1525 dump_printf_loc (MSG_NOTE, vect_location,
1526 "Updating vectorization factor to ");
1527 dump_dec (MSG_NOTE, vectorization_factor);
1528 dump_printf (MSG_NOTE, ".\n");
1529 }
1530 }
1531
1532 /* Return true if STMT_INFO describes a double reduction phi and if
1533 the other phi in the reduction is also relevant for vectorization.
1534 This rejects cases such as:
1535
1536 outer1:
1537 x_1 = PHI <x_3(outer2), ...>;
1538 ...
1539
1540 inner:
1541 x_2 = ...;
1542 ...
1543
1544 outer2:
1545 x_3 = PHI <x_2(inner)>;
1546
1547 if nothing in x_2 or elsewhere makes x_1 relevant. */
1548
1549 static bool
1550 vect_active_double_reduction_p (stmt_vec_info stmt_info)
1551 {
1552 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_double_reduction_def)
1553 return false;
1554
1555 return STMT_VINFO_RELEVANT_P (STMT_VINFO_REDUC_DEF (stmt_info));
1556 }
1557
1558 /* Function vect_analyze_loop_operations.
1559
1560 Scan the loop stmts and make sure they are all vectorizable. */
1561
1562 static opt_result
1563 vect_analyze_loop_operations (loop_vec_info loop_vinfo)
1564 {
1565 class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1566 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
1567 int nbbs = loop->num_nodes;
1568 int i;
1569 stmt_vec_info stmt_info;
1570 bool need_to_vectorize = false;
1571 bool ok;
1572
1573 DUMP_VECT_SCOPE ("vect_analyze_loop_operations");
1574
1575 auto_vec<stmt_info_for_cost> cost_vec;
1576
1577 for (i = 0; i < nbbs; i++)
1578 {
1579 basic_block bb = bbs[i];
1580
1581 for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
1582 gsi_next (&si))
1583 {
1584 gphi *phi = si.phi ();
1585 ok = true;
1586
1587 stmt_info = loop_vinfo->lookup_stmt (phi);
1588 if (dump_enabled_p ())
1589 dump_printf_loc (MSG_NOTE, vect_location, "examining phi: %G", phi);
1590 if (virtual_operand_p (gimple_phi_result (phi)))
1591 continue;
1592
1593 /* Inner-loop loop-closed exit phi in outer-loop vectorization
1594 (i.e., a phi in the tail of the outer-loop). */
1595 if (! is_loop_header_bb_p (bb))
1596 {
	      /* FORNOW: we currently don't support the case that these phis
		 are not used in the outer loop (unless it is a double
		 reduction, i.e., this phi is vect_reduction_def), because
		 this case requires us to actually do something here.  */
1601 if (STMT_VINFO_LIVE_P (stmt_info)
1602 && !vect_active_double_reduction_p (stmt_info))
1603 return opt_result::failure_at (phi,
1604 "Unsupported loop-closed phi"
1605 " in outer-loop.\n");
1606
1607 /* If PHI is used in the outer loop, we check that its operand
1608 is defined in the inner loop. */
1609 if (STMT_VINFO_RELEVANT_P (stmt_info))
1610 {
1611 tree phi_op;
1612
1613 if (gimple_phi_num_args (phi) != 1)
1614 return opt_result::failure_at (phi, "unsupported phi");
1615
1616 phi_op = PHI_ARG_DEF (phi, 0);
1617 stmt_vec_info op_def_info = loop_vinfo->lookup_def (phi_op);
1618 if (!op_def_info)
1619 return opt_result::failure_at (phi, "unsupported phi\n");
1620
1621 if (STMT_VINFO_RELEVANT (op_def_info) != vect_used_in_outer
1622 && (STMT_VINFO_RELEVANT (op_def_info)
1623 != vect_used_in_outer_by_reduction))
1624 return opt_result::failure_at (phi, "unsupported phi\n");
1625
1626 if ((STMT_VINFO_DEF_TYPE (stmt_info) == vect_internal_def
1627 || (STMT_VINFO_DEF_TYPE (stmt_info)
1628 == vect_double_reduction_def))
1629 && !vectorizable_lc_phi (loop_vinfo,
1630 stmt_info, NULL, NULL))
1631 return opt_result::failure_at (phi, "unsupported phi\n");
1632 }
1633
1634 continue;
1635 }
1636
1637 gcc_assert (stmt_info);
1638
1639 if ((STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_scope
1640 || STMT_VINFO_LIVE_P (stmt_info))
1641 && STMT_VINFO_DEF_TYPE (stmt_info) != vect_induction_def)
1642 /* A scalar-dependence cycle that we don't support. */
1643 return opt_result::failure_at (phi,
1644 "not vectorized:"
1645 " scalar dependence cycle.\n");
1646
1647 if (STMT_VINFO_RELEVANT_P (stmt_info))
1648 {
1649 need_to_vectorize = true;
1650 if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def
1651 && ! PURE_SLP_STMT (stmt_info))
1652 ok = vectorizable_induction (loop_vinfo,
1653 stmt_info, NULL, NULL,
1654 &cost_vec);
1655 else if ((STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def
1656 || (STMT_VINFO_DEF_TYPE (stmt_info)
1657 == vect_double_reduction_def)
1658 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle)
1659 && ! PURE_SLP_STMT (stmt_info))
1660 ok = vectorizable_reduction (loop_vinfo,
1661 stmt_info, NULL, NULL, &cost_vec);
1662 }
1663
1664 /* SLP PHIs are tested by vect_slp_analyze_node_operations. */
1665 if (ok
1666 && STMT_VINFO_LIVE_P (stmt_info)
1667 && !PURE_SLP_STMT (stmt_info))
1668 ok = vectorizable_live_operation (loop_vinfo,
1669 stmt_info, NULL, NULL, NULL,
1670 -1, false, &cost_vec);
1671
1672 if (!ok)
1673 return opt_result::failure_at (phi,
1674 "not vectorized: relevant phi not "
1675 "supported: %G",
1676 static_cast <gimple *> (phi));
1677 }
1678
1679 for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
1680 gsi_next (&si))
1681 {
1682 gimple *stmt = gsi_stmt (si);
1683 if (!gimple_clobber_p (stmt)
1684 && !is_gimple_debug (stmt))
1685 {
1686 opt_result res
1687 = vect_analyze_stmt (loop_vinfo,
1688 loop_vinfo->lookup_stmt (stmt),
1689 &need_to_vectorize,
1690 NULL, NULL, &cost_vec);
1691 if (!res)
1692 return res;
1693 }
1694 }
1695 } /* bbs */
1696
1697 add_stmt_costs (loop_vinfo, loop_vinfo->target_cost_data, &cost_vec);
1698
  /* All operations in the loop are either irrelevant (they deal with loop
     control, or are dead), or only used outside the loop and can be moved
     out of the loop (e.g. invariants, inductions).  The loop can be
     optimized away by scalar optimizations.  We're better off not
     touching this loop.  */
1704 if (!need_to_vectorize)
1705 {
1706 if (dump_enabled_p ())
1707 dump_printf_loc (MSG_NOTE, vect_location,
1708 "All the computation can be taken out of the loop.\n");
1709 return opt_result::failure_at
1710 (vect_location,
1711 "not vectorized: redundant loop. no profit to vectorize.\n");
1712 }
1713
1714 return opt_result::success ();
1715 }
1716
1717 /* Return true if we know that the iteration count is smaller than the
1718 vectorization factor. Return false if it isn't, or if we can't be sure
1719 either way. */
1720
1721 static bool
1722 vect_known_niters_smaller_than_vf (loop_vec_info loop_vinfo)
1723 {
1724 unsigned int assumed_vf = vect_vf_for_cost (loop_vinfo);
1725
1726 HOST_WIDE_INT max_niter;
1727 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
1728 max_niter = LOOP_VINFO_INT_NITERS (loop_vinfo);
1729 else
1730 max_niter = max_stmt_executions_int (LOOP_VINFO_LOOP (loop_vinfo));
1731
1732 if (max_niter != -1 && (unsigned HOST_WIDE_INT) max_niter < assumed_vf)
1733 return true;
1734
1735 return false;
1736 }
1737
1738 /* Analyze the cost of the loop described by LOOP_VINFO. Decide if it
1739 is worthwhile to vectorize. Return 1 if definitely yes, 0 if
1740 definitely no, or -1 if it's worth retrying. */
1741
1742 static int
1743 vect_analyze_loop_costing (loop_vec_info loop_vinfo)
1744 {
1745 class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1746 unsigned int assumed_vf = vect_vf_for_cost (loop_vinfo);
1747
1748 /* Only loops that can handle partially-populated vectors can have iteration
1749 counts less than the vectorization factor. */
1750 if (!LOOP_VINFO_USING_PARTIAL_VECTORS_P (loop_vinfo))
1751 {
1752 if (vect_known_niters_smaller_than_vf (loop_vinfo))
1753 {
1754 if (dump_enabled_p ())
1755 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1756 "not vectorized: iteration count smaller than "
1757 "vectorization factor.\n");
1758 return 0;
1759 }
1760 }
1761
1762 int min_profitable_iters, min_profitable_estimate;
1763 vect_estimate_min_profitable_iters (loop_vinfo, &min_profitable_iters,
1764 &min_profitable_estimate);
1765
1766 if (min_profitable_iters < 0)
1767 {
1768 if (dump_enabled_p ())
1769 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1770 "not vectorized: vectorization not profitable.\n");
1771 if (dump_enabled_p ())
1772 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1773 "not vectorized: vector version will never be "
1774 "profitable.\n");
1775 return -1;
1776 }
1777
1778 int min_scalar_loop_bound = (param_min_vect_loop_bound
1779 * assumed_vf);
1780
1781 /* Use the cost model only if it is more conservative than the
1782 user-specified threshold. */
1783 unsigned int th = (unsigned) MAX (min_scalar_loop_bound,
1784 min_profitable_iters);
1785
1786 LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo) = th;
1787
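/* A purely illustrative example (hypothetical numbers, not from any
   particular target): with --param min-vect-loop-bound=2 and an assumed
   VF of 4, min_scalar_loop_bound is 8, so th is the larger of 8 and the
   cost model's min_profitable_iters; loops known to run fewer than th
   iterations are rejected by the check below.  */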
1788 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
1789 && LOOP_VINFO_INT_NITERS (loop_vinfo) < th)
1790 {
1791 if (dump_enabled_p ())
1792 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1793 "not vectorized: vectorization not profitable.\n");
1794 if (dump_enabled_p ())
1795 dump_printf_loc (MSG_NOTE, vect_location,
1796 "not vectorized: iteration count smaller than user "
1797 "specified loop bound parameter or minimum profitable "
1798 "iterations (whichever is more conservative).\n");
1799 return 0;
1800 }
1801
1802 /* The static profitability threshold min_profitable_estimate includes
1803 the cost of having to check at runtime whether the scalar loop
1804 should be used instead. If it turns out that we don't need or want
1805 such a check, the threshold we should use for the static estimate
1806 is simply the point at which the vector loop becomes more profitable
1807 than the scalar loop. */
1808 if (min_profitable_estimate > min_profitable_iters
1809 && !LOOP_REQUIRES_VERSIONING (loop_vinfo)
1810 && !LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo)
1811 && !LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo)
1812 && !vect_apply_runtime_profitability_check_p (loop_vinfo))
1813 {
1814 if (dump_enabled_p ())
1815 dump_printf_loc (MSG_NOTE, vect_location, "no need for a runtime"
1816 " choice between the scalar and vector loops\n");
1817 min_profitable_estimate = min_profitable_iters;
1818 }
1819
1820 HOST_WIDE_INT estimated_niter;
1821
1822 /* If we are vectorizing an epilogue then we know the maximum number of
1823 scalar iterations it will cover is at least one lower than the
1824 vectorization factor of the main loop. */
1825 if (LOOP_VINFO_EPILOGUE_P (loop_vinfo))
1826 estimated_niter
1827 = vect_vf_for_cost (LOOP_VINFO_ORIG_LOOP_INFO (loop_vinfo)) - 1;
1828 else
1829 {
1830 estimated_niter = estimated_stmt_executions_int (loop);
1831 if (estimated_niter == -1)
1832 estimated_niter = likely_max_stmt_executions_int (loop);
1833 }
1834 if (estimated_niter != -1
1835 && ((unsigned HOST_WIDE_INT) estimated_niter
1836 < MAX (th, (unsigned) min_profitable_estimate)))
1837 {
1838 if (dump_enabled_p ())
1839 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1840 "not vectorized: estimated iteration count too "
1841 "small.\n");
1842 if (dump_enabled_p ())
1843 dump_printf_loc (MSG_NOTE, vect_location,
1844 "not vectorized: estimated iteration count smaller "
1845 "than specified loop bound parameter or minimum "
1846 "profitable iterations (whichever is more "
1847 "conservative).\n");
1848 return -1;
1849 }
1850
1851 return 1;
1852 }
1853
1854 static opt_result
1855 vect_get_datarefs_in_loop (loop_p loop, basic_block *bbs,
1856 vec<data_reference_p> *datarefs,
1857 unsigned int *n_stmts)
1858 {
1859 *n_stmts = 0;
1860 for (unsigned i = 0; i < loop->num_nodes; i++)
1861 for (gimple_stmt_iterator gsi = gsi_start_bb (bbs[i]);
1862 !gsi_end_p (gsi); gsi_next (&gsi))
1863 {
1864 gimple *stmt = gsi_stmt (gsi);
1865 if (is_gimple_debug (stmt))
1866 continue;
1867 ++(*n_stmts);
1868 opt_result res = vect_find_stmt_data_reference (loop, stmt, datarefs,
1869 NULL, 0);
1870 if (!res)
1871 {
1872 if (is_gimple_call (stmt) && loop->safelen)
1873 {
1874 tree fndecl = gimple_call_fndecl (stmt), op;
1875 if (fndecl != NULL_TREE)
1876 {
1877 cgraph_node *node = cgraph_node::get (fndecl);
1878 if (node != NULL && node->simd_clones != NULL)
1879 {
1880 unsigned int j, n = gimple_call_num_args (stmt);
1881 for (j = 0; j < n; j++)
1882 {
1883 op = gimple_call_arg (stmt, j);
1884 if (DECL_P (op)
1885 || (REFERENCE_CLASS_P (op)
1886 && get_base_address (op)))
1887 break;
1888 }
1889 op = gimple_call_lhs (stmt);
1890 /* Ignore #pragma omp declare simd functions
1891 if they don't have data references in the
1892 call stmt itself. */
1893 if (j == n
1894 && !(op
1895 && (DECL_P (op)
1896 || (REFERENCE_CLASS_P (op)
1897 && get_base_address (op)))))
1898 continue;
1899 }
1900 }
1901 }
1902 return res;
1903 }
1904 /* If dependence analysis will give up due to the limit on the
1905 number of datarefs stop here and fail fatally. */
1906 if (datarefs->length ()
1907 > (unsigned)param_loop_max_datarefs_for_datadeps)
1908 return opt_result::failure_at (stmt, "exceeded param "
1909 "loop-max-datarefs-for-datadeps\n");
1910 }
1911 return opt_result::success ();
1912 }
1913
1914 /* Look for SLP-only access groups and turn each individual access into its own
1915 group. */
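/* For instance, a group of four interleaved loads that was usable only
   under SLP is split into four independent accesses, each becoming its
   own DR_GROUP_FIRST_ELEMENT with DR_GROUP_SIZE 1 and, for non-strided
   accesses, a DR_GROUP_GAP of 3 to account for the elements of the
   original group that each access now skips.  */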
1916 static void
1917 vect_dissolve_slp_only_groups (loop_vec_info loop_vinfo)
1918 {
1919 unsigned int i;
1920 struct data_reference *dr;
1921
1922 DUMP_VECT_SCOPE ("vect_dissolve_slp_only_groups");
1923
1924 vec<data_reference_p> datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
1925 FOR_EACH_VEC_ELT (datarefs, i, dr)
1926 {
1927 gcc_assert (DR_REF (dr));
1928 stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (DR_STMT (dr));
1929
1930 /* Check if the load is a part of an interleaving chain. */
1931 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
1932 {
1933 stmt_vec_info first_element = DR_GROUP_FIRST_ELEMENT (stmt_info);
1934 unsigned int group_size = DR_GROUP_SIZE (first_element);
1935
1936 /* Check whether this is an SLP-only group. */
1937 if (!STMT_SLP_TYPE (stmt_info)
1938 && STMT_VINFO_SLP_VECT_ONLY (first_element))
1939 {
1940 /* Dissolve the group. */
1941 STMT_VINFO_SLP_VECT_ONLY (first_element) = false;
1942
1943 stmt_vec_info vinfo = first_element;
1944 while (vinfo)
1945 {
1946 stmt_vec_info next = DR_GROUP_NEXT_ELEMENT (vinfo);
1947 DR_GROUP_FIRST_ELEMENT (vinfo) = vinfo;
1948 DR_GROUP_NEXT_ELEMENT (vinfo) = NULL;
1949 DR_GROUP_SIZE (vinfo) = 1;
1950 if (STMT_VINFO_STRIDED_P (first_element))
1951 DR_GROUP_GAP (vinfo) = 0;
1952 else
1953 DR_GROUP_GAP (vinfo) = group_size - 1;
1954 vinfo = next;
1955 }
1956 }
1957 }
1958 }
1959 }
1960
1961
1962 /* Decides whether we need to create an epilogue loop to handle
1963 remaining scalar iterations and sets PEELING_FOR_NITERS accordingly. */
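/* As an illustration, ignoring peeling for alignment and gaps: with a
   known iteration count of 100 and a vectorization factor of 8, the
   vector loop covers 96 iterations, so PEELING_FOR_NITER is set and the
   remaining 4 iterations are left to an epilogue loop; a loop using
   partial vectors handles all iterations itself and needs no such
   peeling.  */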
1964
1965 void
1966 determine_peel_for_niter (loop_vec_info loop_vinfo)
1967 {
1968 LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = false;
1969
1970 unsigned HOST_WIDE_INT const_vf;
1971 HOST_WIDE_INT max_niter
1972 = likely_max_stmt_executions_int (LOOP_VINFO_LOOP (loop_vinfo));
1973
1974 unsigned th = LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo);
1975 if (!th && LOOP_VINFO_ORIG_LOOP_INFO (loop_vinfo))
1976 th = LOOP_VINFO_COST_MODEL_THRESHOLD (LOOP_VINFO_ORIG_LOOP_INFO
1977 (loop_vinfo));
1978
1979 if (LOOP_VINFO_USING_PARTIAL_VECTORS_P (loop_vinfo))
1980 /* The main loop handles all iterations. */
1981 LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = false;
1982 else if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
1983 && LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) >= 0)
1984 {
1985 /* Work out the (constant) number of iterations that need to be
1986 peeled for reasons other than niters. */
1987 unsigned int peel_niter = LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo);
1988 if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo))
1989 peel_niter += 1;
1990 if (!multiple_p (LOOP_VINFO_INT_NITERS (loop_vinfo) - peel_niter,
1991 LOOP_VINFO_VECT_FACTOR (loop_vinfo)))
1992 LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = true;
1993 }
1994 else if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo)
1995 /* ??? When peeling for gaps but not alignment, we could
1996 try to check whether the (variable) niters is known to be
1997 VF * N + 1. That's something of a niche case though. */
1998 || LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo)
1999 || !LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant (&const_vf)
2000 || ((tree_ctz (LOOP_VINFO_NITERS (loop_vinfo))
2001 < (unsigned) exact_log2 (const_vf))
2002 /* In case of versioning, check if the maximum number of
2003 iterations is greater than th. If they are identical,
2004 the epilogue is unnecessary. */
2005 && (!LOOP_REQUIRES_VERSIONING (loop_vinfo)
2006 || ((unsigned HOST_WIDE_INT) max_niter
2007 > (th / const_vf) * const_vf))))
2008 LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = true;
2009 }
2010
2011
2012 /* Function vect_analyze_loop_2.
2013
2014 Apply a set of analyses on LOOP, and create a loop_vec_info struct
2015 for it. The different analyses will record information in the
2016 loop_vec_info struct. */
2017 static opt_result
2018 vect_analyze_loop_2 (loop_vec_info loop_vinfo, bool &fatal, unsigned *n_stmts)
2019 {
2020 opt_result ok = opt_result::success ();
2021 int res;
2022 unsigned int max_vf = MAX_VECTORIZATION_FACTOR;
2023 poly_uint64 min_vf = 2;
2024 loop_vec_info orig_loop_vinfo = NULL;
2025
2026 /* If we are dealing with an epilogue then orig_loop_vinfo points to the
2027 loop_vec_info of the first vectorized loop. */
2028 if (LOOP_VINFO_EPILOGUE_P (loop_vinfo))
2029 orig_loop_vinfo = LOOP_VINFO_ORIG_LOOP_INFO (loop_vinfo);
2030 else
2031 orig_loop_vinfo = loop_vinfo;
2032 gcc_assert (orig_loop_vinfo);
2033
2034 /* The first group of checks is independent of the vector size. */
2035 fatal = true;
2036
2037 if (LOOP_VINFO_SIMD_IF_COND (loop_vinfo)
2038 && integer_zerop (LOOP_VINFO_SIMD_IF_COND (loop_vinfo)))
2039 return opt_result::failure_at (vect_location,
2040 "not vectorized: simd if(0)\n");
2041
2042 /* Find all data references in the loop (which correspond to vdefs/vuses)
2043 and analyze their evolution in the loop. */
2044
2045 loop_p loop = LOOP_VINFO_LOOP (loop_vinfo);
2046
2047 /* Gather the data references and count stmts in the loop. */
2048 if (!LOOP_VINFO_DATAREFS (loop_vinfo).exists ())
2049 {
2050 opt_result res
2051 = vect_get_datarefs_in_loop (loop, LOOP_VINFO_BBS (loop_vinfo),
2052 &LOOP_VINFO_DATAREFS (loop_vinfo),
2053 n_stmts);
2054 if (!res)
2055 {
2056 if (dump_enabled_p ())
2057 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2058 "not vectorized: loop contains function "
2059 "calls or data references that cannot "
2060 "be analyzed\n");
2061 return res;
2062 }
2063 loop_vinfo->shared->save_datarefs ();
2064 }
2065 else
2066 loop_vinfo->shared->check_datarefs ();
2067
2068 /* Analyze the data references and also adjust the minimal
2069 vectorization factor according to the loads and stores. */
2070
2071 ok = vect_analyze_data_refs (loop_vinfo, &min_vf, &fatal);
2072 if (!ok)
2073 {
2074 if (dump_enabled_p ())
2075 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2076 "bad data references.\n");
2077 return ok;
2078 }
2079
2080 /* Classify all cross-iteration scalar data-flow cycles.
2081 Cross-iteration cycles caused by virtual phis are analyzed separately. */
2082 vect_analyze_scalar_cycles (loop_vinfo);
2083
2084 vect_pattern_recog (loop_vinfo);
2085
2086 vect_fixup_scalar_cycles_with_patterns (loop_vinfo);
2087
2088 /* Analyze the access patterns of the data-refs in the loop (consecutive,
2089 complex, etc.). FORNOW: Only handle consecutive access pattern. */
2090
2091 ok = vect_analyze_data_ref_accesses (loop_vinfo, NULL);
2092 if (!ok)
2093 {
2094 if (dump_enabled_p ())
2095 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2096 "bad data access.\n");
2097 return ok;
2098 }
2099
2100 /* Data-flow analysis to detect stmts that do not need to be vectorized. */
2101
2102 ok = vect_mark_stmts_to_be_vectorized (loop_vinfo, &fatal);
2103 if (!ok)
2104 {
2105 if (dump_enabled_p ())
2106 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2107 "unexpected pattern.\n");
2108 return ok;
2109 }
2110
2111 /* The rest of the analysis below depends on the vector size in some way, so failures are no longer fatal. */
2112 fatal = false;
2113
2114 /* Analyze data dependences between the data-refs in the loop
2115 and adjust the maximum vectorization factor according to
2116 the dependences.
2117 FORNOW: fail at the first data dependence that we encounter. */
2118
2119 ok = vect_analyze_data_ref_dependences (loop_vinfo, &max_vf);
2120 if (!ok)
2121 {
2122 if (dump_enabled_p ())
2123 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2124 "bad data dependence.\n");
2125 return ok;
2126 }
2127 if (max_vf != MAX_VECTORIZATION_FACTOR
2128 && maybe_lt (max_vf, min_vf))
2129 return opt_result::failure_at (vect_location, "bad data dependence.\n");
2130 LOOP_VINFO_MAX_VECT_FACTOR (loop_vinfo) = max_vf;
2131
2132 ok = vect_determine_vectorization_factor (loop_vinfo);
2133 if (!ok)
2134 {
2135 if (dump_enabled_p ())
2136 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2137 "can't determine vectorization factor.\n");
2138 return ok;
2139 }
2140 if (max_vf != MAX_VECTORIZATION_FACTOR
2141 && maybe_lt (max_vf, LOOP_VINFO_VECT_FACTOR (loop_vinfo)))
2142 return opt_result::failure_at (vect_location, "bad data dependence.\n");
2143
2144 /* Compute the scalar iteration cost. */
2145 vect_compute_single_scalar_iteration_cost (loop_vinfo);
2146
2147 poly_uint64 saved_vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2148
2149 /* Check the SLP opportunities in the loop, analyze and build SLP trees. */
2150 ok = vect_analyze_slp (loop_vinfo, *n_stmts);
2151 if (!ok)
2152 return ok;
2153
2154 /* If there are any SLP instances mark them as pure_slp. */
2155 bool slp = vect_make_slp_decision (loop_vinfo);
2156 if (slp)
2157 {
2158 /* Find stmts that need to be both vectorized and SLPed. */
2159 vect_detect_hybrid_slp (loop_vinfo);
2160
2161 /* Update the vectorization factor based on the SLP decision. */
2162 vect_update_vf_for_slp (loop_vinfo);
2163
2164 /* Optimize the SLP graph with the vectorization factor fixed. */
2165 vect_optimize_slp (loop_vinfo);
2166 }
2167
2168 bool saved_can_use_partial_vectors_p
2169 = LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (loop_vinfo);
2170
2171 /* We don't expect to have to roll back to anything other than an empty
2172 set of rgroups. */
2173 gcc_assert (LOOP_VINFO_MASKS (loop_vinfo).is_empty ());
2174
2175 /* This is the point where we can re-start analysis with SLP forced off. */
2176 start_over:
2177
2178 /* Now the vectorization factor is final. */
2179 poly_uint64 vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2180 gcc_assert (known_ne (vectorization_factor, 0U));
2181
2182 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo) && dump_enabled_p ())
2183 {
2184 dump_printf_loc (MSG_NOTE, vect_location,
2185 "vectorization_factor = ");
2186 dump_dec (MSG_NOTE, vectorization_factor);
2187 dump_printf (MSG_NOTE, ", niters = %wd\n",
2188 LOOP_VINFO_INT_NITERS (loop_vinfo));
2189 }
2190
2191 /* Analyze the alignment of the data-refs in the loop.
2192 Fail if a data reference is found that cannot be vectorized. */
2193
2194 ok = vect_analyze_data_refs_alignment (loop_vinfo);
2195 if (!ok)
2196 {
2197 if (dump_enabled_p ())
2198 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2199 "bad data alignment.\n");
2200 return ok;
2201 }
2202
2203 /* Prune the list of ddrs to be tested at run-time by versioning for alias.
2204 It is important to call pruning after vect_analyze_data_ref_accesses,
2205 since we use grouping information gathered by interleaving analysis. */
2206 ok = vect_prune_runtime_alias_test_list (loop_vinfo);
2207 if (!ok)
2208 return ok;
2209
2210 /* Do not invoke vect_enhance_data_refs_alignment for epilogue
2211 vectorization, since we do not want to add extra peeling or
2212 add versioning for alignment. */
2213 if (!LOOP_VINFO_EPILOGUE_P (loop_vinfo))
2214 /* This pass will decide on using loop versioning and/or loop peeling in
2215 order to enhance the alignment of data references in the loop. */
2216 ok = vect_enhance_data_refs_alignment (loop_vinfo);
2217 if (!ok)
2218 return ok;
2219
2220 if (slp)
2221 {
2222 /* Analyze operations in the SLP instances. Note this may
2223 remove unsupported SLP instances which makes the above
2224 SLP kind detection invalid. */
2225 unsigned old_size = LOOP_VINFO_SLP_INSTANCES (loop_vinfo).length ();
2226 vect_slp_analyze_operations (loop_vinfo);
2227 if (LOOP_VINFO_SLP_INSTANCES (loop_vinfo).length () != old_size)
2228 {
2229 ok = opt_result::failure_at (vect_location,
2230 "unsupported SLP instances\n");
2231 goto again;
2232 }
2233 }
2234
2235 /* Dissolve SLP-only groups. */
2236 vect_dissolve_slp_only_groups (loop_vinfo);
2237
2238 /* Scan all the remaining operations in the loop that are not subject
2239 to SLP and make sure they are vectorizable. */
2240 ok = vect_analyze_loop_operations (loop_vinfo);
2241 if (!ok)
2242 {
2243 if (dump_enabled_p ())
2244 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2245 "bad operation or unsupported loop bound.\n");
2246 return ok;
2247 }
2248
2249 /* For now we don't expect to mix the masking and length approaches for
2250 one loop; disable the use of partial vectors if both are recorded. */
2251 if (LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (loop_vinfo)
2252 && !LOOP_VINFO_MASKS (loop_vinfo).is_empty ()
2253 && !LOOP_VINFO_LENS (loop_vinfo).is_empty ())
2254 {
2255 if (dump_enabled_p ())
2256 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2257 "can't vectorize a loop with partial vectors"
2258 " because we don't expect to mix different"
2259 " approaches with partial vectors for the"
2260 " same loop.\n");
2261 LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (loop_vinfo) = false;
2262 }
2263
2264 /* Decide whether to vectorize a loop with partial vectors for
2265 this vectorization factor. */
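/* Roughly: --param vect-partial-vector-usage=0 disables partial vectors
   altogether, =1 restricts them to epilogues and to loops whose
   iteration count is known to be smaller than VF (other loops defer
   partial vectors to their epilogue), and any other value allows them
   for the main loop as well, provided full masking or length-based
   control can be verified below.  */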
2266 if (LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (loop_vinfo))
2267 {
2268 if (param_vect_partial_vector_usage == 0)
2269 LOOP_VINFO_USING_PARTIAL_VECTORS_P (loop_vinfo) = false;
2270 else if (vect_verify_full_masking (loop_vinfo)
2271 || vect_verify_loop_lens (loop_vinfo))
2272 {
2273 /* The epilogue, and other loops whose iteration count is known to be
2274 smaller than VF, can still fully use vector accesses with length. */
2275 if (param_vect_partial_vector_usage == 1
2276 && !LOOP_VINFO_EPILOGUE_P (loop_vinfo)
2277 && !vect_known_niters_smaller_than_vf (loop_vinfo))
2278 {
2279 LOOP_VINFO_USING_PARTIAL_VECTORS_P (loop_vinfo) = false;
2280 LOOP_VINFO_EPIL_USING_PARTIAL_VECTORS_P (loop_vinfo) = true;
2281 }
2282 else
2283 LOOP_VINFO_USING_PARTIAL_VECTORS_P (loop_vinfo) = true;
2284 }
2285 else
2286 LOOP_VINFO_USING_PARTIAL_VECTORS_P (loop_vinfo) = false;
2287 }
2288 else
2289 LOOP_VINFO_USING_PARTIAL_VECTORS_P (loop_vinfo) = false;
2290
2291 if (dump_enabled_p ())
2292 {
2293 if (LOOP_VINFO_USING_PARTIAL_VECTORS_P (loop_vinfo))
2294 dump_printf_loc (MSG_NOTE, vect_location,
2295 "operating on partial vectors.\n");
2296 else
2297 dump_printf_loc (MSG_NOTE, vect_location,
2298 "operating only on full vectors.\n");
2299 }
2300
2301 /* If an epilog loop is required because of data accesses with gaps,
2302 one additional iteration needs to be peeled. Check if there are
2303 enough iterations for vectorization. */
2304 if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo)
2305 && LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
2306 && !LOOP_VINFO_USING_PARTIAL_VECTORS_P (loop_vinfo))
2307 {
2308 poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2309 tree scalar_niters = LOOP_VINFO_NITERSM1 (loop_vinfo);
2310
2311 if (known_lt (wi::to_widest (scalar_niters), vf))
2312 return opt_result::failure_at (vect_location,
2313 "loop has no enough iterations to"
2314 " support peeling for gaps.\n");
2315 }
2316
2317 /* If we're vectorizing an epilogue loop, the vectorized loop either needs
2318 to be able to handle fewer than VF scalars, or needs to have a lower VF
2319 than the main loop. */
2320 if (LOOP_VINFO_EPILOGUE_P (loop_vinfo)
2321 && !LOOP_VINFO_USING_PARTIAL_VECTORS_P (loop_vinfo)
2322 && maybe_ge (LOOP_VINFO_VECT_FACTOR (loop_vinfo),
2323 LOOP_VINFO_VECT_FACTOR (orig_loop_vinfo)))
2324 return opt_result::failure_at (vect_location,
2325 "Vectorization factor too high for"
2326 " epilogue loop.\n");
2327
2328 /* Check the costings of the loop make vectorizing worthwhile. */
2329 res = vect_analyze_loop_costing (loop_vinfo);
2330 if (res < 0)
2331 {
2332 ok = opt_result::failure_at (vect_location,
2333 "Loop costings may not be worthwhile.\n");
2334 goto again;
2335 }
2336 if (!res)
2337 return opt_result::failure_at (vect_location,
2338 "Loop costings not worthwhile.\n");
2339
2340 determine_peel_for_niter (loop_vinfo);
2341 /* If an epilogue loop is required make sure we can create one. */
2342 if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo)
2343 || LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo))
2344 {
2345 if (dump_enabled_p ())
2346 dump_printf_loc (MSG_NOTE, vect_location, "epilog loop required\n");
2347 if (!vect_can_advance_ivs_p (loop_vinfo)
2348 || !slpeel_can_duplicate_loop_p (LOOP_VINFO_LOOP (loop_vinfo),
2349 single_exit (LOOP_VINFO_LOOP
2350 (loop_vinfo))))
2351 {
2352 ok = opt_result::failure_at (vect_location,
2353 "not vectorized: can't create required "
2354 "epilog loop\n");
2355 goto again;
2356 }
2357 }
2358
2359 /* During peeling, we need to check if the number of loop iterations is
2360 enough for both the peeled prolog loop and the vector loop. This check
2361 can be merged along with threshold check of loop versioning, so
2362 increase threshold for this case if necessary.
2363
2364 If we are analyzing an epilogue we still want to check what its
2365 versioning threshold would be. If we decide to vectorize the epilogues we
2366 will want to use the lowest versioning threshold of all epilogues and main
2367 loop. This will enable us to enter a vectorized epilogue even when
2368 versioning the loop. We can't simply check whether the epilogue requires
2369 versioning though since we may have skipped some versioning checks when
2370 analyzing the epilogue. For instance, checks for alias versioning will be
2371 skipped when dealing with epilogues as we assume we already checked them
2372 for the main loop. So instead we always check the 'orig_loop_vinfo'. */
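/* A purely illustrative example: with a VF of 4, 3 iterations peeled
   for alignment and one extra iteration peeled for gaps, niters_th
   below becomes 3 + 4 + 1 = 8, possibly raised further to the cost
   model threshold when the runtime profitability check applies.  */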
2373 if (LOOP_REQUIRES_VERSIONING (orig_loop_vinfo))
2374 {
2375 poly_uint64 niters_th = 0;
2376 unsigned int th = LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo);
2377
2378 if (!vect_use_loop_mask_for_alignment_p (loop_vinfo))
2379 {
2380 /* Niters for peeled prolog loop. */
2381 if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) < 0)
2382 {
2383 dr_vec_info *dr_info = LOOP_VINFO_UNALIGNED_DR (loop_vinfo);
2384 tree vectype = STMT_VINFO_VECTYPE (dr_info->stmt);
2385 niters_th += TYPE_VECTOR_SUBPARTS (vectype) - 1;
2386 }
2387 else
2388 niters_th += LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo);
2389 }
2390
2391 /* Niters for at least one iteration of vectorized loop. */
2392 if (!LOOP_VINFO_USING_PARTIAL_VECTORS_P (loop_vinfo))
2393 niters_th += LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2394 /* One additional iteration because of peeling for gap. */
2395 if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo))
2396 niters_th += 1;
2397
2398 /* Use the same condition as vect_transform_loop to decide when to use
2399 the cost to determine a versioning threshold. */
2400 if (vect_apply_runtime_profitability_check_p (loop_vinfo)
2401 && ordered_p (th, niters_th))
2402 niters_th = ordered_max (poly_uint64 (th), niters_th);
2403
2404 LOOP_VINFO_VERSIONING_THRESHOLD (loop_vinfo) = niters_th;
2405 }
2406
2407 gcc_assert (known_eq (vectorization_factor,
2408 LOOP_VINFO_VECT_FACTOR (loop_vinfo)));
2409
2410 /* Ok to vectorize! */
2411 return opt_result::success ();
2412
2413 again:
2414 /* Ensure that "ok" is false (with an opt_problem if dumping is enabled). */
2415 gcc_assert (!ok);
2416
2417 /* Try again with SLP forced off but if we didn't do any SLP there is
2418 no point in re-trying. */
2419 if (!slp)
2420 return ok;
2421
2422 /* If there are reduction chains re-trying will fail anyway. */
2423 if (! LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo).is_empty ())
2424 return ok;
2425
2426 /* Likewise if the grouped loads or stores in the SLP cannot be handled
2427 via interleaving or lane instructions. */
2428 slp_instance instance;
2429 slp_tree node;
2430 unsigned i, j;
2431 FOR_EACH_VEC_ELT (LOOP_VINFO_SLP_INSTANCES (loop_vinfo), i, instance)
2432 {
2433 stmt_vec_info vinfo;
2434 vinfo = SLP_TREE_SCALAR_STMTS (SLP_INSTANCE_TREE (instance))[0];
2435 if (! STMT_VINFO_GROUPED_ACCESS (vinfo))
2436 continue;
2437 vinfo = DR_GROUP_FIRST_ELEMENT (vinfo);
2438 unsigned int size = DR_GROUP_SIZE (vinfo);
2439 tree vectype = STMT_VINFO_VECTYPE (vinfo);
2440 if (! vect_store_lanes_supported (vectype, size, false)
2441 && ! known_eq (TYPE_VECTOR_SUBPARTS (vectype), 1U)
2442 && ! vect_grouped_store_supported (vectype, size))
2443 return opt_result::failure_at (vinfo->stmt,
2444 "unsupported grouped store\n");
2445 FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (instance), j, node)
2446 {
2447 vinfo = SLP_TREE_SCALAR_STMTS (node)[0];
2448 vinfo = DR_GROUP_FIRST_ELEMENT (vinfo);
2449 bool single_element_p = !DR_GROUP_NEXT_ELEMENT (vinfo);
2450 size = DR_GROUP_SIZE (vinfo);
2451 vectype = STMT_VINFO_VECTYPE (vinfo);
2452 if (! vect_load_lanes_supported (vectype, size, false)
2453 && ! vect_grouped_load_supported (vectype, single_element_p,
2454 size))
2455 return opt_result::failure_at (vinfo->stmt,
2456 "unsupported grouped load\n");
2457 }
2458 }
2459
2460 if (dump_enabled_p ())
2461 dump_printf_loc (MSG_NOTE, vect_location,
2462 "re-trying with SLP disabled\n");
2463
2464 /* Roll back state appropriately. No SLP this time. */
2465 slp = false;
2466 /* Restore vectorization factor as it were without SLP. */
2467 LOOP_VINFO_VECT_FACTOR (loop_vinfo) = saved_vectorization_factor;
2468 /* Free the SLP instances. */
2469 FOR_EACH_VEC_ELT (LOOP_VINFO_SLP_INSTANCES (loop_vinfo), j, instance)
2470 vect_free_slp_instance (instance, false);
2471 LOOP_VINFO_SLP_INSTANCES (loop_vinfo).release ();
2472 /* Reset SLP type to loop_vect on all stmts. */
2473 for (i = 0; i < LOOP_VINFO_LOOP (loop_vinfo)->num_nodes; ++i)
2474 {
2475 basic_block bb = LOOP_VINFO_BBS (loop_vinfo)[i];
2476 for (gimple_stmt_iterator si = gsi_start_phis (bb);
2477 !gsi_end_p (si); gsi_next (&si))
2478 {
2479 stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (gsi_stmt (si));
2480 STMT_SLP_TYPE (stmt_info) = loop_vect;
2481 if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def
2482 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_double_reduction_def)
2483 {
2484 /* vectorizable_reduction adjusts reduction stmt def-types,
2485 restore them to that of the PHI. */
2486 STMT_VINFO_DEF_TYPE (STMT_VINFO_REDUC_DEF (stmt_info))
2487 = STMT_VINFO_DEF_TYPE (stmt_info);
2488 STMT_VINFO_DEF_TYPE (vect_stmt_to_vectorize
2489 (STMT_VINFO_REDUC_DEF (stmt_info)))
2490 = STMT_VINFO_DEF_TYPE (stmt_info);
2491 }
2492 }
2493 for (gimple_stmt_iterator si = gsi_start_bb (bb);
2494 !gsi_end_p (si); gsi_next (&si))
2495 {
2496 if (is_gimple_debug (gsi_stmt (si)))
2497 continue;
2498 stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (gsi_stmt (si));
2499 STMT_SLP_TYPE (stmt_info) = loop_vect;
2500 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
2501 {
2502 gimple *pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info);
2503 stmt_info = STMT_VINFO_RELATED_STMT (stmt_info);
2504 STMT_SLP_TYPE (stmt_info) = loop_vect;
2505 for (gimple_stmt_iterator pi = gsi_start (pattern_def_seq);
2506 !gsi_end_p (pi); gsi_next (&pi))
2507 STMT_SLP_TYPE (loop_vinfo->lookup_stmt (gsi_stmt (pi)))
2508 = loop_vect;
2509 }
2510 }
2511 }
2512 /* Free optimized alias test DDRS. */
2513 LOOP_VINFO_LOWER_BOUNDS (loop_vinfo).truncate (0);
2514 LOOP_VINFO_COMP_ALIAS_DDRS (loop_vinfo).release ();
2515 LOOP_VINFO_CHECK_UNEQUAL_ADDRS (loop_vinfo).release ();
2516 /* Reset target cost data. */
2517 destroy_cost_data (LOOP_VINFO_TARGET_COST_DATA (loop_vinfo));
2518 LOOP_VINFO_TARGET_COST_DATA (loop_vinfo)
2519 = init_cost (LOOP_VINFO_LOOP (loop_vinfo));
2520 /* Reset accumulated rgroup information. */
2521 release_vec_loop_controls (&LOOP_VINFO_MASKS (loop_vinfo));
2522 release_vec_loop_controls (&LOOP_VINFO_LENS (loop_vinfo));
2523 /* Reset assorted flags. */
2524 LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = false;
2525 LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = false;
2526 LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo) = 0;
2527 LOOP_VINFO_VERSIONING_THRESHOLD (loop_vinfo) = 0;
2528 LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (loop_vinfo)
2529 = saved_can_use_partial_vectors_p;
2530
2531 goto start_over;
2532 }
2533
2534 /* Return true if vectorizing a loop using NEW_LOOP_VINFO appears
2535 to be better than vectorizing it using OLD_LOOP_VINFO. Assume that
2536 OLD_LOOP_VINFO is better unless something specifically indicates
2537 otherwise.
2538
2539 Note that this deliberately isn't a partial order. */
2540
2541 static bool
2542 vect_better_loop_vinfo_p (loop_vec_info new_loop_vinfo,
2543 loop_vec_info old_loop_vinfo)
2544 {
2545 struct loop *loop = LOOP_VINFO_LOOP (new_loop_vinfo);
2546 gcc_assert (LOOP_VINFO_LOOP (old_loop_vinfo) == loop);
2547
2548 poly_int64 new_vf = LOOP_VINFO_VECT_FACTOR (new_loop_vinfo);
2549 poly_int64 old_vf = LOOP_VINFO_VECT_FACTOR (old_loop_vinfo);
2550
2551 /* Always prefer a VF of loop->simdlen over any other VF. */
2552 if (loop->simdlen)
2553 {
2554 bool new_simdlen_p = known_eq (new_vf, loop->simdlen);
2555 bool old_simdlen_p = known_eq (old_vf, loop->simdlen);
2556 if (new_simdlen_p != old_simdlen_p)
2557 return new_simdlen_p;
2558 }
2559
2560 /* Limit the VFs to what is likely to be the maximum number of iterations,
2561 to handle cases in which at least one loop_vinfo is fully-masked. */
2562 HOST_WIDE_INT estimated_max_niter = likely_max_stmt_executions_int (loop);
2563 if (estimated_max_niter != -1)
2564 {
2565 if (known_le (estimated_max_niter, new_vf))
2566 new_vf = estimated_max_niter;
2567 if (known_le (estimated_max_niter, old_vf))
2568 old_vf = estimated_max_niter;
2569 }
2570
2571 /* Check whether the (fractional) cost per scalar iteration is lower
2572 or higher: new_inside_cost / new_vf vs. old_inside_cost / old_vf. */
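/* Cross-multiplying by the other VF lets us compare the two fractions
   without a division, which matters because the VFs may be
   non-constant poly_ints.  */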
2573 poly_widest_int rel_new = (new_loop_vinfo->vec_inside_cost
2574 * poly_widest_int (old_vf));
2575 poly_widest_int rel_old = (old_loop_vinfo->vec_inside_cost
2576 * poly_widest_int (new_vf));
2577 if (maybe_lt (rel_old, rel_new))
2578 {
2579 /* When old_loop_vinfo uses a variable vectorization factor,
2580 we know that it has a lower cost for at least one runtime VF.
2581 However, we don't know how likely that VF is.
2582
2583 One option would be to compare the costs for the estimated VFs.
2584 The problem is that that can put too much pressure on the cost
2585 model. E.g. if the estimated VF is also the lowest possible VF,
2586 and if old_loop_vinfo is 1 unit worse than new_loop_vinfo
2587 for the estimated VF, we'd then choose new_loop_vinfo even
2588 though (a) new_loop_vinfo might not actually be better than
2589 old_loop_vinfo for that VF and (b) it would be significantly
2590 worse at larger VFs.
2591
2592 Here we go for a hacky compromise: pick new_loop_vinfo if it is
2593 no more expensive than old_loop_vinfo even after doubling the
2594 estimated old_loop_vinfo VF. For all but trivial loops, this
2595 ensures that we only pick new_loop_vinfo if it is significantly
2596 better than old_loop_vinfo at the estimated VF. */
2597 if (rel_new.is_constant ())
2598 return false;
2599
2600 HOST_WIDE_INT new_estimated_vf = estimated_poly_value (new_vf);
2601 HOST_WIDE_INT old_estimated_vf = estimated_poly_value (old_vf);
2602 widest_int estimated_rel_new = (new_loop_vinfo->vec_inside_cost
2603 * widest_int (old_estimated_vf));
2604 widest_int estimated_rel_old = (old_loop_vinfo->vec_inside_cost
2605 * widest_int (new_estimated_vf));
2606 return estimated_rel_new * 2 <= estimated_rel_old;
2607 }
2608 if (known_lt (rel_new, rel_old))
2609 return true;
2610
2611 /* If there's nothing to choose between the loop bodies, see whether
2612 there's a difference in the prologue and epilogue costs. */
2613 if (new_loop_vinfo->vec_outside_cost != old_loop_vinfo->vec_outside_cost)
2614 return new_loop_vinfo->vec_outside_cost < old_loop_vinfo->vec_outside_cost;
2615
2616 return false;
2617 }
2618
2619 /* Decide whether to replace OLD_LOOP_VINFO with NEW_LOOP_VINFO. Return
2620 true if we should. */
2621
2622 static bool
2623 vect_joust_loop_vinfos (loop_vec_info new_loop_vinfo,
2624 loop_vec_info old_loop_vinfo)
2625 {
2626 if (!vect_better_loop_vinfo_p (new_loop_vinfo, old_loop_vinfo))
2627 return false;
2628
2629 if (dump_enabled_p ())
2630 dump_printf_loc (MSG_NOTE, vect_location,
2631 "***** Preferring vector mode %s to vector mode %s\n",
2632 GET_MODE_NAME (new_loop_vinfo->vector_mode),
2633 GET_MODE_NAME (old_loop_vinfo->vector_mode));
2634 return true;
2635 }
2636
2637 /* Function vect_analyze_loop.
2638
2639 Apply a set of analyses on LOOP, and create a loop_vec_info struct
2640 for it. The different analyses will record information in the
2641 loop_vec_info struct. */
2642 opt_loop_vec_info
2643 vect_analyze_loop (class loop *loop, vec_info_shared *shared)
2644 {
2645 auto_vector_modes vector_modes;
2646
2647 /* Autodetect first vector size we try. */
2648 unsigned int autovec_flags
2649 = targetm.vectorize.autovectorize_vector_modes (&vector_modes,
2650 loop->simdlen != 0);
2651 unsigned int mode_i = 0;
2652
2653 DUMP_VECT_SCOPE ("analyze_loop_nest");
2654
2655 if (loop_outer (loop)
2656 && loop_vec_info_for_loop (loop_outer (loop))
2657 && LOOP_VINFO_VECTORIZABLE_P (loop_vec_info_for_loop (loop_outer (loop))))
2658 return opt_loop_vec_info::failure_at (vect_location,
2659 "outer-loop already vectorized.\n");
2660
2661 if (!find_loop_nest (loop, &shared->loop_nest))
2662 return opt_loop_vec_info::failure_at
2663 (vect_location,
2664 "not vectorized: loop nest containing two or more consecutive inner"
2665 " loops cannot be vectorized\n");
2666
2667 unsigned n_stmts = 0;
2668 machine_mode autodetected_vector_mode = VOIDmode;
2669 opt_loop_vec_info first_loop_vinfo = opt_loop_vec_info::success (NULL);
2670 machine_mode next_vector_mode = VOIDmode;
2671 poly_uint64 lowest_th = 0;
2672 unsigned vectorized_loops = 0;
2673 bool pick_lowest_cost_p = ((autovec_flags & VECT_COMPARE_COSTS)
2674 && !unlimited_cost_model (loop));
2675
2676 bool vect_epilogues = false;
2677 opt_result res = opt_result::success ();
2678 unsigned HOST_WIDE_INT simdlen = loop->simdlen;
2679 while (1)
2680 {
2681 /* Check the CFG characteristics of the loop (nesting, entry/exit). */
2682 opt_loop_vec_info loop_vinfo = vect_analyze_loop_form (loop, shared);
2683 if (!loop_vinfo)
2684 {
2685 if (dump_enabled_p ())
2686 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2687 "bad loop form.\n");
2688 gcc_checking_assert (first_loop_vinfo == NULL);
2689 return loop_vinfo;
2690 }
2691 loop_vinfo->vector_mode = next_vector_mode;
2692
2693 bool fatal = false;
2694
2695 /* When pick_lowest_cost_p is true, we should in principle iterate
2696 over all the loop_vec_infos that LOOP_VINFO could replace and
2697 try to vectorize LOOP_VINFO under the same conditions.
2698 E.g. when trying to replace an epilogue loop, we should vectorize
2699 LOOP_VINFO as an epilogue loop with the same VF limit. When trying
2700 to replace the main loop, we should vectorize LOOP_VINFO as a main
2701 loop too.
2702
2703 However, autovectorize_vector_modes is usually sorted as follows:
2704
2705 - Modes that naturally produce lower VFs usually follow modes that
2706 naturally produce higher VFs.
2707
2708 - When modes naturally produce the same VF, maskable modes
2709 usually follow unmaskable ones, so that the maskable mode
2710 can be used to vectorize the epilogue of the unmaskable mode.
2711
2712 This order is preferred because it leads to the maximum
2713 epilogue vectorization opportunities. Targets should only use
2714 a different order if they want to make wide modes available while
2715 disparaging them relative to earlier, smaller modes. The assumption
2716 in that case is that the wider modes are more expensive in some
2717 way that isn't reflected directly in the costs.
2718
2719 There should therefore be few interesting cases in which
2720 LOOP_VINFO fails when treated as an epilogue loop, succeeds when
2721 treated as a standalone loop, and ends up being genuinely cheaper
2722 than FIRST_LOOP_VINFO. */
2723 if (vect_epilogues)
2724 LOOP_VINFO_ORIG_LOOP_INFO (loop_vinfo) = first_loop_vinfo;
2725
2726 res = vect_analyze_loop_2 (loop_vinfo, fatal, &n_stmts);
2727 if (mode_i == 0)
2728 autodetected_vector_mode = loop_vinfo->vector_mode;
2729 if (dump_enabled_p ())
2730 {
2731 if (res)
2732 dump_printf_loc (MSG_NOTE, vect_location,
2733 "***** Analysis succeeded with vector mode %s\n",
2734 GET_MODE_NAME (loop_vinfo->vector_mode));
2735 else
2736 dump_printf_loc (MSG_NOTE, vect_location,
2737 "***** Analysis failed with vector mode %s\n",
2738 GET_MODE_NAME (loop_vinfo->vector_mode));
2739 }
2740
2741 loop->aux = NULL;
2742
2743 if (!fatal)
2744 while (mode_i < vector_modes.length ()
2745 && vect_chooses_same_modes_p (loop_vinfo, vector_modes[mode_i]))
2746 {
2747 if (dump_enabled_p ())
2748 dump_printf_loc (MSG_NOTE, vect_location,
2749 "***** The result for vector mode %s would"
2750 " be the same\n",
2751 GET_MODE_NAME (vector_modes[mode_i]));
2752 mode_i += 1;
2753 }
2754
2755 if (res)
2756 {
2757 LOOP_VINFO_VECTORIZABLE_P (loop_vinfo) = 1;
2758 vectorized_loops++;
2759
2760 /* Once we hit the desired simdlen for the first time,
2761 discard any previous attempts. */
2762 if (simdlen
2763 && known_eq (LOOP_VINFO_VECT_FACTOR (loop_vinfo), simdlen))
2764 {
2765 delete first_loop_vinfo;
2766 first_loop_vinfo = opt_loop_vec_info::success (NULL);
2767 LOOP_VINFO_ORIG_LOOP_INFO (loop_vinfo) = NULL;
2768 simdlen = 0;
2769 }
2770 else if (pick_lowest_cost_p && first_loop_vinfo)
2771 {
2772 /* Keep trying to roll back vectorization attempts while the
2773 loop_vec_infos they produced were worse than this one. */
2774 vec<loop_vec_info> &vinfos = first_loop_vinfo->epilogue_vinfos;
2775 while (!vinfos.is_empty ()
2776 && vect_joust_loop_vinfos (loop_vinfo, vinfos.last ()))
2777 {
2778 gcc_assert (vect_epilogues);
2779 delete vinfos.pop ();
2780 }
2781 if (vinfos.is_empty ()
2782 && vect_joust_loop_vinfos (loop_vinfo, first_loop_vinfo))
2783 {
2784 delete first_loop_vinfo;
2785 first_loop_vinfo = opt_loop_vec_info::success (NULL);
2786 LOOP_VINFO_ORIG_LOOP_INFO (loop_vinfo) = NULL;
2787 }
2788 }
2789
2790 if (first_loop_vinfo == NULL)
2791 {
2792 first_loop_vinfo = loop_vinfo;
2793 lowest_th = LOOP_VINFO_VERSIONING_THRESHOLD (first_loop_vinfo);
2794 }
2795 else if (vect_epilogues
2796 /* For now only allow one epilogue loop. */
2797 && first_loop_vinfo->epilogue_vinfos.is_empty ())
2798 {
2799 first_loop_vinfo->epilogue_vinfos.safe_push (loop_vinfo);
2800 poly_uint64 th = LOOP_VINFO_VERSIONING_THRESHOLD (loop_vinfo);
2801 gcc_assert (!LOOP_REQUIRES_VERSIONING (loop_vinfo)
2802 || maybe_ne (lowest_th, 0U));
2803 /* Keep track of the known smallest versioning
2804 threshold. */
2805 if (ordered_p (lowest_th, th))
2806 lowest_th = ordered_min (lowest_th, th);
2807 }
2808 else
2809 {
2810 delete loop_vinfo;
2811 loop_vinfo = opt_loop_vec_info::success (NULL);
2812 }
2813
2814 /* Only vectorize epilogues if PARAM_VECT_EPILOGUES_NOMASK is
2815 enabled, SIMDUID is not set, it is the innermost loop and we have
2816 either already found the loop's SIMDLEN or there was no SIMDLEN to
2817 begin with.
2818 TODO: Enable epilogue vectorization for loops with SIMDUID set. */
2819 vect_epilogues = (!simdlen
2820 && loop->inner == NULL
2821 && param_vect_epilogues_nomask
2822 && LOOP_VINFO_PEELING_FOR_NITER (first_loop_vinfo)
2823 && !loop->simduid
2824 /* For now only allow one epilogue loop, but allow
2825 pick_lowest_cost_p to replace it. */
2826 && (first_loop_vinfo->epilogue_vinfos.is_empty ()
2827 || pick_lowest_cost_p));
2828
2829 /* Commit to first_loop_vinfo if we have no reason to try
2830 alternatives. */
2831 if (!simdlen && !vect_epilogues && !pick_lowest_cost_p)
2832 break;
2833 }
2834 else
2835 {
2836 delete loop_vinfo;
2837 loop_vinfo = opt_loop_vec_info::success (NULL);
2838 if (fatal)
2839 {
2840 gcc_checking_assert (first_loop_vinfo == NULL);
2841 break;
2842 }
2843 }
2844
2845 /* Handle the case in which the original loop can use partial
2846 vectorization, but we only want to adopt it for the epilogue.
2847 The retry should use the same vector mode as the original. */
2848 if (vect_epilogues
2849 && loop_vinfo
2850 && LOOP_VINFO_EPIL_USING_PARTIAL_VECTORS_P (loop_vinfo))
2851 {
2852 gcc_assert (LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (loop_vinfo)
2853 && !LOOP_VINFO_USING_PARTIAL_VECTORS_P (loop_vinfo));
2854 if (dump_enabled_p ())
2855 dump_printf_loc (MSG_NOTE, vect_location,
2856 "***** Re-trying analysis with same vector mode"
2857 " %s for epilogue with partial vectors.\n",
2858 GET_MODE_NAME (loop_vinfo->vector_mode));
2859 continue;
2860 }
2861
2862 if (mode_i < vector_modes.length ()
2863 && VECTOR_MODE_P (autodetected_vector_mode)
2864 && (related_vector_mode (vector_modes[mode_i],
2865 GET_MODE_INNER (autodetected_vector_mode))
2866 == autodetected_vector_mode)
2867 && (related_vector_mode (autodetected_vector_mode,
2868 GET_MODE_INNER (vector_modes[mode_i]))
2869 == vector_modes[mode_i]))
2870 {
2871 if (dump_enabled_p ())
2872 dump_printf_loc (MSG_NOTE, vect_location,
2873 "***** Skipping vector mode %s, which would"
2874 " repeat the analysis for %s\n",
2875 GET_MODE_NAME (vector_modes[mode_i]),
2876 GET_MODE_NAME (autodetected_vector_mode));
2877 mode_i += 1;
2878 }
2879
2880 if (mode_i == vector_modes.length ()
2881 || autodetected_vector_mode == VOIDmode)
2882 break;
2883
2884 /* Try the next vector mode in the list. */
2885 next_vector_mode = vector_modes[mode_i++];
2886 if (dump_enabled_p ())
2887 dump_printf_loc (MSG_NOTE, vect_location,
2888 "***** Re-trying analysis with vector mode %s\n",
2889 GET_MODE_NAME (next_vector_mode));
2890 }
2891
2892 if (first_loop_vinfo)
2893 {
2894 loop->aux = (loop_vec_info) first_loop_vinfo;
2895 if (dump_enabled_p ())
2896 dump_printf_loc (MSG_NOTE, vect_location,
2897 "***** Choosing vector mode %s\n",
2898 GET_MODE_NAME (first_loop_vinfo->vector_mode));
2899 LOOP_VINFO_VERSIONING_THRESHOLD (first_loop_vinfo) = lowest_th;
2900 return first_loop_vinfo;
2901 }
2902
2903 return opt_loop_vec_info::propagate_failure (res);
2904 }
2905
2906 /* Return true if there is an in-order reduction function for CODE, storing
2907 it in *REDUC_FN if so. */
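/* For PLUS_EXPR this is IFN_FOLD_LEFT_PLUS, which adds the vector
   elements to the scalar accumulator strictly from left to right and
   so preserves the scalar evaluation order of non-associative
   (e.g. floating-point) additions.  */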
2908
2909 static bool
2910 fold_left_reduction_fn (tree_code code, internal_fn *reduc_fn)
2911 {
2912 switch (code)
2913 {
2914 case PLUS_EXPR:
2915 *reduc_fn = IFN_FOLD_LEFT_PLUS;
2916 return true;
2917
2918 default:
2919 return false;
2920 }
2921 }
2922
2923 /* Function reduction_fn_for_scalar_code
2924
2925 Input:
2926 CODE - tree_code of a reduction operation.
2927
2928 Output:
2929 REDUC_FN - the corresponding internal function to be used to reduce the
2930 vector of partial results into a single scalar result, or IFN_LAST
2931 if the operation is a supported reduction operation, but does not have
2932 such an internal function.
2933
2934 Return FALSE if CODE currently cannot be vectorized as a reduction. */
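/* For example, MULT_EXPR and MINUS_EXPR are accepted as reductions but
   have no dedicated reduction internal function, so *REDUC_FN is set
   to IFN_LAST and the final combination of the partial results has to
   be open-coded.  */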
2935
2936 static bool
2937 reduction_fn_for_scalar_code (enum tree_code code, internal_fn *reduc_fn)
2938 {
2939 switch (code)
2940 {
2941 case MAX_EXPR:
2942 *reduc_fn = IFN_REDUC_MAX;
2943 return true;
2944
2945 case MIN_EXPR:
2946 *reduc_fn = IFN_REDUC_MIN;
2947 return true;
2948
2949 case PLUS_EXPR:
2950 *reduc_fn = IFN_REDUC_PLUS;
2951 return true;
2952
2953 case BIT_AND_EXPR:
2954 *reduc_fn = IFN_REDUC_AND;
2955 return true;
2956
2957 case BIT_IOR_EXPR:
2958 *reduc_fn = IFN_REDUC_IOR;
2959 return true;
2960
2961 case BIT_XOR_EXPR:
2962 *reduc_fn = IFN_REDUC_XOR;
2963 return true;
2964
2965 case MULT_EXPR:
2966 case MINUS_EXPR:
2967 *reduc_fn = IFN_LAST;
2968 return true;
2969
2970 default:
2971 return false;
2972 }
2973 }
2974
2975 /* If there is a neutral value X such that SLP reduction NODE would not
2976 be affected by the introduction of additional X elements, return that X,
2977 otherwise return null. CODE is the code of the reduction and VECTOR_TYPE
2978 is the vector type that would hold element X. REDUC_CHAIN is true if
2979 the SLP statements perform a single reduction, false if each statement
2980 performs an independent reduction. */
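/* For example, extra zero elements leave a PLUS_EXPR (or BIT_XOR_EXPR)
   reduction unchanged, extra one elements leave a MULT_EXPR reduction
   unchanged, and extra all-ones elements leave a BIT_AND_EXPR reduction
   unchanged, so those are the neutral values returned below.  */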
2981
2982 static tree
2983 neutral_op_for_slp_reduction (slp_tree slp_node, tree vector_type,
2984 tree_code code, bool reduc_chain)
2985 {
2986 vec<stmt_vec_info> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
2987 stmt_vec_info stmt_vinfo = stmts[0];
2988 tree scalar_type = TREE_TYPE (vector_type);
2989 class loop *loop = gimple_bb (stmt_vinfo->stmt)->loop_father;
2990 gcc_assert (loop);
2991
2992 switch (code)
2993 {
2994 case WIDEN_SUM_EXPR:
2995 case DOT_PROD_EXPR:
2996 case SAD_EXPR:
2997 case PLUS_EXPR:
2998 case MINUS_EXPR:
2999 case BIT_IOR_EXPR:
3000 case BIT_XOR_EXPR:
3001 return build_zero_cst (scalar_type);
3002
3003 case MULT_EXPR:
3004 return build_one_cst (scalar_type);
3005
3006 case BIT_AND_EXPR:
3007 return build_all_ones_cst (scalar_type);
3008
3009 case MAX_EXPR:
3010 case MIN_EXPR:
3011 /* For MIN/MAX the initial values are neutral. A reduction chain
3012 has only a single initial value, so that value is neutral for
3013 all statements. */
3014 if (reduc_chain)
3015 return PHI_ARG_DEF_FROM_EDGE (stmt_vinfo->stmt,
3016 loop_preheader_edge (loop));
3017 return NULL_TREE;
3018
3019 default:
3020 return NULL_TREE;
3021 }
3022 }
3023
3024 /* Error reporting helper for vect_is_simple_reduction below. GIMPLE statement
3025 STMT is printed with a message MSG. */
3026
3027 static void
3028 report_vect_op (dump_flags_t msg_type, gimple *stmt, const char *msg)
3029 {
3030 dump_printf_loc (msg_type, vect_location, "%s%G", msg, stmt);
3031 }
3032
3033 /* Return true if we need an in-order reduction for operation CODE
3034 on type TYPE, i.e. whether reassociating the operation could change
3035 the result or introduce intermediate overflow. */
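/* For example, a float accumulation such as "s += a[i]" generally needs
   an in-order (fold-left) reduction unless -fassociative-math is in
   effect, whereas float MIN_EXPR/MAX_EXPR reductions and reductions on
   wrapping integer types can be reassociated freely.  */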
3036
3037 bool
3038 needs_fold_left_reduction_p (tree type, tree_code code)
3039 {
3040 /* CHECKME: check for !flag_finite_math_only too? */
3041 if (SCALAR_FLOAT_TYPE_P (type))
3042 switch (code)
3043 {
3044 case MIN_EXPR:
3045 case MAX_EXPR:
3046 return false;
3047
3048 default:
3049 return !flag_associative_math;
3050 }
3051
3052 if (INTEGRAL_TYPE_P (type))
3053 {
3054 if (!operation_no_trapping_overflow (type, code))
3055 return true;
3056 return false;
3057 }
3058
3059 if (SAT_FIXED_POINT_TYPE_P (type))
3060 return true;
3061
3062 return false;
3063 }
3064
3065 /* Return true if the reduction PHI in LOOP with latch arg LOOP_ARG
3066 has a handled computation expression. Store the main reduction
3067 operation in *CODE. */
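/* As an illustration, for

     a1 = PHI <a0(preheader), a4(latch)>
     a3 = a1 + b;
     a4 = a3 + c;

   with LOOP_ARG a4 the recorded path consists of the PHI use of a4,
   the use of a3 in the definition of a4 and the use of a1 in the
   definition of a3; both statements use PLUS_EXPR, so *CODE is set to
   PLUS_EXPR.  */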
3068
3069 static bool
3070 check_reduction_path (dump_user_location_t loc, loop_p loop, gphi *phi,
3071 tree loop_arg, enum tree_code *code,
3072 vec<std::pair<ssa_op_iter, use_operand_p> > &path)
3073 {
3074 auto_bitmap visited;
3075 tree lookfor = PHI_RESULT (phi);
3076 ssa_op_iter curri;
3077 use_operand_p curr = op_iter_init_phiuse (&curri, phi, SSA_OP_USE);
3078 while (USE_FROM_PTR (curr) != loop_arg)
3079 curr = op_iter_next_use (&curri);
3080 curri.i = curri.numops;
3081 do
3082 {
3083 path.safe_push (std::make_pair (curri, curr));
3084 tree use = USE_FROM_PTR (curr);
3085 if (use == lookfor)
3086 break;
3087 gimple *def = SSA_NAME_DEF_STMT (use);
3088 if (gimple_nop_p (def)
3089 || ! flow_bb_inside_loop_p (loop, gimple_bb (def)))
3090 {
3091 pop:
3092 do
3093 {
3094 std::pair<ssa_op_iter, use_operand_p> x = path.pop ();
3095 curri = x.first;
3096 curr = x.second;
3097 do
3098 curr = op_iter_next_use (&curri);
3099 /* Skip already visited or non-SSA operands (from iterating
3100 over PHI args). */
3101 while (curr != NULL_USE_OPERAND_P
3102 && (TREE_CODE (USE_FROM_PTR (curr)) != SSA_NAME
3103 || ! bitmap_set_bit (visited,
3104 SSA_NAME_VERSION
3105 (USE_FROM_PTR (curr)))));
3106 }
3107 while (curr == NULL_USE_OPERAND_P && ! path.is_empty ());
3108 if (curr == NULL_USE_OPERAND_P)
3109 break;
3110 }
3111 else
3112 {
3113 if (gimple_code (def) == GIMPLE_PHI)
3114 curr = op_iter_init_phiuse (&curri, as_a <gphi *>(def), SSA_OP_USE);
3115 else
3116 curr = op_iter_init_use (&curri, def, SSA_OP_USE);
3117 while (curr != NULL_USE_OPERAND_P
3118 && (TREE_CODE (USE_FROM_PTR (curr)) != SSA_NAME
3119 || ! bitmap_set_bit (visited,
3120 SSA_NAME_VERSION
3121 (USE_FROM_PTR (curr)))))
3122 curr = op_iter_next_use (&curri);
3123 if (curr == NULL_USE_OPERAND_P)
3124 goto pop;
3125 }
3126 }
3127 while (1);
3128 if (dump_file && (dump_flags & TDF_DETAILS))
3129 {
3130 dump_printf_loc (MSG_NOTE, loc, "reduction path: ");
3131 unsigned i;
3132 std::pair<ssa_op_iter, use_operand_p> *x;
3133 FOR_EACH_VEC_ELT (path, i, x)
3134 dump_printf (MSG_NOTE, "%T ", USE_FROM_PTR (x->second));
3135 dump_printf (MSG_NOTE, "\n");
3136 }
3137
3138 /* Check whether the reduction path detected is valid. */
3139 bool fail = path.length () == 0;
3140 bool neg = false;
3141 int sign = -1;
3142 *code = ERROR_MARK;
3143 for (unsigned i = 1; i < path.length (); ++i)
3144 {
3145 gimple *use_stmt = USE_STMT (path[i].second);
3146 tree op = USE_FROM_PTR (path[i].second);
3147 if (! is_gimple_assign (use_stmt)
3148 /* The following makes sure we can compute the operand index
3149 easily; it also mostly disallows chaining via COND_EXPR
3150 condition operands. */
3151 || (gimple_assign_rhs1_ptr (use_stmt) != path[i].second->use
3152 && (gimple_num_ops (use_stmt) <= 2
3153 || gimple_assign_rhs2_ptr (use_stmt) != path[i].second->use)
3154 && (gimple_num_ops (use_stmt) <= 3
3155 || gimple_assign_rhs3_ptr (use_stmt) != path[i].second->use)))
3156 {
3157 fail = true;
3158 break;
3159 }
3160 /* Check that the op is used in only a single stmt inside
3161 the loop. */
3162 imm_use_iterator imm_iter;
3163 gimple *op_use_stmt;
3164 unsigned cnt = 0;
3165 FOR_EACH_IMM_USE_STMT (op_use_stmt, imm_iter, op)
3166 if (!is_gimple_debug (op_use_stmt)
3167 && flow_bb_inside_loop_p (loop, gimple_bb (op_use_stmt)))
3168 {
3169 /* We want to allow x + x but not x < 1 ? x : 2. */
3170 if (is_gimple_assign (op_use_stmt)
3171 && gimple_assign_rhs_code (op_use_stmt) == COND_EXPR)
3172 {
3173 use_operand_p use_p;
3174 FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
3175 cnt++;
3176 }
3177 else
3178 cnt++;
3179 }
3180 if (cnt != 1)
3181 {
3182 fail = true;
3183 break;
3184 }
3185 tree_code use_code = gimple_assign_rhs_code (use_stmt);
3186 if (use_code == MINUS_EXPR)
3187 {
3188 use_code = PLUS_EXPR;
3189 /* Track whether we negate the reduction value each iteration. */
3190 if (gimple_assign_rhs2 (use_stmt) == op)
3191 neg = ! neg;
3192 }
3193 if (CONVERT_EXPR_CODE_P (use_code)
3194 && tree_nop_conversion_p (TREE_TYPE (gimple_assign_lhs (use_stmt)),
3195 TREE_TYPE (gimple_assign_rhs1 (use_stmt))))
3196 ;
3197 else if (*code == ERROR_MARK)
3198 {
3199 *code = use_code;
3200 sign = TYPE_SIGN (TREE_TYPE (gimple_assign_lhs (use_stmt)));
3201 }
3202 else if (use_code != *code)
3203 {
3204 fail = true;
3205 break;
3206 }
3207 else if ((use_code == MIN_EXPR
3208 || use_code == MAX_EXPR)
3209 && sign != TYPE_SIGN (TREE_TYPE (gimple_assign_lhs (use_stmt))))
3210 {
3211 fail = true;
3212 break;
3213 }
3214 }
3215 return ! fail && ! neg && *code != ERROR_MARK;
3216 }
3217
3218 bool
3219 check_reduction_path (dump_user_location_t loc, loop_p loop, gphi *phi,
3220 tree loop_arg, enum tree_code code)
3221 {
3222 auto_vec<std::pair<ssa_op_iter, use_operand_p> > path;
3223 enum tree_code code_;
3224 return (check_reduction_path (loc, loop, phi, loop_arg, &code_, path)
3225 && code_ == code);
3226 }
3227
3228
3229
3230 /* Function vect_is_simple_reduction
3231
3232 (1) Detect a cross-iteration def-use cycle that represents a simple
3233 reduction computation. We look for the following pattern:
3234
3235 loop_header:
3236 a1 = phi < a0, a2 >
3237 a3 = ...
3238 a2 = operation (a3, a1)
3239
3240 or
3241
3242 a3 = ...
3243 loop_header:
3244 a1 = phi < a0, a2 >
3245 a2 = operation (a3, a1)
3246
3247 such that:
3248 1. operation is commutative and associative and it is safe to
3249 change the order of the computation
3250 2. no uses for a2 in the loop (a2 is used out of the loop)
3251 3. no uses of a1 in the loop besides the reduction operation
3252 4. no uses of a1 outside the loop.
3253
3254 Conditions 1,4 are tested here.
3255 Conditions 2,3 are tested in vect_mark_stmts_to_be_vectorized.
3256
3257 (2) Detect a cross-iteration def-use cycle in nested loops, i.e.,
3258 nested cycles.
3259
3260 (3) Detect cycles of phi nodes in outer-loop vectorization, i.e., double
3261 reductions:
3262
3263 a1 = phi < a0, a2 >
3264 inner loop (def of a3)
3265 a2 = phi < a3 >
3266
3267 (4) Detect condition expressions, ie:
3268 for (int i = 0; i < N; i++)
3269 if (a[i] < val)
3270 ret_val = a[i];
3271
3272 */
3273
3274 static stmt_vec_info
3275 vect_is_simple_reduction (loop_vec_info loop_info, stmt_vec_info phi_info,
3276 bool *double_reduc, bool *reduc_chain_p)
3277 {
3278 gphi *phi = as_a <gphi *> (phi_info->stmt);
3279 gimple *phi_use_stmt = NULL;
3280 imm_use_iterator imm_iter;
3281 use_operand_p use_p;
3282
3283 *double_reduc = false;
3284 *reduc_chain_p = false;
3285 STMT_VINFO_REDUC_TYPE (phi_info) = TREE_CODE_REDUCTION;
3286
3287 tree phi_name = PHI_RESULT (phi);
3288 /* ??? If there are no uses of the PHI result the inner loop reduction
3289 won't be detected as possibly double-reduction by vectorizable_reduction
3290 because that tries to walk the PHI arg from the preheader edge which
3291 can be constant. See PR60382. */
3292 if (has_zero_uses (phi_name))
3293 return NULL;
3294 class loop *loop = (gimple_bb (phi))->loop_father;
3295 unsigned nphi_def_loop_uses = 0;
3296 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, phi_name)
3297 {
3298 gimple *use_stmt = USE_STMT (use_p);
3299 if (is_gimple_debug (use_stmt))
3300 continue;
3301
3302 if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
3303 {
3304 if (dump_enabled_p ())
3305 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3306 "intermediate value used outside loop.\n");
3307
3308 return NULL;
3309 }
3310
3311 nphi_def_loop_uses++;
3312 phi_use_stmt = use_stmt;
3313 }
3314
3315 tree latch_def = PHI_ARG_DEF_FROM_EDGE (phi, loop_latch_edge (loop));
3316 if (TREE_CODE (latch_def) != SSA_NAME)
3317 {
3318 if (dump_enabled_p ())
3319 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3320 "reduction: not ssa_name: %T\n", latch_def);
3321 return NULL;
3322 }
3323
3324 stmt_vec_info def_stmt_info = loop_info->lookup_def (latch_def);
3325 if (!def_stmt_info
3326 || !flow_bb_inside_loop_p (loop, gimple_bb (def_stmt_info->stmt)))
3327 return NULL;
3328
3329 bool nested_in_vect_loop
3330 = flow_loop_nested_p (LOOP_VINFO_LOOP (loop_info), loop);
3331 unsigned nlatch_def_loop_uses = 0;
3332 auto_vec<gphi *, 3> lcphis;
3333 bool inner_loop_of_double_reduc = false;
3334 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, latch_def)
3335 {
3336 gimple *use_stmt = USE_STMT (use_p);
3337 if (is_gimple_debug (use_stmt))
3338 continue;
3339 if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
3340 nlatch_def_loop_uses++;
3341 else
3342 {
3343 /* We can have more than one loop-closed PHI. */
3344 lcphis.safe_push (as_a <gphi *> (use_stmt));
3345 if (nested_in_vect_loop
3346 && (STMT_VINFO_DEF_TYPE (loop_info->lookup_stmt (use_stmt))
3347 == vect_double_reduction_def))
3348 inner_loop_of_double_reduc = true;
3349 }
3350 }
3351
3352 /* If we are vectorizing an inner reduction, we execute it in the
3353 original order only when we are not dealing with a double
3354 reduction. */
3355 if (nested_in_vect_loop && !inner_loop_of_double_reduc)
3356 {
3357 if (dump_enabled_p ())
3358 report_vect_op (MSG_NOTE, def_stmt_info->stmt,
3359 "detected nested cycle: ");
3360 return def_stmt_info;
3361 }
3362
3363 /* If this isn't a nested cycle or if the nested cycle reduction value
3364 is used outside of the inner loop we cannot handle uses of the reduction
3365 value. */
3366 if (nlatch_def_loop_uses > 1 || nphi_def_loop_uses > 1)
3367 {
3368 if (dump_enabled_p ())
3369 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3370 "reduction used in loop.\n");
3371 return NULL;
3372 }
3373
3374 /* If DEF_STMT is a phi node itself, we expect it to have a single argument
3375 defined in the inner loop. */
3376 if (gphi *def_stmt = dyn_cast <gphi *> (def_stmt_info->stmt))
3377 {
3378 tree op1 = PHI_ARG_DEF (def_stmt, 0);
3379 if (gimple_phi_num_args (def_stmt) != 1
3380 || TREE_CODE (op1) != SSA_NAME)
3381 {
3382 if (dump_enabled_p ())
3383 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3384 "unsupported phi node definition.\n");
3385
3386 return NULL;
3387 }
3388
3389 gimple *def1 = SSA_NAME_DEF_STMT (op1);
3390 if (gimple_bb (def1)
3391 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
3392 && loop->inner
3393 && flow_bb_inside_loop_p (loop->inner, gimple_bb (def1))
3394 && is_gimple_assign (def1)
3395 && is_a <gphi *> (phi_use_stmt)
3396 && flow_bb_inside_loop_p (loop->inner, gimple_bb (phi_use_stmt)))
3397 {
3398 if (dump_enabled_p ())
3399 report_vect_op (MSG_NOTE, def_stmt,
3400 "detected double reduction: ");
3401
3402 *double_reduc = true;
3403 return def_stmt_info;
3404 }
3405
3406 return NULL;
3407 }
3408
3409 /* Look for the expression computing latch_def from the loop PHI result. */
3410 auto_vec<std::pair<ssa_op_iter, use_operand_p> > path;
3411 enum tree_code code;
3412 if (check_reduction_path (vect_location, loop, phi, latch_def, &code,
3413 path))
3414 {
3415 STMT_VINFO_REDUC_CODE (phi_info) = code;
3416 if (code == COND_EXPR && !nested_in_vect_loop)
3417 STMT_VINFO_REDUC_TYPE (phi_info) = COND_REDUCTION;
3418
3419 /* Fill in STMT_VINFO_REDUC_IDX and gather stmts for an SLP
3420 reduction chain for which the additional restriction is that
3421 all operations in the chain are the same. */
3422 auto_vec<stmt_vec_info, 8> reduc_chain;
3423 unsigned i;
3424 bool is_slp_reduc = !nested_in_vect_loop && code != COND_EXPR;
3425 for (i = path.length () - 1; i >= 1; --i)
3426 {
3427 gimple *stmt = USE_STMT (path[i].second);
3428 stmt_vec_info stmt_info = loop_info->lookup_stmt (stmt);
3429 STMT_VINFO_REDUC_IDX (stmt_info)
3430 = path[i].second->use - gimple_assign_rhs1_ptr (stmt);
3431 enum tree_code stmt_code = gimple_assign_rhs_code (stmt);
3432 bool leading_conversion = (CONVERT_EXPR_CODE_P (stmt_code)
3433 && (i == 1 || i == path.length () - 1));
3434 if ((stmt_code != code && !leading_conversion)
3435 /* We can only handle the final value in epilogue
3436 generation for reduction chains. */
3437 || (i != 1 && !has_single_use (gimple_assign_lhs (stmt))))
3438 is_slp_reduc = false;
3439 /* For reduction chains we support trailing/leading
3440 conversions. We do not store those in the actual chain. */
3441 if (leading_conversion)
3442 continue;
3443 reduc_chain.safe_push (stmt_info);
3444 }
3445 if (is_slp_reduc && reduc_chain.length () > 1)
3446 {
3447 for (unsigned i = 0; i < reduc_chain.length () - 1; ++i)
3448 {
3449 REDUC_GROUP_FIRST_ELEMENT (reduc_chain[i]) = reduc_chain[0];
3450 REDUC_GROUP_NEXT_ELEMENT (reduc_chain[i]) = reduc_chain[i+1];
3451 }
3452 REDUC_GROUP_FIRST_ELEMENT (reduc_chain.last ()) = reduc_chain[0];
3453 REDUC_GROUP_NEXT_ELEMENT (reduc_chain.last ()) = NULL;
3454
3455 /* Save the chain for further analysis in SLP detection. */
3456 LOOP_VINFO_REDUCTION_CHAINS (loop_info).safe_push (reduc_chain[0]);
3457 REDUC_GROUP_SIZE (reduc_chain[0]) = reduc_chain.length ();
3458
3459 *reduc_chain_p = true;
3460 if (dump_enabled_p ())
3461 dump_printf_loc (MSG_NOTE, vect_location,
3462 "reduction: detected reduction chain\n");
3463 }
3464 else if (dump_enabled_p ())
3465 dump_printf_loc (MSG_NOTE, vect_location,
3466 "reduction: detected reduction\n");
3467
3468 return def_stmt_info;
3469 }
3470
3471 if (dump_enabled_p ())
3472 dump_printf_loc (MSG_NOTE, vect_location,
3473 "reduction: unknown pattern\n");
3474
3475 return NULL;
3476 }
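/* For reference, a reduction chain in the sense used above corresponds to
   source code (hypothetical example) like

     for (i = 0; i < n; i++)
       {
         sum += a[i];
         sum += b[i];
         sum += c[i];
       }

   i.e. the reduction variable is updated several times per iteration,
   with every statement in the chain using the same operation code.  */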
3477
3478 /* Estimate the number of peeled epilogue iterations for LOOP_VINFO.
3479 PEEL_ITERS_PROLOGUE is the number of peeled prologue iterations,
3480 or -1 if not known. */
3481
3482 static int
3483 vect_get_peel_iters_epilogue (loop_vec_info loop_vinfo, int peel_iters_prologue)
3484 {
3485 int assumed_vf = vect_vf_for_cost (loop_vinfo);
3486 if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo) || peel_iters_prologue == -1)
3487 {
3488 if (dump_enabled_p ())
3489 dump_printf_loc (MSG_NOTE, vect_location,
3490 "cost model: epilogue peel iters set to vf/2 "
3491 "because loop iterations are unknown .\n");
3492 return assumed_vf / 2;
3493 }
3494 else
3495 {
3496 int niters = LOOP_VINFO_INT_NITERS (loop_vinfo);
3497 peel_iters_prologue = MIN (niters, peel_iters_prologue);
3498 int peel_iters_epilogue = (niters - peel_iters_prologue) % assumed_vf;
3499 /* If we need to peel for gaps but no epilogue peeling would otherwise
3500 be required, we have to peel VF iterations. */
3501 if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) && !peel_iters_epilogue)
3502 peel_iters_epilogue = assumed_vf;
3503 return peel_iters_epilogue;
3504 }
3505 }
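/* For example, with hypothetical values niters = 103, assumed_vf = 8 and
   peel_iters_prologue = 3, the function above returns (103 - 3) % 8 = 4
   epilogue iterations; if peeling for gaps is required and the remainder
   happens to be 0, a full 8 iterations are peeled instead.  */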
3506
3507 /* Calculate cost of peeling the loop PEEL_ITERS_PROLOGUE times. */
3508 int
3509 vect_get_known_peeling_cost (loop_vec_info loop_vinfo, int peel_iters_prologue,
3510 int *peel_iters_epilogue,
3511 stmt_vector_for_cost *scalar_cost_vec,
3512 stmt_vector_for_cost *prologue_cost_vec,
3513 stmt_vector_for_cost *epilogue_cost_vec)
3514 {
3515 int retval = 0;
3516
3517 *peel_iters_epilogue
3518 = vect_get_peel_iters_epilogue (loop_vinfo, peel_iters_prologue);
3519
3520 if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
3521 {
3522 /* If peeled iterations are known but number of scalar loop
3523 iterations are unknown, count a taken branch per peeled loop. */
3524 if (peel_iters_prologue > 0)
3525 retval = record_stmt_cost (prologue_cost_vec, 1, cond_branch_taken,
3526 NULL, NULL_TREE, 0, vect_prologue);
3527 if (*peel_iters_epilogue > 0)
3528 retval += record_stmt_cost (epilogue_cost_vec, 1, cond_branch_taken,
3529 NULL, NULL_TREE, 0, vect_epilogue);
3530 }
3531
3532 stmt_info_for_cost *si;
3533 int j;
3534 if (peel_iters_prologue)
3535 FOR_EACH_VEC_ELT (*scalar_cost_vec, j, si)
3536 retval += record_stmt_cost (prologue_cost_vec,
3537 si->count * peel_iters_prologue,
3538 si->kind, si->stmt_info, si->misalign,
3539 vect_prologue);
3540 if (*peel_iters_epilogue)
3541 FOR_EACH_VEC_ELT (*scalar_cost_vec, j, si)
3542 retval += record_stmt_cost (epilogue_cost_vec,
3543 si->count * *peel_iters_epilogue,
3544 si->kind, si->stmt_info, si->misalign,
3545 vect_epilogue);
3546
3547 return retval;
3548 }
3549
3550 /* Function vect_estimate_min_profitable_iters
3551
3552 Return the number of iterations required for the vector version of the
3553 loop to be profitable relative to the cost of the scalar version of the
3554 loop.
3555
3556 *RET_MIN_PROFITABLE_NITERS is a cost model profitability threshold
3557 of iterations for vectorization. -1 value means loop vectorization
3558 is not profitable. This returned value may be used for dynamic
3559 profitability check.
3560
3561 *RET_MIN_PROFITABLE_ESTIMATE is a profitability threshold to be used
3562 for static check against estimated number of iterations. */
3563
3564 static void
3565 vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo,
3566 int *ret_min_profitable_niters,
3567 int *ret_min_profitable_estimate)
3568 {
3569 int min_profitable_iters;
3570 int min_profitable_estimate;
3571 int peel_iters_prologue;
3572 int peel_iters_epilogue;
3573 unsigned vec_inside_cost = 0;
3574 int vec_outside_cost = 0;
3575 unsigned vec_prologue_cost = 0;
3576 unsigned vec_epilogue_cost = 0;
3577 int scalar_single_iter_cost = 0;
3578 int scalar_outside_cost = 0;
3579 int assumed_vf = vect_vf_for_cost (loop_vinfo);
3580 int npeel = LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo);
3581 void *target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
3582
3583 /* Cost model disabled. */
3584 if (unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
3585 {
3586 if (dump_enabled_p ())
3587 dump_printf_loc (MSG_NOTE, vect_location, "cost model disabled.\n");
3588 *ret_min_profitable_niters = 0;
3589 *ret_min_profitable_estimate = 0;
3590 return;
3591 }
3592
3593 /* Requires loop versioning tests to handle misalignment. */
3594 if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo))
3595 {
3596 /* FIXME: Make cost depend on complexity of individual check. */
3597 unsigned len = LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).length ();
3598 (void) add_stmt_cost (loop_vinfo, target_cost_data, len, vector_stmt,
3599 NULL, NULL_TREE, 0, vect_prologue);
3600 if (dump_enabled_p ())
3601 dump_printf (MSG_NOTE,
3602 "cost model: Adding cost of checks for loop "
3603 "versioning to treat misalignment.\n");
3604 }
3605
3606 /* Requires loop versioning with alias checks. */
3607 if (LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
3608 {
3609 /* FIXME: Make cost depend on complexity of individual check. */
3610 unsigned len = LOOP_VINFO_COMP_ALIAS_DDRS (loop_vinfo).length ();
3611 (void) add_stmt_cost (loop_vinfo, target_cost_data, len, vector_stmt,
3612 NULL, NULL_TREE, 0, vect_prologue);
3613 len = LOOP_VINFO_CHECK_UNEQUAL_ADDRS (loop_vinfo).length ();
3614 if (len)
3615 /* Count LEN - 1 ANDs and LEN comparisons. */
3616 (void) add_stmt_cost (loop_vinfo, target_cost_data, len * 2 - 1,
3617 scalar_stmt, NULL, NULL_TREE, 0, vect_prologue);
3618 len = LOOP_VINFO_LOWER_BOUNDS (loop_vinfo).length ();
3619 if (len)
3620 {
3621 /* Count LEN - 1 ANDs and LEN comparisons. */
3622 unsigned int nstmts = len * 2 - 1;
3623 /* +1 for each bias that needs adding. */
3624 for (unsigned int i = 0; i < len; ++i)
3625 if (!LOOP_VINFO_LOWER_BOUNDS (loop_vinfo)[i].unsigned_p)
3626 nstmts += 1;
3627 (void) add_stmt_cost (loop_vinfo, target_cost_data, nstmts,
3628 scalar_stmt, NULL, NULL_TREE, 0, vect_prologue);
3629 }
3630 if (dump_enabled_p ())
3631 dump_printf (MSG_NOTE,
3632 "cost model: Adding cost of checks for loop "
3633 "versioning aliasing.\n");
3634 }
3635
3636 /* Requires loop versioning with niter checks. */
3637 if (LOOP_REQUIRES_VERSIONING_FOR_NITERS (loop_vinfo))
3638 {
3639 /* FIXME: Make cost depend on complexity of individual check. */
3640 (void) add_stmt_cost (loop_vinfo, target_cost_data, 1, vector_stmt,
3641 NULL, NULL_TREE, 0, vect_prologue);
3642 if (dump_enabled_p ())
3643 dump_printf (MSG_NOTE,
3644 "cost model: Adding cost of checks for loop "
3645 "versioning niters.\n");
3646 }
3647
3648 if (LOOP_REQUIRES_VERSIONING (loop_vinfo))
3649 (void) add_stmt_cost (loop_vinfo, target_cost_data, 1, cond_branch_taken,
3650 NULL, NULL_TREE, 0, vect_prologue);
3651
3652 /* Count statements in the scalar loop, and use this as the scalar cost
3653 of a single iteration for now.
3654
3655 TODO: Add outer loop support.
3656
3657 TODO: Consider assigning different costs to different scalar
3658 statements. */
3659
3660 scalar_single_iter_cost
3661 = LOOP_VINFO_SINGLE_SCALAR_ITERATION_COST (loop_vinfo);
3662
3663 /* Add additional cost for the peeled instructions in prologue and epilogue
3664 loop. (For fully-masked loops there will be no peeling.)
3665
3666 FORNOW: If we don't know the value of peel_iters for prologue or epilogue
3667 at compile-time - we assume it's vf/2 (the worst would be vf-1).
3668
3669 TODO: Build an expression that represents peel_iters for prologue and
3670 epilogue to be used in a run-time test. */
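/* For instance, with a hypothetical assumed_vf of 8 and no alignment
   peeling decision at compile time (npeel < 0), both peel_iters_prologue
   and peel_iters_epilogue below are assumed to be 8 / 2 = 4.  */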
3671
3672 bool prologue_need_br_taken_cost = false;
3673 bool prologue_need_br_not_taken_cost = false;
3674
3675 /* Calculate peel_iters_prologue. */
3676 if (vect_use_loop_mask_for_alignment_p (loop_vinfo))
3677 peel_iters_prologue = 0;
3678 else if (npeel < 0)
3679 {
3680 peel_iters_prologue = assumed_vf / 2;
3681 if (dump_enabled_p ())
3682 dump_printf (MSG_NOTE, "cost model: "
3683 "prologue peel iters set to vf/2.\n");
3684
3685 /* If peeled iterations are unknown, count a taken branch and a not taken
3686 branch per peeled loop. Even if scalar loop iterations are known,
3687 vector iterations are not known since peeled prologue iterations are
3688 not known. Hence guards remain the same. */
3689 prologue_need_br_taken_cost = true;
3690 prologue_need_br_not_taken_cost = true;
3691 }
3692 else
3693 {
3694 peel_iters_prologue = npeel;
3695 if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo) && peel_iters_prologue > 0)
3696 /* If peeled iterations are known but number of scalar loop
3697 iterations are unknown, count a taken branch per peeled loop. */
3698 prologue_need_br_taken_cost = true;
3699 }
3700
3701 bool epilogue_need_br_taken_cost = false;
3702 bool epilogue_need_br_not_taken_cost = false;
3703
3704 /* Calculate peel_iters_epilogue. */
3705 if (LOOP_VINFO_USING_PARTIAL_VECTORS_P (loop_vinfo))
3706 /* We need to peel exactly one iteration for gaps. */
3707 peel_iters_epilogue = LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) ? 1 : 0;
3708 else if (npeel < 0)
3709 {
3710 /* If peeling for alignment is unknown, the loop bound of the main
3711 loop becomes unknown. */
3712 peel_iters_epilogue = assumed_vf / 2;
3713 if (dump_enabled_p ())
3714 dump_printf (MSG_NOTE, "cost model: "
3715 "epilogue peel iters set to vf/2 because "
3716 "peeling for alignment is unknown.\n");
3717
3718 /* Same reasoning as for the peel_iters_prologue calculation above. */
3719 epilogue_need_br_taken_cost = true;
3720 epilogue_need_br_not_taken_cost = true;
3721 }
3722 else
3723 {
3724 peel_iters_epilogue = vect_get_peel_iters_epilogue (loop_vinfo, npeel);
3725 if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo) && peel_iters_epilogue > 0)
3726 /* If peeled iterations are known but number of scalar loop
3727 iterations are unknown, count a taken branch per peeled loop. */
3728 epilogue_need_br_taken_cost = true;
3729 }
3730
3731 stmt_info_for_cost *si;
3732 int j;
3733 /* Add costs associated with peel_iters_prologue. */
3734 if (peel_iters_prologue)
3735 FOR_EACH_VEC_ELT (LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo), j, si)
3736 {
3737 (void) add_stmt_cost (loop_vinfo, target_cost_data,
3738 si->count * peel_iters_prologue, si->kind,
3739 si->stmt_info, si->vectype, si->misalign,
3740 vect_prologue);
3741 }
3742
3743 /* Add costs associated with peel_iters_epilogue. */
3744 if (peel_iters_epilogue)
3745 FOR_EACH_VEC_ELT (LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo), j, si)
3746 {
3747 (void) add_stmt_cost (loop_vinfo, target_cost_data,
3748 si->count * peel_iters_epilogue, si->kind,
3749 si->stmt_info, si->vectype, si->misalign,
3750 vect_epilogue);
3751 }
3752
3753 /* Add possible cond_branch_taken/cond_branch_not_taken cost. */
3754
3755 if (prologue_need_br_taken_cost)
3756 (void) add_stmt_cost (loop_vinfo, target_cost_data, 1, cond_branch_taken,
3757 NULL, NULL_TREE, 0, vect_prologue);
3758
3759 if (prologue_need_br_not_taken_cost)
3760 (void) add_stmt_cost (loop_vinfo, target_cost_data, 1,
3761 cond_branch_not_taken, NULL, NULL_TREE, 0,
3762 vect_prologue);
3763
3764 if (epilogue_need_br_taken_cost)
3765 (void) add_stmt_cost (loop_vinfo, target_cost_data, 1, cond_branch_taken,
3766 NULL, NULL_TREE, 0, vect_epilogue);
3767
3768 if (epilogue_need_br_not_taken_cost)
3769 (void) add_stmt_cost (loop_vinfo, target_cost_data, 1,
3770 cond_branch_not_taken, NULL, NULL_TREE, 0,
3771 vect_epilogue);
3772
3773 /* Take care of special costs for rgroup controls of partial vectors. */
3774 if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
3775 {
3776 /* Calculate how many masks we need to generate. */
3777 unsigned int num_masks = 0;
3778 rgroup_controls *rgm;
3779 unsigned int num_vectors_m1;
3780 FOR_EACH_VEC_ELT (LOOP_VINFO_MASKS (loop_vinfo), num_vectors_m1, rgm)
3781 if (rgm->type)
3782 num_masks += num_vectors_m1 + 1;
3783 gcc_assert (num_masks > 0);
3784
3785 /* In the worst case, we need to generate each mask in the prologue
3786 and in the loop body. One of the loop body mask instructions
3787 replaces the comparison in the scalar loop, and since we don't
3788 count the scalar comparison against the scalar body, we shouldn't
3789 count that vector instruction against the vector body either.
3790
3791 Sometimes we can use unpacks instead of generating prologue
3792 masks and sometimes the prologue mask will fold to a constant,
3793 so the actual prologue cost might be smaller. However, it's
3794 simpler and safer to use the worst-case cost; if this ends up
3795 being the tie-breaker between vectorizing or not, then it's
3796 probably better not to vectorize. */
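/* For example, with two hypothetical mask rgroups requiring one and two
   mask vectors respectively, num_masks is 3: three mask-generation
   statements are costed in the prologue and two (3 - 1) in the loop body,
   the remaining body mask standing in for the scalar loop's comparison.  */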
3797 (void) add_stmt_cost (loop_vinfo, target_cost_data, num_masks,
3798 vector_stmt, NULL, NULL_TREE, 0, vect_prologue);
3799 (void) add_stmt_cost (loop_vinfo, target_cost_data, num_masks - 1,
3800 vector_stmt, NULL, NULL_TREE, 0, vect_body);
3801 }
3802 else if (LOOP_VINFO_FULLY_WITH_LENGTH_P (loop_vinfo))
3803 {
3804 /* Referring to the functions vect_set_loop_condition_partial_vectors
3805 and vect_set_loop_controls_directly, we need to generate each
3806 length in the prologue and in the loop body if required. Although
3807 there are some possible optimizations, we consider the worst case
3808 here. */
3809
3810 bool niters_known_p = LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo);
3811 bool need_iterate_p
3812 = (!LOOP_VINFO_EPILOGUE_P (loop_vinfo)
3813 && !vect_known_niters_smaller_than_vf (loop_vinfo));
3814
3815 /* Calculate how many statements to be added. */
3816 unsigned int prologue_stmts = 0;
3817 unsigned int body_stmts = 0;
3818
3819 rgroup_controls *rgc;
3820 unsigned int num_vectors_m1;
3821 FOR_EACH_VEC_ELT (LOOP_VINFO_LENS (loop_vinfo), num_vectors_m1, rgc)
3822 if (rgc->type)
3823 {
3824 /* May need one SHIFT for nitems_total computation. */
3825 unsigned nitems = rgc->max_nscalars_per_iter * rgc->factor;
3826 if (nitems != 1 && !niters_known_p)
3827 prologue_stmts += 1;
3828
3829 /* May need one MAX and one MINUS for wrap around. */
3830 if (vect_rgroup_iv_might_wrap_p (loop_vinfo, rgc))
3831 prologue_stmts += 2;
3832
3833 /* Need one MAX and one MINUS for each batch limit except for
3834 the first one. */
3835 prologue_stmts += num_vectors_m1 * 2;
3836
3837 unsigned int num_vectors = num_vectors_m1 + 1;
3838
3839 /* Need to set up lengths in prologue, only one MIN required
3840 for each since start index is zero. */
3841 prologue_stmts += num_vectors;
3842
3843 /* Each may need two MINs and one MINUS to update lengths in body
3844 for next iteration. */
3845 if (need_iterate_p)
3846 body_stmts += 3 * num_vectors;
3847 }
3848
3849 (void) add_stmt_cost (loop_vinfo, target_cost_data, prologue_stmts,
3850 scalar_stmt, NULL, NULL_TREE, 0, vect_prologue);
3851 (void) add_stmt_cost (loop_vinfo, target_cost_data, body_stmts,
3852 scalar_stmt, NULL, NULL_TREE, 0, vect_body);
3853 }
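/* As a worked example of the length-based accounting above, consider a
   single hypothetical rgroup with nitems = 4, num_vectors_m1 = 0, unknown
   niters, a possible IV wrap and a loop that needs to iterate: the
   prologue gets 1 (SHIFT) + 2 (MAX/MINUS) + 0 + 1 (MIN) = 4 scalar
   statements and the body gets 3 * 1 = 3.  */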
3854
3855 /* FORNOW: The scalar outside cost is incremented in one of the
3856 following ways:
3857
3858 1. The vectorizer checks for alignment and aliasing and generates
3859 a condition that allows dynamic vectorization. A cost model
3860 check is ANDed with the versioning condition. Hence the scalar code
3861 path now has the added cost of the versioning check.
3862
3863 if (cost > th & versioning_check)
3864 jmp to vector code
3865
3866 Hence run-time scalar is incremented by not-taken branch cost.
3867
3868 2. The vectorizer then checks if a prologue is required. If the
3869 cost model check was not done before during versioning, it has to
3870 be done before the prologue check.
3871
3872 if (cost <= th)
3873 prologue = scalar_iters
3874 if (prologue == 0)
3875 jmp to vector code
3876 else
3877 execute prologue
3878 if (prologue == num_iters)
3879 go to exit
3880
3881 Hence the run-time scalar cost is incremented by a taken branch,
3882 plus a not-taken branch, plus a taken branch cost.
3883
3884 3. The vectorizer then checks if an epilogue is required. If the
3885 cost model check was not done before during prologue check, it
3886 has to be done with the epilogue check.
3887
3888 if (prologue == 0)
3889 jmp to vector code
3890 else
3891 execute prologue
3892 if (prologue == num_iters)
3893 go to exit
3894 vector code:
3895 if ((cost <= th) | (scalar_iters-prologue-epilogue == 0))
3896 jmp to epilogue
3897
3898 Hence the run-time scalar cost should be incremented by 2 taken
3899 branches.
3900
3901 TODO: The back end may reorder the BBs differently and reverse
3902 conditions/branch directions. Change the estimates below to
3903 something more reasonable. */
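/* For instance, with hypothetical branch costs cond_branch_taken = 3 and
   cond_branch_not_taken = 1, the code below adds 1 to the scalar outside
   cost when versioning is done (case 1), 2 * 3 + 1 = 7 when the cost model
   check happens with the prologue check (case 2), and 2 * 3 = 6 when it
   happens with the epilogue check (case 3).  */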
3904
3905 /* If the number of iterations is known and we do not do versioning, we can
3906 decide whether to vectorize at compile time. Hence the scalar version
3907 does not carry cost model guard costs. */
3908 if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
3909 || LOOP_REQUIRES_VERSIONING (loop_vinfo))
3910 {
3911 /* Cost model check occurs at versioning. */
3912 if (LOOP_REQUIRES_VERSIONING (loop_vinfo))
3913 scalar_outside_cost += vect_get_stmt_cost (cond_branch_not_taken);
3914 else
3915 {
3916 /* Cost model check occurs at prologue generation. */
3917 if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) < 0)
3918 scalar_outside_cost += 2 * vect_get_stmt_cost (cond_branch_taken)
3919 + vect_get_stmt_cost (cond_branch_not_taken);
3920 /* Cost model check occurs at epilogue generation. */
3921 else
3922 scalar_outside_cost += 2 * vect_get_stmt_cost (cond_branch_taken);
3923 }
3924 }
3925
3926 /* Complete the target-specific cost calculations. */
3927 finish_cost (LOOP_VINFO_TARGET_COST_DATA (loop_vinfo), &vec_prologue_cost,
3928 &vec_inside_cost, &vec_epilogue_cost);
3929
3930 vec_outside_cost = (int)(vec_prologue_cost + vec_epilogue_cost);
3931
3932 /* Stash the costs so that we can compare two loop_vec_infos. */
3933 loop_vinfo->vec_inside_cost = vec_inside_cost;
3934 loop_vinfo->vec_outside_cost = vec_outside_cost;
3935
3936 if (dump_enabled_p ())
3937 {
3938 dump_printf_loc (MSG_NOTE, vect_location, "Cost model analysis: \n");
3939 dump_printf (MSG_NOTE, " Vector inside of loop cost: %d\n",
3940 vec_inside_cost);
3941 dump_printf (MSG_NOTE, " Vector prologue cost: %d\n",
3942 vec_prologue_cost);
3943 dump_printf (MSG_NOTE, " Vector epilogue cost: %d\n",
3944 vec_epilogue_cost);
3945 dump_printf (MSG_NOTE, " Scalar iteration cost: %d\n",
3946 scalar_single_iter_cost);
3947 dump_printf (MSG_NOTE, " Scalar outside cost: %d\n",
3948 scalar_outside_cost);
3949 dump_printf (MSG_NOTE, " Vector outside cost: %d\n",
3950 vec_outside_cost);
3951 dump_printf (MSG_NOTE, " prologue iterations: %d\n",
3952 peel_iters_prologue);
3953 dump_printf (MSG_NOTE, " epilogue iterations: %d\n",
3954 peel_iters_epilogue);
3955 }
3956
3957 /* Calculate number of iterations required to make the vector version
3958 profitable, relative to the loop bodies only. The following condition
3959 must hold true:
3960 SIC * niters + SOC > VIC * ((niters - NPEEL) / VF) + VOC
3961 where
3962 SIC = scalar iteration cost, VIC = vector iteration cost,
3963 VOC = vector outside cost, VF = vectorization factor,
3964 NPEEL = prologue iterations + epilogue iterations,
3965 SOC = scalar outside cost for run time cost model check. */
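/* As an illustration with made-up numbers: SIC = 4, VIC = 6 and VF = 4
   give a saving of SIC * VF - VIC = 16 - 6 = 10 cost units per vector
   iteration, and the computation below determines how many iterations are
   needed for that saving to amortize VOC, NPEEL and SOC.  */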
3966
3967 int saving_per_viter = (scalar_single_iter_cost * assumed_vf
3968 - vec_inside_cost);
3969 if (saving_per_viter <= 0)
3970 {
3971 if (LOOP_VINFO_LOOP (loop_vinfo)->force_vectorize)
3972 warning_at (vect_location.get_location_t (), OPT_Wopenmp_simd,
3973 "vectorization did not happen for a simd loop");
3974
3975 if (dump_enabled_p ())
3976 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3977 "cost model: the vector iteration cost = %d "
3978 "divided by the scalar iteration cost = %d "
3979 "is greater or equal to the vectorization factor = %d"
3980 ".\n",
3981 vec_inside_cost, scalar_single_iter_cost, assumed_vf);
3982 *ret_min_profitable_niters = -1;
3983 *ret_min_profitable_estimate = -1;
3984 return;
3985 }
3986
3987 /* ??? The "if" arm is written to handle all cases; see below for what
3988 we would do for !LOOP_VINFO_USING_PARTIAL_VECTORS_P. */
3989 if (LOOP_VINFO_USING_PARTIAL_VECTORS_P (loop_vinfo))
3990 {
3991 /* Rewriting the condition above in terms of the number of
3992 vector iterations (vniters) rather than the number of
3993 scalar iterations (niters) gives:
3994
3995 SIC * (vniters * VF + NPEEL) + SOC > VIC * vniters + VOC
3996
3997 <==> vniters * (SIC * VF - VIC) > VOC - SIC * NPEEL - SOC
3998
3999 For integer N, X and Y when X > 0:
4000
4001 N * X > Y <==> N >= (Y /[floor] X) + 1. */
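/* E.g. for X = 10 and Y = 35: N * 10 > 35 first holds for
   N = 35 / 10 + 1 = 4, using truncating integer division.  */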
4002 int outside_overhead = (vec_outside_cost
4003 - scalar_single_iter_cost * peel_iters_prologue
4004 - scalar_single_iter_cost * peel_iters_epilogue
4005 - scalar_outside_cost);
4006 /* We're only interested in cases that require at least one
4007 vector iteration. */
4008 int min_vec_niters = 1;
4009 if (outside_overhead > 0)
4010 min_vec_niters = outside_overhead / saving_per_viter + 1;
4011
4012 if (dump_enabled_p ())
4013 dump_printf (MSG_NOTE, " Minimum number of vector iterations: %d\n",
4014 min_vec_niters);
4015
4016 if (LOOP_VINFO_USING_PARTIAL_VECTORS_P (loop_vinfo))
4017 {
4018 /* Now that we know the minimum number of vector iterations,
4019 find the minimum niters for which the scalar cost is larger:
4020
4021 SIC * niters > VIC * vniters + VOC - SOC
4022
4023 We know that the minimum niters is no more than
4024 vniters * VF + NPEEL, but it might be (and often is) less
4025 than that if a partial vector iteration is cheaper than the
4026 equivalent scalar code. */
4027 int threshold = (vec_inside_cost * min_vec_niters
4028 + vec_outside_cost
4029 - scalar_outside_cost);
4030 if (threshold <= 0)
4031 min_profitable_iters = 1;
4032 else
4033 min_profitable_iters = threshold / scalar_single_iter_cost + 1;
4034 }
4035 else
4036 /* Convert the number of vector iterations into a number of
4037 scalar iterations. */
4038 min_profitable_iters = (min_vec_niters * assumed_vf
4039 + peel_iters_prologue
4040 + peel_iters_epilogue);
4041 }
4042 else
4043 {
4044 min_profitable_iters = ((vec_outside_cost - scalar_outside_cost)
4045 * assumed_vf
4046 - vec_inside_cost * peel_iters_prologue
4047 - vec_inside_cost * peel_iters_epilogue);
4048 if (min_profitable_iters <= 0)
4049 min_profitable_iters = 0;
4050 else
4051 {
4052 min_profitable_iters /= saving_per_viter;
4053
4054 if ((scalar_single_iter_cost * assumed_vf * min_profitable_iters)
4055 <= (((int) vec_inside_cost * min_profitable_iters)
4056 + (((int) vec_outside_cost - scalar_outside_cost)
4057 * assumed_vf)))
4058 min_profitable_iters++;
4059 }
4060 }
4061
4062 if (dump_enabled_p ())
4063 dump_printf (MSG_NOTE,
4064 " Calculated minimum iters for profitability: %d\n",
4065 min_profitable_iters);
4066
4067 if (!LOOP_VINFO_USING_PARTIAL_VECTORS_P (loop_vinfo)
4068 && min_profitable_iters < (assumed_vf + peel_iters_prologue))
4069 /* We want the vectorized loop to execute at least once. */
4070 min_profitable_iters = assumed_vf + peel_iters_prologue;
4071 else if (min_profitable_iters < peel_iters_prologue)
4072 /* For LOOP_VINFO_USING_PARTIAL_VECTORS_P, we need to ensure the
4073 vectorized loop executes at least once. */
4074 min_profitable_iters = peel_iters_prologue;
4075
4076 if (dump_enabled_p ())
4077 dump_printf_loc (MSG_NOTE, vect_location,
4078 " Runtime profitability threshold = %d\n",
4079 min_profitable_iters);
4080
4081 *ret_min_profitable_niters = min_profitable_iters;
4082
4083 /* Calculate number of iterations required to make the vector version
4084 profitable, relative to the loop bodies only.
4085
4086 The non-vectorized variant costs SIC * niters and must win over the vector
4087 variant on the expected loop trip count. The following condition must hold true:
4088 SIC * niters > VIC * ((niters - NPEEL) / VF) + VOC + SOC */
4089
4090 if (vec_outside_cost <= 0)
4091 min_profitable_estimate = 0;
4092 else if (LOOP_VINFO_USING_PARTIAL_VECTORS_P (loop_vinfo))
4093 {
4094 /* This is a repeat of the code above, but with + SOC rather
4095 than - SOC. */
4096 int outside_overhead = (vec_outside_cost
4097 - scalar_single_iter_cost * peel_iters_prologue
4098 - scalar_single_iter_cost * peel_iters_epilogue
4099 + scalar_outside_cost);
4100 int min_vec_niters = 1;
4101 if (outside_overhead > 0)
4102 min_vec_niters = outside_overhead / saving_per_viter + 1;
4103
4104 if (LOOP_VINFO_USING_PARTIAL_VECTORS_P (loop_vinfo))
4105 {
4106 int threshold = (vec_inside_cost * min_vec_niters
4107 + vec_outside_cost
4108 + scalar_outside_cost);
4109 min_profitable_estimate = threshold / scalar_single_iter_cost + 1;
4110 }
4111 else
4112 min_profitable_estimate = (min_vec_niters * assumed_vf
4113 + peel_iters_prologue
4114 + peel_iters_epilogue);
4115 }
4116 else
4117 {
4118 min_profitable_estimate = ((vec_outside_cost + scalar_outside_cost)
4119 * assumed_vf
4120 - vec_inside_cost * peel_iters_prologue
4121 - vec_inside_cost * peel_iters_epilogue)
4122 / ((scalar_single_iter_cost * assumed_vf)
4123 - vec_inside_cost);
4124 }
4125 min_profitable_estimate = MAX (min_profitable_estimate, min_profitable_iters);
4126 if (dump_enabled_p ())
4127 dump_printf_loc (MSG_NOTE, vect_location,
4128 " Static estimate profitability threshold = %d\n",
4129 min_profitable_estimate);
4130
4131 *ret_min_profitable_estimate = min_profitable_estimate;
4132 }
4133
4134 /* Writes into SEL a mask for a vec_perm, equivalent to a vec_shr by OFFSET
4135 vector elements (not bits) for a vector with NELT elements. */
4136 static void
4137 calc_vec_perm_mask_for_shift (unsigned int offset, unsigned int nelt,
4138 vec_perm_builder *sel)
4139 {
4140 /* The encoding is a single stepped pattern. Any wrap-around is handled
4141 by vec_perm_indices. */
4142 sel->new_vector (nelt, 1, 3);
4143 for (unsigned int i = 0; i < 3; i++)
4144 sel->quick_push (i + offset);
4145 }
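/* For example, a call with offset = 2 and nelt = 8 encodes the stepped
   selector {2, 3, 4, ...}, i.e. the permutation {2, 3, 4, 5, 6, 7, 8, 9}
   on a two-vector input, which vec_perm_indices expands (and wraps) as
   needed.  */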
4146
4147 /* Checks whether the target supports whole-vector shifts for vectors of mode
4148 MODE. This is the case if _either_ the platform handles vec_shr_optab, _or_
4149 it supports vec_perm_const with masks for all necessary shift amounts. */
4150 static bool
4151 have_whole_vector_shift (machine_mode mode)
4152 {
4153 if (optab_handler (vec_shr_optab, mode) != CODE_FOR_nothing)
4154 return true;
4155
4156 /* Variable-length vectors should be handled via the optab. */
4157 unsigned int nelt;
4158 if (!GET_MODE_NUNITS (mode).is_constant (&nelt))
4159 return false;
4160
4161 vec_perm_builder sel;
4162 vec_perm_indices indices;
4163 for (unsigned int i = nelt / 2; i >= 1; i /= 2)
4164 {
4165 calc_vec_perm_mask_for_shift (i, nelt, &sel);
4166 indices.new_vector (sel, 2, nelt);
4167 if (!can_vec_perm_const_p (mode, indices, false))
4168 return false;
4169 }
4170 return true;
4171 }
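/* For instance, for a fixed-length mode with nelt = 8 the loop above
   checks permutation masks for shifts by 4, 2 and 1 elements, which are
   exactly the halving shift amounts a log2-style reduction epilogue
   needs.  */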
4172
4173 /* TODO: Close dependency between vect_model_*_cost and vectorizable_*
4174 functions. Design better to avoid maintenance issues. */
4175
4176 /* Function vect_model_reduction_cost.
4177
4178 Models cost for a reduction operation, including the vector ops
4179 generated within the strip-mine loop, the initial definition before
4180 the loop, and the epilogue code that must be generated. */
4181
4182 static void
4183 vect_model_reduction_cost (loop_vec_info loop_vinfo,
4184 stmt_vec_info stmt_info, internal_fn reduc_fn,
4185 vect_reduction_type reduction_type,
4186 int ncopies, stmt_vector_for_cost *cost_vec)
4187 {
4188 int prologue_cost = 0, epilogue_cost = 0, inside_cost;
4189 enum tree_code code;
4190 optab optab;
4191 tree vectype;
4192 machine_mode mode;
4193 class loop *loop = NULL;
4194
4195 if (loop_vinfo)
4196 loop = LOOP_VINFO_LOOP (loop_vinfo);
4197
4198 /* Condition reductions generate two reductions in the loop. */
4199 if (reduction_type == COND_REDUCTION)
4200 ncopies *= 2;
4201
4202 vectype = STMT_VINFO_VECTYPE (stmt_info);
4203 mode = TYPE_MODE (vectype);
4204 stmt_vec_info orig_stmt_info = vect_orig_stmt (stmt_info);
4205
4206 code = gimple_assign_rhs_code (orig_stmt_info->stmt);
4207
4208 if (reduction_type == EXTRACT_LAST_REDUCTION)
4209 /* No extra instructions are needed in the prologue. The loop body
4210 operations are costed in vectorizable_condition. */
4211 inside_cost = 0;
4212 else if (reduction_type == FOLD_LEFT_REDUCTION)
4213 {
4214 /* No extra instructions needed in the prologue. */
4215 prologue_cost = 0;
4216
4217 if (reduc_fn != IFN_LAST)
4218 /* Count one reduction-like operation per vector. */
4219 inside_cost = record_stmt_cost (cost_vec, ncopies, vec_to_scalar,
4220 stmt_info, 0, vect_body);
4221 else
4222 {
4223 /* Use NELEMENTS extracts and NELEMENTS scalar ops. */
4224 unsigned int nelements = ncopies * vect_nunits_for_cost (vectype);
4225 inside_cost = record_stmt_cost (cost_vec, nelements,
4226 vec_to_scalar, stmt_info, 0,
4227 vect_body);
4228 inside_cost += record_stmt_cost (cost_vec, nelements,
4229 scalar_stmt, stmt_info, 0,
4230 vect_body);
4231 }
4232 }
4233 else
4234 {
4235 /* Add in cost for initial definition.
4236 For cond reduction we have four vectors: initial index, step,
4237 initial result of the data reduction, initial value of the index
4238 reduction. */
4239 int prologue_stmts = reduction_type == COND_REDUCTION ? 4 : 1;
4240 prologue_cost += record_stmt_cost (cost_vec, prologue_stmts,
4241 scalar_to_vec, stmt_info, 0,
4242 vect_prologue);
4243
4244 /* Cost of reduction op inside loop. */
4245 inside_cost = record_stmt_cost (cost_vec, ncopies, vector_stmt,
4246 stmt_info, 0, vect_body);
4247 }
4248
4249 /* Determine cost of epilogue code.
4250
4251 We have a reduction operator that will reduce the vector in one statement.
4252 Also requires scalar extract. */
4253
4254 if (!loop || !nested_in_vect_loop_p (loop, orig_stmt_info))
4255 {
4256 if (reduc_fn != IFN_LAST)
4257 {
4258 if (reduction_type == COND_REDUCTION)
4259 {
4260 /* An EQ stmt and a COND_EXPR stmt. */
4261 epilogue_cost += record_stmt_cost (cost_vec, 2,
4262 vector_stmt, stmt_info, 0,
4263 vect_epilogue);
4264 /* Reduction of the max index and a reduction of the found
4265 values. */
4266 epilogue_cost += record_stmt_cost (cost_vec, 2,
4267 vec_to_scalar, stmt_info, 0,
4268 vect_epilogue);
4269 /* A broadcast of the max value. */
4270 epilogue_cost += record_stmt_cost (cost_vec, 1,
4271 scalar_to_vec, stmt_info, 0,
4272 vect_epilogue);
4273 }
4274 else
4275 {
4276 epilogue_cost += record_stmt_cost (cost_vec, 1, vector_stmt,
4277 stmt_info, 0, vect_epilogue);
4278 epilogue_cost += record_stmt_cost (cost_vec, 1,
4279 vec_to_scalar, stmt_info, 0,
4280 vect_epilogue);
4281 }
4282 }
4283 else if (reduction_type == COND_REDUCTION)
4284 {
4285 unsigned estimated_nunits = vect_nunits_for_cost (vectype);
4286 /* Extraction of scalar elements. */
4287 epilogue_cost += record_stmt_cost (cost_vec,
4288 2 * estimated_nunits,
4289 vec_to_scalar, stmt_info, 0,
4290 vect_epilogue);
4291 /* Scalar max reductions via COND_EXPR / MAX_EXPR. */
4292 epilogue_cost += record_stmt_cost (cost_vec,
4293 2 * estimated_nunits - 3,
4294 scalar_stmt, stmt_info, 0,
4295 vect_epilogue);
4296 }
4297 else if (reduction_type == EXTRACT_LAST_REDUCTION
4298 || reduction_type == FOLD_LEFT_REDUCTION)
4299 /* No extra instructions are needed in the epilogue. */
4300 ;
4301 else
4302 {
4303 int vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype));
4304 tree bitsize =
4305 TYPE_SIZE (TREE_TYPE (gimple_assign_lhs (orig_stmt_info->stmt)));
4306 int element_bitsize = tree_to_uhwi (bitsize);
4307 int nelements = vec_size_in_bits / element_bitsize;
4308
4309 if (code == COND_EXPR)
4310 code = MAX_EXPR;
4311
4312 optab = optab_for_tree_code (code, vectype, optab_default);
4313
4314 /* We have a whole vector shift available. */
4315 if (optab != unknown_optab
4316 && VECTOR_MODE_P (mode)
4317 && optab_handler (optab, mode) != CODE_FOR_nothing
4318 && have_whole_vector_shift (mode))
4319 {
4320 /* Final reduction via vector shifts and the reduction operator.
4321 Also requires scalar extract. */
4322 epilogue_cost += record_stmt_cost (cost_vec,
4323 exact_log2 (nelements) * 2,
4324 vector_stmt, stmt_info, 0,
4325 vect_epilogue);
4326 epilogue_cost += record_stmt_cost (cost_vec, 1,
4327 vec_to_scalar, stmt_info, 0,
4328 vect_epilogue);
4329 }
4330 else
4331 /* Use extracts and reduction op for final reduction. For N
4332 elements, we have N extracts and N-1 reduction ops. */
4333 epilogue_cost += record_stmt_cost (cost_vec,
4334 nelements + nelements - 1,
4335 vector_stmt, stmt_info, 0,
4336 vect_epilogue);
4337 }
4338 }
4339
4340 if (dump_enabled_p ())
4341 dump_printf (MSG_NOTE,
4342 "vect_model_reduction_cost: inside_cost = %d, "
4343 "prologue_cost = %d, epilogue_cost = %d .\n", inside_cost,
4344 prologue_cost, epilogue_cost);
4345 }
4346
4347
4348 /* Function vect_model_induction_cost.
4349
4350 Models cost for induction operations. */
4351
4352 static void
4353 vect_model_induction_cost (stmt_vec_info stmt_info, int ncopies,
4354 stmt_vector_for_cost *cost_vec)
4355 {
4356 unsigned inside_cost, prologue_cost;
4357
4358 if (PURE_SLP_STMT (stmt_info))
4359 return;
4360
4361 /* loop cost for vec_loop. */
4362 inside_cost = record_stmt_cost (cost_vec, ncopies, vector_stmt,
4363 stmt_info, 0, vect_body);
4364
4365 /* prologue cost for vec_init and vec_step. */
4366 prologue_cost = record_stmt_cost (cost_vec, 2, scalar_to_vec,
4367 stmt_info, 0, vect_prologue);
4368
4369 if (dump_enabled_p ())
4370 dump_printf_loc (MSG_NOTE, vect_location,
4371 "vect_model_induction_cost: inside_cost = %d, "
4372 "prologue_cost = %d .\n", inside_cost, prologue_cost);
4373 }
4374
4375
4376
4377 /* Function get_initial_def_for_reduction
4378
4379 Input:
4380 STMT_VINFO - a stmt that performs a reduction operation in the loop.
4381 INIT_VAL - the initial value of the reduction variable
4382
4383 Output:
4384 ADJUSTMENT_DEF - a tree that holds a value to be added to the final result
4385 of the reduction (used for adjusting the epilog - see below).
4386 Return a vector variable, initialized according to the operation that
4387 STMT_VINFO performs. This vector will be used as the initial value
4388 of the vector of partial results.
4389
4390 Option1 (adjust in epilog): Initialize the vector as follows:
4391 add/bit or/xor: [0,0,...,0,0]
4392 mult/bit and: [1,1,...,1,1]
4393 min/max/cond_expr: [init_val,init_val,..,init_val,init_val]
4394 and when necessary (e.g. add/mult case) let the caller know
4395 that it needs to adjust the result by init_val.
4396
4397 Option2: Initialize the vector as follows:
4398 add/bit or/xor: [init_val,0,0,...,0]
4399 mult/bit and: [init_val,1,1,...,1]
4400 min/max/cond_expr: [init_val,init_val,...,init_val]
4401 and no adjustments are needed.
4402
4403 For example, for the following code:
4404
4405 s = init_val;
4406 for (i=0;i<n;i++)
4407 s = s + a[i];
4408
4409 STMT_VINFO is 's = s + a[i]', and the reduction variable is 's'.
4410 For a vector of 4 units, we want to return either [0,0,0,init_val],
4411 or [0,0,0,0] and let the caller know that it needs to adjust
4412 the result at the end by 'init_val'.
4413
4414 FORNOW, we use Option1 (adjust in epilog) when ADJUSTMENT_DEF is not
4415 NULL, because its initialization vector is simpler (same element in
4416 all entries), and Option2 otherwise.
4417
4418 A cost model should help decide between these two schemes. */
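/* For example, for a product reduction s *= a[i] with init_val = 5 and
   four elements per vector, Option1 builds {1, 1, 1, 1} and reports an
   adjustment by 5 for the epilog, while Option2 builds {5, 1, 1, 1} and
   needs no adjustment.  */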
4419
4420 static tree
4421 get_initial_def_for_reduction (loop_vec_info loop_vinfo,
4422 stmt_vec_info stmt_vinfo,
4423 enum tree_code code, tree init_val,
4424 tree *adjustment_def)
4425 {
4426 class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
4427 tree scalar_type = TREE_TYPE (init_val);
4428 tree vectype = get_vectype_for_scalar_type (loop_vinfo, scalar_type);
4429 tree def_for_init;
4430 tree init_def;
4431 REAL_VALUE_TYPE real_init_val = dconst0;
4432 int int_init_val = 0;
4433 gimple_seq stmts = NULL;
4434
4435 gcc_assert (vectype);
4436
4437 gcc_assert (POINTER_TYPE_P (scalar_type) || INTEGRAL_TYPE_P (scalar_type)
4438 || SCALAR_FLOAT_TYPE_P (scalar_type));
4439
4440 gcc_assert (nested_in_vect_loop_p (loop, stmt_vinfo)
4441 || loop == (gimple_bb (stmt_vinfo->stmt))->loop_father);
4442
4443 /* ADJUSTMENT_DEF is NULL when called from
4444 vect_create_epilog_for_reduction to vectorize a double reduction. */
4445 if (adjustment_def)
4446 *adjustment_def = NULL;
4447
4448 switch (code)
4449 {
4450 case WIDEN_SUM_EXPR:
4451 case DOT_PROD_EXPR:
4452 case SAD_EXPR:
4453 case PLUS_EXPR:
4454 case MINUS_EXPR:
4455 case BIT_IOR_EXPR:
4456 case BIT_XOR_EXPR:
4457 case MULT_EXPR:
4458 case BIT_AND_EXPR:
4459 {
4460 if (code == MULT_EXPR)
4461 {
4462 real_init_val = dconst1;
4463 int_init_val = 1;
4464 }
4465
4466 if (code == BIT_AND_EXPR)
4467 int_init_val = -1;
4468
4469 if (SCALAR_FLOAT_TYPE_P (scalar_type))
4470 def_for_init = build_real (scalar_type, real_init_val);
4471 else
4472 def_for_init = build_int_cst (scalar_type, int_init_val);
4473
4474 if (adjustment_def || operand_equal_p (def_for_init, init_val, 0))
4475 {
4476 /* Option1: the first element is '0' or '1' as well. */
4477 if (!operand_equal_p (def_for_init, init_val, 0))
4478 *adjustment_def = init_val;
4479 init_def = gimple_build_vector_from_val (&stmts, vectype,
4480 def_for_init);
4481 }
4482 else if (!TYPE_VECTOR_SUBPARTS (vectype).is_constant ())
4483 {
4484 /* Option2 (variable length): the first element is INIT_VAL. */
4485 init_def = gimple_build_vector_from_val (&stmts, vectype,
4486 def_for_init);
4487 init_def = gimple_build (&stmts, CFN_VEC_SHL_INSERT,
4488 vectype, init_def, init_val);
4489 }
4490 else
4491 {
4492 /* Option2: the first element is INIT_VAL. */
4493 tree_vector_builder elts (vectype, 1, 2);
4494 elts.quick_push (init_val);
4495 elts.quick_push (def_for_init);
4496 init_def = gimple_build_vector (&stmts, &elts);
4497 }
4498 }
4499 break;
4500
4501 case MIN_EXPR:
4502 case MAX_EXPR:
4503 case COND_EXPR:
4504 {
4505 init_val = gimple_convert (&stmts, TREE_TYPE (vectype), init_val);
4506 init_def = gimple_build_vector_from_val (&stmts, vectype, init_val);
4507 }
4508 break;
4509
4510 default:
4511 gcc_unreachable ();
4512 }
4513
4514 if (stmts)
4515 gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
4516 return init_def;
4517 }
4518
4519 /* Get at the initial defs for the reduction PHIs in SLP_NODE.
4520 NUMBER_OF_VECTORS is the number of vector defs to create.
4521 If NEUTRAL_OP is nonnull, introducing extra elements of that
4522 value will not change the result. */
4523
4524 static void
4525 get_initial_defs_for_reduction (vec_info *vinfo,
4526 slp_tree slp_node,
4527 vec<tree> *vec_oprnds,
4528 unsigned int number_of_vectors,
4529 bool reduc_chain, tree neutral_op)
4530 {
4531 vec<stmt_vec_info> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
4532 stmt_vec_info stmt_vinfo = stmts[0];
4533 unsigned HOST_WIDE_INT nunits;
4534 unsigned j, number_of_places_left_in_vector;
4535 tree vector_type;
4536 unsigned int group_size = stmts.length ();
4537 unsigned int i;
4538 class loop *loop;
4539
4540 vector_type = STMT_VINFO_VECTYPE (stmt_vinfo);
4541
4542 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def);
4543
4544 loop = (gimple_bb (stmt_vinfo->stmt))->loop_father;
4545 gcc_assert (loop);
4546 edge pe = loop_preheader_edge (loop);
4547
4548 gcc_assert (!reduc_chain || neutral_op);
4549
4550 /* NUMBER_OF_COPIES is the number of times we need to use the same values in
4551 created vectors. It is greater than 1 if unrolling is performed.
4552
4553 For example, we have two scalar operands, s1 and s2 (e.g., group of
4554 strided accesses of size two), while NUNITS is four (i.e., four scalars
4555 of this type can be packed in a vector). The output vector will contain
4556 two copies of each scalar operand: {s1, s2, s1, s2}. (NUMBER_OF_COPIES
4557 will be 2).
4558
4559 If REDUC_GROUP_SIZE > NUNITS, the scalars will be split into several
4560 vectors containing the operands.
4561
4562 For example, NUNITS is four as before, and the group size is 8
4563 (s1, s2, ..., s8). We will create two vectors {s1, s2, s3, s4} and
4564 {s5, s6, s7, s8}. */
4565
4566 if (!TYPE_VECTOR_SUBPARTS (vector_type).is_constant (&nunits))
4567 nunits = group_size;
4568
4569 number_of_places_left_in_vector = nunits;
4570 bool constant_p = true;
4571 tree_vector_builder elts (vector_type, nunits, 1);
4572 elts.quick_grow (nunits);
4573 gimple_seq ctor_seq = NULL;
4574 for (j = 0; j < nunits * number_of_vectors; ++j)
4575 {
4576 tree op;
4577 i = j % group_size;
4578 stmt_vinfo = stmts[i];
4579
4580 /* Get the def before the loop. In a reduction chain we have only one
4581 initial value; otherwise we have as many initial values as PHIs in the group. */
4582 if (reduc_chain)
4583 op = j != 0 ? neutral_op : PHI_ARG_DEF_FROM_EDGE (stmt_vinfo->stmt, pe);
4584 else if (((vec_oprnds->length () + 1) * nunits
4585 - number_of_places_left_in_vector >= group_size)
4586 && neutral_op)
4587 op = neutral_op;
4588 else
4589 op = PHI_ARG_DEF_FROM_EDGE (stmt_vinfo->stmt, pe);
4590
4591 /* Create 'vect_ = {op0,op1,...,opn}'. */
4592 number_of_places_left_in_vector--;
4593 elts[nunits - number_of_places_left_in_vector - 1] = op;
4594 if (!CONSTANT_CLASS_P (op))
4595 constant_p = false;
4596
4597 if (number_of_places_left_in_vector == 0)
4598 {
4599 tree init;
4600 if (constant_p && !neutral_op
4601 ? multiple_p (TYPE_VECTOR_SUBPARTS (vector_type), nunits)
4602 : known_eq (TYPE_VECTOR_SUBPARTS (vector_type), nunits))
4603 /* Build the vector directly from ELTS. */
4604 init = gimple_build_vector (&ctor_seq, &elts);
4605 else if (neutral_op)
4606 {
4607 /* Build a vector of the neutral value and shift the
4608 other elements into place. */
4609 init = gimple_build_vector_from_val (&ctor_seq, vector_type,
4610 neutral_op);
4611 int k = nunits;
4612 while (k > 0 && elts[k - 1] == neutral_op)
4613 k -= 1;
4614 while (k > 0)
4615 {
4616 k -= 1;
4617 init = gimple_build (&ctor_seq, CFN_VEC_SHL_INSERT,
4618 vector_type, init, elts[k]);
4619 }
4620 }
4621 else
4622 {
4623 /* First time round, duplicate ELTS to fill the
4624 required number of vectors. */
4625 duplicate_and_interleave (vinfo, &ctor_seq, vector_type, elts,
4626 number_of_vectors, *vec_oprnds);
4627 break;
4628 }
4629 vec_oprnds->quick_push (init);
4630
4631 number_of_places_left_in_vector = nunits;
4632 elts.new_vector (vector_type, nunits, 1);
4633 elts.quick_grow (nunits);
4634 constant_p = true;
4635 }
4636 }
4637 if (ctor_seq != NULL)
4638 gsi_insert_seq_on_edge_immediate (pe, ctor_seq);
4639 }
4640
4641 /* For a statement STMT_INFO taking part in a reduction operation return
4642 the stmt_vec_info the meta information is stored on. */
4643
4644 stmt_vec_info
4645 info_for_reduction (vec_info *vinfo, stmt_vec_info stmt_info)
4646 {
4647 stmt_info = vect_orig_stmt (stmt_info);
4648 gcc_assert (STMT_VINFO_REDUC_DEF (stmt_info));
4649 if (!is_a <gphi *> (stmt_info->stmt)
4650 || !VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_info)))
4651 stmt_info = STMT_VINFO_REDUC_DEF (stmt_info);
4652 gphi *phi = as_a <gphi *> (stmt_info->stmt);
4653 if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_double_reduction_def)
4654 {
4655 if (gimple_phi_num_args (phi) == 1)
4656 stmt_info = STMT_VINFO_REDUC_DEF (stmt_info);
4657 }
4658 else if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle)
4659 {
4660 edge pe = loop_preheader_edge (gimple_bb (phi)->loop_father);
4661 stmt_vec_info info
4662 = vinfo->lookup_def (PHI_ARG_DEF_FROM_EDGE (phi, pe));
4663 if (info && STMT_VINFO_DEF_TYPE (info) == vect_double_reduction_def)
4664 stmt_info = info;
4665 }
4666 return stmt_info;
4667 }
4668
4669 /* Function vect_create_epilog_for_reduction
4670
4671 Create code at the loop-epilog to finalize the result of a reduction
4672 computation.
4673
4674 STMT_INFO is the scalar reduction stmt that is being vectorized.
4675 SLP_NODE is an SLP node containing a group of reduction statements. The
4676 first one in this group is STMT_INFO.
4677 SLP_NODE_INSTANCE is the SLP node instance containing SLP_NODE
4678 REDUC_INDEX says which rhs operand of the STMT_INFO is the reduction phi
4679 (counting from 0)
4680
4681 This function:
4682 1. Completes the reduction def-use cycles.
4683 2. "Reduces" each vector of partial results VECT_DEFS into a single result,
4684 by calling the function specified by REDUC_FN if available, or by
4685 other means (whole-vector shifts or a scalar loop).
4686 The function also creates a new phi node at the loop exit to preserve
4687 loop-closed form, as illustrated below.
4688
4689 The flow at the entry to this function:
4690
4691 loop:
4692 vec_def = phi <vec_init, null> # REDUCTION_PHI
4693 VECT_DEF = vector_stmt # vectorized form of STMT_INFO
4694 s_loop = scalar_stmt # (scalar) STMT_INFO
4695 loop_exit:
4696 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
4697 use <s_out0>
4698 use <s_out0>
4699
4700 The above is transformed by this function into:
4701
4702 loop:
4703 vec_def = phi <vec_init, VECT_DEF> # REDUCTION_PHI
4704 VECT_DEF = vector_stmt # vectorized form of STMT_INFO
4705 s_loop = scalar_stmt # (scalar) STMT_INFO
4706 loop_exit:
4707 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
4708 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
4709 v_out2 = reduce <v_out1>
4710 s_out3 = extract_field <v_out2, 0>
4711 s_out4 = adjust_result <s_out3>
4712 use <s_out4>
4713 use <s_out4>
4714 */
4715
4716 static void
4717 vect_create_epilog_for_reduction (loop_vec_info loop_vinfo,
4718 stmt_vec_info stmt_info,
4719 slp_tree slp_node,
4720 slp_instance slp_node_instance)
4721 {
4722 stmt_vec_info reduc_info = info_for_reduction (loop_vinfo, stmt_info);
4723 gcc_assert (reduc_info->is_reduc_info);
4724 /* For double reductions we need to get at the inner loop reduction
4725 stmt which has the meta info attached. Our stmt_info is that of the
4726 loop-closed PHI of the inner loop which we remember as
4727 def for the reduction PHI generation. */
4728 bool double_reduc = false;
4729 stmt_vec_info rdef_info = stmt_info;
4730 if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_double_reduction_def)
4731 {
4732 gcc_assert (!slp_node);
4733 double_reduc = true;
4734 stmt_info = loop_vinfo->lookup_def (gimple_phi_arg_def
4735 (stmt_info->stmt, 0));
4736 stmt_info = vect_stmt_to_vectorize (stmt_info);
4737 }
4738 gphi *reduc_def_stmt
4739 = as_a <gphi *> (STMT_VINFO_REDUC_DEF (vect_orig_stmt (stmt_info))->stmt);
4740 enum tree_code code = STMT_VINFO_REDUC_CODE (reduc_info);
4741 internal_fn reduc_fn = STMT_VINFO_REDUC_FN (reduc_info);
4742 tree vectype;
4743 machine_mode mode;
4744 class loop *loop = LOOP_VINFO_LOOP (loop_vinfo), *outer_loop = NULL;
4745 basic_block exit_bb;
4746 tree scalar_dest;
4747 tree scalar_type;
4748 gimple *new_phi = NULL, *phi;
4749 gimple_stmt_iterator exit_gsi;
4750 tree new_temp = NULL_TREE, new_name, new_scalar_dest;
4751 gimple *epilog_stmt = NULL;
4752 gimple *exit_phi;
4753 tree bitsize;
4754 tree def;
4755 tree orig_name, scalar_result;
4756 imm_use_iterator imm_iter, phi_imm_iter;
4757 use_operand_p use_p, phi_use_p;
4758 gimple *use_stmt;
4759 bool nested_in_vect_loop = false;
4760 auto_vec<gimple *> new_phis;
4761 int j, i;
4762 auto_vec<tree> scalar_results;
4763 unsigned int group_size = 1, k;
4764 auto_vec<gimple *> phis;
4765 bool slp_reduc = false;
4766 bool direct_slp_reduc;
4767 tree new_phi_result;
4768 tree induction_index = NULL_TREE;
4769
4770 if (slp_node)
4771 group_size = SLP_TREE_LANES (slp_node);
4772
4773 if (nested_in_vect_loop_p (loop, stmt_info))
4774 {
4775 outer_loop = loop;
4776 loop = loop->inner;
4777 nested_in_vect_loop = true;
4778 gcc_assert (!slp_node);
4779 }
4780 gcc_assert (!nested_in_vect_loop || double_reduc);
4781
4782 vectype = STMT_VINFO_REDUC_VECTYPE (reduc_info);
4783 gcc_assert (vectype);
4784 mode = TYPE_MODE (vectype);
4785
4786 tree initial_def = NULL;
4787 tree induc_val = NULL_TREE;
4788 tree adjustment_def = NULL;
4789 if (slp_node)
4790 ;
4791 else
4792 {
4793 /* Get at the scalar def before the loop, that defines the initial value
4794 of the reduction variable. */
4795 initial_def = PHI_ARG_DEF_FROM_EDGE (reduc_def_stmt,
4796 loop_preheader_edge (loop));
4797 /* Optimize: for induction condition reduction, if we can't use zero
4798 for induc_val, use initial_def. */
4799 if (STMT_VINFO_REDUC_TYPE (reduc_info) == INTEGER_INDUC_COND_REDUCTION)
4800 induc_val = STMT_VINFO_VEC_INDUC_COND_INITIAL_VAL (reduc_info);
4801 else if (double_reduc)
4802 ;
4803 else if (nested_in_vect_loop)
4804 ;
4805 else
4806 adjustment_def = STMT_VINFO_REDUC_EPILOGUE_ADJUSTMENT (reduc_info);
4807 }
4808
4809 unsigned vec_num;
4810 int ncopies;
4811 if (slp_node)
4812 {
4813 vec_num = SLP_TREE_VEC_STMTS (slp_node_instance->reduc_phis).length ();
4814 ncopies = 1;
4815 }
4816 else
4817 {
4818 stmt_vec_info reduc_info = loop_vinfo->lookup_stmt (reduc_def_stmt);
4819 vec_num = 1;
4820 ncopies = STMT_VINFO_VEC_STMTS (reduc_info).length ();
4821 }
4822
4823 /* For cond reductions we want to create a new vector (INDEX_COND_EXPR)
4824 which is updated with the current index of the loop for every match of
4825 the original loop's cond_expr (VEC_STMT). This results in a vector
4826 containing the last time the condition passed for that vector lane.
4827 The first match will be a 1 to allow 0 to be used for non-matching
4828 indexes. If there are no matches at all then the vector will be all
4829 zeroes.
4830
4831 PR92772: This algorithm is broken for architectures that support
4832 masked vectors, but do not provide fold_extract_last. */
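/* As a worked example with a hypothetical VF of 4: the index IV takes the
   values {1, 2, 3, 4} in the first vector iteration and {5, 6, 7, 8} in
   the second. If lanes 1 and 3 match in the first iteration and only
   lane 0 matches in the second, the accumulated index vector is first
   {0, 2, 0, 4} and then {5, 2, 0, 4}, recording per lane the IV value of
   the last vector iteration in which its condition held.  */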
4833 if (STMT_VINFO_REDUC_TYPE (reduc_info) == COND_REDUCTION)
4834 {
4835 auto_vec<std::pair<tree, bool>, 2> ccompares;
4836 stmt_vec_info cond_info = STMT_VINFO_REDUC_DEF (reduc_info);
4837 cond_info = vect_stmt_to_vectorize (cond_info);
4838 while (cond_info != reduc_info)
4839 {
4840 if (gimple_assign_rhs_code (cond_info->stmt) == COND_EXPR)
4841 {
4842 gimple *vec_stmt = STMT_VINFO_VEC_STMTS (cond_info)[0];
4843 gcc_assert (gimple_assign_rhs_code (vec_stmt) == VEC_COND_EXPR);
4844 ccompares.safe_push
4845 (std::make_pair (unshare_expr (gimple_assign_rhs1 (vec_stmt)),
4846 STMT_VINFO_REDUC_IDX (cond_info) == 2));
4847 }
4848 cond_info
4849 = loop_vinfo->lookup_def (gimple_op (cond_info->stmt,
4850 1 + STMT_VINFO_REDUC_IDX
4851 (cond_info)));
4852 cond_info = vect_stmt_to_vectorize (cond_info);
4853 }
4854 gcc_assert (ccompares.length () != 0);
4855
4856 tree indx_before_incr, indx_after_incr;
4857 poly_uint64 nunits_out = TYPE_VECTOR_SUBPARTS (vectype);
4858 int scalar_precision
4859 = GET_MODE_PRECISION (SCALAR_TYPE_MODE (TREE_TYPE (vectype)));
4860 tree cr_index_scalar_type = make_unsigned_type (scalar_precision);
4861 tree cr_index_vector_type = get_related_vectype_for_scalar_type
4862 (TYPE_MODE (vectype), cr_index_scalar_type,
4863 TYPE_VECTOR_SUBPARTS (vectype));
4864
4865 /* First we create a simple vector induction variable which starts
4866 with the values {1,2,3,...} (SERIES_VECT) and increments by the
4867 vector size (STEP). */
4868
4869 /* Create a {1,2,3,...} vector. */
4870 tree series_vect = build_index_vector (cr_index_vector_type, 1, 1);
4871
4872 /* Create a vector of the step value. */
4873 tree step = build_int_cst (cr_index_scalar_type, nunits_out);
4874 tree vec_step = build_vector_from_val (cr_index_vector_type, step);
4875
4876 /* Create an induction variable. */
4877 gimple_stmt_iterator incr_gsi;
4878 bool insert_after;
4879 standard_iv_increment_position (loop, &incr_gsi, &insert_after);
4880 create_iv (series_vect, vec_step, NULL_TREE, loop, &incr_gsi,
4881 insert_after, &indx_before_incr, &indx_after_incr);
4882
4883 /* Next create a new phi node vector (NEW_PHI_TREE) which starts
4884 filled with zeros (VEC_ZERO). */
4885
4886 /* Create a vector of 0s. */
4887 tree zero = build_zero_cst (cr_index_scalar_type);
4888 tree vec_zero = build_vector_from_val (cr_index_vector_type, zero);
4889
4890 /* Create a vector phi node. */
4891 tree new_phi_tree = make_ssa_name (cr_index_vector_type);
4892 new_phi = create_phi_node (new_phi_tree, loop->header);
4893 add_phi_arg (as_a <gphi *> (new_phi), vec_zero,
4894 loop_preheader_edge (loop), UNKNOWN_LOCATION);
4895
4896 /* Now take the condition from the loop's original cond_exprs
4897 and produce a new cond_expr (INDEX_COND_EXPR) which for
4898 every match uses values from the induction variable
4899 (INDEX_BEFORE_INCR) otherwise uses values from the phi node
4900 (NEW_PHI_TREE).
4901 Finally, we update the phi (NEW_PHI_TREE) to take the value of
4902 the new cond_expr (INDEX_COND_EXPR). */
4903 gimple_seq stmts = NULL;
4904 for (int i = ccompares.length () - 1; i != -1; --i)
4905 {
4906 tree ccompare = ccompares[i].first;
4907 if (ccompares[i].second)
4908 new_phi_tree = gimple_build (&stmts, VEC_COND_EXPR,
4909 cr_index_vector_type,
4910 ccompare,
4911 indx_before_incr, new_phi_tree);
4912 else
4913 new_phi_tree = gimple_build (&stmts, VEC_COND_EXPR,
4914 cr_index_vector_type,
4915 ccompare,
4916 new_phi_tree, indx_before_incr);
4917 }
4918 gsi_insert_seq_before (&incr_gsi, stmts, GSI_SAME_STMT);
4919
4920 /* Update the phi with the vec cond. */
4921 induction_index = new_phi_tree;
4922 add_phi_arg (as_a <gphi *> (new_phi), induction_index,
4923 loop_latch_edge (loop), UNKNOWN_LOCATION);
4924 }
4925
4926 /* 2. Create epilog code.
4927 The reduction epilog code operates across the elements of the vector
4928 of partial results computed by the vectorized loop.
4929 The reduction epilog code consists of:
4930
4931 step 1: compute the scalar result in a vector (v_out2)
4932 step 2: extract the scalar result (s_out3) from the vector (v_out2)
4933 step 3: adjust the scalar result (s_out3) if needed.
4934
4935 Step 1 can be accomplished using one of the following three schemes:
4936 (scheme 1) using reduc_fn, if available.
4937 (scheme 2) using whole-vector shifts, if available.
4938 (scheme 3) using a scalar loop. In this case steps 1+2 above are
4939 combined.
4940
4941 The overall epilog code looks like this:
4942
4943 s_out0 = phi <s_loop> # original EXIT_PHI
4944 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
4945 v_out2 = reduce <v_out1> # step 1
4946 s_out3 = extract_field <v_out2, 0> # step 2
4947 s_out4 = adjust_result <s_out3> # step 3
4948
4949 (step 3 is optional, and steps 1 and 2 may be combined).
4950 Lastly, the uses of s_out0 are replaced by s_out4. */
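     /* Illustrative PLUS example with a V4SI partial-result vector
	{a0,a1,a2,a3}: scheme 1 emits a single reduc_fn call, scheme 2
	repeatedly shifts the vector and combines (two shift+add steps for
	four lanes), and scheme 3 extracts the four scalars and combines
	them one by one.  */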
4951
4952
4953 /* 2.1 Create new loop-exit-phis to preserve loop-closed form:
4954 v_out1 = phi <VECT_DEF>
4955 Store them in NEW_PHIS. */
4956 if (double_reduc)
4957 loop = outer_loop;
4958 exit_bb = single_exit (loop)->dest;
4959 new_phis.create (slp_node ? vec_num : ncopies);
4960 for (unsigned i = 0; i < vec_num; i++)
4961 {
4962 if (slp_node)
4963 def = vect_get_slp_vect_def (slp_node, i);
4964 else
4965 def = gimple_get_lhs (STMT_VINFO_VEC_STMTS (rdef_info)[0]);
4966 for (j = 0; j < ncopies; j++)
4967 {
4968 tree new_def = copy_ssa_name (def);
4969 phi = create_phi_node (new_def, exit_bb);
4970 if (j == 0)
4971 new_phis.quick_push (phi);
4972 else
4973 {
4974 def = gimple_get_lhs (STMT_VINFO_VEC_STMTS (rdef_info)[j]);
4975 new_phis.quick_push (phi);
4976 }
4977
4978 SET_PHI_ARG_DEF (phi, single_exit (loop)->dest_idx, def);
4979 }
4980 }
4981
4982 exit_gsi = gsi_after_labels (exit_bb);
4983
4984 /* 2.2 Get the relevant tree-code to use in the epilog for schemes 2,3
4985 (i.e. when reduc_fn is not available) and in the final adjustment
4986 code (if needed). Also get the original scalar reduction variable as
4987 defined in the loop. In case STMT is a "pattern-stmt" (i.e. - it
4988 represents a reduction pattern), the tree-code and scalar-def are
4989 taken from the original stmt that the pattern-stmt (STMT) replaces.
4990 Otherwise (it is a regular reduction) - the tree-code and scalar-def
4991 are taken from STMT. */
4992
4993 stmt_vec_info orig_stmt_info = vect_orig_stmt (stmt_info);
4994 if (orig_stmt_info != stmt_info)
4995 {
4996 /* Reduction pattern */
4997 gcc_assert (STMT_VINFO_IN_PATTERN_P (orig_stmt_info));
4998 gcc_assert (STMT_VINFO_RELATED_STMT (orig_stmt_info) == stmt_info);
4999 }
5000
5001 scalar_dest = gimple_assign_lhs (orig_stmt_info->stmt);
5002 scalar_type = TREE_TYPE (scalar_dest);
5003 scalar_results.create (group_size);
5004 new_scalar_dest = vect_create_destination_var (scalar_dest, NULL);
5005 bitsize = TYPE_SIZE (scalar_type);
5006
5007 /* SLP reduction without reduction chain, e.g.,
5008 # a1 = phi <a2, a0>
5009 # b1 = phi <b2, b0>
5010 a2 = operation (a1)
5011 b2 = operation (b1) */
5012 slp_reduc = (slp_node && !REDUC_GROUP_FIRST_ELEMENT (stmt_info));
5013
5014 /* True if we should implement SLP_REDUC using native reduction operations
5015 instead of scalar operations. */
5016 direct_slp_reduc = (reduc_fn != IFN_LAST
5017 && slp_reduc
5018 && !TYPE_VECTOR_SUBPARTS (vectype).is_constant ());
5019
5020 /* In case of reduction chain, e.g.,
5021 # a1 = phi <a3, a0>
5022 a2 = operation (a1)
5023 a3 = operation (a2),
5024
5025 we may end up with more than one vector result. Here we reduce them to
5026 one vector. */
5027 if (REDUC_GROUP_FIRST_ELEMENT (stmt_info) || direct_slp_reduc)
5028 {
5029 gimple_seq stmts = NULL;
5030 tree first_vect = PHI_RESULT (new_phis[0]);
5031 first_vect = gimple_convert (&stmts, vectype, first_vect);
5032 for (k = 1; k < new_phis.length (); k++)
5033 {
5034 gimple *next_phi = new_phis[k];
5035 tree second_vect = PHI_RESULT (next_phi);
5036 second_vect = gimple_convert (&stmts, vectype, second_vect);
5037 first_vect = gimple_build (&stmts, code, vectype,
5038 first_vect, second_vect);
5039 }
5040 gsi_insert_seq_before (&exit_gsi, stmts, GSI_SAME_STMT);
5041
5042 new_phi_result = first_vect;
5043 new_phis.truncate (0);
5044 new_phis.safe_push (SSA_NAME_DEF_STMT (first_vect));
5045 }
5046 /* Likewise if we couldn't use a single defuse cycle. */
5047 else if (ncopies > 1)
5048 {
5049 gimple_seq stmts = NULL;
5050 tree first_vect = PHI_RESULT (new_phis[0]);
5051 first_vect = gimple_convert (&stmts, vectype, first_vect);
5052 for (int k = 1; k < ncopies; ++k)
5053 {
5054 tree second_vect = PHI_RESULT (new_phis[k]);
5055 second_vect = gimple_convert (&stmts, vectype, second_vect);
5056 first_vect = gimple_build (&stmts, code, vectype,
5057 first_vect, second_vect);
5058 }
5059 gsi_insert_seq_before (&exit_gsi, stmts, GSI_SAME_STMT);
5060 new_phi_result = first_vect;
5061 new_phis.truncate (0);
5062 new_phis.safe_push (SSA_NAME_DEF_STMT (first_vect));
5063 }
5064 else
5065 new_phi_result = PHI_RESULT (new_phis[0]);
5066
5067 if (STMT_VINFO_REDUC_TYPE (reduc_info) == COND_REDUCTION
5068 && reduc_fn != IFN_LAST)
5069 {
5070 /* For condition reductions, we have a vector (NEW_PHI_RESULT) containing
5071 various data values where the condition matched and another vector
5072 (INDUCTION_INDEX) containing all the indexes of those matches. We
5073 need to extract the last matching index (which will be the index with
5074 highest value) and use this to index into the data vector.
5075 For the case where there were no matches, the data vector will contain
5076 all default values and the index vector will be all zeros. */
5077
5078 /* Get various versions of the type of the vector of indexes. */
5079 tree index_vec_type = TREE_TYPE (induction_index);
5080 gcc_checking_assert (TYPE_UNSIGNED (index_vec_type));
5081 tree index_scalar_type = TREE_TYPE (index_vec_type);
5082 tree index_vec_cmp_type = truth_type_for (index_vec_type);
5083
5084 /* Get an unsigned integer version of the type of the data vector. */
5085 int scalar_precision
5086 = GET_MODE_PRECISION (SCALAR_TYPE_MODE (scalar_type));
5087 tree scalar_type_unsigned = make_unsigned_type (scalar_precision);
5088 tree vectype_unsigned = build_vector_type
5089 (scalar_type_unsigned, TYPE_VECTOR_SUBPARTS (vectype));
5090
5091 /* First we need to create a vector (ZERO_VEC) of zeros and another
5092 vector (MAX_INDEX_VEC) filled with the last matching index, which we
5093 can create using a MAX reduction and then expanding.
5094 In the case where the loop never made any matches, the max index will
5095 be zero. */
5096
5097 /* Vector of {0, 0, 0,...}. */
5098 tree zero_vec = build_zero_cst (vectype);
5099
5100 gimple_seq stmts = NULL;
5101 new_phi_result = gimple_convert (&stmts, vectype, new_phi_result);
5102 gsi_insert_seq_before (&exit_gsi, stmts, GSI_SAME_STMT);
5103
5104 /* Find maximum value from the vector of found indexes. */
5105 tree max_index = make_ssa_name (index_scalar_type);
5106 gcall *max_index_stmt = gimple_build_call_internal (IFN_REDUC_MAX,
5107 1, induction_index);
5108 gimple_call_set_lhs (max_index_stmt, max_index);
5109 gsi_insert_before (&exit_gsi, max_index_stmt, GSI_SAME_STMT);
5110
5111 /* Vector of {max_index, max_index, max_index,...}. */
5112 tree max_index_vec = make_ssa_name (index_vec_type);
5113 tree max_index_vec_rhs = build_vector_from_val (index_vec_type,
5114 max_index);
5115 gimple *max_index_vec_stmt = gimple_build_assign (max_index_vec,
5116 max_index_vec_rhs);
5117 gsi_insert_before (&exit_gsi, max_index_vec_stmt, GSI_SAME_STMT);
5118
5119 /* Next we compare the new vector (MAX_INDEX_VEC) full of max indexes
5120 with the vector (INDUCTION_INDEX) of found indexes, choosing values
5121 from the data vector (NEW_PHI_RESULT) for matches, 0 (ZERO_VEC)
5122 otherwise. Only one value should match, resulting in a vector
5123 (VEC_COND) with one data value and the rest zeros.
5124 In the case where the loop never made any matches, every index will
5125 match, resulting in a vector with all data values (which will all be
5126 the default value). */
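      /* Illustrative example: with INDUCTION_INDEX = {0,7,3,0} and data
	 {d0,d1,d2,d3}, MAX_INDEX_VEC is {7,7,7,7}, the comparison selects
	 only lane 1 and VEC_COND becomes {0,d1,0,0}; the unsigned MAX
	 reduction below then extracts d1.  */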
5127
5128 /* Compare the max index vector to the vector of found indexes to find
5129 the position of the max value. */
5130 tree vec_compare = make_ssa_name (index_vec_cmp_type);
5131 gimple *vec_compare_stmt = gimple_build_assign (vec_compare, EQ_EXPR,
5132 induction_index,
5133 max_index_vec);
5134 gsi_insert_before (&exit_gsi, vec_compare_stmt, GSI_SAME_STMT);
5135
5136 /* Use the compare to choose either values from the data vector or
5137 zero. */
5138 tree vec_cond = make_ssa_name (vectype);
5139 gimple *vec_cond_stmt = gimple_build_assign (vec_cond, VEC_COND_EXPR,
5140 vec_compare, new_phi_result,
5141 zero_vec);
5142 gsi_insert_before (&exit_gsi, vec_cond_stmt, GSI_SAME_STMT);
5143
5144 /* Finally we need to extract the data value from the vector (VEC_COND)
5145 into a scalar (MATCHED_DATA_REDUC). Logically we want to do an OR
5146 reduction, but because this doesn't exist, we can use a MAX reduction
5147 instead. The data value might be signed or a float so we need to cast
5148 it first.
5149 In the case where the loop never made any matches, the data values are
5150 all identical, and so will reduce down correctly. */
5151
5152 /* Make the matched data values unsigned. */
5153 tree vec_cond_cast = make_ssa_name (vectype_unsigned);
5154 tree vec_cond_cast_rhs = build1 (VIEW_CONVERT_EXPR, vectype_unsigned,
5155 vec_cond);
5156 gimple *vec_cond_cast_stmt = gimple_build_assign (vec_cond_cast,
5157 VIEW_CONVERT_EXPR,
5158 vec_cond_cast_rhs);
5159 gsi_insert_before (&exit_gsi, vec_cond_cast_stmt, GSI_SAME_STMT);
5160
5161 /* Reduce down to a scalar value. */
5162 tree data_reduc = make_ssa_name (scalar_type_unsigned);
5163 gcall *data_reduc_stmt = gimple_build_call_internal (IFN_REDUC_MAX,
5164 1, vec_cond_cast);
5165 gimple_call_set_lhs (data_reduc_stmt, data_reduc);
5166 gsi_insert_before (&exit_gsi, data_reduc_stmt, GSI_SAME_STMT);
5167
5168 /* Convert the reduced value back to the result type and set as the
5169 result. */
5170 stmts = NULL;
5171 new_temp = gimple_build (&stmts, VIEW_CONVERT_EXPR, scalar_type,
5172 data_reduc);
5173 gsi_insert_seq_before (&exit_gsi, stmts, GSI_SAME_STMT);
5174 scalar_results.safe_push (new_temp);
5175 }
5176 else if (STMT_VINFO_REDUC_TYPE (reduc_info) == COND_REDUCTION
5177 && reduc_fn == IFN_LAST)
5178 {
5179 /* Condition reduction without supported IFN_REDUC_MAX. Generate
5180 idx = 0;
5181 idx_val = induction_index[0];
5182 val = data_reduc[0];
5183 for (idx = 0, val = init, i = 0; i < nelts; ++i)
5184 if (induction_index[i] > idx_val)
5185 val = data_reduc[i], idx_val = induction_index[i];
5186 return val; */
5187
5188 tree data_eltype = TREE_TYPE (TREE_TYPE (new_phi_result));
5189 tree idx_eltype = TREE_TYPE (TREE_TYPE (induction_index));
5190 unsigned HOST_WIDE_INT el_size = tree_to_uhwi (TYPE_SIZE (idx_eltype));
5191 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (TREE_TYPE (induction_index));
5192 /* Enforced by vectorizable_reduction, which ensures we have target
5193 support before allowing a conditional reduction on variable-length
5194 vectors. */
5195 unsigned HOST_WIDE_INT v_size = el_size * nunits.to_constant ();
5196 tree idx_val = NULL_TREE, val = NULL_TREE;
5197 for (unsigned HOST_WIDE_INT off = 0; off < v_size; off += el_size)
5198 {
5199 tree old_idx_val = idx_val;
5200 tree old_val = val;
5201 idx_val = make_ssa_name (idx_eltype);
5202 epilog_stmt = gimple_build_assign (idx_val, BIT_FIELD_REF,
5203 build3 (BIT_FIELD_REF, idx_eltype,
5204 induction_index,
5205 bitsize_int (el_size),
5206 bitsize_int (off)));
5207 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5208 val = make_ssa_name (data_eltype);
5209 epilog_stmt = gimple_build_assign (val, BIT_FIELD_REF,
5210 build3 (BIT_FIELD_REF,
5211 data_eltype,
5212 new_phi_result,
5213 bitsize_int (el_size),
5214 bitsize_int (off)));
5215 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5216 if (off != 0)
5217 {
5218 tree new_idx_val = idx_val;
5219 if (off != v_size - el_size)
5220 {
5221 new_idx_val = make_ssa_name (idx_eltype);
5222 epilog_stmt = gimple_build_assign (new_idx_val,
5223 MAX_EXPR, idx_val,
5224 old_idx_val);
5225 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5226 }
5227 tree new_val = make_ssa_name (data_eltype);
5228 epilog_stmt = gimple_build_assign (new_val,
5229 COND_EXPR,
5230 build2 (GT_EXPR,
5231 boolean_type_node,
5232 idx_val,
5233 old_idx_val),
5234 val, old_val);
5235 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5236 idx_val = new_idx_val;
5237 val = new_val;
5238 }
5239 }
5240 /* Convert the reduced value back to the result type and set as the
5241 result. */
5242 gimple_seq stmts = NULL;
5243 val = gimple_convert (&stmts, scalar_type, val);
5244 gsi_insert_seq_before (&exit_gsi, stmts, GSI_SAME_STMT);
5245 scalar_results.safe_push (val);
5246 }
5247
5248 /* 2.3 Create the reduction code, using one of the three schemes described
5249 above. In SLP we simply need to extract all the elements from the
5250 vector (without reducing them), so we use scalar shifts. */
5251 else if (reduc_fn != IFN_LAST && !slp_reduc)
5252 {
5253 tree tmp;
5254 tree vec_elem_type;
5255
5256 /* Case 1: Create:
5257 v_out2 = reduc_expr <v_out1> */
5258
5259 if (dump_enabled_p ())
5260 dump_printf_loc (MSG_NOTE, vect_location,
5261 "Reduce using direct vector reduction.\n");
5262
5263 gimple_seq stmts = NULL;
5264 new_phi_result = gimple_convert (&stmts, vectype, new_phi_result);
5265 vec_elem_type = TREE_TYPE (TREE_TYPE (new_phi_result));
5266 new_temp = gimple_build (&stmts, as_combined_fn (reduc_fn),
5267 vec_elem_type, new_phi_result);
5268 new_temp = gimple_convert (&stmts, scalar_type, new_temp);
5269 gsi_insert_seq_before (&exit_gsi, stmts, GSI_SAME_STMT);
5270
5271 if ((STMT_VINFO_REDUC_TYPE (reduc_info) == INTEGER_INDUC_COND_REDUCTION)
5272 && induc_val)
5273 {
5274 /* Earlier we set the initial value to be a vector of induc_val
5275 values. Check the result and if it is induc_val then replace
5276 with the original initial value, unless induc_val is
5277 the same as initial_def already. */
5278 tree zcompare = build2 (EQ_EXPR, boolean_type_node, new_temp,
5279 induc_val);
5280
5281 tmp = make_ssa_name (new_scalar_dest);
5282 epilog_stmt = gimple_build_assign (tmp, COND_EXPR, zcompare,
5283 initial_def, new_temp);
5284 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5285 new_temp = tmp;
5286 }
5287
5288 scalar_results.safe_push (new_temp);
5289 }
5290 else if (direct_slp_reduc)
5291 {
5292 /* Here we create one vector for each of the REDUC_GROUP_SIZE results,
5293 with the elements for other SLP statements replaced with the
5294 neutral value. We can then do a normal reduction on each vector. */
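      /* Illustrative example for a PLUS reduction with group_size 2 and
	 lanes {a0,b0,a1,b1}: the loop below builds {a0,0,a1,0} and
	 {0,b0,0,b1} (0 being the neutral value for PLUS) and reduces each
	 of them with reduc_fn to obtain the two scalar results.  */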
5295
5296 /* Enforced by vectorizable_reduction. */
5297 gcc_assert (new_phis.length () == 1);
5298 gcc_assert (pow2p_hwi (group_size));
5299
5300 slp_tree orig_phis_slp_node = slp_node_instance->reduc_phis;
5301 vec<stmt_vec_info> orig_phis
5302 = SLP_TREE_SCALAR_STMTS (orig_phis_slp_node);
5303 gimple_seq seq = NULL;
5304
5305 /* Build a vector {0, 1, 2, ...}, with the same number of elements
5306 and the same element size as VECTYPE. */
5307 tree index = build_index_vector (vectype, 0, 1);
5308 tree index_type = TREE_TYPE (index);
5309 tree index_elt_type = TREE_TYPE (index_type);
5310 tree mask_type = truth_type_for (index_type);
5311
5312 /* Create a vector that, for each element, identifies which of
5313 the REDUC_GROUP_SIZE results should use it. */
5314 tree index_mask = build_int_cst (index_elt_type, group_size - 1);
5315 index = gimple_build (&seq, BIT_AND_EXPR, index_type, index,
5316 build_vector_from_val (index_type, index_mask));
5317
5318 /* Get a neutral vector value. This is simply a splat of the neutral
5319 scalar value if we have one, otherwise the initial scalar value
5320 is itself a neutral value. */
5321 tree vector_identity = NULL_TREE;
5322 tree neutral_op = NULL_TREE;
5323 if (slp_node)
5324 {
5325 stmt_vec_info first = REDUC_GROUP_FIRST_ELEMENT (stmt_info);
5326 neutral_op
5327 = neutral_op_for_slp_reduction (slp_node_instance->reduc_phis,
5328 vectype, code, first != NULL);
5329 }
5330 if (neutral_op)
5331 vector_identity = gimple_build_vector_from_val (&seq, vectype,
5332 neutral_op);
5333 for (unsigned int i = 0; i < group_size; ++i)
5334 {
5335 /* If there's no universal neutral value, we can use the
5336 initial scalar value from the original PHI. This is used
5337 for MIN and MAX reduction, for example. */
5338 if (!neutral_op)
5339 {
5340 tree scalar_value
5341 = PHI_ARG_DEF_FROM_EDGE (orig_phis[i]->stmt,
5342 loop_preheader_edge (loop));
5343 scalar_value = gimple_convert (&seq, TREE_TYPE (vectype),
5344 scalar_value);
5345 vector_identity = gimple_build_vector_from_val (&seq, vectype,
5346 scalar_value);
5347 }
5348
5349 /* Calculate the equivalent of:
5350
5351 sel[j] = (index[j] == i);
5352
5353 which selects the elements of NEW_PHI_RESULT that should
5354 be included in the result. */
5355 tree compare_val = build_int_cst (index_elt_type, i);
5356 compare_val = build_vector_from_val (index_type, compare_val);
5357 tree sel = gimple_build (&seq, EQ_EXPR, mask_type,
5358 index, compare_val);
5359
5360 /* Calculate the equivalent of:
5361
5362 vec = seq ? new_phi_result : vector_identity;
5363
5364 VEC is now suitable for a full vector reduction. */
5365 tree vec = gimple_build (&seq, VEC_COND_EXPR, vectype,
5366 sel, new_phi_result, vector_identity);
5367
5368 /* Do the reduction and convert it to the appropriate type. */
5369 tree scalar = gimple_build (&seq, as_combined_fn (reduc_fn),
5370 TREE_TYPE (vectype), vec);
5371 scalar = gimple_convert (&seq, scalar_type, scalar);
5372 scalar_results.safe_push (scalar);
5373 }
5374 gsi_insert_seq_before (&exit_gsi, seq, GSI_SAME_STMT);
5375 }
5376 else
5377 {
5378 bool reduce_with_shift;
5379 tree vec_temp;
5380
5381 gcc_assert (slp_reduc || new_phis.length () == 1);
5382
5383 /* See if the target wants to do the final (shift) reduction
5384 in a vector mode of smaller size and first reduce upper/lower
5385 halves against each other. */
5386 enum machine_mode mode1 = mode;
5387 tree stype = TREE_TYPE (vectype);
5388 unsigned nunits = TYPE_VECTOR_SUBPARTS (vectype).to_constant ();
5389 unsigned nunits1 = nunits;
5390 if ((mode1 = targetm.vectorize.split_reduction (mode)) != mode
5391 && new_phis.length () == 1)
5392 {
5393 nunits1 = GET_MODE_NUNITS (mode1).to_constant ();
5394 /* For SLP reductions we have to make sure lanes match up, but
5395 since we're doing an individual-element final reduction,
5396 reducing the vector width here is even more important.
5397 ??? We can also separate lanes with permutes; for the common
5398 case of a power-of-two group-size, odd/even extracts would work. */
5399 if (slp_reduc && nunits != nunits1)
5400 {
5401 nunits1 = least_common_multiple (nunits1, group_size);
5402 gcc_assert (exact_log2 (nunits1) != -1 && nunits1 <= nunits);
5403 }
5404 }
5405 if (!slp_reduc
5406 && (mode1 = targetm.vectorize.split_reduction (mode)) != mode)
5407 nunits1 = GET_MODE_NUNITS (mode1).to_constant ();
5408
5409 tree vectype1 = get_related_vectype_for_scalar_type (TYPE_MODE (vectype),
5410 stype, nunits1);
5411 reduce_with_shift = have_whole_vector_shift (mode1);
5412 if (!VECTOR_MODE_P (mode1))
5413 reduce_with_shift = false;
5414 else
5415 {
5416 optab optab = optab_for_tree_code (code, vectype1, optab_default);
5417 if (optab_handler (optab, mode1) == CODE_FOR_nothing)
5418 reduce_with_shift = false;
5419 }
5420
5421 /* First reduce the vector to the desired vector size we should
5422 do the shift reduction on, by combining upper and lower halves. */
5423 new_temp = new_phi_result;
5424 while (nunits > nunits1)
5425 {
5426 nunits /= 2;
5427 vectype1 = get_related_vectype_for_scalar_type (TYPE_MODE (vectype),
5428 stype, nunits);
5429 unsigned int bitsize = tree_to_uhwi (TYPE_SIZE (vectype1));
5430
5431 /* The target has to make sure we support lowpart/highpart
5432 extraction, either via direct vector extract or through
5433 integer mode punning. */
5434 tree dst1, dst2;
5435 if (convert_optab_handler (vec_extract_optab,
5436 TYPE_MODE (TREE_TYPE (new_temp)),
5437 TYPE_MODE (vectype1))
5438 != CODE_FOR_nothing)
5439 {
5440 /* Extract sub-vectors directly once vec_extract becomes
5441 a conversion optab. */
5442 dst1 = make_ssa_name (vectype1);
5443 epilog_stmt
5444 = gimple_build_assign (dst1, BIT_FIELD_REF,
5445 build3 (BIT_FIELD_REF, vectype1,
5446 new_temp, TYPE_SIZE (vectype1),
5447 bitsize_int (0)));
5448 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5449 dst2 = make_ssa_name (vectype1);
5450 epilog_stmt
5451 = gimple_build_assign (dst2, BIT_FIELD_REF,
5452 build3 (BIT_FIELD_REF, vectype1,
5453 new_temp, TYPE_SIZE (vectype1),
5454 bitsize_int (bitsize)));
5455 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5456 }
5457 else
5458 {
5459 /* Extract via punning to an appropriately sized integer mode
5460 vector. */
5461 tree eltype = build_nonstandard_integer_type (bitsize, 1);
5462 tree etype = build_vector_type (eltype, 2);
5463 gcc_assert (convert_optab_handler (vec_extract_optab,
5464 TYPE_MODE (etype),
5465 TYPE_MODE (eltype))
5466 != CODE_FOR_nothing);
5467 tree tem = make_ssa_name (etype);
5468 epilog_stmt = gimple_build_assign (tem, VIEW_CONVERT_EXPR,
5469 build1 (VIEW_CONVERT_EXPR,
5470 etype, new_temp));
5471 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5472 new_temp = tem;
5473 tem = make_ssa_name (eltype);
5474 epilog_stmt
5475 = gimple_build_assign (tem, BIT_FIELD_REF,
5476 build3 (BIT_FIELD_REF, eltype,
5477 new_temp, TYPE_SIZE (eltype),
5478 bitsize_int (0)));
5479 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5480 dst1 = make_ssa_name (vectype1);
5481 epilog_stmt = gimple_build_assign (dst1, VIEW_CONVERT_EXPR,
5482 build1 (VIEW_CONVERT_EXPR,
5483 vectype1, tem));
5484 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5485 tem = make_ssa_name (eltype);
5486 epilog_stmt
5487 = gimple_build_assign (tem, BIT_FIELD_REF,
5488 build3 (BIT_FIELD_REF, eltype,
5489 new_temp, TYPE_SIZE (eltype),
5490 bitsize_int (bitsize)));
5491 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5492 dst2 = make_ssa_name (vectype1);
5493 epilog_stmt = gimple_build_assign (dst2, VIEW_CONVERT_EXPR,
5494 build1 (VIEW_CONVERT_EXPR,
5495 vectype1, tem));
5496 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5497 }
5498
5499 new_temp = make_ssa_name (vectype1);
5500 epilog_stmt = gimple_build_assign (new_temp, code, dst1, dst2);
5501 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5502 new_phis[0] = epilog_stmt;
5503 }
5504
5505 if (reduce_with_shift && !slp_reduc)
5506 {
5507 int element_bitsize = tree_to_uhwi (bitsize);
5508 /* Enforced by vectorizable_reduction, which disallows SLP reductions
5509 for variable-length vectors and also requires direct target support
5510 for loop reductions. */
5511 int vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype1));
5512 int nelements = vec_size_in_bits / element_bitsize;
5513 vec_perm_builder sel;
5514 vec_perm_indices indices;
5515
5516 int elt_offset;
5517
5518 tree zero_vec = build_zero_cst (vectype1);
5519 /* Case 2: Create:
5520 for (offset = nelements/2; offset >= 1; offset/=2)
5521 {
5522 Create: va' = vec_shift <va, offset>
5523 Create: va = vop <va, va'>
5524 } */
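	  /* E.g. for a PLUS reduction of {a0,a1,a2,a3}: shifting in two
	     zeros gives {a2,a3,0,0} and adding yields {a0+a2,a1+a3,...};
	     shifting by one and adding again leaves the full sum in the
	     lane extracted in step 2.4 below.  */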
5525
5526 tree rhs;
5527
5528 if (dump_enabled_p ())
5529 dump_printf_loc (MSG_NOTE, vect_location,
5530 "Reduce using vector shifts\n");
5531
5532 gimple_seq stmts = NULL;
5533 new_temp = gimple_convert (&stmts, vectype1, new_temp);
5534 for (elt_offset = nelements / 2;
5535 elt_offset >= 1;
5536 elt_offset /= 2)
5537 {
5538 calc_vec_perm_mask_for_shift (elt_offset, nelements, &sel);
5539 indices.new_vector (sel, 2, nelements);
5540 tree mask = vect_gen_perm_mask_any (vectype1, indices);
5541 new_name = gimple_build (&stmts, VEC_PERM_EXPR, vectype1,
5542 new_temp, zero_vec, mask);
5543 new_temp = gimple_build (&stmts, code,
5544 vectype1, new_name, new_temp);
5545 }
5546 gsi_insert_seq_before (&exit_gsi, stmts, GSI_SAME_STMT);
5547
5548 /* 2.4 Extract the final scalar result. Create:
5549 s_out3 = extract_field <v_out2, bitpos> */
5550
5551 if (dump_enabled_p ())
5552 dump_printf_loc (MSG_NOTE, vect_location,
5553 "extract scalar result\n");
5554
5555 rhs = build3 (BIT_FIELD_REF, scalar_type, new_temp,
5556 bitsize, bitsize_zero_node);
5557 epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
5558 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
5559 gimple_assign_set_lhs (epilog_stmt, new_temp);
5560 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5561 scalar_results.safe_push (new_temp);
5562 }
5563 else
5564 {
5565 /* Case 3: Create:
5566 s = extract_field <v_out2, 0>
5567 for (offset = element_size;
5568 offset < vector_size;
5569 offset += element_size;)
5570 {
5571 Create: s' = extract_field <v_out2, offset>
5572 Create: s = op <s, s'> // For non SLP cases
5573 } */
5574
5575 if (dump_enabled_p ())
5576 dump_printf_loc (MSG_NOTE, vect_location,
5577 "Reduce using scalar code.\n");
5578
5579 int vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype1));
5580 int element_bitsize = tree_to_uhwi (bitsize);
5581 tree compute_type = TREE_TYPE (vectype);
5582 gimple_seq stmts = NULL;
5583 FOR_EACH_VEC_ELT (new_phis, i, new_phi)
5584 {
5585 int bit_offset;
5586 if (gimple_code (new_phi) == GIMPLE_PHI)
5587 vec_temp = PHI_RESULT (new_phi);
5588 else
5589 vec_temp = gimple_assign_lhs (new_phi);
5590 new_temp = gimple_build (&stmts, BIT_FIELD_REF, compute_type,
5591 vec_temp, bitsize, bitsize_zero_node);
5592
5593 /* In SLP we don't need to apply the reduction operation, so we just
5594 collect s' values in SCALAR_RESULTS. */
5595 if (slp_reduc)
5596 scalar_results.safe_push (new_temp);
5597
5598 for (bit_offset = element_bitsize;
5599 bit_offset < vec_size_in_bits;
5600 bit_offset += element_bitsize)
5601 {
5602 tree bitpos = bitsize_int (bit_offset);
5603 new_name = gimple_build (&stmts, BIT_FIELD_REF,
5604 compute_type, vec_temp,
5605 bitsize, bitpos);
5606 if (slp_reduc)
5607 {
5608 /* In SLP we don't need to apply the reduction operation, so
5609 we just collect s' values in SCALAR_RESULTS. */
5610 new_temp = new_name;
5611 scalar_results.safe_push (new_name);
5612 }
5613 else
5614 new_temp = gimple_build (&stmts, code, compute_type,
5615 new_name, new_temp);
5616 }
5617 }
5618
5619 /* The only case where we need to reduce scalar results in SLP is
5620 unrolling. If the size of SCALAR_RESULTS is greater than
5621 REDUC_GROUP_SIZE, we reduce them combining elements modulo
5622 REDUC_GROUP_SIZE. */
5623 if (slp_reduc)
5624 {
5625 tree res, first_res, new_res;
5626
5627 /* Reduce multiple scalar results in case of SLP unrolling. */
5628 for (j = group_size; scalar_results.iterate (j, &res);
5629 j++)
5630 {
5631 first_res = scalar_results[j % group_size];
5632 new_res = gimple_build (&stmts, code, compute_type,
5633 first_res, res);
5634 scalar_results[j % group_size] = new_res;
5635 }
5636 for (k = 0; k < group_size; k++)
5637 scalar_results[k] = gimple_convert (&stmts, scalar_type,
5638 scalar_results[k]);
5639 }
5640 else
5641 {
5642 /* Not SLP - we have one scalar to keep in SCALAR_RESULTS. */
5643 new_temp = gimple_convert (&stmts, scalar_type, new_temp);
5644 scalar_results.safe_push (new_temp);
5645 }
5646
5647 gsi_insert_seq_before (&exit_gsi, stmts, GSI_SAME_STMT);
5648 }
5649
5650 if ((STMT_VINFO_REDUC_TYPE (reduc_info) == INTEGER_INDUC_COND_REDUCTION)
5651 && induc_val)
5652 {
5653 /* Earlier we set the initial value to be a vector of induc_val
5654 values. Check the result and if it is induc_val then replace
5655 with the original initial value, unless induc_val is
5656 the same as initial_def already. */
5657 tree zcompare = build2 (EQ_EXPR, boolean_type_node, new_temp,
5658 induc_val);
5659
5660 tree tmp = make_ssa_name (new_scalar_dest);
5661 epilog_stmt = gimple_build_assign (tmp, COND_EXPR, zcompare,
5662 initial_def, new_temp);
5663 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5664 scalar_results[0] = tmp;
5665 }
5666 }
5667
5668 /* 2.5 Adjust the final result by the initial value of the reduction
5669 variable. (When such adjustment is not needed, then
5670 'adjustment_def' is zero). For example, if code is PLUS we create:
5671 new_temp = loop_exit_def + adjustment_def */
5672
5673 if (adjustment_def)
5674 {
5675 gcc_assert (!slp_reduc);
5676 gimple_seq stmts = NULL;
5677 if (nested_in_vect_loop)
5678 {
5679 new_phi = new_phis[0];
5680 gcc_assert (VECTOR_TYPE_P (TREE_TYPE (adjustment_def)));
5681 adjustment_def = gimple_convert (&stmts, vectype, adjustment_def);
5682 new_temp = gimple_build (&stmts, code, vectype,
5683 PHI_RESULT (new_phi), adjustment_def);
5684 }
5685 else
5686 {
5687 new_temp = scalar_results[0];
5688 gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) != VECTOR_TYPE);
5689 adjustment_def = gimple_convert (&stmts, scalar_type, adjustment_def);
5690 new_temp = gimple_build (&stmts, code, scalar_type,
5691 new_temp, adjustment_def);
5692 }
5693
5694 epilog_stmt = gimple_seq_last_stmt (stmts);
5695 gsi_insert_seq_before (&exit_gsi, stmts, GSI_SAME_STMT);
5696 if (nested_in_vect_loop)
5697 {
5698 if (!double_reduc)
5699 scalar_results.quick_push (new_temp);
5700 else
5701 scalar_results[0] = new_temp;
5702 }
5703 else
5704 scalar_results[0] = new_temp;
5705
5706 new_phis[0] = epilog_stmt;
5707 }
5708
5709 if (double_reduc)
5710 loop = loop->inner;
5711
5712 /* 2.6 Handle the loop-exit phis. Replace the uses of scalar loop-exit
5713 phis with new adjusted scalar results, i.e., replace use <s_out0>
5714 with use <s_out4>.
5715
5716 Transform:
5717 loop_exit:
5718 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
5719 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
5720 v_out2 = reduce <v_out1>
5721 s_out3 = extract_field <v_out2, 0>
5722 s_out4 = adjust_result <s_out3>
5723 use <s_out0>
5724 use <s_out0>
5725
5726 into:
5727
5728 loop_exit:
5729 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
5730 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
5731 v_out2 = reduce <v_out1>
5732 s_out3 = extract_field <v_out2, 0>
5733 s_out4 = adjust_result <s_out3>
5734 use <s_out4>
5735 use <s_out4> */
5736
5737
5738 /* In an SLP reduction chain we reduce the vector results into one vector
5739 if necessary, hence here we set REDUC_GROUP_SIZE to 1. SCALAR_DEST is the
5740 LHS of the last stmt in the reduction chain, since we are looking for
5741 the loop exit phi node. */
5742 if (REDUC_GROUP_FIRST_ELEMENT (stmt_info))
5743 {
5744 stmt_vec_info dest_stmt_info
5745 = vect_orig_stmt (SLP_TREE_SCALAR_STMTS (slp_node)[group_size - 1]);
5746 scalar_dest = gimple_assign_lhs (dest_stmt_info->stmt);
5747 group_size = 1;
5748 }
5749
5750 /* In SLP we may have several statements in NEW_PHIS and REDUCTION_PHIS (in
5751 case REDUC_GROUP_SIZE is greater than the vectorization factor).
5752 Therefore, we need to match SCALAR_RESULTS with corresponding statements.
5753 The first (REDUC_GROUP_SIZE / number of new vector stmts) scalar results
5754 correspond to the first vector stmt, etc.
5755 (RATIO is equal to (REDUC_GROUP_SIZE / number of new vector stmts)). */
5756 if (group_size > new_phis.length ())
5757 gcc_assert (!(group_size % new_phis.length ()));
5758
5759 for (k = 0; k < group_size; k++)
5760 {
5761 if (slp_reduc)
5762 {
5763 stmt_vec_info scalar_stmt_info = SLP_TREE_SCALAR_STMTS (slp_node)[k];
5764
5765 orig_stmt_info = STMT_VINFO_RELATED_STMT (scalar_stmt_info);
5766 /* SLP statements can't participate in patterns. */
5767 gcc_assert (!orig_stmt_info);
5768 scalar_dest = gimple_assign_lhs (scalar_stmt_info->stmt);
5769 }
5770
5771 if (nested_in_vect_loop)
5772 {
5773 if (double_reduc)
5774 loop = outer_loop;
5775 else
5776 gcc_unreachable ();
5777 }
5778
5779 phis.create (3);
5780 /* Find the loop-closed-use at the loop exit of the original scalar
5781 result. (The reduction result is expected to have two immediate uses,
5782 one at the latch block, and one at the loop exit). For double
5783 reductions we are looking for exit phis of the outer loop. */
5784 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
5785 {
5786 if (!flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (use_p))))
5787 {
5788 if (!is_gimple_debug (USE_STMT (use_p)))
5789 phis.safe_push (USE_STMT (use_p));
5790 }
5791 else
5792 {
5793 if (double_reduc && gimple_code (USE_STMT (use_p)) == GIMPLE_PHI)
5794 {
5795 tree phi_res = PHI_RESULT (USE_STMT (use_p));
5796
5797 FOR_EACH_IMM_USE_FAST (phi_use_p, phi_imm_iter, phi_res)
5798 {
5799 if (!flow_bb_inside_loop_p (loop,
5800 gimple_bb (USE_STMT (phi_use_p)))
5801 && !is_gimple_debug (USE_STMT (phi_use_p)))
5802 phis.safe_push (USE_STMT (phi_use_p));
5803 }
5804 }
5805 }
5806 }
5807
5808 FOR_EACH_VEC_ELT (phis, i, exit_phi)
5809 {
5810 /* Replace the uses: */
5811 orig_name = PHI_RESULT (exit_phi);
5812 scalar_result = scalar_results[k];
5813 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name)
5814 {
5815 FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
5816 SET_USE (use_p, scalar_result);
5817 update_stmt (use_stmt);
5818 }
5819 }
5820
5821 phis.release ();
5822 }
5823 }
5824
5825 /* Return a vector of type VECTYPE that is equal to the vector select
5826 operation "MASK ? VEC : IDENTITY". Insert the select statements
5827 before GSI. */
5828
5829 static tree
5830 merge_with_identity (gimple_stmt_iterator *gsi, tree mask, tree vectype,
5831 tree vec, tree identity)
5832 {
5833 tree cond = make_temp_ssa_name (vectype, NULL, "cond");
5834 gimple *new_stmt = gimple_build_assign (cond, VEC_COND_EXPR,
5835 mask, vec, identity);
5836 gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
5837 return cond;
5838 }
5839
5840 /* Successively apply CODE to each element of VECTOR_RHS, in left-to-right
5841 order, starting with LHS. Insert the extraction statements before GSI and
5842 associate the new scalar SSA names with variable SCALAR_DEST.
5843 Return the SSA name for the result. */
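 /* E.g. for CODE == PLUS_EXPR, LHS == s and a 4-element VECTOR_RHS this
    emits (((s + v[0]) + v[1]) + v[2]) + v[3], preserving the strict
    left-to-right association needed for in-order reductions.  */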
5844
5845 static tree
5846 vect_expand_fold_left (gimple_stmt_iterator *gsi, tree scalar_dest,
5847 tree_code code, tree lhs, tree vector_rhs)
5848 {
5849 tree vectype = TREE_TYPE (vector_rhs);
5850 tree scalar_type = TREE_TYPE (vectype);
5851 tree bitsize = TYPE_SIZE (scalar_type);
5852 unsigned HOST_WIDE_INT vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype));
5853 unsigned HOST_WIDE_INT element_bitsize = tree_to_uhwi (bitsize);
5854
5855 for (unsigned HOST_WIDE_INT bit_offset = 0;
5856 bit_offset < vec_size_in_bits;
5857 bit_offset += element_bitsize)
5858 {
5859 tree bitpos = bitsize_int (bit_offset);
5860 tree rhs = build3 (BIT_FIELD_REF, scalar_type, vector_rhs,
5861 bitsize, bitpos);
5862
5863 gassign *stmt = gimple_build_assign (scalar_dest, rhs);
5864 rhs = make_ssa_name (scalar_dest, stmt);
5865 gimple_assign_set_lhs (stmt, rhs);
5866 gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
5867
5868 stmt = gimple_build_assign (scalar_dest, code, lhs, rhs);
5869 tree new_name = make_ssa_name (scalar_dest, stmt);
5870 gimple_assign_set_lhs (stmt, new_name);
5871 gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
5872 lhs = new_name;
5873 }
5874 return lhs;
5875 }
5876
5877 /* Get a masked internal function equivalent to REDUC_FN. VECTYPE_IN is the
5878 type of the vector input. */
5879
5880 static internal_fn
5881 get_masked_reduction_fn (internal_fn reduc_fn, tree vectype_in)
5882 {
5883 internal_fn mask_reduc_fn;
5884
5885 switch (reduc_fn)
5886 {
5887 case IFN_FOLD_LEFT_PLUS:
5888 mask_reduc_fn = IFN_MASK_FOLD_LEFT_PLUS;
5889 break;
5890
5891 default:
5892 return IFN_LAST;
5893 }
5894
5895 if (direct_internal_fn_supported_p (mask_reduc_fn, vectype_in,
5896 OPTIMIZE_FOR_SPEED))
5897 return mask_reduc_fn;
5898 return IFN_LAST;
5899 }
5900
5901 /* Perform an in-order reduction (FOLD_LEFT_REDUCTION). STMT_INFO is the
5902 statement that sets the live-out value. REDUC_DEF_STMT is the phi
5903 statement. CODE is the operation performed by STMT_INFO and OPS are
5904 its scalar operands. REDUC_INDEX is the index of the operand in
5905 OPS that is set by REDUC_DEF_STMT. REDUC_FN is the function that
5906 implements in-order reduction, or IFN_LAST if we should open-code it.
5907 VECTYPE_IN is the type of the vector input. MASKS specifies the masks
5908 that should be used to control the operation in a fully-masked loop. */
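 /* E.g. with REDUC_FN == IFN_FOLD_LEFT_PLUS each vectorized copy below
    becomes reduc_var = .FOLD_LEFT_PLUS (reduc_var, vec_def), or
    .MASK_FOLD_LEFT_PLUS (reduc_var, vec_def, mask) in a fully-masked loop;
    with REDUC_FN == IFN_LAST the reduction is open-coded via
    vect_expand_fold_left.  */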
5909
5910 static bool
5911 vectorize_fold_left_reduction (loop_vec_info loop_vinfo,
5912 stmt_vec_info stmt_info,
5913 gimple_stmt_iterator *gsi,
5914 gimple **vec_stmt, slp_tree slp_node,
5915 gimple *reduc_def_stmt,
5916 tree_code code, internal_fn reduc_fn,
5917 tree ops[3], tree vectype_in,
5918 int reduc_index, vec_loop_masks *masks)
5919 {
5920 class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
5921 tree vectype_out = STMT_VINFO_VECTYPE (stmt_info);
5922 internal_fn mask_reduc_fn = get_masked_reduction_fn (reduc_fn, vectype_in);
5923
5924 int ncopies;
5925 if (slp_node)
5926 ncopies = 1;
5927 else
5928 ncopies = vect_get_num_copies (loop_vinfo, vectype_in);
5929
5930 gcc_assert (!nested_in_vect_loop_p (loop, stmt_info));
5931 gcc_assert (ncopies == 1);
5932 gcc_assert (TREE_CODE_LENGTH (code) == binary_op);
5933
5934 if (slp_node)
5935 gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (vectype_out),
5936 TYPE_VECTOR_SUBPARTS (vectype_in)));
5937
5938 tree op0 = ops[1 - reduc_index];
5939
5940 int group_size = 1;
5941 stmt_vec_info scalar_dest_def_info;
5942 auto_vec<tree> vec_oprnds0;
5943 if (slp_node)
5944 {
5945 auto_vec<vec<tree> > vec_defs (2);
5946 vect_get_slp_defs (loop_vinfo, slp_node, &vec_defs);
5947 vec_oprnds0.safe_splice (vec_defs[1 - reduc_index]);
5948 vec_defs[0].release ();
5949 vec_defs[1].release ();
5950 group_size = SLP_TREE_SCALAR_STMTS (slp_node).length ();
5951 scalar_dest_def_info = SLP_TREE_SCALAR_STMTS (slp_node)[group_size - 1];
5952 }
5953 else
5954 {
5955 vect_get_vec_defs_for_operand (loop_vinfo, stmt_info, 1,
5956 op0, &vec_oprnds0);
5957 scalar_dest_def_info = stmt_info;
5958 }
5959
5960 tree scalar_dest = gimple_assign_lhs (scalar_dest_def_info->stmt);
5961 tree scalar_type = TREE_TYPE (scalar_dest);
5962 tree reduc_var = gimple_phi_result (reduc_def_stmt);
5963
5964 int vec_num = vec_oprnds0.length ();
5965 gcc_assert (vec_num == 1 || slp_node);
5966 tree vec_elem_type = TREE_TYPE (vectype_out);
5967 gcc_checking_assert (useless_type_conversion_p (scalar_type, vec_elem_type));
5968
5969 tree vector_identity = NULL_TREE;
5970 if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
5971 vector_identity = build_zero_cst (vectype_out);
5972
5973 tree scalar_dest_var = vect_create_destination_var (scalar_dest, NULL);
5974 int i;
5975 tree def0;
5976 FOR_EACH_VEC_ELT (vec_oprnds0, i, def0)
5977 {
5978 gimple *new_stmt;
5979 tree mask = NULL_TREE;
5980 if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
5981 mask = vect_get_loop_mask (gsi, masks, vec_num, vectype_in, i);
5982
5983 /* Handle MINUS by adding the negative. */
5984 if (reduc_fn != IFN_LAST && code == MINUS_EXPR)
5985 {
5986 tree negated = make_ssa_name (vectype_out);
5987 new_stmt = gimple_build_assign (negated, NEGATE_EXPR, def0);
5988 gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
5989 def0 = negated;
5990 }
5991
5992 if (mask && mask_reduc_fn == IFN_LAST)
5993 def0 = merge_with_identity (gsi, mask, vectype_out, def0,
5994 vector_identity);
5995
5996 /* On the first iteration the input is simply the scalar phi
5997 result, and for subsequent iterations it is the output of
5998 the preceding operation. */
5999 if (reduc_fn != IFN_LAST || (mask && mask_reduc_fn != IFN_LAST))
6000 {
6001 if (mask && mask_reduc_fn != IFN_LAST)
6002 new_stmt = gimple_build_call_internal (mask_reduc_fn, 3, reduc_var,
6003 def0, mask);
6004 else
6005 new_stmt = gimple_build_call_internal (reduc_fn, 2, reduc_var,
6006 def0);
6007 /* For chained SLP reductions the output of the previous reduction
6008 operation serves as the input of the next. For the final statement
6009 the output cannot be a temporary - we reuse the original
6010 scalar destination of the last statement. */
6011 if (i != vec_num - 1)
6012 {
6013 gimple_set_lhs (new_stmt, scalar_dest_var);
6014 reduc_var = make_ssa_name (scalar_dest_var, new_stmt);
6015 gimple_set_lhs (new_stmt, reduc_var);
6016 }
6017 }
6018 else
6019 {
6020 reduc_var = vect_expand_fold_left (gsi, scalar_dest_var, code,
6021 reduc_var, def0);
6022 new_stmt = SSA_NAME_DEF_STMT (reduc_var);
6023 /* Remove the statement, so that we can use the same code paths
6024 as for statements that we've just created. */
6025 gimple_stmt_iterator tmp_gsi = gsi_for_stmt (new_stmt);
6026 gsi_remove (&tmp_gsi, true);
6027 }
6028
6029 if (i == vec_num - 1)
6030 {
6031 gimple_set_lhs (new_stmt, scalar_dest);
6032 vect_finish_replace_stmt (loop_vinfo,
6033 scalar_dest_def_info,
6034 new_stmt);
6035 }
6036 else
6037 vect_finish_stmt_generation (loop_vinfo,
6038 scalar_dest_def_info,
6039 new_stmt, gsi);
6040
6041 if (slp_node)
6042 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
6043 else
6044 {
6045 STMT_VINFO_VEC_STMTS (stmt_info).safe_push (new_stmt);
6046 *vec_stmt = new_stmt;
6047 }
6048 }
6049
6050 return true;
6051 }
6052
6053 /* Function is_nonwrapping_integer_induction.
6054
6055 Check if STMT_VINFO (which is part of loop LOOP) is an integer
6056 induction that increments without causing overflow. */
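 /* E.g. an unsigned char IV with base 0 and step 1 in a loop that may run
    300 iterations needs 9 bits for its final value and therefore wraps, so
    we return false; for types with undefined overflow we may assume
    non-wrapping and return true immediately.  */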
6057
6058 static bool
6059 is_nonwrapping_integer_induction (stmt_vec_info stmt_vinfo, class loop *loop)
6060 {
6061 gphi *phi = as_a <gphi *> (stmt_vinfo->stmt);
6062 tree base = STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (stmt_vinfo);
6063 tree step = STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo);
6064 tree lhs_type = TREE_TYPE (gimple_phi_result (phi));
6065 widest_int ni, max_loop_value, lhs_max;
6066 wi::overflow_type overflow = wi::OVF_NONE;
6067
6068 /* Make sure the loop is integer based. */
6069 if (TREE_CODE (base) != INTEGER_CST
6070 || TREE_CODE (step) != INTEGER_CST)
6071 return false;
6072
6073 /* Check that the max size of the loop will not wrap. */
6074
6075 if (TYPE_OVERFLOW_UNDEFINED (lhs_type))
6076 return true;
6077
6078 if (! max_stmt_executions (loop, &ni))
6079 return false;
6080
6081 max_loop_value = wi::mul (wi::to_widest (step), ni, TYPE_SIGN (lhs_type),
6082 &overflow);
6083 if (overflow)
6084 return false;
6085
6086 max_loop_value = wi::add (wi::to_widest (base), max_loop_value,
6087 TYPE_SIGN (lhs_type), &overflow);
6088 if (overflow)
6089 return false;
6090
6091 return (wi::min_precision (max_loop_value, TYPE_SIGN (lhs_type))
6092 <= TYPE_PRECISION (lhs_type));
6093 }
6094
6095 /* Check if masking can be supported by inserting a conditional expression.
6096 CODE is the code for the operation. COND_FN is the conditional internal
6097 function, if it exists. VECTYPE_IN is the type of the vector input. */
6098 static bool
6099 use_mask_by_cond_expr_p (enum tree_code code, internal_fn cond_fn,
6100 tree vectype_in)
6101 {
6102 if (cond_fn != IFN_LAST
6103 && direct_internal_fn_supported_p (cond_fn, vectype_in,
6104 OPTIMIZE_FOR_SPEED))
6105 return false;
6106
6107 switch (code)
6108 {
6109 case DOT_PROD_EXPR:
6110 case SAD_EXPR:
6111 return true;
6112
6113 default:
6114 return false;
6115 }
6116 }
6117
6118 /* Insert a conditional expression to enable masked vectorization. CODE is the
6119 code for the operation. VOP is the array of operands. MASK is the loop
6120 mask. GSI is a statement iterator used to place the new conditional
6121 expression. */
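 /* E.g. for DOT_PROD_EXPR the second operand is rewritten to
    MASK ? vop[1] : 0 so that inactive lanes contribute nothing to the dot
    product; for SAD_EXPR we use MASK ? vop[1] : vop[0] so that the
    absolute difference is zero for inactive lanes.  */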
6122 static void
6123 build_vect_cond_expr (enum tree_code code, tree vop[3], tree mask,
6124 gimple_stmt_iterator *gsi)
6125 {
6126 switch (code)
6127 {
6128 case DOT_PROD_EXPR:
6129 {
6130 tree vectype = TREE_TYPE (vop[1]);
6131 tree zero = build_zero_cst (vectype);
6132 tree masked_op1 = make_temp_ssa_name (vectype, NULL, "masked_op1");
6133 gassign *select = gimple_build_assign (masked_op1, VEC_COND_EXPR,
6134 mask, vop[1], zero);
6135 gsi_insert_before (gsi, select, GSI_SAME_STMT);
6136 vop[1] = masked_op1;
6137 break;
6138 }
6139
6140 case SAD_EXPR:
6141 {
6142 tree vectype = TREE_TYPE (vop[1]);
6143 tree masked_op1 = make_temp_ssa_name (vectype, NULL, "masked_op1");
6144 gassign *select = gimple_build_assign (masked_op1, VEC_COND_EXPR,
6145 mask, vop[1], vop[0]);
6146 gsi_insert_before (gsi, select, GSI_SAME_STMT);
6147 vop[1] = masked_op1;
6148 break;
6149 }
6150
6151 default:
6152 gcc_unreachable ();
6153 }
6154 }
6155
6156 /* Function vectorizable_reduction.
6157
6158 Check if STMT_INFO performs a reduction operation that can be vectorized.
6159 If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized
6160 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
6161 Return true if STMT_INFO is vectorizable in this way.
6162
6163 This function also handles reduction idioms (patterns) that have been
6164 recognized in advance during vect_pattern_recog. In this case, STMT_INFO
6165 may be of this form:
6166 X = pattern_expr (arg0, arg1, ..., X)
6167 and its STMT_VINFO_RELATED_STMT points to the last stmt in the original
6168 sequence that had been detected and replaced by the pattern-stmt
6169 (STMT_INFO).
6170
6171 This function also handles reduction of condition expressions, for example:
6172 for (int i = 0; i < N; i++)
6173 if (a[i] < value)
6174 last = a[i];
6175 This is handled by vectorising the loop and creating an additional vector
6176 containing the loop indexes for which "a[i] < value" was true. In the
6177 function epilogue this is reduced to a single max value and then used to
6178 index into the vector of results.
6179
6180 In some cases of reduction patterns, the type of the reduction variable X is
6181 different than the type of the other arguments of STMT_INFO.
6182 In such cases, the vectype that is used when transforming STMT_INFO into
6183 a vector stmt is different than the vectype that is used to determine the
6184 vectorization factor, because it consists of a different number of elements
6185 than the actual number of elements that are being operated upon in parallel.
6186
6187 For example, consider an accumulation of shorts into an int accumulator.
6188 On some targets it's possible to vectorize this pattern operating on 8
6189 shorts at a time (hence, the vectype for purposes of determining the
6190 vectorization factor should be V8HI); on the other hand, the vectype that
6191 is used to create the vector form is actually V4SI (the type of the result).
6192
6193 Upon entry to this function, STMT_VINFO_VECTYPE records the vectype that
6194 indicates the actual level of parallelism (V8HI in the example), so
6195 that the right vectorization factor would be derived. This vectype
6196 corresponds to the type of arguments to the reduction stmt, and should *NOT*
6197 be used to create the vectorized stmt. The right vectype for the vectorized
6198 stmt is obtained from the type of the result X:
6199 get_vectype_for_scalar_type (vinfo, TREE_TYPE (X))
6200
6201 This means that, contrary to "regular" reductions (or "regular" stmts in
6202 general), the following equation:
6203 STMT_VINFO_VECTYPE == get_vectype_for_scalar_type (vinfo, TREE_TYPE (X))
6204 does *NOT* necessarily hold for reduction patterns. */
6205
6206 bool
6207 vectorizable_reduction (loop_vec_info loop_vinfo,
6208 stmt_vec_info stmt_info, slp_tree slp_node,
6209 slp_instance slp_node_instance,
6210 stmt_vector_for_cost *cost_vec)
6211 {
6212 tree scalar_dest;
6213 tree vectype_in = NULL_TREE;
6214 class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
6215 enum vect_def_type cond_reduc_dt = vect_unknown_def_type;
6216 stmt_vec_info cond_stmt_vinfo = NULL;
6217 tree scalar_type;
6218 int i;
6219 int ncopies;
6220 bool single_defuse_cycle = false;
6221 bool nested_cycle = false;
6222 bool double_reduc = false;
6223 int vec_num;
6224 tree tem;
6225 tree cr_index_scalar_type = NULL_TREE, cr_index_vector_type = NULL_TREE;
6226 tree cond_reduc_val = NULL_TREE;
6227
6228 /* Make sure it was already recognized as a reduction computation. */
6229 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_reduction_def
6230 && STMT_VINFO_DEF_TYPE (stmt_info) != vect_double_reduction_def
6231 && STMT_VINFO_DEF_TYPE (stmt_info) != vect_nested_cycle)
6232 return false;
6233
6234 /* The stmt we store reduction analysis meta on. */
6235 stmt_vec_info reduc_info = info_for_reduction (loop_vinfo, stmt_info);
6236 reduc_info->is_reduc_info = true;
6237
6238 if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle)
6239 {
6240 if (is_a <gphi *> (stmt_info->stmt))
6241 /* Analysis for double-reduction is done on the outer
6242 loop PHI, nested cycles have no further restrictions. */
6243 STMT_VINFO_TYPE (stmt_info) = cycle_phi_info_type;
6244 else
6245 STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type;
6246 return true;
6247 }
6248
6249 stmt_vec_info orig_stmt_of_analysis = stmt_info;
6250 stmt_vec_info phi_info = stmt_info;
6251 if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def
6252 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_double_reduction_def)
6253 {
6254 if (!is_a <gphi *> (stmt_info->stmt))
6255 {
6256 STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type;
6257 return true;
6258 }
6259 if (slp_node)
6260 {
6261 slp_node_instance->reduc_phis = slp_node;
6262 /* ??? We're leaving slp_node to point to the PHIs, we only
6263 need it to get at the number of vector stmts which wasn't
6264 yet initialized for the instance root. */
6265 }
6266 if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def)
6267 stmt_info = vect_stmt_to_vectorize (STMT_VINFO_REDUC_DEF (stmt_info));
6268 else /* STMT_VINFO_DEF_TYPE (stmt_info) == vect_double_reduction_def */
6269 {
6270 use_operand_p use_p;
6271 gimple *use_stmt;
6272 bool res = single_imm_use (gimple_phi_result (stmt_info->stmt),
6273 &use_p, &use_stmt);
6274 gcc_assert (res);
6275 phi_info = loop_vinfo->lookup_stmt (use_stmt);
6276 stmt_info = vect_stmt_to_vectorize (STMT_VINFO_REDUC_DEF (phi_info));
6277 }
6278 }
6279
6280 /* PHIs should not participate in patterns. */
6281 gcc_assert (!STMT_VINFO_RELATED_STMT (phi_info));
6282 gphi *reduc_def_phi = as_a <gphi *> (phi_info->stmt);
6283
6284 /* Verify following REDUC_IDX from the latch def leads us back to the PHI
6285 and compute the reduction chain length. */
6286 tree reduc_def = PHI_ARG_DEF_FROM_EDGE (reduc_def_phi,
6287 loop_latch_edge (loop));
6288 unsigned reduc_chain_length = 0;
6289 bool only_slp_reduc_chain = true;
6290 stmt_info = NULL;
6291 while (reduc_def != PHI_RESULT (reduc_def_phi))
6292 {
6293 stmt_vec_info def = loop_vinfo->lookup_def (reduc_def);
6294 stmt_vec_info vdef = vect_stmt_to_vectorize (def);
6295 if (STMT_VINFO_REDUC_IDX (vdef) == -1)
6296 {
6297 if (dump_enabled_p ())
6298 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6299 "reduction chain broken by patterns.\n");
6300 return false;
6301 }
6302 if (!REDUC_GROUP_FIRST_ELEMENT (vdef))
6303 only_slp_reduc_chain = false;
6304 /* ??? For epilogue generation live members of the chain need
6305 to point back to the PHI via their original stmt for
6306 info_for_reduction to work. */
6307 if (STMT_VINFO_LIVE_P (vdef))
6308 STMT_VINFO_REDUC_DEF (def) = phi_info;
6309 gassign *assign = dyn_cast <gassign *> (vdef->stmt);
6310 if (!assign)
6311 {
6312 if (dump_enabled_p ())
6313 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6314 "reduction chain includes calls.\n");
6315 return false;
6316 }
6317 if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (assign)))
6318 {
6319 if (!tree_nop_conversion_p (TREE_TYPE (gimple_assign_lhs (assign)),
6320 TREE_TYPE (gimple_assign_rhs1 (assign))))
6321 {
6322 if (dump_enabled_p ())
6323 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6324 "conversion in the reduction chain.\n");
6325 return false;
6326 }
6327 }
6328 else if (!stmt_info)
6329 /* First non-conversion stmt. */
6330 stmt_info = vdef;
6331 reduc_def = gimple_op (vdef->stmt, 1 + STMT_VINFO_REDUC_IDX (vdef));
6332 reduc_chain_length++;
6333 }
6334 /* PHIs should not participate in patterns. */
6335 gcc_assert (!STMT_VINFO_RELATED_STMT (phi_info));
6336
6337 if (nested_in_vect_loop_p (loop, stmt_info))
6338 {
6339 loop = loop->inner;
6340 nested_cycle = true;
6341 }
6342
6343 /* STMT_VINFO_REDUC_DEF doesn't point to the first but the last
6344 element. */
6345 if (slp_node && REDUC_GROUP_FIRST_ELEMENT (stmt_info))
6346 {
6347 gcc_assert (!REDUC_GROUP_NEXT_ELEMENT (stmt_info));
6348 stmt_info = REDUC_GROUP_FIRST_ELEMENT (stmt_info);
6349 }
6350 if (REDUC_GROUP_FIRST_ELEMENT (stmt_info))
6351 gcc_assert (slp_node
6352 && REDUC_GROUP_FIRST_ELEMENT (stmt_info) == stmt_info);
6353
6354 /* 1. Is vectorizable reduction? */
6355 /* Not supportable if the reduction variable is used in the loop, unless
6356 it's a reduction chain. */
6357 if (STMT_VINFO_RELEVANT (stmt_info) > vect_used_in_outer
6358 && !REDUC_GROUP_FIRST_ELEMENT (stmt_info))
6359 return false;
6360
6361 /* Reductions that are not used even in an enclosing outer-loop
6362 are expected to be "live" (used out of the loop). */
6363 if (STMT_VINFO_RELEVANT (stmt_info) == vect_unused_in_scope
6364 && !STMT_VINFO_LIVE_P (stmt_info))
6365 return false;
6366
6367 /* 2. Has this been recognized as a reduction pattern?
6368
6369 Check if STMT represents a pattern that has been recognized
6370 in earlier analysis stages. For stmts that represent a pattern,
6371 the STMT_VINFO_RELATED_STMT field records the last stmt in
6372 the original sequence that constitutes the pattern. */
6373
6374 stmt_vec_info orig_stmt_info = STMT_VINFO_RELATED_STMT (stmt_info);
6375 if (orig_stmt_info)
6376 {
6377 gcc_assert (STMT_VINFO_IN_PATTERN_P (orig_stmt_info));
6378 gcc_assert (!STMT_VINFO_IN_PATTERN_P (stmt_info));
6379 }
6380
6381 /* 3. Check the operands of the operation. The first operands are defined
6382 inside the loop body. The last operand is the reduction variable,
6383 which is defined by the loop-header-phi. */
6384
6385 tree vectype_out = STMT_VINFO_VECTYPE (stmt_info);
6386 STMT_VINFO_REDUC_VECTYPE (reduc_info) = vectype_out;
6387 gassign *stmt = as_a <gassign *> (stmt_info->stmt);
6388 enum tree_code code = gimple_assign_rhs_code (stmt);
6389 bool lane_reduc_code_p
6390 = (code == DOT_PROD_EXPR || code == WIDEN_SUM_EXPR || code == SAD_EXPR);
6391 int op_type = TREE_CODE_LENGTH (code);
6392
6393 scalar_dest = gimple_assign_lhs (stmt);
6394 scalar_type = TREE_TYPE (scalar_dest);
6395 if (!POINTER_TYPE_P (scalar_type) && !INTEGRAL_TYPE_P (scalar_type)
6396 && !SCALAR_FLOAT_TYPE_P (scalar_type))
6397 return false;
6398
6399 /* Do not try to vectorize bit-precision reductions. */
6400 if (!type_has_mode_precision_p (scalar_type))
6401 return false;
6402
6403 /* For lane-reducing ops we're reducing the number of reduction PHIs,
6404 which means the only use of the PHI result may be in the lane-reducing operation. */
6405 if (lane_reduc_code_p
6406 && reduc_chain_length != 1
6407 && !only_slp_reduc_chain)
6408 {
6409 if (dump_enabled_p ())
6410 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6411 "lane-reducing reduction with extra stmts.\n");
6412 return false;
6413 }
6414
6415 /* All uses but the last are expected to be defined in the loop.
6416 The last use is the reduction variable. In case of nested cycle this
6417 assumption is not true: we use reduc_index to record the index of the
6418 reduction variable. */
6419 /* ??? To get at invariant/constant uses on the SLP node we have to
6420 get to it here, slp_node is still the reduction PHI. */
6421 slp_tree slp_for_stmt_info = NULL;
6422 if (slp_node)
6423 {
6424 slp_for_stmt_info = slp_node_instance->root;
6425 /* And then there's reduction chain with a conversion ... */
6426 if (SLP_TREE_REPRESENTATIVE (slp_for_stmt_info) != stmt_info)
6427 slp_for_stmt_info = SLP_TREE_CHILDREN (slp_for_stmt_info)[0];
6428 gcc_assert (SLP_TREE_REPRESENTATIVE (slp_for_stmt_info) == stmt_info);
6429 }
6430 slp_tree *slp_op = XALLOCAVEC (slp_tree, op_type);
6431 /* We need to skip an extra operand for COND_EXPRs with embedded
6432 comparison. */
6433 unsigned opno_adjust = 0;
6434 if (code == COND_EXPR
6435 && COMPARISON_CLASS_P (gimple_assign_rhs1 (stmt)))
6436 opno_adjust = 1;
6437 for (i = 0; i < op_type; i++)
6438 {
6439 /* The condition of COND_EXPR is checked in vectorizable_condition(). */
6440 if (i == 0 && code == COND_EXPR)
6441 continue;
6442
6443 stmt_vec_info def_stmt_info;
6444 enum vect_def_type dt;
6445 tree op;
6446 if (!vect_is_simple_use (loop_vinfo, stmt_info, slp_for_stmt_info,
6447 i + opno_adjust, &op, &slp_op[i], &dt, &tem,
6448 &def_stmt_info))
6449 {
6450 if (dump_enabled_p ())
6451 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6452 "use not simple.\n");
6453 return false;
6454 }
6455 if (i == STMT_VINFO_REDUC_IDX (stmt_info))
6456 continue;
6457
6458 /* There should be only one cycle def in the stmt, the one
6459 leading to reduc_def. */
6460 if (VECTORIZABLE_CYCLE_DEF (dt))
6461 return false;
6462
6463 /* To properly compute ncopies we are interested in the widest
6464 non-reduction input type in case we're looking at a widening
6465 accumulation that we later handle in vect_transform_reduction. */
6466 if (lane_reduc_code_p
6467 && tem
6468 && (!vectype_in
6469 || (GET_MODE_SIZE (SCALAR_TYPE_MODE (TREE_TYPE (vectype_in)))
6470 < GET_MODE_SIZE (SCALAR_TYPE_MODE (TREE_TYPE (tem))))))
6471 vectype_in = tem;
6472
6473 if (code == COND_EXPR)
6474 {
6475 /* Record how the non-reduction-def value of COND_EXPR is defined. */
6476 if (dt == vect_constant_def)
6477 {
6478 cond_reduc_dt = dt;
6479 cond_reduc_val = op;
6480 }
6481 if (dt == vect_induction_def
6482 && def_stmt_info
6483 && is_nonwrapping_integer_induction (def_stmt_info, loop))
6484 {
6485 cond_reduc_dt = dt;
6486 cond_stmt_vinfo = def_stmt_info;
6487 }
6488 }
6489 }
6490 if (!vectype_in)
6491 vectype_in = STMT_VINFO_VECTYPE (phi_info);
6492 STMT_VINFO_REDUC_VECTYPE_IN (reduc_info) = vectype_in;
6493
6494 enum vect_reduction_type v_reduc_type = STMT_VINFO_REDUC_TYPE (phi_info);
6495 STMT_VINFO_REDUC_TYPE (reduc_info) = v_reduc_type;
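/* Illustrative example (variable names are hypothetical): a
   COND_REDUCTION as handled below typically arises from a loop that
   selects a value under a loop-varying condition, e.g.

     int last = -1;
     for (i = 0; i < n; ++i)
       if (a[i] < b[i])
         last = i;

   where the scalar result is whatever was assigned on the final
   iteration for which the condition held.  */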
6496 /* If we have a condition reduction, see if we can simplify it further. */
6497 if (v_reduc_type == COND_REDUCTION)
6498 {
6499 if (slp_node)
6500 return false;
6501
6502 /* Fail if the reduction value is used in the condition itself. */
6503 if (STMT_VINFO_REDUC_IDX (stmt_info) == 0)
6504 {
6505 if (dump_enabled_p ())
6506 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6507 "condition depends on previous iteration\n");
6508 return false;
6509 }
6510
6511 if (reduc_chain_length == 1
6512 && direct_internal_fn_supported_p (IFN_FOLD_EXTRACT_LAST,
6513 vectype_in, OPTIMIZE_FOR_SPEED))
6514 {
6515 if (dump_enabled_p ())
6516 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6517 "optimizing condition reduction with"
6518 " FOLD_EXTRACT_LAST.\n");
6519 STMT_VINFO_REDUC_TYPE (reduc_info) = EXTRACT_LAST_REDUCTION;
6520 }
6521 else if (cond_reduc_dt == vect_induction_def)
6522 {
6523 tree base
6524 = STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (cond_stmt_vinfo);
6525 tree step = STMT_VINFO_LOOP_PHI_EVOLUTION_PART (cond_stmt_vinfo);
6526
6527 gcc_assert (TREE_CODE (base) == INTEGER_CST
6528 && TREE_CODE (step) == INTEGER_CST);
6529 cond_reduc_val = NULL_TREE;
6530 enum tree_code cond_reduc_op_code = ERROR_MARK;
6531 tree res = PHI_RESULT (STMT_VINFO_STMT (cond_stmt_vinfo));
6532 if (!types_compatible_p (TREE_TYPE (res), TREE_TYPE (base)))
6533 ;
6534 /* Find a suitable value: below base for MAX_EXPR, above base for
6535 MIN_EXPR; for now, punt if base is the type's minimum value for
6536 MAX_EXPR or its maximum value for MIN_EXPR. */
6537 else if (tree_int_cst_sgn (step) == -1)
6538 {
6539 cond_reduc_op_code = MIN_EXPR;
6540 if (tree_int_cst_sgn (base) == -1)
6541 cond_reduc_val = build_int_cst (TREE_TYPE (base), 0);
6542 else if (tree_int_cst_lt (base,
6543 TYPE_MAX_VALUE (TREE_TYPE (base))))
6544 cond_reduc_val
6545 = int_const_binop (PLUS_EXPR, base, integer_one_node);
6546 }
6547 else
6548 {
6549 cond_reduc_op_code = MAX_EXPR;
6550 if (tree_int_cst_sgn (base) == 1)
6551 cond_reduc_val = build_int_cst (TREE_TYPE (base), 0);
6552 else if (tree_int_cst_lt (TYPE_MIN_VALUE (TREE_TYPE (base)),
6553 base))
6554 cond_reduc_val
6555 = int_const_binop (MINUS_EXPR, base, integer_one_node);
6556 }
6557 if (cond_reduc_val)
6558 {
6559 if (dump_enabled_p ())
6560 dump_printf_loc (MSG_NOTE, vect_location,
6561 "condition expression based on "
6562 "integer induction.\n");
6563 STMT_VINFO_REDUC_CODE (reduc_info) = cond_reduc_op_code;
6564 STMT_VINFO_VEC_INDUC_COND_INITIAL_VAL (reduc_info)
6565 = cond_reduc_val;
6566 STMT_VINFO_REDUC_TYPE (reduc_info) = INTEGER_INDUC_COND_REDUCTION;
6567 }
6568 }
6569 else if (cond_reduc_dt == vect_constant_def)
6570 {
6571 enum vect_def_type cond_initial_dt;
6572 tree cond_initial_val
6573 = PHI_ARG_DEF_FROM_EDGE (reduc_def_phi, loop_preheader_edge (loop));
6574
6575 gcc_assert (cond_reduc_val != NULL_TREE);
6576 vect_is_simple_use (cond_initial_val, loop_vinfo, &cond_initial_dt);
6577 if (cond_initial_dt == vect_constant_def
6578 && types_compatible_p (TREE_TYPE (cond_initial_val),
6579 TREE_TYPE (cond_reduc_val)))
6580 {
6581 tree e = fold_binary (LE_EXPR, boolean_type_node,
6582 cond_initial_val, cond_reduc_val);
6583 if (e && (integer_onep (e) || integer_zerop (e)))
6584 {
6585 if (dump_enabled_p ())
6586 dump_printf_loc (MSG_NOTE, vect_location,
6587 "condition expression based on "
6588 "compile time constant.\n");
6589 /* Record reduction code at analysis stage. */
6590 STMT_VINFO_REDUC_CODE (reduc_info)
6591 = integer_onep (e) ? MAX_EXPR : MIN_EXPR;
6592 STMT_VINFO_REDUC_TYPE (reduc_info) = CONST_COND_REDUCTION;
6593 }
6594 }
6595 }
6596 }
6597
6598 if (STMT_VINFO_LIVE_P (phi_info))
6599 return false;
6600
6601 if (slp_node)
6602 ncopies = 1;
6603 else
6604 ncopies = vect_get_num_copies (loop_vinfo, vectype_in);
6605
6606 gcc_assert (ncopies >= 1);
6607
6608 poly_uint64 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
6609
6610 if (nested_cycle)
6611 {
6612 gcc_assert (STMT_VINFO_DEF_TYPE (reduc_info)
6613 == vect_double_reduction_def);
6614 double_reduc = true;
6615 }
6616
6617 /* 4.2. Check support for the epilog operation.
6618
6619 If STMT represents a reduction pattern, then the type of the
6620 reduction variable may be different than the type of the rest
6621 of the arguments. For example, consider the case of accumulation
6622 of shorts into an int accumulator; the original code:
6623 S1: int_a = (int) short_a;
6624 orig_stmt-> S2: int_acc = plus <int_a, int_acc>;
6625
6626 was replaced with:
6627 STMT: int_acc = widen_sum <short_a, int_acc>
6628
6629 This means that:
6630 1. The tree-code that is used to create the vector operation in the
6631 epilog code (that reduces the partial results) is not the
6632 tree-code of STMT, but is rather the tree-code of the original
6633 stmt from the pattern that STMT is replacing. I.e, in the example
6634 above we want to use 'widen_sum' in the loop, but 'plus' in the
6635 epilog.
6636 2. The type (mode) we use to check available target support
6637 for the vector operation to be created in the *epilog*, is
6638 determined by the type of the reduction variable (in the example
6639 above we'd check this: optab_handler (plus_optab, vect_int_mode)).
6640 However the type (mode) we use to check available target support
6641 for the vector operation to be created *inside the loop*, is
6642 determined by the type of the other arguments to STMT (in the
6643 example we'd check this: optab_handler (widen_sum_optab,
6644 vect_short_mode)).
6645
6646 This is contrary to "regular" reductions, in which the types of all
6647 the arguments are the same as the type of the reduction variable.
6648 For "regular" reductions we can therefore use the same vector type
6649 (and also the same tree-code) when generating the epilog code and
6650 when generating the code inside the loop. */
6651
6652 enum tree_code orig_code = STMT_VINFO_REDUC_CODE (phi_info);
6653 STMT_VINFO_REDUC_CODE (reduc_info) = orig_code;
6654
6655 vect_reduction_type reduction_type = STMT_VINFO_REDUC_TYPE (reduc_info);
6656 if (reduction_type == TREE_CODE_REDUCTION)
6657 {
6658 /* Check whether it's ok to change the order of the computation.
6659 Generally, when vectorizing a reduction we change the order of the
6660 computation. This may change the behavior of the program in some
6661 cases, so we need to check that this is ok. One exception is when
6662 vectorizing an outer-loop: the inner-loop is executed sequentially,
6663 and therefore vectorizing reductions in the inner-loop during
6664 outer-loop vectorization is safe. */
6665 if (needs_fold_left_reduction_p (scalar_type, orig_code))
6666 {
6667 /* When vectorizing a reduction chain w/o SLP the reduction PHI
6668 is not directly used in stmt. */
6669 if (!only_slp_reduc_chain
6670 && reduc_chain_length != 1)
6671 {
6672 if (dump_enabled_p ())
6673 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6674 "in-order reduction chain without SLP.\n");
6675 return false;
6676 }
6677 STMT_VINFO_REDUC_TYPE (reduc_info)
6678 = reduction_type = FOLD_LEFT_REDUCTION;
6679 }
6680 else if (!commutative_tree_code (orig_code)
6681 || !associative_tree_code (orig_code))
6682 {
6683 if (dump_enabled_p ())
6684 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6685 "reduction: not commutative/associative");
6686 return false;
6687 }
6688 }
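/* Illustrative example: without permission to reassociate (e.g. no
   -ffast-math) a floating-point accumulation such as

     float s = 0.0f;
     for (i = 0; i < n; ++i)
       s += a[i];

   must keep the original left-to-right evaluation order, which is why
   needs_fold_left_reduction_p classifies it as FOLD_LEFT_REDUCTION above
   rather than letting it be vectorized as a reassociating
   TREE_CODE_REDUCTION.  */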
6689
6690 if ((double_reduc || reduction_type != TREE_CODE_REDUCTION)
6691 && ncopies > 1)
6692 {
6693 if (dump_enabled_p ())
6694 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6695 "multiple types in double reduction or condition "
6696 "reduction or fold-left reduction.\n");
6697 return false;
6698 }
6699
6700 internal_fn reduc_fn = IFN_LAST;
6701 if (reduction_type == TREE_CODE_REDUCTION
6702 || reduction_type == FOLD_LEFT_REDUCTION
6703 || reduction_type == INTEGER_INDUC_COND_REDUCTION
6704 || reduction_type == CONST_COND_REDUCTION)
6705 {
6706 if (reduction_type == FOLD_LEFT_REDUCTION
6707 ? fold_left_reduction_fn (orig_code, &reduc_fn)
6708 : reduction_fn_for_scalar_code (orig_code, &reduc_fn))
6709 {
6710 if (reduc_fn != IFN_LAST
6711 && !direct_internal_fn_supported_p (reduc_fn, vectype_out,
6712 OPTIMIZE_FOR_SPEED))
6713 {
6714 if (dump_enabled_p ())
6715 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6716 "reduc op not supported by target.\n");
6717
6718 reduc_fn = IFN_LAST;
6719 }
6720 }
6721 else
6722 {
6723 if (!nested_cycle || double_reduc)
6724 {
6725 if (dump_enabled_p ())
6726 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6727 "no reduc code for scalar code.\n");
6728
6729 return false;
6730 }
6731 }
6732 }
6733 else if (reduction_type == COND_REDUCTION)
6734 {
6735 int scalar_precision
6736 = GET_MODE_PRECISION (SCALAR_TYPE_MODE (scalar_type));
6737 cr_index_scalar_type = make_unsigned_type (scalar_precision);
6738 cr_index_vector_type = build_vector_type (cr_index_scalar_type,
6739 nunits_out);
6740
6741 if (direct_internal_fn_supported_p (IFN_REDUC_MAX, cr_index_vector_type,
6742 OPTIMIZE_FOR_SPEED))
6743 reduc_fn = IFN_REDUC_MAX;
6744 }
6745 STMT_VINFO_REDUC_FN (reduc_info) = reduc_fn;
6746
6747 if (reduction_type != EXTRACT_LAST_REDUCTION
6748 && (!nested_cycle || double_reduc)
6749 && reduc_fn == IFN_LAST
6750 && !nunits_out.is_constant ())
6751 {
6752 if (dump_enabled_p ())
6753 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6754 "missing target support for reduction on"
6755 " variable-length vectors.\n");
6756 return false;
6757 }
6758
6759 /* For SLP reductions, see if there is a neutral value we can use. */
6760 tree neutral_op = NULL_TREE;
6761 if (slp_node)
6762 neutral_op = neutral_op_for_slp_reduction
6763 (slp_node_instance->reduc_phis, vectype_out, orig_code,
6764 REDUC_GROUP_FIRST_ELEMENT (stmt_info) != NULL);
6765
6766 if (double_reduc && reduction_type == FOLD_LEFT_REDUCTION)
6767 {
6768 /* We can't support in-order reductions of code such as this:
6769
6770 for (int i = 0; i < n1; ++i)
6771 for (int j = 0; j < n2; ++j)
6772 l += a[j];
6773
6774 since GCC effectively transforms the loop when vectorizing:
6775
6776 for (int i = 0; i < n1 / VF; ++i)
6777 for (int j = 0; j < n2; ++j)
6778 for (int k = 0; k < VF; ++k)
6779 l += a[j];
6780
6781 which is a reassociation of the original operation. */
6782 if (dump_enabled_p ())
6783 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6784 "in-order double reduction not supported.\n");
6785
6786 return false;
6787 }
6788
6789 if (reduction_type == FOLD_LEFT_REDUCTION
6790 && slp_node
6791 && !REDUC_GROUP_FIRST_ELEMENT (stmt_info))
6792 {
6793 /* We cannot use in-order reductions in this case because there is
6794 an implicit reassociation of the operations involved. */
6795 if (dump_enabled_p ())
6796 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6797 "in-order unchained SLP reductions not supported.\n");
6798 return false;
6799 }
6800
6801 /* For double reductions, and for SLP reductions with a neutral value,
6802 we construct a variable-length initial vector by loading a vector
6803 full of the neutral value and then shift-and-inserting the start
6804 values into the low-numbered elements. */
6805 if ((double_reduc || neutral_op)
6806 && !nunits_out.is_constant ()
6807 && !direct_internal_fn_supported_p (IFN_VEC_SHL_INSERT,
6808 vectype_out, OPTIMIZE_FOR_SPEED))
6809 {
6810 if (dump_enabled_p ())
6811 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6812 "reduction on variable-length vectors requires"
6813 " target support for a vector-shift-and-insert"
6814 " operation.\n");
6815 return false;
6816 }
6817
6818 /* Check extra constraints for variable-length unchained SLP reductions. */
6819 if (STMT_SLP_TYPE (stmt_info)
6820 && !REDUC_GROUP_FIRST_ELEMENT (stmt_info)
6821 && !nunits_out.is_constant ())
6822 {
6823 /* We checked above that we could build the initial vector when
6824 there's a neutral element value. Check here for the case in
6825 which each SLP statement has its own initial value and in which
6826 that value needs to be repeated for every instance of the
6827 statement within the initial vector. */
6828 unsigned int group_size = SLP_TREE_LANES (slp_node);
6829 if (!neutral_op
6830 && !can_duplicate_and_interleave_p (loop_vinfo, group_size,
6831 TREE_TYPE (vectype_out)))
6832 {
6833 if (dump_enabled_p ())
6834 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6835 "unsupported form of SLP reduction for"
6836 " variable-length vectors: cannot build"
6837 " initial vector.\n");
6838 return false;
6839 }
6840 /* The epilogue code relies on the number of elements being a multiple
6841 of the group size. The duplicate-and-interleave approach to setting
6842 up the initial vector does too. */
6843 if (!multiple_p (nunits_out, group_size))
6844 {
6845 if (dump_enabled_p ())
6846 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6847 "unsupported form of SLP reduction for"
6848 " variable-length vectors: the vector size"
6849 " is not a multiple of the number of results.\n");
6850 return false;
6851 }
6852 }
6853
6854 if (reduction_type == COND_REDUCTION)
6855 {
6856 widest_int ni;
6857
6858 if (! max_loop_iterations (loop, &ni))
6859 {
6860 if (dump_enabled_p ())
6861 dump_printf_loc (MSG_NOTE, vect_location,
6862 "loop count not known, cannot create cond "
6863 "reduction.\n");
6864 return false;
6865 }
6866 /* Convert backedges to iterations. */
6867 ni += 1;
6868
6869 /* The additional index will be the same type as the condition. Check
6870 that the iteration count is at most the type's maximum value less
6871 one (the zero slot is reserved for when there are no matches). */
6872 tree max_index = TYPE_MAX_VALUE (cr_index_scalar_type);
6873 if (wi::geu_p (ni, wi::to_widest (max_index)))
6874 {
6875 if (dump_enabled_p ())
6876 dump_printf_loc (MSG_NOTE, vect_location,
6877 "loop size is greater than data size.\n");
6878 return false;
6879 }
6880 }
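/* Worked example (illustrative): if cr_index_scalar_type is a 16-bit
   unsigned type, index values 1..65535 are usable and 0 is reserved for
   "no lane matched", so the check above rejects loops whose iteration
   count (backedge count plus one) is 65535 or more.  */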
6881
6882 /* In case the vectorization factor (VF) is bigger than the number
6883 of elements that we can fit in a vectype (nunits), we have to generate
6884 more than one vector stmt - i.e. - we need to "unroll" the
6885 vector stmt by a factor VF/nunits. For more details see documentation
6886 in vectorizable_operation. */
6887
6888 /* If the reduction is used in an outer loop we need to generate
6889 VF intermediate results, like so (e.g. for ncopies=2):
6890 r0 = phi (init, r0)
6891 r1 = phi (init, r1)
6892 r0 = x0 + r0;
6893 r1 = x1 + r1;
6894 (i.e. we generate VF results in 2 registers).
6895 In this case we have a separate def-use cycle for each copy, and therefore
6896 for each copy we get the vector def for the reduction variable from the
6897 respective phi node created for this copy.
6898
6899 Otherwise (the reduction is unused in the loop nest), we can combine
6900 together intermediate results, like so (e.g. for ncopies=2):
6901 r = phi (init, r)
6902 r = x0 + r;
6903 r = x1 + r;
6904 (i.e. we generate VF/2 results in a single register).
6905 In this case for each copy we get the vector def for the reduction variable
6906 from the vectorized reduction operation generated in the previous iteration.
6907
6908 This only works when we see both the reduction PHI and its only consumer
6909 in vectorizable_reduction and there are no intermediate stmts
6910 participating. */
6911 if (ncopies > 1
6912 && (STMT_VINFO_RELEVANT (stmt_info) <= vect_used_only_live)
6913 && reduc_chain_length == 1)
6914 single_defuse_cycle = true;
6915
6916 if (single_defuse_cycle || lane_reduc_code_p)
6917 {
6918 gcc_assert (code != COND_EXPR);
6919
6920 /* 4. Supportable by target? */
6921 bool ok = true;
6922
6923 /* 4.1. check support for the operation in the loop */
6924 optab optab = optab_for_tree_code (code, vectype_in, optab_vector);
6925 if (!optab)
6926 {
6927 if (dump_enabled_p ())
6928 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6929 "no optab.\n");
6930 ok = false;
6931 }
6932
6933 machine_mode vec_mode = TYPE_MODE (vectype_in);
6934 if (ok && optab_handler (optab, vec_mode) == CODE_FOR_nothing)
6935 {
6936 if (dump_enabled_p ())
6937 dump_printf (MSG_NOTE, "op not supported by target.\n");
6938 if (maybe_ne (GET_MODE_SIZE (vec_mode), UNITS_PER_WORD)
6939 || !vect_worthwhile_without_simd_p (loop_vinfo, code))
6940 ok = false;
6941 else
6942 if (dump_enabled_p ())
6943 dump_printf (MSG_NOTE, "proceeding using word mode.\n");
6944 }
6945
6946 /* Worthwhile without SIMD support? */
6947 if (ok
6948 && !VECTOR_MODE_P (TYPE_MODE (vectype_in))
6949 && !vect_worthwhile_without_simd_p (loop_vinfo, code))
6950 {
6951 if (dump_enabled_p ())
6952 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6953 "not worthwhile without SIMD support.\n");
6954 ok = false;
6955 }
6956
6957 /* Lane-reducing operations have to go through vect_transform_reduction.
6958 For the other cases try without the single cycle optimization. */
6959 if (!ok)
6960 {
6961 if (lane_reduc_code_p)
6962 return false;
6963 else
6964 single_defuse_cycle = false;
6965 }
6966 }
6967 STMT_VINFO_FORCE_SINGLE_CYCLE (reduc_info) = single_defuse_cycle;
6968
6969 /* If the reduction stmt is one of the patterns that have lane
6970 reduction embedded, we cannot handle the case of ! single_defuse_cycle. */
6971 if ((ncopies > 1 && ! single_defuse_cycle)
6972 && lane_reduc_code_p)
6973 {
6974 if (dump_enabled_p ())
6975 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6976 "multi def-use cycle not possible for lane-reducing "
6977 "reduction operation\n");
6978 return false;
6979 }
6980
6981 if (slp_node
6982 && !(!single_defuse_cycle
6983 && code != DOT_PROD_EXPR
6984 && code != WIDEN_SUM_EXPR
6985 && code != SAD_EXPR
6986 && reduction_type != FOLD_LEFT_REDUCTION))
6987 for (i = 0; i < op_type; i++)
6988 if (!vect_maybe_update_slp_op_vectype (slp_op[i], vectype_in))
6989 {
6990 if (dump_enabled_p ())
6991 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6992 "incompatible vector types for invariants\n");
6993 return false;
6994 }
6995
6996 if (slp_node)
6997 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
6998 else
6999 vec_num = 1;
7000
7001 vect_model_reduction_cost (loop_vinfo, stmt_info, reduc_fn,
7002 reduction_type, ncopies, cost_vec);
7003 if (dump_enabled_p ()
7004 && reduction_type == FOLD_LEFT_REDUCTION)
7005 dump_printf_loc (MSG_NOTE, vect_location,
7006 "using an in-order (fold-left) reduction.\n");
7007 STMT_VINFO_TYPE (orig_stmt_of_analysis) = cycle_phi_info_type;
7008 /* All reductions except single-defuse-cycle optimized, lane-reducing and
7009 fold-left ones go through their own vectorizable_* routines. */
7010 if (!single_defuse_cycle
7011 && code != DOT_PROD_EXPR
7012 && code != WIDEN_SUM_EXPR
7013 && code != SAD_EXPR
7014 && reduction_type != FOLD_LEFT_REDUCTION)
7015 {
7016 stmt_vec_info tem
7017 = vect_stmt_to_vectorize (STMT_VINFO_REDUC_DEF (phi_info));
7018 if (slp_node && REDUC_GROUP_FIRST_ELEMENT (tem))
7019 {
7020 gcc_assert (!REDUC_GROUP_NEXT_ELEMENT (tem));
7021 tem = REDUC_GROUP_FIRST_ELEMENT (tem);
7022 }
7023 STMT_VINFO_DEF_TYPE (vect_orig_stmt (tem)) = vect_internal_def;
7024 STMT_VINFO_DEF_TYPE (tem) = vect_internal_def;
7025 }
7026 else if (loop_vinfo && LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (loop_vinfo))
7027 {
7028 vec_loop_masks *masks = &LOOP_VINFO_MASKS (loop_vinfo);
7029 internal_fn cond_fn = get_conditional_internal_fn (code);
7030
7031 if (reduction_type != FOLD_LEFT_REDUCTION
7032 && !use_mask_by_cond_expr_p (code, cond_fn, vectype_in)
7033 && (cond_fn == IFN_LAST
7034 || !direct_internal_fn_supported_p (cond_fn, vectype_in,
7035 OPTIMIZE_FOR_SPEED)))
7036 {
7037 if (dump_enabled_p ())
7038 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7039 "can't operate on partial vectors because"
7040 " no conditional operation is available.\n");
7041 LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (loop_vinfo) = false;
7042 }
7043 else if (reduction_type == FOLD_LEFT_REDUCTION
7044 && reduc_fn == IFN_LAST
7045 && !expand_vec_cond_expr_p (vectype_in,
7046 truth_type_for (vectype_in),
7047 SSA_NAME))
7048 {
7049 if (dump_enabled_p ())
7050 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7051 "can't operate on partial vectors because"
7052 " no conditional operation is available.\n");
7053 LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (loop_vinfo) = false;
7054 }
7055 else
7056 vect_record_loop_mask (loop_vinfo, masks, ncopies * vec_num,
7057 vectype_in, NULL);
7058 }
7059 return true;
7060 }
7061
7062 /* Transform the definition stmt STMT_INFO of a reduction PHI backedge
7063 value. */
7064
7065 bool
7066 vect_transform_reduction (loop_vec_info loop_vinfo,
7067 stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
7068 gimple **vec_stmt, slp_tree slp_node)
7069 {
7070 tree vectype_out = STMT_VINFO_VECTYPE (stmt_info);
7071 class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
7072 int i;
7073 int ncopies;
7074 int vec_num;
7075
7076 stmt_vec_info reduc_info = info_for_reduction (loop_vinfo, stmt_info);
7077 gcc_assert (reduc_info->is_reduc_info);
7078
7079 if (nested_in_vect_loop_p (loop, stmt_info))
7080 {
7081 loop = loop->inner;
7082 gcc_assert (STMT_VINFO_DEF_TYPE (reduc_info) == vect_double_reduction_def);
7083 }
7084
7085 gassign *stmt = as_a <gassign *> (stmt_info->stmt);
7086 enum tree_code code = gimple_assign_rhs_code (stmt);
7087 int op_type = TREE_CODE_LENGTH (code);
7088
7089 /* Flatten RHS. */
7090 tree ops[3];
7091 switch (get_gimple_rhs_class (code))
7092 {
7093 case GIMPLE_TERNARY_RHS:
7094 ops[2] = gimple_assign_rhs3 (stmt);
7095 /* Fall thru. */
7096 case GIMPLE_BINARY_RHS:
7097 ops[0] = gimple_assign_rhs1 (stmt);
7098 ops[1] = gimple_assign_rhs2 (stmt);
7099 break;
7100 default:
7101 gcc_unreachable ();
7102 }
7103
7104 /* All uses but the last are expected to be defined in the loop.
7105 The last use is the reduction variable. In case of nested cycle this
7106 assumption is not true: we use reduc_index to record the index of the
7107 reduction variable. */
7108 stmt_vec_info phi_info = STMT_VINFO_REDUC_DEF (vect_orig_stmt (stmt_info));
7109 gphi *reduc_def_phi = as_a <gphi *> (phi_info->stmt);
7110 int reduc_index = STMT_VINFO_REDUC_IDX (stmt_info);
7111 tree vectype_in = STMT_VINFO_REDUC_VECTYPE_IN (reduc_info);
7112
7113 if (slp_node)
7114 {
7115 ncopies = 1;
7116 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
7117 }
7118 else
7119 {
7120 ncopies = vect_get_num_copies (loop_vinfo, vectype_in);
7121 vec_num = 1;
7122 }
7123
7124 internal_fn cond_fn = get_conditional_internal_fn (code);
7125 vec_loop_masks *masks = &LOOP_VINFO_MASKS (loop_vinfo);
7126 bool mask_by_cond_expr = use_mask_by_cond_expr_p (code, cond_fn, vectype_in);
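/* Illustrative note (the exact generated form may differ): in a
   fully-masked loop, when a conditional internal function for CODE is
   available the in-loop reduction statement is emitted roughly as

     acc_1 = .COND_ADD (loop_mask, acc_0, x_0, acc_0);

   so inactive lanes keep the previous accumulator value, while the
   mask_by_cond_expr path instead folds the mask into one operand with a
   VEC_COND_EXPR before the ordinary (lane-reducing) operation.  */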
7127
7128 /* Transform. */
7129 tree new_temp = NULL_TREE;
7130 auto_vec<tree> vec_oprnds0;
7131 auto_vec<tree> vec_oprnds1;
7132 auto_vec<tree> vec_oprnds2;
7133 tree def0;
7134
7135 if (dump_enabled_p ())
7136 dump_printf_loc (MSG_NOTE, vect_location, "transform reduction.\n");
7137
7138 /* FORNOW: Multiple types are not supported for condition. */
7139 if (code == COND_EXPR)
7140 gcc_assert (ncopies == 1);
7141
7142 bool masked_loop_p = LOOP_VINFO_FULLY_MASKED_P (loop_vinfo);
7143
7144 vect_reduction_type reduction_type = STMT_VINFO_REDUC_TYPE (reduc_info);
7145 if (reduction_type == FOLD_LEFT_REDUCTION)
7146 {
7147 internal_fn reduc_fn = STMT_VINFO_REDUC_FN (reduc_info);
7148 return vectorize_fold_left_reduction
7149 (loop_vinfo, stmt_info, gsi, vec_stmt, slp_node, reduc_def_phi, code,
7150 reduc_fn, ops, vectype_in, reduc_index, masks);
7151 }
7152
7153 bool single_defuse_cycle = STMT_VINFO_FORCE_SINGLE_CYCLE (reduc_info);
7154 gcc_assert (single_defuse_cycle
7155 || code == DOT_PROD_EXPR
7156 || code == WIDEN_SUM_EXPR
7157 || code == SAD_EXPR);
7158
7159 /* Create the destination vector */
7160 tree scalar_dest = gimple_assign_lhs (stmt);
7161 tree vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
7162
7163 vect_get_vec_defs (loop_vinfo, stmt_info, slp_node, ncopies,
7164 single_defuse_cycle && reduc_index == 0
7165 ? NULL_TREE : ops[0], &vec_oprnds0,
7166 single_defuse_cycle && reduc_index == 1
7167 ? NULL_TREE : ops[1], &vec_oprnds1,
7168 op_type == ternary_op
7169 && !(single_defuse_cycle && reduc_index == 2)
7170 ? ops[2] : NULL_TREE, &vec_oprnds2);
7171 if (single_defuse_cycle)
7172 {
7173 gcc_assert (!slp_node);
7174 vect_get_vec_defs_for_operand (loop_vinfo, stmt_info, 1,
7175 ops[reduc_index],
7176 reduc_index == 0 ? &vec_oprnds0
7177 : (reduc_index == 1 ? &vec_oprnds1
7178 : &vec_oprnds2));
7179 }
7180
7181 FOR_EACH_VEC_ELT (vec_oprnds0, i, def0)
7182 {
7183 gimple *new_stmt;
7184 tree vop[3] = { def0, vec_oprnds1[i], NULL_TREE };
7185 if (masked_loop_p && !mask_by_cond_expr)
7186 {
7187 /* Make sure that the reduction accumulator is vop[0]. */
7188 if (reduc_index == 1)
7189 {
7190 gcc_assert (commutative_tree_code (code));
7191 std::swap (vop[0], vop[1]);
7192 }
7193 tree mask = vect_get_loop_mask (gsi, masks, vec_num * ncopies,
7194 vectype_in, i);
7195 gcall *call = gimple_build_call_internal (cond_fn, 4, mask,
7196 vop[0], vop[1], vop[0]);
7197 new_temp = make_ssa_name (vec_dest, call);
7198 gimple_call_set_lhs (call, new_temp);
7199 gimple_call_set_nothrow (call, true);
7200 vect_finish_stmt_generation (loop_vinfo, stmt_info, call, gsi);
7201 new_stmt = call;
7202 }
7203 else
7204 {
7205 if (op_type == ternary_op)
7206 vop[2] = vec_oprnds2[i];
7207
7208 if (masked_loop_p && mask_by_cond_expr)
7209 {
7210 tree mask = vect_get_loop_mask (gsi, masks, vec_num * ncopies,
7211 vectype_in, i);
7212 build_vect_cond_expr (code, vop, mask, gsi);
7213 }
7214
7215 new_stmt = gimple_build_assign (vec_dest, code,
7216 vop[0], vop[1], vop[2]);
7217 new_temp = make_ssa_name (vec_dest, new_stmt);
7218 gimple_assign_set_lhs (new_stmt, new_temp);
7219 vect_finish_stmt_generation (loop_vinfo, stmt_info, new_stmt, gsi);
7220 }
7221
7222 if (slp_node)
7223 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
7224 else if (single_defuse_cycle
7225 && i < ncopies - 1)
7226 {
7227 if (reduc_index == 0)
7228 vec_oprnds0.safe_push (gimple_get_lhs (new_stmt));
7229 else if (reduc_index == 1)
7230 vec_oprnds1.safe_push (gimple_get_lhs (new_stmt));
7231 else if (reduc_index == 2)
7232 vec_oprnds2.safe_push (gimple_get_lhs (new_stmt));
7233 }
7234 else
7235 STMT_VINFO_VEC_STMTS (stmt_info).safe_push (new_stmt);
7236 }
7237
7238 if (!slp_node)
7239 *vec_stmt = STMT_VINFO_VEC_STMTS (stmt_info)[0];
7240
7241 return true;
7242 }
7243
7244 /* Transform phase of a cycle PHI. */
7245
7246 bool
7247 vect_transform_cycle_phi (loop_vec_info loop_vinfo,
7248 stmt_vec_info stmt_info, gimple **vec_stmt,
7249 slp_tree slp_node, slp_instance slp_node_instance)
7250 {
7251 tree vectype_out = STMT_VINFO_VECTYPE (stmt_info);
7252 class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
7253 int i;
7254 int ncopies;
7255 int j;
7256 bool nested_cycle = false;
7257 int vec_num;
7258
7259 if (nested_in_vect_loop_p (loop, stmt_info))
7260 {
7261 loop = loop->inner;
7262 nested_cycle = true;
7263 }
7264
7265 stmt_vec_info reduc_stmt_info = STMT_VINFO_REDUC_DEF (stmt_info);
7266 reduc_stmt_info = vect_stmt_to_vectorize (reduc_stmt_info);
7267 stmt_vec_info reduc_info = info_for_reduction (loop_vinfo, stmt_info);
7268 gcc_assert (reduc_info->is_reduc_info);
7269
7270 if (STMT_VINFO_REDUC_TYPE (reduc_info) == EXTRACT_LAST_REDUCTION
7271 || STMT_VINFO_REDUC_TYPE (reduc_info) == FOLD_LEFT_REDUCTION)
7272 /* Leave the scalar phi in place. */
7273 return true;
7274
7275 tree vectype_in = STMT_VINFO_REDUC_VECTYPE_IN (reduc_info);
7276 /* For a nested cycle we do not fill the above. */
7277 if (!vectype_in)
7278 vectype_in = STMT_VINFO_VECTYPE (stmt_info);
7279 gcc_assert (vectype_in);
7280
7281 if (slp_node)
7282 {
7283 /* The size vect_schedule_slp_instance computes is off for us. */
7284 vec_num = vect_get_num_vectors (LOOP_VINFO_VECT_FACTOR (loop_vinfo)
7285 * SLP_TREE_LANES (slp_node), vectype_in);
7286 ncopies = 1;
7287 }
7288 else
7289 {
7290 vec_num = 1;
7291 ncopies = vect_get_num_copies (loop_vinfo, vectype_in);
7292 }
7293
7294 /* Check whether we should use a single PHI node and accumulate
7295 vectors to one before the backedge. */
7296 if (STMT_VINFO_FORCE_SINGLE_CYCLE (reduc_info))
7297 ncopies = 1;
7298
7299 /* Create the destination vector */
7300 gphi *phi = as_a <gphi *> (stmt_info->stmt);
7301 tree vec_dest = vect_create_destination_var (gimple_phi_result (phi),
7302 vectype_out);
7303
7304 /* Get the loop-entry arguments. */
7305 tree vec_initial_def;
7306 auto_vec<tree> vec_initial_defs;
7307 if (slp_node)
7308 {
7309 vec_initial_defs.reserve (vec_num);
7310 gcc_assert (slp_node == slp_node_instance->reduc_phis);
7311 stmt_vec_info first = REDUC_GROUP_FIRST_ELEMENT (reduc_stmt_info);
7312 tree neutral_op
7313 = neutral_op_for_slp_reduction (slp_node, vectype_out,
7314 STMT_VINFO_REDUC_CODE (reduc_info),
7315 first != NULL);
7316 get_initial_defs_for_reduction (loop_vinfo, slp_node_instance->reduc_phis,
7317 &vec_initial_defs, vec_num,
7318 first != NULL, neutral_op);
7319 }
7320 else
7321 {
7322 /* Get at the scalar def before the loop that defines the initial
7323 value of the reduction variable. */
7324 tree initial_def = PHI_ARG_DEF_FROM_EDGE (phi,
7325 loop_preheader_edge (loop));
7326 /* Optimize: if, for REDUC_MAX, initial_def is smaller than the base
7327 and we can't use zero for induc_val, use initial_def.  Similarly
7328 for REDUC_MIN when initial_def is larger than the base. */
7329 if (STMT_VINFO_REDUC_TYPE (reduc_info) == INTEGER_INDUC_COND_REDUCTION)
7330 {
7331 tree induc_val = STMT_VINFO_VEC_INDUC_COND_INITIAL_VAL (reduc_info);
7332 if (TREE_CODE (initial_def) == INTEGER_CST
7333 && !integer_zerop (induc_val)
7334 && ((STMT_VINFO_REDUC_CODE (reduc_info) == MAX_EXPR
7335 && tree_int_cst_lt (initial_def, induc_val))
7336 || (STMT_VINFO_REDUC_CODE (reduc_info) == MIN_EXPR
7337 && tree_int_cst_lt (induc_val, initial_def))))
7338 {
7339 induc_val = initial_def;
7340 /* Communicate we used the initial_def to epilogue
7341 generation. */
7342 STMT_VINFO_VEC_INDUC_COND_INITIAL_VAL (reduc_info) = NULL_TREE;
7343 }
7344 vec_initial_def = build_vector_from_val (vectype_out, induc_val);
7345 vec_initial_defs.create (ncopies);
7346 for (i = 0; i < ncopies; ++i)
7347 vec_initial_defs.quick_push (vec_initial_def);
7348 }
7349 else if (nested_cycle)
7350 {
7351 /* Do not use an adjustment def as that case is not supported
7352 correctly if ncopies is not one. */
7353 vect_get_vec_defs_for_operand (loop_vinfo, reduc_stmt_info,
7354 ncopies, initial_def,
7355 &vec_initial_defs);
7356 }
7357 else
7358 {
7359 tree adjustment_def = NULL_TREE;
7360 tree *adjustment_defp = &adjustment_def;
7361 enum tree_code code = STMT_VINFO_REDUC_CODE (reduc_info);
7362 if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_double_reduction_def)
7363 adjustment_defp = NULL;
7364 vec_initial_def
7365 = get_initial_def_for_reduction (loop_vinfo, reduc_stmt_info, code,
7366 initial_def, adjustment_defp);
7367 STMT_VINFO_REDUC_EPILOGUE_ADJUSTMENT (reduc_info) = adjustment_def;
7368 vec_initial_defs.create (ncopies);
7369 for (i = 0; i < ncopies; ++i)
7370 vec_initial_defs.quick_push (vec_initial_def);
7371 }
7372 }
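/* Illustrative note: for a plain sum reduction with scalar initial value
   'init', the initial vector def obtained above is typically either
   { init, 0, ..., 0 } or an all-zero (neutral) vector together with an
   adjustment_def of 'init' that the epilogue code adds back in.  */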
7373
7374 /* Generate the reduction PHIs upfront. */
7375 for (i = 0; i < vec_num; i++)
7376 {
7377 tree vec_init_def = vec_initial_defs[i];
7378 for (j = 0; j < ncopies; j++)
7379 {
7380 /* Create the reduction-phi that defines the reduction
7381 operand. */
7382 gphi *new_phi = create_phi_node (vec_dest, loop->header);
7383
7384 /* Set the loop-entry arg of the reduction-phi. */
7385 if (j != 0 && nested_cycle)
7386 vec_init_def = vec_initial_defs[j];
7387 add_phi_arg (new_phi, vec_init_def, loop_preheader_edge (loop),
7388 UNKNOWN_LOCATION);
7389
7390 /* The loop-latch arg is set in epilogue processing. */
7391
7392 if (slp_node)
7393 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_phi);
7394 else
7395 {
7396 if (j == 0)
7397 *vec_stmt = new_phi;
7398 STMT_VINFO_VEC_STMTS (stmt_info).safe_push (new_phi);
7399 }
7400 }
7401 }
7402
7403 return true;
7404 }
7405
7406 /* Vectorizes LC PHIs. */
7407
7408 bool
7409 vectorizable_lc_phi (loop_vec_info loop_vinfo,
7410 stmt_vec_info stmt_info, gimple **vec_stmt,
7411 slp_tree slp_node)
7412 {
7413 if (!loop_vinfo
7414 || !is_a <gphi *> (stmt_info->stmt)
7415 || gimple_phi_num_args (stmt_info->stmt) != 1)
7416 return false;
7417
7418 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
7419 && STMT_VINFO_DEF_TYPE (stmt_info) != vect_double_reduction_def)
7420 return false;
7421
7422 if (!vec_stmt) /* transformation not required. */
7423 {
7424 STMT_VINFO_TYPE (stmt_info) = lc_phi_info_type;
7425 return true;
7426 }
7427
7428 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
7429 tree scalar_dest = gimple_phi_result (stmt_info->stmt);
7430 basic_block bb = gimple_bb (stmt_info->stmt);
7431 edge e = single_pred_edge (bb);
7432 tree vec_dest = vect_create_destination_var (scalar_dest, vectype);
7433 auto_vec<tree> vec_oprnds;
7434 vect_get_vec_defs (loop_vinfo, stmt_info, slp_node,
7435 !slp_node ? vect_get_num_copies (loop_vinfo, vectype) : 1,
7436 gimple_phi_arg_def (stmt_info->stmt, 0), &vec_oprnds);
7437 for (unsigned i = 0; i < vec_oprnds.length (); i++)
7438 {
7439 /* Create the vectorized LC PHI node. */
7440 gphi *new_phi = create_phi_node (vec_dest, bb);
7441 add_phi_arg (new_phi, vec_oprnds[i], e, UNKNOWN_LOCATION);
7442 if (slp_node)
7443 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_phi);
7444 else
7445 STMT_VINFO_VEC_STMTS (stmt_info).safe_push (new_phi);
7446 }
7447 if (!slp_node)
7448 *vec_stmt = STMT_VINFO_VEC_STMTS (stmt_info)[0];
7449
7450 return true;
7451 }
7452
7453
7454 /* Function vect_min_worthwhile_factor.
7455
7456 For a loop where we could vectorize the operation indicated by CODE,
7457 return the minimum vectorization factor that makes it worthwhile
7458 to use generic vectors. */
7459 static unsigned int
7460 vect_min_worthwhile_factor (enum tree_code code)
7461 {
7462 switch (code)
7463 {
7464 case PLUS_EXPR:
7465 case MINUS_EXPR:
7466 case NEGATE_EXPR:
7467 return 4;
7468
7469 case BIT_AND_EXPR:
7470 case BIT_IOR_EXPR:
7471 case BIT_XOR_EXPR:
7472 case BIT_NOT_EXPR:
7473 return 2;
7474
7475 default:
7476 return INT_MAX;
7477 }
7478 }
7479
7480 /* Return true if VINFO indicates we are doing loop vectorization and if
7481 it is worth decomposing CODE operations into scalar operations for
7482 that loop's vectorization factor. */
7483
7484 bool
7485 vect_worthwhile_without_simd_p (vec_info *vinfo, tree_code code)
7486 {
7487 loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
7488 unsigned HOST_WIDE_INT value;
7489 return (loop_vinfo
7490 && LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant (&value)
7491 && value >= vect_min_worthwhile_factor (code));
7492 }
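/* Worked example (illustrative): with a constant vectorization factor of
   2, BIT_AND_EXPR (minimum factor 2) is considered worthwhile to emulate
   with general registers while PLUS_EXPR (minimum factor 4) is not; with
   a non-constant factor the function always returns false.  */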
7493
7494 /* Function vectorizable_induction
7495
7496 Check if STMT_INFO performs an induction computation that can be vectorized.
7497 If VEC_STMT is also passed, vectorize the induction PHI: create a vectorized
7498 phi to replace it, put it in VEC_STMT, and add it to the same basic block.
7499 Return true if STMT_INFO is vectorizable in this way. */
7500
7501 bool
7502 vectorizable_induction (loop_vec_info loop_vinfo,
7503 stmt_vec_info stmt_info,
7504 gimple **vec_stmt, slp_tree slp_node,
7505 stmt_vector_for_cost *cost_vec)
7506 {
7507 class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
7508 unsigned ncopies;
7509 bool nested_in_vect_loop = false;
7510 class loop *iv_loop;
7511 tree vec_def;
7512 edge pe = loop_preheader_edge (loop);
7513 basic_block new_bb;
7514 tree new_vec, vec_init, vec_step, t;
7515 tree new_name;
7516 gimple *new_stmt;
7517 gphi *induction_phi;
7518 tree induc_def, vec_dest;
7519 tree init_expr, step_expr;
7520 poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
7521 unsigned i;
7522 tree expr;
7523 gimple_seq stmts;
7524 gimple_stmt_iterator si;
7525
7526 gphi *phi = dyn_cast <gphi *> (stmt_info->stmt);
7527 if (!phi)
7528 return false;
7529
7530 if (!STMT_VINFO_RELEVANT_P (stmt_info))
7531 return false;
7532
7533 /* Make sure it was recognized as induction computation. */
7534 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_induction_def)
7535 return false;
7536
7537 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
7538 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
7539
7540 if (slp_node)
7541 ncopies = 1;
7542 else
7543 ncopies = vect_get_num_copies (loop_vinfo, vectype);
7544 gcc_assert (ncopies >= 1);
7545
7546 /* FORNOW. These restrictions should be relaxed. */
7547 if (nested_in_vect_loop_p (loop, stmt_info))
7548 {
7549 imm_use_iterator imm_iter;
7550 use_operand_p use_p;
7551 gimple *exit_phi;
7552 edge latch_e;
7553 tree loop_arg;
7554
7555 if (ncopies > 1)
7556 {
7557 if (dump_enabled_p ())
7558 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7559 "multiple types in nested loop.\n");
7560 return false;
7561 }
7562
7563 /* FORNOW: outer loop induction with SLP not supported. */
7564 if (STMT_SLP_TYPE (stmt_info))
7565 return false;
7566
7567 exit_phi = NULL;
7568 latch_e = loop_latch_edge (loop->inner);
7569 loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);
7570 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, loop_arg)
7571 {
7572 gimple *use_stmt = USE_STMT (use_p);
7573 if (is_gimple_debug (use_stmt))
7574 continue;
7575
7576 if (!flow_bb_inside_loop_p (loop->inner, gimple_bb (use_stmt)))
7577 {
7578 exit_phi = use_stmt;
7579 break;
7580 }
7581 }
7582 if (exit_phi)
7583 {
7584 stmt_vec_info exit_phi_vinfo = loop_vinfo->lookup_stmt (exit_phi);
7585 if (!(STMT_VINFO_RELEVANT_P (exit_phi_vinfo)
7586 && !STMT_VINFO_LIVE_P (exit_phi_vinfo)))
7587 {
7588 if (dump_enabled_p ())
7589 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7590 "inner-loop induction only used outside "
7591 "of the outer vectorized loop.\n");
7592 return false;
7593 }
7594 }
7595
7596 nested_in_vect_loop = true;
7597 iv_loop = loop->inner;
7598 }
7599 else
7600 iv_loop = loop;
7601 gcc_assert (iv_loop == (gimple_bb (phi))->loop_father);
7602
7603 if (slp_node && !nunits.is_constant ())
7604 {
7605 /* The current SLP code creates the initial value element-by-element. */
7606 if (dump_enabled_p ())
7607 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7608 "SLP induction not supported for variable-length"
7609 " vectors.\n");
7610 return false;
7611 }
7612
7613 if (!vec_stmt) /* transformation not required. */
7614 {
7615 STMT_VINFO_TYPE (stmt_info) = induc_vec_info_type;
7616 DUMP_VECT_SCOPE ("vectorizable_induction");
7617 vect_model_induction_cost (stmt_info, ncopies, cost_vec);
7618 return true;
7619 }
7620
7621 /* Transform. */
7622
7623 /* Compute a vector variable, initialized with the first VF values of
7624 the induction variable. E.g., for an iv with IV_PHI='X' and
7625 evolution S, for a vector of 4 units, we want to compute:
7626 [X, X + S, X + 2*S, X + 3*S]. */
7627
7628 if (dump_enabled_p ())
7629 dump_printf_loc (MSG_NOTE, vect_location, "transform induction phi.\n");
7630
7631 step_expr = STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_info);
7632 gcc_assert (step_expr != NULL_TREE);
7633 tree step_vectype = get_same_sized_vectype (TREE_TYPE (step_expr), vectype);
7634
7635 pe = loop_preheader_edge (iv_loop);
7636 init_expr = PHI_ARG_DEF_FROM_EDGE (phi,
7637 loop_preheader_edge (iv_loop));
7638
7639 stmts = NULL;
7640 if (!nested_in_vect_loop)
7641 {
7642 /* Convert the initial value to the IV update type. */
7643 tree new_type = TREE_TYPE (step_expr);
7644 init_expr = gimple_convert (&stmts, new_type, init_expr);
7645
7646 /* If we are using the loop mask to "peel" for alignment then we need
7647 to adjust the start value here. */
7648 tree skip_niters = LOOP_VINFO_MASK_SKIP_NITERS (loop_vinfo);
7649 if (skip_niters != NULL_TREE)
7650 {
7651 if (FLOAT_TYPE_P (vectype))
7652 skip_niters = gimple_build (&stmts, FLOAT_EXPR, new_type,
7653 skip_niters);
7654 else
7655 skip_niters = gimple_convert (&stmts, new_type, skip_niters);
7656 tree skip_step = gimple_build (&stmts, MULT_EXPR, new_type,
7657 skip_niters, step_expr);
7658 init_expr = gimple_build (&stmts, MINUS_EXPR, new_type,
7659 init_expr, skip_step);
7660 }
7661 }
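/* Illustrative note: if mask-based peeling skips K iterations, the code
   above rewinds the start value to X - K*S, so that lane K of the
   initial IV vector [X - K*S, X - (K-1)*S, ...] holds the original
   initial value X for the first unmasked iteration.  */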
7662
7663 if (stmts)
7664 {
7665 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
7666 gcc_assert (!new_bb);
7667 }
7668
7669 /* Find the first insertion point in the BB. */
7670 basic_block bb = gimple_bb (phi);
7671 si = gsi_after_labels (bb);
7672
7673 /* For SLP induction we have to generate several IVs.  For example, with
7674 group size 3 we need [i, i, i, i + S], [i + S, i + S, i + 2*S, i + 2*S],
7675 [i + 2*S, i + 3*S, i + 3*S, i + 3*S].  The step is the same uniform
7676 [VF*S, VF*S, VF*S, VF*S] for all. */
7677 if (slp_node)
7678 {
7679 /* Enforced above. */
7680 unsigned int const_nunits = nunits.to_constant ();
7681
7682 /* Generate [VF*S, VF*S, ... ]. */
7683 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)))
7684 {
7685 expr = build_int_cst (integer_type_node, vf);
7686 expr = fold_convert (TREE_TYPE (step_expr), expr);
7687 }
7688 else
7689 expr = build_int_cst (TREE_TYPE (step_expr), vf);
7690 new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
7691 expr, step_expr);
7692 if (! CONSTANT_CLASS_P (new_name))
7693 new_name = vect_init_vector (loop_vinfo, stmt_info, new_name,
7694 TREE_TYPE (step_expr), NULL);
7695 new_vec = build_vector_from_val (step_vectype, new_name);
7696 vec_step = vect_init_vector (loop_vinfo, stmt_info,
7697 new_vec, step_vectype, NULL);
7698
7699 /* Now generate the IVs. */
7700 unsigned group_size = SLP_TREE_LANES (slp_node);
7701 unsigned nvects = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
7702 unsigned elts = const_nunits * nvects;
7703 /* Compute the number of distinct IVs we need. First reduce
7704 group_size if it is a multiple of const_nunits so we get
7705 one IV for a group_size of 4 but const_nunits 2. */
7706 unsigned group_sizep = group_size;
7707 if (group_sizep % const_nunits == 0)
7708 group_sizep = group_sizep / const_nunits;
7709 unsigned nivs = least_common_multiple (group_sizep,
7710 const_nunits) / const_nunits;
7711 gcc_assert (elts % group_size == 0);
7712 tree elt = init_expr;
7713 unsigned ivn;
7714 for (ivn = 0; ivn < nivs; ++ivn)
7715 {
7716 tree_vector_builder elts (step_vectype, const_nunits, 1);
7717 stmts = NULL;
7718 for (unsigned eltn = 0; eltn < const_nunits; ++eltn)
7719 {
7720 if (ivn*const_nunits + eltn >= group_size
7721 && (ivn * const_nunits + eltn) % group_size == 0)
7722 elt = gimple_build (&stmts, PLUS_EXPR, TREE_TYPE (elt),
7723 elt, step_expr);
7724 elts.quick_push (elt);
7725 }
7726 vec_init = gimple_build_vector (&stmts, &elts);
7727 vec_init = gimple_convert (&stmts, vectype, vec_init);
7728 if (stmts)
7729 {
7730 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
7731 gcc_assert (!new_bb);
7732 }
7733
7734 /* Create the induction-phi that defines the induction-operand. */
7735 vec_dest = vect_get_new_vect_var (vectype, vect_simple_var, "vec_iv_");
7736 induction_phi = create_phi_node (vec_dest, iv_loop->header);
7737 induc_def = PHI_RESULT (induction_phi);
7738
7739 /* Create the iv update inside the loop */
7740 gimple_seq stmts = NULL;
7741 vec_def = gimple_convert (&stmts, step_vectype, induc_def);
7742 vec_def = gimple_build (&stmts,
7743 PLUS_EXPR, step_vectype, vec_def, vec_step);
7744 vec_def = gimple_convert (&stmts, vectype, vec_def);
7745 gsi_insert_seq_before (&si, stmts, GSI_SAME_STMT);
7746
7747 /* Set the arguments of the phi node: */
7748 add_phi_arg (induction_phi, vec_init, pe, UNKNOWN_LOCATION);
7749 add_phi_arg (induction_phi, vec_def, loop_latch_edge (iv_loop),
7750 UNKNOWN_LOCATION);
7751
7752 SLP_TREE_VEC_STMTS (slp_node).quick_push (induction_phi);
7753 }
7754 /* Fill up to the number of vectors we need for the whole group. */
7755 nivs = least_common_multiple (group_size,
7756 const_nunits) / const_nunits;
7757 for (; ivn < nivs; ++ivn)
7758 SLP_TREE_VEC_STMTS (slp_node)
7759 .quick_push (SLP_TREE_VEC_STMTS (slp_node)[0]);
7760
7761 /* Re-use IVs when we can. */
7762 if (ivn < nvects)
7763 {
7764 unsigned vfp
7765 = least_common_multiple (group_size, const_nunits) / group_size;
7766 /* Generate [VF'*S, VF'*S, ... ]. */
7767 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)))
7768 {
7769 expr = build_int_cst (integer_type_node, vfp);
7770 expr = fold_convert (TREE_TYPE (step_expr), expr);
7771 }
7772 else
7773 expr = build_int_cst (TREE_TYPE (step_expr), vfp);
7774 new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
7775 expr, step_expr);
7776 if (! CONSTANT_CLASS_P (new_name))
7777 new_name = vect_init_vector (loop_vinfo, stmt_info, new_name,
7778 TREE_TYPE (step_expr), NULL);
7779 new_vec = build_vector_from_val (step_vectype, new_name);
7780 vec_step = vect_init_vector (loop_vinfo, stmt_info, new_vec,
7781 step_vectype, NULL);
7782 for (; ivn < nvects; ++ivn)
7783 {
7784 gimple *iv = SLP_TREE_VEC_STMTS (slp_node)[ivn - nivs];
7785 tree def;
7786 if (gimple_code (iv) == GIMPLE_PHI)
7787 def = gimple_phi_result (iv);
7788 else
7789 def = gimple_assign_lhs (iv);
7790 gimple_seq stmts = NULL;
7791 def = gimple_convert (&stmts, step_vectype, def);
7792 def = gimple_build (&stmts,
7793 PLUS_EXPR, step_vectype, def, vec_step);
7794 def = gimple_convert (&stmts, vectype, def);
7795 if (gimple_code (iv) == GIMPLE_PHI)
7796 gsi_insert_seq_before (&si, stmts, GSI_SAME_STMT);
7797 else
7798 {
7799 gimple_stmt_iterator tgsi = gsi_for_stmt (iv);
7800 gsi_insert_seq_after (&tgsi, stmts, GSI_CONTINUE_LINKING);
7801 }
7802 SLP_TREE_VEC_STMTS (slp_node)
7803 .quick_push (SSA_NAME_DEF_STMT (def));
7804 }
7805 }
7806
7807 return true;
7808 }
7809
7810 /* Create the vector that holds the initial_value of the induction. */
7811 if (nested_in_vect_loop)
7812 {
7813 /* iv_loop is nested in the loop to be vectorized. init_expr had already
7814 been created during vectorization of previous stmts. We obtain it
7815 from the STMT_VINFO_VEC_STMT of the defining stmt. */
7816 auto_vec<tree> vec_inits;
7817 vect_get_vec_defs_for_operand (loop_vinfo, stmt_info, 1,
7818 init_expr, &vec_inits);
7819 vec_init = vec_inits[0];
7820 /* If the initial value is not of proper type, convert it. */
7821 if (!useless_type_conversion_p (vectype, TREE_TYPE (vec_init)))
7822 {
7823 new_stmt
7824 = gimple_build_assign (vect_get_new_ssa_name (vectype,
7825 vect_simple_var,
7826 "vec_iv_"),
7827 VIEW_CONVERT_EXPR,
7828 build1 (VIEW_CONVERT_EXPR, vectype,
7829 vec_init));
7830 vec_init = gimple_assign_lhs (new_stmt);
7831 new_bb = gsi_insert_on_edge_immediate (loop_preheader_edge (iv_loop),
7832 new_stmt);
7833 gcc_assert (!new_bb);
7834 }
7835 }
7836 else
7837 {
7838 /* iv_loop is the loop to be vectorized. Create:
7839 vec_init = [X, X+S, X+2*S, X+3*S] (S = step_expr, X = init_expr) */
7840 stmts = NULL;
7841 new_name = gimple_convert (&stmts, TREE_TYPE (step_expr), init_expr);
7842
7843 unsigned HOST_WIDE_INT const_nunits;
7844 if (nunits.is_constant (&const_nunits))
7845 {
7846 tree_vector_builder elts (step_vectype, const_nunits, 1);
7847 elts.quick_push (new_name);
7848 for (i = 1; i < const_nunits; i++)
7849 {
7850 /* Create: new_name_i = new_name + step_expr */
7851 new_name = gimple_build (&stmts, PLUS_EXPR, TREE_TYPE (new_name),
7852 new_name, step_expr);
7853 elts.quick_push (new_name);
7854 }
7855 /* Create a vector from [new_name_0, new_name_1, ...,
7856 new_name_nunits-1] */
7857 vec_init = gimple_build_vector (&stmts, &elts);
7858 }
7859 else if (INTEGRAL_TYPE_P (TREE_TYPE (step_expr)))
7860 /* Build the initial value directly from a VEC_SERIES_EXPR. */
7861 vec_init = gimple_build (&stmts, VEC_SERIES_EXPR, step_vectype,
7862 new_name, step_expr);
7863 else
7864 {
7865 /* Build:
7866 [base, base, base, ...]
7867 + (vectype) [0, 1, 2, ...] * [step, step, step, ...]. */
7868 gcc_assert (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)));
7869 gcc_assert (flag_associative_math);
7870 tree index = build_index_vector (step_vectype, 0, 1);
7871 tree base_vec = gimple_build_vector_from_val (&stmts, step_vectype,
7872 new_name);
7873 tree step_vec = gimple_build_vector_from_val (&stmts, step_vectype,
7874 step_expr);
7875 vec_init = gimple_build (&stmts, FLOAT_EXPR, step_vectype, index);
7876 vec_init = gimple_build (&stmts, MULT_EXPR, step_vectype,
7877 vec_init, step_vec);
7878 vec_init = gimple_build (&stmts, PLUS_EXPR, step_vectype,
7879 vec_init, base_vec);
7880 }
7881 vec_init = gimple_convert (&stmts, vectype, vec_init);
7882
7883 if (stmts)
7884 {
7885 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
7886 gcc_assert (!new_bb);
7887 }
7888 }
7889
7890
7891 /* Create the vector that holds the step of the induction. */
7892 if (nested_in_vect_loop)
7893 /* iv_loop is nested in the loop to be vectorized. Generate:
7894 vec_step = [S, S, S, S] */
7895 new_name = step_expr;
7896 else
7897 {
7898 /* iv_loop is the loop to be vectorized. Generate:
7899 vec_step = [VF*S, VF*S, VF*S, VF*S] */
7900 gimple_seq seq = NULL;
7901 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)))
7902 {
7903 expr = build_int_cst (integer_type_node, vf);
7904 expr = gimple_build (&seq, FLOAT_EXPR, TREE_TYPE (step_expr), expr);
7905 }
7906 else
7907 expr = build_int_cst (TREE_TYPE (step_expr), vf);
7908 new_name = gimple_build (&seq, MULT_EXPR, TREE_TYPE (step_expr),
7909 expr, step_expr);
7910 if (seq)
7911 {
7912 new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
7913 gcc_assert (!new_bb);
7914 }
7915 }
7916
7917 t = unshare_expr (new_name);
7918 gcc_assert (CONSTANT_CLASS_P (new_name)
7919 || TREE_CODE (new_name) == SSA_NAME);
7920 new_vec = build_vector_from_val (step_vectype, t);
7921 vec_step = vect_init_vector (loop_vinfo, stmt_info,
7922 new_vec, step_vectype, NULL);
7923
7924
7925 /* Create the following def-use cycle:
7926 loop prolog:
7927 vec_init = ...
7928 vec_step = ...
7929 loop:
7930 vec_iv = PHI <vec_init, vec_loop>
7931 ...
7932 STMT
7933 ...
7934 vec_loop = vec_iv + vec_step; */
7935
7936 /* Create the induction-phi that defines the induction-operand. */
7937 vec_dest = vect_get_new_vect_var (vectype, vect_simple_var, "vec_iv_");
7938 induction_phi = create_phi_node (vec_dest, iv_loop->header);
7939 induc_def = PHI_RESULT (induction_phi);
7940
7941 /* Create the iv update inside the loop */
7942 stmts = NULL;
7943 vec_def = gimple_convert (&stmts, step_vectype, induc_def);
7944 vec_def = gimple_build (&stmts, PLUS_EXPR, step_vectype, vec_def, vec_step);
7945 vec_def = gimple_convert (&stmts, vectype, vec_def);
7946 gsi_insert_seq_before (&si, stmts, GSI_SAME_STMT);
7947 new_stmt = SSA_NAME_DEF_STMT (vec_def);
7948
7949 /* Set the arguments of the phi node: */
7950 add_phi_arg (induction_phi, vec_init, pe, UNKNOWN_LOCATION);
7951 add_phi_arg (induction_phi, vec_def, loop_latch_edge (iv_loop),
7952 UNKNOWN_LOCATION);
7953
7954 STMT_VINFO_VEC_STMTS (stmt_info).safe_push (induction_phi);
7955 *vec_stmt = induction_phi;
7956
7957 /* In case the vectorization factor (VF) is bigger than the number
7958 of elements that we can fit in a vectype (nunits), we have to generate
7959 more than one vector stmt - i.e. - we need to "unroll" the
7960 vector stmt by a factor VF/nunits. For more details see documentation
7961 in vectorizable_operation. */
7962
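/* Illustrative note: with e.g. VF = 8 and nunits = 4 (ncopies = 2), the
   second copy of the IV is obtained from the first by adding the vector
   step [nunits*S, ..., nunits*S] built below, so the copies together
   cover lanes 0..7 of the scalar IV sequence.  */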
7963 if (ncopies > 1)
7964 {
7965 gimple_seq seq = NULL;
7966 /* FORNOW. This restriction should be relaxed. */
7967 gcc_assert (!nested_in_vect_loop);
7968
7969 /* Create the vector that holds the step of the induction. */
7970 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)))
7971 {
7972 expr = build_int_cst (integer_type_node, nunits);
7973 expr = gimple_build (&seq, FLOAT_EXPR, TREE_TYPE (step_expr), expr);
7974 }
7975 else
7976 expr = build_int_cst (TREE_TYPE (step_expr), nunits);
7977 new_name = gimple_build (&seq, MULT_EXPR, TREE_TYPE (step_expr),
7978 expr, step_expr);
7979 if (seq)
7980 {
7981 new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
7982 gcc_assert (!new_bb);
7983 }
7984
7985 t = unshare_expr (new_name);
7986 gcc_assert (CONSTANT_CLASS_P (new_name)
7987 || TREE_CODE (new_name) == SSA_NAME);
7988 new_vec = build_vector_from_val (step_vectype, t);
7989 vec_step = vect_init_vector (loop_vinfo, stmt_info,
7990 new_vec, step_vectype, NULL);
7991
7992 vec_def = induc_def;
7993 for (i = 1; i < ncopies; i++)
7994 {
7995 /* vec_i = vec_prev + vec_step */
7996 gimple_seq stmts = NULL;
7997 vec_def = gimple_convert (&stmts, step_vectype, vec_def);
7998 vec_def = gimple_build (&stmts,
7999 PLUS_EXPR, step_vectype, vec_def, vec_step);
8000 vec_def = gimple_convert (&stmts, vectype, vec_def);
8001
8002 gsi_insert_seq_before (&si, stmts, GSI_SAME_STMT);
8003 new_stmt = SSA_NAME_DEF_STMT (vec_def);
8004 STMT_VINFO_VEC_STMTS (stmt_info).safe_push (new_stmt);
8005 }
8006 }
8007
8008 if (dump_enabled_p ())
8009 dump_printf_loc (MSG_NOTE, vect_location,
8010 "transform induction: created def-use cycle: %G%G",
8011 induction_phi, SSA_NAME_DEF_STMT (vec_def));
8012
8013 return true;
8014 }
8015
8016 /* Function vectorizable_live_operation.
8017
8018 STMT_INFO computes a value that is used outside the loop. Check if
8019 it can be supported. */
8020
8021 bool
8022 vectorizable_live_operation (loop_vec_info loop_vinfo,
8023 stmt_vec_info stmt_info,
8024 gimple_stmt_iterator *gsi,
8025 slp_tree slp_node, slp_instance slp_node_instance,
8026 int slp_index, bool vec_stmt_p,
8027 stmt_vector_for_cost *)
8028 {
8029 class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
8030 imm_use_iterator imm_iter;
8031 tree lhs, lhs_type, bitsize, vec_bitsize;
8032 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
8033 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
8034 int ncopies;
8035 gimple *use_stmt;
8036 auto_vec<tree> vec_oprnds;
8037 int vec_entry = 0;
8038 poly_uint64 vec_index = 0;
8039
8040 gcc_assert (STMT_VINFO_LIVE_P (stmt_info));
8041
8042 /* If a stmt of a reduction is live, vectorize it via
8043 vect_create_epilog_for_reduction. vectorizable_reduction assessed
8044 validity so just trigger the transform here. */
8045 if (STMT_VINFO_REDUC_DEF (vect_orig_stmt (stmt_info)))
8046 {
8047 if (!vec_stmt_p)
8048 return true;
8049 if (slp_node)
8050 {
8051 /* For reduction chains the meta-info is attached to
8052 the group leader. */
8053 if (REDUC_GROUP_FIRST_ELEMENT (stmt_info))
8054 stmt_info = REDUC_GROUP_FIRST_ELEMENT (stmt_info);
8055 /* For SLP reductions we vectorize the epilogue for
8056 all involved stmts together. */
8057 else if (slp_index != 0)
8058 return true;
8059 else
8060 /* For SLP reductions the meta-info is attached to
8061 the representative. */
8062 stmt_info = SLP_TREE_REPRESENTATIVE (slp_node);
8063 }
8064 stmt_vec_info reduc_info = info_for_reduction (loop_vinfo, stmt_info);
8065 gcc_assert (reduc_info->is_reduc_info);
8066 if (STMT_VINFO_REDUC_TYPE (reduc_info) == FOLD_LEFT_REDUCTION
8067 || STMT_VINFO_REDUC_TYPE (reduc_info) == EXTRACT_LAST_REDUCTION)
8068 return true;
8069 vect_create_epilog_for_reduction (loop_vinfo, stmt_info, slp_node,
8070 slp_node_instance);
8071 return true;
8072 }
8073
8074 /* FORNOW. CHECKME. */
8075 if (nested_in_vect_loop_p (loop, stmt_info))
8076 return false;
8077
8078 /* If STMT is not relevant and it is a simple assignment and its inputs are
8079 invariant then it can remain in place, unvectorized. The original last
8080 scalar value that it computes will be used. */
8081 if (!STMT_VINFO_RELEVANT_P (stmt_info))
8082 {
8083 gcc_assert (is_simple_and_all_uses_invariant (stmt_info, loop_vinfo));
8084 if (dump_enabled_p ())
8085 dump_printf_loc (MSG_NOTE, vect_location,
8086 "statement is simple and uses invariant. Leaving in "
8087 "place.\n");
8088 return true;
8089 }
8090
8091 if (slp_node)
8092 ncopies = 1;
8093 else
8094 ncopies = vect_get_num_copies (loop_vinfo, vectype);
8095
8096 if (slp_node)
8097 {
8098 gcc_assert (slp_index >= 0);
8099
8100 int num_scalar = SLP_TREE_LANES (slp_node);
8101 int num_vec = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
8102
8103 /* Get the index of the last occurrence of the requested scalar within
8104 the concatenation of all the SLP vectors. Then calculate which SLP
8105 vector it is in and the lane within that vector. */
8106 poly_uint64 pos = (num_vec * nunits) - num_scalar + slp_index;
8107
8108 /* Calculate which vector contains the result, and which lane of
8109 that vector we need. */
8110 if (!can_div_trunc_p (pos, nunits, &vec_entry, &vec_index))
8111 {
8112 if (dump_enabled_p ())
8113 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
8114 "Cannot determine which vector holds the"
8115 " final result.\n");
8116 return false;
8117 }
8118 }
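/* A hypothetical worked example of the lane selection above: with
   NUNITS == 4, NUM_VEC == 2, NUM_SCALAR == 3 and SLP_INDEX == 1,
   POS == 2 * 4 - 3 + 1 == 6, so VEC_ENTRY == 6 / 4 == 1 and
   VEC_INDEX == 6 % 4 == 2, i.e. lane 2 of the second vector. */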
8119
8120 if (!vec_stmt_p)
8121 {
8122 /* No transformation required. */
8123 if (LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (loop_vinfo))
8124 {
8125 if (!direct_internal_fn_supported_p (IFN_EXTRACT_LAST, vectype,
8126 OPTIMIZE_FOR_SPEED))
8127 {
8128 if (dump_enabled_p ())
8129 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
8130 "can't operate on partial vectors "
8131 "because the target doesn't support extract "
8132 "last reduction.\n");
8133 LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (loop_vinfo) = false;
8134 }
8135 else if (slp_node)
8136 {
8137 if (dump_enabled_p ())
8138 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
8139 "can't operate on partial vectors "
8140 "because an SLP statement is live after "
8141 "the loop.\n");
8142 LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (loop_vinfo) = false;
8143 }
8144 else if (ncopies > 1)
8145 {
8146 if (dump_enabled_p ())
8147 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
8148 "can't operate on partial vectors "
8149 "because ncopies is greater than 1.\n");
8150 LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (loop_vinfo) = false;
8151 }
8152 else
8153 {
8154 gcc_assert (ncopies == 1 && !slp_node);
8155 vect_record_loop_mask (loop_vinfo,
8156 &LOOP_VINFO_MASKS (loop_vinfo),
8157 1, vectype, NULL);
8158 }
8159 }
8160 return true;
8161 }
8162
8163 /* Use the lhs of the original scalar statement. */
8164 gimple *stmt = vect_orig_stmt (stmt_info)->stmt;
8165
8166 lhs = (is_a <gphi *> (stmt)) ? gimple_phi_result (stmt)
8167 : gimple_get_lhs (stmt);
8168 lhs_type = TREE_TYPE (lhs);
8169
8170 bitsize = vector_element_bits_tree (vectype);
8171 vec_bitsize = TYPE_SIZE (vectype);
8172
8173 /* Get the vectorized lhs of STMT and the lane to use (counted in bits). */
8174 tree vec_lhs, bitstart;
8175 if (slp_node)
8176 {
8177 gcc_assert (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo));
8178
8179 /* Get the correct slp vectorized stmt. */
8180 gimple *vec_stmt = SLP_TREE_VEC_STMTS (slp_node)[vec_entry];
8181 if (gphi *phi = dyn_cast <gphi *> (vec_stmt))
8182 vec_lhs = gimple_phi_result (phi);
8183 else
8184 vec_lhs = gimple_get_lhs (vec_stmt);
8185
8186 /* Get entry to use. */
8187 bitstart = bitsize_int (vec_index);
8188 bitstart = int_const_binop (MULT_EXPR, bitsize, bitstart);
8189 }
8190 else
8191 {
8192 /* For multiple copies, get the last copy. */
8193 vec_lhs = gimple_get_lhs (STMT_VINFO_VEC_STMTS (stmt_info).last ());
8194
8195 /* Get the last lane in the vector. */
8196 bitstart = int_const_binop (MINUS_EXPR, vec_bitsize, bitsize);
8197 }
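/* For illustration (hypothetical vector type): with a vector of four
   32-bit elements, BITSIZE is 32 and VEC_BITSIZE is 128, so in the
   non-SLP case above BITSTART is 128 - 32 == 96, i.e. the BIT_FIELD_REF
   built below extracts the last lane. */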
8198
8199 /* To ensure that the VEC_LHS used for lane extraction satisfies the
8200 loop-closed PHI requirement, insert one PHI node for it. It looks like:
8201 loop;
8202 BB:
8203 # lhs' = PHI <lhs>
8204 ==>
8205 loop;
8206 BB:
8207 # vec_lhs' = PHI <vec_lhs>
8208 new_tree = lane_extract <vec_lhs', ...>;
8209 lhs' = new_tree; */
8210
8211 basic_block exit_bb = single_exit (loop)->dest;
8212 gcc_assert (single_pred_p (exit_bb));
8213
8214 tree vec_lhs_phi = copy_ssa_name (vec_lhs);
8215 gimple *phi = create_phi_node (vec_lhs_phi, exit_bb);
8216 SET_PHI_ARG_DEF (phi, single_exit (loop)->dest_idx, vec_lhs);
8217
8218 gimple_seq stmts = NULL;
8219 tree new_tree;
8220 if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
8221 {
8222 /* Emit:
8223
8224 SCALAR_RES = EXTRACT_LAST <VEC_LHS, MASK>
8225
8226 where VEC_LHS is the vectorized live-out result and MASK is
8227 the loop mask for the final iteration. */
8228 gcc_assert (ncopies == 1 && !slp_node);
8229 tree scalar_type = TREE_TYPE (STMT_VINFO_VECTYPE (stmt_info));
8230 tree mask = vect_get_loop_mask (gsi, &LOOP_VINFO_MASKS (loop_vinfo), 1,
8231 vectype, 0);
8232 tree scalar_res = gimple_build (&stmts, CFN_EXTRACT_LAST, scalar_type,
8233 mask, vec_lhs_phi);
8234
8235 /* Convert the extracted vector element to the required scalar type. */
8236 new_tree = gimple_convert (&stmts, lhs_type, scalar_res);
8237 }
8238 else
8239 {
8240 tree bftype = TREE_TYPE (vectype);
8241 if (VECTOR_BOOLEAN_TYPE_P (vectype))
8242 bftype = build_nonstandard_integer_type (tree_to_uhwi (bitsize), 1);
8243 new_tree = build3 (BIT_FIELD_REF, bftype, vec_lhs_phi, bitsize, bitstart);
8244 new_tree = force_gimple_operand (fold_convert (lhs_type, new_tree),
8245 &stmts, true, NULL_TREE);
8246 }
8247
8248 if (stmts)
8249 {
8250 gimple_stmt_iterator exit_gsi = gsi_after_labels (exit_bb);
8251 gsi_insert_seq_before (&exit_gsi, stmts, GSI_SAME_STMT);
8252
8253 /* Remove existing phi from lhs and create one copy from new_tree. */
8254 tree lhs_phi = NULL_TREE;
8255 gimple_stmt_iterator gsi;
8256 for (gsi = gsi_start_phis (exit_bb); !gsi_end_p (gsi); gsi_next (&gsi))
8257 {
8258 gimple *phi = gsi_stmt (gsi);
8259 if (gimple_phi_arg_def (phi, 0) == lhs)
8260 {
8261 remove_phi_node (&gsi, false);
8262 lhs_phi = gimple_phi_result (phi);
8263 gimple *copy = gimple_build_assign (lhs_phi, new_tree);
8264 gsi_insert_before (&exit_gsi, copy, GSI_SAME_STMT);
8265 break;
8266 }
8267 }
8268 }
8269
8270 /* Replace uses of lhs with the newly computed result. If the use stmt is a
8271 single-arg PHI, just replace all uses of the PHI result. This is necessary
8272 because the lcssa PHI defining lhs may appear before the newly inserted stmt. */
8273 use_operand_p use_p;
8274 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, lhs)
8275 if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt))
8276 && !is_gimple_debug (use_stmt))
8277 {
8278 if (gimple_code (use_stmt) == GIMPLE_PHI
8279 && gimple_phi_num_args (use_stmt) == 1)
8280 {
8281 replace_uses_by (gimple_phi_result (use_stmt), new_tree);
8282 }
8283 else
8284 {
8285 FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
8286 SET_USE (use_p, new_tree);
8287 }
8288 update_stmt (use_stmt);
8289 }
8290
8291 return true;
8292 }
8293
8294 /* Kill any debug uses outside LOOP of SSA names defined in STMT_INFO. */
8295
8296 static void
8297 vect_loop_kill_debug_uses (class loop *loop, stmt_vec_info stmt_info)
8298 {
8299 ssa_op_iter op_iter;
8300 imm_use_iterator imm_iter;
8301 def_operand_p def_p;
8302 gimple *ustmt;
8303
8304 FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt_info->stmt, op_iter, SSA_OP_DEF)
8305 {
8306 FOR_EACH_IMM_USE_STMT (ustmt, imm_iter, DEF_FROM_PTR (def_p))
8307 {
8308 basic_block bb;
8309
8310 if (!is_gimple_debug (ustmt))
8311 continue;
8312
8313 bb = gimple_bb (ustmt);
8314
8315 if (!flow_bb_inside_loop_p (loop, bb))
8316 {
8317 if (gimple_debug_bind_p (ustmt))
8318 {
8319 if (dump_enabled_p ())
8320 dump_printf_loc (MSG_NOTE, vect_location,
8321 "killing debug use\n");
8322
8323 gimple_debug_bind_reset_value (ustmt);
8324 update_stmt (ustmt);
8325 }
8326 else
8327 gcc_unreachable ();
8328 }
8329 }
8330 }
8331 }
8332
8333 /* Given loop represented by LOOP_VINFO, return true if computation of
8334 LOOP_VINFO_NITERS (= LOOP_VINFO_NITERSM1 + 1) doesn't overflow, false
8335 otherwise. */
8336
8337 static bool
8338 loop_niters_no_overflow (loop_vec_info loop_vinfo)
8339 {
8340 /* Constant case. */
8341 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
8342 {
8343 tree cst_niters = LOOP_VINFO_NITERS (loop_vinfo);
8344 tree cst_nitersm1 = LOOP_VINFO_NITERSM1 (loop_vinfo);
8345
8346 gcc_assert (TREE_CODE (cst_niters) == INTEGER_CST);
8347 gcc_assert (TREE_CODE (cst_nitersm1) == INTEGER_CST);
8348 if (wi::to_widest (cst_nitersm1) < wi::to_widest (cst_niters))
8349 return true;
8350 }
8351
8352 widest_int max;
8353 class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
8354 /* Check the upper bound of loop niters. */
8355 if (get_max_loop_iterations (loop, &max))
8356 {
8357 tree type = TREE_TYPE (LOOP_VINFO_NITERS (loop_vinfo));
8358 signop sgn = TYPE_SIGN (type);
8359 widest_int type_max = widest_int::from (wi::max_value (type), sgn);
8360 if (max < type_max)
8361 return true;
8362 }
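/* For example (hypothetical type): if NITERS has a 32-bit unsigned type
   and the latch may execute UINT_MAX times, then NITERSM1 + 1 wraps to
   zero; the MAX < TYPE_MAX check above fails and we return false. */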
8363 return false;
8364 }
8365
8366 /* Return a mask type with half the number of elements as OLD_TYPE,
8367 given that it should have mode NEW_MODE. */
8368
8369 tree
8370 vect_halve_mask_nunits (tree old_type, machine_mode new_mode)
8371 {
8372 poly_uint64 nunits = exact_div (TYPE_VECTOR_SUBPARTS (old_type), 2);
8373 return build_truth_vector_type_for_mode (nunits, new_mode);
8374 }
8375
8376 /* Return a mask type with twice as many elements as OLD_TYPE,
8377 given that it should have mode NEW_MODE. */
8378
8379 tree
8380 vect_double_mask_nunits (tree old_type, machine_mode new_mode)
8381 {
8382 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (old_type) * 2;
8383 return build_truth_vector_type_for_mode (nunits, new_mode);
8384 }
8385
8386 /* Record that a fully-masked version of LOOP_VINFO would need MASKS to
8387 contain a sequence of NVECTORS masks that each control a vector of type
8388 VECTYPE. If SCALAR_MASK is nonnull, the fully-masked loop would AND
8389 these vector masks with the vector version of SCALAR_MASK. */
8390
8391 void
8392 vect_record_loop_mask (loop_vec_info loop_vinfo, vec_loop_masks *masks,
8393 unsigned int nvectors, tree vectype, tree scalar_mask)
8394 {
8395 gcc_assert (nvectors != 0);
8396 if (masks->length () < nvectors)
8397 masks->safe_grow_cleared (nvectors, true);
8398 rgroup_controls *rgm = &(*masks)[nvectors - 1];
8399 /* The number of scalars per iteration and the number of vectors are
8400 both compile-time constants. */
8401 unsigned int nscalars_per_iter
8402 = exact_div (nvectors * TYPE_VECTOR_SUBPARTS (vectype),
8403 LOOP_VINFO_VECT_FACTOR (loop_vinfo)).to_constant ();
8404
8405 if (scalar_mask)
8406 {
8407 scalar_cond_masked_key cond (scalar_mask, nvectors);
8408 loop_vinfo->scalar_cond_masked_set.add (cond);
8409 }
8410
8411 if (rgm->max_nscalars_per_iter < nscalars_per_iter)
8412 {
8413 rgm->max_nscalars_per_iter = nscalars_per_iter;
8414 rgm->type = truth_type_for (vectype);
8415 rgm->factor = 1;
8416 }
8417 }
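/* A hypothetical example of the bookkeeping above: if a statement needs
   NVECTORS == 2 masks for a vector type with 8 elements and the
   vectorization factor is 8, then NSCALARS_PER_ITER == (2 * 8) / 8 == 2,
   i.e. the rgroup for NVECTORS == 2 controls two scalars per scalar
   iteration. */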
8418
8419 /* Given a complete set of masks MASKS, extract mask number INDEX
8420 for an rgroup that operates on NVECTORS vectors of type VECTYPE,
8421 where 0 <= INDEX < NVECTORS. Insert any set-up statements before GSI.
8422
8423 See the comment above vec_loop_masks for more details about the mask
8424 arrangement. */
8425
8426 tree
8427 vect_get_loop_mask (gimple_stmt_iterator *gsi, vec_loop_masks *masks,
8428 unsigned int nvectors, tree vectype, unsigned int index)
8429 {
8430 rgroup_controls *rgm = &(*masks)[nvectors - 1];
8431 tree mask_type = rgm->type;
8432
8433 /* Populate the rgroup's mask array, if this is the first time we've
8434 used it. */
8435 if (rgm->controls.is_empty ())
8436 {
8437 rgm->controls.safe_grow_cleared (nvectors, true);
8438 for (unsigned int i = 0; i < nvectors; ++i)
8439 {
8440 tree mask = make_temp_ssa_name (mask_type, NULL, "loop_mask");
8441 /* Provide a dummy definition until the real one is available. */
8442 SSA_NAME_DEF_STMT (mask) = gimple_build_nop ();
8443 rgm->controls[i] = mask;
8444 }
8445 }
8446
8447 tree mask = rgm->controls[index];
8448 if (maybe_ne (TYPE_VECTOR_SUBPARTS (mask_type),
8449 TYPE_VECTOR_SUBPARTS (vectype)))
8450 {
8451 /* A loop mask for data type X can be reused for data type Y
8452 if X has N times more elements than Y and if Y's elements
8453 are N times bigger than X's. In this case each sequence
8454 of N elements in the loop mask will be all-zero or all-one.
8455 We can then view-convert the mask so that each sequence of
8456 N elements is replaced by a single element. */
8457 gcc_assert (multiple_p (TYPE_VECTOR_SUBPARTS (mask_type),
8458 TYPE_VECTOR_SUBPARTS (vectype)));
8459 gimple_seq seq = NULL;
8460 mask_type = truth_type_for (vectype);
8461 mask = gimple_build (&seq, VIEW_CONVERT_EXPR, mask_type, mask);
8462 if (seq)
8463 gsi_insert_seq_before (gsi, seq, GSI_SAME_STMT);
8464 }
8465 return mask;
8466 }
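/* A hypothetical illustration of the reuse case handled above: a mask
   recorded for 16 byte elements can also control 8 halfword elements,
   because each pair of byte-mask elements is known to be all-zero or
   all-one and the VIEW_CONVERT_EXPR collapses each pair into a single
   element of the narrower mask type. */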
8467
8468 /* Record that LOOP_VINFO would need LENS to contain a sequence of NVECTORS
8469 lengths for controlling an operation on VECTYPE. The operation splits
8470 each element of VECTYPE into FACTOR separate subelements, measuring the
8471 length as a number of these subelements. */
8472
8473 void
8474 vect_record_loop_len (loop_vec_info loop_vinfo, vec_loop_lens *lens,
8475 unsigned int nvectors, tree vectype, unsigned int factor)
8476 {
8477 gcc_assert (nvectors != 0);
8478 if (lens->length () < nvectors)
8479 lens->safe_grow_cleared (nvectors, true);
8480 rgroup_controls *rgl = &(*lens)[nvectors - 1];
8481
8482 /* The number of scalars per iteration, the number of bytes each scalar
8483 occupies and the number of vectors are all compile-time constants. */
8484 unsigned int nscalars_per_iter
8485 = exact_div (nvectors * TYPE_VECTOR_SUBPARTS (vectype),
8486 LOOP_VINFO_VECT_FACTOR (loop_vinfo)).to_constant ();
8487
8488 if (rgl->max_nscalars_per_iter < nscalars_per_iter)
8489 {
8490 /* For now, we only support cases in which all loads and stores fall back
8491 to VnQI or none do. */
8492 gcc_assert (!rgl->max_nscalars_per_iter
8493 || (rgl->factor == 1 && factor == 1)
8494 || (rgl->max_nscalars_per_iter * rgl->factor
8495 == nscalars_per_iter * factor));
8496 rgl->max_nscalars_per_iter = nscalars_per_iter;
8497 rgl->type = vectype;
8498 rgl->factor = factor;
8499 }
8500 }
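/* A hypothetical example of the length bookkeeping above: for a vector
   of four 32-bit elements whose accesses fall back to a byte-based
   (VnQI) length, FACTOR == 4 and each length counts up to 16 byte-sized
   subelements per vector. */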
8501
8502 /* Given a complete set of length LENS, extract length number INDEX for an
8503 rgroup that operates on NVECTORS vectors, where 0 <= INDEX < NVECTORS. */
8504
8505 tree
8506 vect_get_loop_len (loop_vec_info loop_vinfo, vec_loop_lens *lens,
8507 unsigned int nvectors, unsigned int index)
8508 {
8509 rgroup_controls *rgl = &(*lens)[nvectors - 1];
8510
8511 /* Populate the rgroup's len array, if this is the first time we've
8512 used it. */
8513 if (rgl->controls.is_empty ())
8514 {
8515 rgl->controls.safe_grow_cleared (nvectors, true);
8516 for (unsigned int i = 0; i < nvectors; ++i)
8517 {
8518 tree len_type = LOOP_VINFO_RGROUP_COMPARE_TYPE (loop_vinfo);
8519 gcc_assert (len_type != NULL_TREE);
8520 tree len = make_temp_ssa_name (len_type, NULL, "loop_len");
8521
8522 /* Provide a dummy definition until the real one is available. */
8523 SSA_NAME_DEF_STMT (len) = gimple_build_nop ();
8524 rgl->controls[i] = len;
8525 }
8526 }
8527
8528 return rgl->controls[index];
8529 }
8530
8531 /* Scale the profiling counters of LOOP, which has been vectorized
8532 by factor VF, according to its new estimated iteration count. */
8533
8534 static void
8535 scale_profile_for_vect_loop (class loop *loop, unsigned vf)
8536 {
8537 edge preheader = loop_preheader_edge (loop);
8538 /* Reduce loop iterations by the vectorization factor. */
8539 gcov_type new_est_niter = niter_for_unrolled_loop (loop, vf);
8540 profile_count freq_h = loop->header->count, freq_e = preheader->count ();
8541
8542 if (freq_h.nonzero_p ())
8543 {
8544 profile_probability p;
8545
8546 /* Avoid dropping loop body profile counter to 0 because of zero count
8547 in loop's preheader. */
8548 if (!(freq_e == profile_count::zero ()))
8549 freq_e = freq_e.force_nonzero ();
8550 p = freq_e.apply_scale (new_est_niter + 1, 1).probability_in (freq_h);
8551 scale_loop_frequencies (loop, p);
8552 }
8553
8554 edge exit_e = single_exit (loop);
8555 exit_e->probability = profile_probability::always ()
8556 .apply_scale (1, new_est_niter + 1);
8557
8558 edge exit_l = single_pred_edge (loop->latch);
8559 profile_probability prob = exit_l->probability;
8560 exit_l->probability = exit_e->probability.invert ();
8561 if (prob.initialized_p () && exit_l->probability.initialized_p ())
8562 scale_bbs_frequencies (&loop->latch, 1, exit_l->probability / prob);
8563 }
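/* A hypothetical example of the scaling above: if the vectorized loop is
   estimated to iterate NEW_EST_NITER == 3 times, the exit edge gets
   probability 1 / (3 + 1) == 25% and the body frequencies are scaled so
   that the header count corresponds to roughly four executions per
   entry. */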
8564
8565 /* Vectorize STMT_INFO if relevant, inserting any new instructions before GSI.
8566 When vectorizing STMT_INFO as a store, set *SEEN_STORE to its
8567 stmt_vec_info. */
8568
8569 static void
8570 vect_transform_loop_stmt (loop_vec_info loop_vinfo, stmt_vec_info stmt_info,
8571 gimple_stmt_iterator *gsi, stmt_vec_info *seen_store)
8572 {
8573 class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
8574 poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
8575
8576 if (dump_enabled_p ())
8577 dump_printf_loc (MSG_NOTE, vect_location,
8578 "------>vectorizing statement: %G", stmt_info->stmt);
8579
8580 if (MAY_HAVE_DEBUG_BIND_STMTS && !STMT_VINFO_LIVE_P (stmt_info))
8581 vect_loop_kill_debug_uses (loop, stmt_info);
8582
8583 if (!STMT_VINFO_RELEVANT_P (stmt_info)
8584 && !STMT_VINFO_LIVE_P (stmt_info))
8585 return;
8586
8587 if (STMT_VINFO_VECTYPE (stmt_info))
8588 {
8589 poly_uint64 nunits
8590 = TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
8591 if (!STMT_SLP_TYPE (stmt_info)
8592 && maybe_ne (nunits, vf)
8593 && dump_enabled_p ())
8594 /* For SLP, VF is set according to the unrolling factor and not
8595 to the vector size, hence this message is not valid for SLP. */
8596 dump_printf_loc (MSG_NOTE, vect_location, "multiple-types.\n");
8597 }
8598
8599 /* Pure SLP statements have already been vectorized. We still need
8600 to apply loop vectorization to hybrid SLP statements. */
8601 if (PURE_SLP_STMT (stmt_info))
8602 return;
8603
8604 if (dump_enabled_p ())
8605 dump_printf_loc (MSG_NOTE, vect_location, "transform statement.\n");
8606
8607 if (vect_transform_stmt (loop_vinfo, stmt_info, gsi, NULL, NULL))
8608 *seen_store = stmt_info;
8609 }
8610
8611 /* Helper function to pass to simplify_replace_tree to enable replacing tree's
8612 in the hash_map with its corresponding values. */
8613
8614 static tree
8615 find_in_mapping (tree t, void *context)
8616 {
8617 hash_map<tree,tree>* mapping = (hash_map<tree, tree>*) context;
8618
8619 tree *value = mapping->get (t);
8620 return value ? *value : t;
8621 }
8622
8623 /* Update EPILOGUE's loop_vec_info. EPILOGUE was constructed as a copy of the
8624 original loop that has now been vectorized.
8625
8626 The inits of the data_references need to be advanced with the number of
8627 iterations of the main loop. This has been computed in vect_do_peeling and
8628 is stored in parameter ADVANCE. We first restore the data_references'
8629 initial offsets with the values recorded in ORIG_DRS_INIT.
8630
8631 Since the loop_vec_info of this EPILOGUE was constructed for the original
8632 loop, its stmt_vec_infos all point to the original statements. These need
8633 to be updated to point to their corresponding copies as well as the SSA_NAMES
8634 in their PATTERN_DEF_SEQs and RELATED_STMTs.
8635
8636 The data_references' connections also need to be updated. Their
8637 corresponding dr_vec_infos need to be reconnected to the EPILOGUE's
8638 stmt_vec_infos, their statements need to point to their corresponding
8639 copies, and if they are gather loads or scatter stores then their
8640 references need to be updated to point to the corresponding copies.
8641 Finally we set 'base_misaligned' to false, as we have already peeled
8642 for alignment in the prologue of the main loop. */
8643
8644 static void
8645 update_epilogue_loop_vinfo (class loop *epilogue, tree advance)
8646 {
8647 loop_vec_info epilogue_vinfo = loop_vec_info_for_loop (epilogue);
8648 auto_vec<gimple *> stmt_worklist;
8649 hash_map<tree,tree> mapping;
8650 gimple *orig_stmt, *new_stmt;
8651 gimple_stmt_iterator epilogue_gsi;
8652 gphi_iterator epilogue_phi_gsi;
8653 stmt_vec_info stmt_vinfo = NULL, related_vinfo;
8654 basic_block *epilogue_bbs = get_loop_body (epilogue);
8655 unsigned i;
8656
8657 LOOP_VINFO_BBS (epilogue_vinfo) = epilogue_bbs;
8658
8659 /* Advance data_reference's with the number of iterations of the previous
8660 loop and its prologue. */
8661 vect_update_inits_of_drs (epilogue_vinfo, advance, PLUS_EXPR);
8662
8663
8664 /* The EPILOGUE loop is a copy of the original loop so they share the same
8665 gimple UIDs. In this loop we update the loop_vec_info of the EPILOGUE to
8666 point to the copied statements. We also create a mapping from each LHS in
8667 the original loop to the corresponding LHS in the EPILOGUE and create
8668 worklists to update the STMT_VINFO_PATTERN_DEF_SEQs and STMT_VINFO_RELATED_STMTs. */
8669 for (unsigned i = 0; i < epilogue->num_nodes; ++i)
8670 {
8671 for (epilogue_phi_gsi = gsi_start_phis (epilogue_bbs[i]);
8672 !gsi_end_p (epilogue_phi_gsi); gsi_next (&epilogue_phi_gsi))
8673 {
8674 new_stmt = epilogue_phi_gsi.phi ();
8675
8676 gcc_assert (gimple_uid (new_stmt) > 0);
8677 stmt_vinfo
8678 = epilogue_vinfo->stmt_vec_infos[gimple_uid (new_stmt) - 1];
8679
8680 orig_stmt = STMT_VINFO_STMT (stmt_vinfo);
8681 STMT_VINFO_STMT (stmt_vinfo) = new_stmt;
8682
8683 mapping.put (gimple_phi_result (orig_stmt),
8684 gimple_phi_result (new_stmt));
8685 /* PHI nodes cannot have patterns or related statements. */
8686 gcc_assert (STMT_VINFO_PATTERN_DEF_SEQ (stmt_vinfo) == NULL
8687 && STMT_VINFO_RELATED_STMT (stmt_vinfo) == NULL);
8688 }
8689
8690 for (epilogue_gsi = gsi_start_bb (epilogue_bbs[i]);
8691 !gsi_end_p (epilogue_gsi); gsi_next (&epilogue_gsi))
8692 {
8693 new_stmt = gsi_stmt (epilogue_gsi);
8694 if (is_gimple_debug (new_stmt))
8695 continue;
8696
8697 gcc_assert (gimple_uid (new_stmt) > 0);
8698 stmt_vinfo
8699 = epilogue_vinfo->stmt_vec_infos[gimple_uid (new_stmt) - 1];
8700
8701 orig_stmt = STMT_VINFO_STMT (stmt_vinfo);
8702 STMT_VINFO_STMT (stmt_vinfo) = new_stmt;
8703
8704 if (tree old_lhs = gimple_get_lhs (orig_stmt))
8705 mapping.put (old_lhs, gimple_get_lhs (new_stmt));
8706
8707 if (STMT_VINFO_PATTERN_DEF_SEQ (stmt_vinfo))
8708 {
8709 gimple_seq seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_vinfo);
8710 for (gimple_stmt_iterator gsi = gsi_start (seq);
8711 !gsi_end_p (gsi); gsi_next (&gsi))
8712 stmt_worklist.safe_push (gsi_stmt (gsi));
8713 }
8714
8715 related_vinfo = STMT_VINFO_RELATED_STMT (stmt_vinfo);
8716 if (related_vinfo != NULL && related_vinfo != stmt_vinfo)
8717 {
8718 gimple *stmt = STMT_VINFO_STMT (related_vinfo);
8719 stmt_worklist.safe_push (stmt);
8720 /* Set BB such that the assert in
8721 'get_initial_def_for_reduction' is able to determine that
8722 the BB of the related stmt is inside this loop. */
8723 gimple_set_bb (stmt,
8724 gimple_bb (new_stmt));
8725 related_vinfo = STMT_VINFO_RELATED_STMT (related_vinfo);
8726 gcc_assert (related_vinfo == NULL
8727 || related_vinfo == stmt_vinfo);
8728 }
8729 }
8730 }
8731
8732 /* The PATTERN_DEF_SEQs and RELATED_STMTs in the epilogue were constructed
8733 using the original main loop and thus need to be updated to refer to the
8734 cloned variables used in the epilogue. */
8735 for (unsigned i = 0; i < stmt_worklist.length (); ++i)
8736 {
8737 gimple *stmt = stmt_worklist[i];
8738 tree *new_op;
8739
8740 for (unsigned j = 1; j < gimple_num_ops (stmt); ++j)
8741 {
8742 tree op = gimple_op (stmt, j);
8743 if ((new_op = mapping.get(op)))
8744 gimple_set_op (stmt, j, *new_op);
8745 else
8746 {
8747 /* PR92429: The last argument of simplify_replace_tree disables
8748 folding when replacing arguments. This is required as
8749 otherwise you might end up with different statements than the
8750 ones analyzed in vect_loop_analyze, leading to different
8751 vectorization. */
8752 op = simplify_replace_tree (op, NULL_TREE, NULL_TREE,
8753 &find_in_mapping, &mapping, false);
8754 gimple_set_op (stmt, j, op);
8755 }
8756 }
8757 }
8758
8759 struct data_reference *dr;
8760 vec<data_reference_p> datarefs = LOOP_VINFO_DATAREFS (epilogue_vinfo);
8761 FOR_EACH_VEC_ELT (datarefs, i, dr)
8762 {
8763 orig_stmt = DR_STMT (dr);
8764 gcc_assert (gimple_uid (orig_stmt) > 0);
8765 stmt_vinfo = epilogue_vinfo->stmt_vec_infos[gimple_uid (orig_stmt) - 1];
8766 /* Data references for gather loads and scatter stores do not use the
8767 updated offset we set using ADVANCE. Instead we have to make sure the
8768 references in the data references point to the corresponding copies of
8769 the originals in the epilogue. */
8770 if (STMT_VINFO_MEMORY_ACCESS_TYPE (vect_stmt_to_vectorize (stmt_vinfo))
8771 == VMAT_GATHER_SCATTER)
8772 {
8773 DR_REF (dr)
8774 = simplify_replace_tree (DR_REF (dr), NULL_TREE, NULL_TREE,
8775 &find_in_mapping, &mapping);
8776 DR_BASE_ADDRESS (dr)
8777 = simplify_replace_tree (DR_BASE_ADDRESS (dr), NULL_TREE, NULL_TREE,
8778 &find_in_mapping, &mapping);
8779 }
8780 DR_STMT (dr) = STMT_VINFO_STMT (stmt_vinfo);
8781 stmt_vinfo->dr_aux.stmt = stmt_vinfo;
8782 /* The vector size of the epilogue is smaller than that of the main loop,
8783 so the alignment is either the same or lower. This means the dr is
8784 by definition aligned. */
8785 STMT_VINFO_DR_INFO (stmt_vinfo)->base_misaligned = false;
8786 }
8787
8788 epilogue_vinfo->shared->datarefs_copy.release ();
8789 epilogue_vinfo->shared->save_datarefs ();
8790 }
8791
8792 /* Function vect_transform_loop.
8793
8794 The analysis phase has determined that the loop is vectorizable.
8795 Vectorize the loop - created vectorized stmts to replace the scalar
8796 stmts in the loop, and update the loop exit condition.
8797 Returns scalar epilogue loop if any. */
8798
8799 class loop *
8800 vect_transform_loop (loop_vec_info loop_vinfo, gimple *loop_vectorized_call)
8801 {
8802 class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
8803 class loop *epilogue = NULL;
8804 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
8805 int nbbs = loop->num_nodes;
8806 int i;
8807 tree niters_vector = NULL_TREE;
8808 tree step_vector = NULL_TREE;
8809 tree niters_vector_mult_vf = NULL_TREE;
8810 poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
8811 unsigned int lowest_vf = constant_lower_bound (vf);
8812 gimple *stmt;
8813 bool check_profitability = false;
8814 unsigned int th;
8815
8816 DUMP_VECT_SCOPE ("vec_transform_loop");
8817
8818 loop_vinfo->shared->check_datarefs ();
8819
8820 /* Use the more conservative vectorization threshold. If the number
8821 of iterations is constant, assume the cost check has been performed
8822 by our caller. If the threshold makes all loops profitable that
8823 run at least the (estimated) vectorization factor number of times,
8824 checking is pointless, too. */
8825 th = LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo);
8826 if (vect_apply_runtime_profitability_check_p (loop_vinfo))
8827 {
8828 if (dump_enabled_p ())
8829 dump_printf_loc (MSG_NOTE, vect_location,
8830 "Profitability threshold is %d loop iterations.\n",
8831 th);
8832 check_profitability = true;
8833 }
8834
8835 /* Make sure there exists a single-predecessor exit bb. Do this before
8836 versioning. */
8837 edge e = single_exit (loop);
8838 if (! single_pred_p (e->dest))
8839 {
8840 split_loop_exit_edge (e, true);
8841 if (dump_enabled_p ())
8842 dump_printf (MSG_NOTE, "split exit edge\n");
8843 }
8844
8845 /* Version the loop first, if required, so the profitability check
8846 comes first. */
8847
8848 if (LOOP_REQUIRES_VERSIONING (loop_vinfo))
8849 {
8850 class loop *sloop
8851 = vect_loop_versioning (loop_vinfo, loop_vectorized_call);
8852 sloop->force_vectorize = false;
8853 check_profitability = false;
8854 }
8855
8856 /* Make sure there exists a single-predecessor exit bb also on the
8857 scalar loop copy. Do this after versioning but before peeling
8858 so the CFG structure is fine for both the scalar and the if-converted
8859 loop, and slpeel_duplicate_current_defs_from_edges sees matching
8860 loop-closed PHI nodes on the exit. */
8861 if (LOOP_VINFO_SCALAR_LOOP (loop_vinfo))
8862 {
8863 e = single_exit (LOOP_VINFO_SCALAR_LOOP (loop_vinfo));
8864 if (! single_pred_p (e->dest))
8865 {
8866 split_loop_exit_edge (e, true);
8867 if (dump_enabled_p ())
8868 dump_printf (MSG_NOTE, "split exit edge of scalar loop\n");
8869 }
8870 }
8871
8872 tree niters = vect_build_loop_niters (loop_vinfo);
8873 LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo) = niters;
8874 tree nitersm1 = unshare_expr (LOOP_VINFO_NITERSM1 (loop_vinfo));
8875 bool niters_no_overflow = loop_niters_no_overflow (loop_vinfo);
8876 tree advance;
8877 drs_init_vec orig_drs_init;
8878
8879 epilogue = vect_do_peeling (loop_vinfo, niters, nitersm1, &niters_vector,
8880 &step_vector, &niters_vector_mult_vf, th,
8881 check_profitability, niters_no_overflow,
8882 &advance);
8883
8884 if (LOOP_VINFO_SCALAR_LOOP (loop_vinfo)
8885 && LOOP_VINFO_SCALAR_LOOP_SCALING (loop_vinfo).initialized_p ())
8886 scale_loop_frequencies (LOOP_VINFO_SCALAR_LOOP (loop_vinfo),
8887 LOOP_VINFO_SCALAR_LOOP_SCALING (loop_vinfo));
8888
8889 if (niters_vector == NULL_TREE)
8890 {
8891 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
8892 && !LOOP_VINFO_USING_PARTIAL_VECTORS_P (loop_vinfo)
8893 && known_eq (lowest_vf, vf))
8894 {
8895 niters_vector
8896 = build_int_cst (TREE_TYPE (LOOP_VINFO_NITERS (loop_vinfo)),
8897 LOOP_VINFO_INT_NITERS (loop_vinfo) / lowest_vf);
8898 step_vector = build_one_cst (TREE_TYPE (niters));
8899 }
8900 else if (vect_use_loop_mask_for_alignment_p (loop_vinfo))
8901 vect_gen_vector_loop_niters (loop_vinfo, niters, &niters_vector,
8902 &step_vector, niters_no_overflow);
8903 else
8904 /* vect_do_peeling subtracted the number of peeled prologue
8905 iterations from LOOP_VINFO_NITERS. */
8906 vect_gen_vector_loop_niters (loop_vinfo, LOOP_VINFO_NITERS (loop_vinfo),
8907 &niters_vector, &step_vector,
8908 niters_no_overflow);
8909 }
8910
8911 /* 1) Make sure the loop header has exactly two entries
8912 2) Make sure we have a preheader basic block. */
8913
8914 gcc_assert (EDGE_COUNT (loop->header->preds) == 2);
8915
8916 split_edge (loop_preheader_edge (loop));
8917
8918 if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
8919 && vect_use_loop_mask_for_alignment_p (loop_vinfo))
8920 /* This will deal with any possible peeling. */
8921 vect_prepare_for_masked_peels (loop_vinfo);
8922
8923 /* Schedule the SLP instances first, then handle loop vectorization
8924 below. */
8925 if (!loop_vinfo->slp_instances.is_empty ())
8926 {
8927 DUMP_VECT_SCOPE ("scheduling SLP instances");
8928 vect_schedule_slp (loop_vinfo);
8929 }
8930
8931 /* FORNOW: the vectorizer supports only loops whose body consists
8932 of one basic block (header + empty latch). When the vectorizer
8933 supports more involved loop forms, the order in which the BBs are
8934 traversed will need to be reconsidered. */
8935
8936 for (i = 0; i < nbbs; i++)
8937 {
8938 basic_block bb = bbs[i];
8939 stmt_vec_info stmt_info;
8940
8941 for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
8942 gsi_next (&si))
8943 {
8944 gphi *phi = si.phi ();
8945 if (dump_enabled_p ())
8946 dump_printf_loc (MSG_NOTE, vect_location,
8947 "------>vectorizing phi: %G", phi);
8948 stmt_info = loop_vinfo->lookup_stmt (phi);
8949 if (!stmt_info)
8950 continue;
8951
8952 if (MAY_HAVE_DEBUG_BIND_STMTS && !STMT_VINFO_LIVE_P (stmt_info))
8953 vect_loop_kill_debug_uses (loop, stmt_info);
8954
8955 if (!STMT_VINFO_RELEVANT_P (stmt_info)
8956 && !STMT_VINFO_LIVE_P (stmt_info))
8957 continue;
8958
8959 if (STMT_VINFO_VECTYPE (stmt_info)
8960 && (maybe_ne
8961 (TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info)), vf))
8962 && dump_enabled_p ())
8963 dump_printf_loc (MSG_NOTE, vect_location, "multiple-types.\n");
8964
8965 if ((STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def
8966 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def
8967 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_double_reduction_def
8968 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
8969 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_internal_def)
8970 && ! PURE_SLP_STMT (stmt_info))
8971 {
8972 if (dump_enabled_p ())
8973 dump_printf_loc (MSG_NOTE, vect_location, "transform phi.\n");
8974 vect_transform_stmt (loop_vinfo, stmt_info, NULL, NULL, NULL);
8975 }
8976 }
8977
8978 for (gimple_stmt_iterator si = gsi_start_bb (bb);
8979 !gsi_end_p (si);)
8980 {
8981 stmt = gsi_stmt (si);
8982 /* During vectorization remove existing clobber stmts. */
8983 if (gimple_clobber_p (stmt))
8984 {
8985 unlink_stmt_vdef (stmt);
8986 gsi_remove (&si, true);
8987 release_defs (stmt);
8988 }
8989 else
8990 {
8991 /* Ignore vector stmts created in the outer loop. */
8992 stmt_info = loop_vinfo->lookup_stmt (stmt);
8993
8994 /* vector stmts created in the outer-loop during vectorization of
8995 stmts in an inner-loop may not have a stmt_info, and do not
8996 need to be vectorized. */
8997 stmt_vec_info seen_store = NULL;
8998 if (stmt_info)
8999 {
9000 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
9001 {
9002 gimple *def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info);
9003 for (gimple_stmt_iterator subsi = gsi_start (def_seq);
9004 !gsi_end_p (subsi); gsi_next (&subsi))
9005 {
9006 stmt_vec_info pat_stmt_info
9007 = loop_vinfo->lookup_stmt (gsi_stmt (subsi));
9008 vect_transform_loop_stmt (loop_vinfo, pat_stmt_info,
9009 &si, &seen_store);
9010 }
9011 stmt_vec_info pat_stmt_info
9012 = STMT_VINFO_RELATED_STMT (stmt_info);
9013 vect_transform_loop_stmt (loop_vinfo, pat_stmt_info, &si,
9014 &seen_store);
9015 }
9016 vect_transform_loop_stmt (loop_vinfo, stmt_info, &si,
9017 &seen_store);
9018 }
9019 gsi_next (&si);
9020 if (seen_store)
9021 {
9022 if (STMT_VINFO_GROUPED_ACCESS (seen_store))
9023 /* Interleaving. The vectorization of the
9024 interleaving chain was completed - free
9025 all the stores in the chain. */
9026 vect_remove_stores (loop_vinfo,
9027 DR_GROUP_FIRST_ELEMENT (seen_store));
9028 else
9029 /* Free the attached stmt_vec_info and remove the stmt. */
9030 loop_vinfo->remove_stmt (stmt_info);
9031 }
9032 }
9033 }
9034
9035 /* Fill in backedge defs of reductions. */
9036 for (unsigned i = 0; i < loop_vinfo->reduc_latch_defs.length (); ++i)
9037 {
9038 stmt_vec_info stmt_info = loop_vinfo->reduc_latch_defs[i];
9039 stmt_vec_info orig_stmt_info = vect_orig_stmt (stmt_info);
9040 vec<gimple *> &phi_info
9041 = STMT_VINFO_VEC_STMTS (STMT_VINFO_REDUC_DEF (orig_stmt_info));
9042 vec<gimple *> &vec_stmt
9043 = STMT_VINFO_VEC_STMTS (stmt_info);
9044 gcc_assert (phi_info.length () == vec_stmt.length ());
9045 gphi *phi
9046 = dyn_cast <gphi *> (STMT_VINFO_REDUC_DEF (orig_stmt_info)->stmt);
9047 edge e = loop_latch_edge (gimple_bb (phi_info[0])->loop_father);
9048 for (unsigned j = 0; j < phi_info.length (); ++j)
9049 add_phi_arg (as_a <gphi *> (phi_info[j]),
9050 gimple_get_lhs (vec_stmt[j]), e,
9051 gimple_phi_arg_location (phi, e->dest_idx));
9052 }
9053 for (unsigned i = 0; i < loop_vinfo->reduc_latch_slp_defs.length (); ++i)
9054 {
9055 slp_tree slp_node = loop_vinfo->reduc_latch_slp_defs[i].first;
9056 slp_tree phi_node = loop_vinfo->reduc_latch_slp_defs[i].second;
9057 gphi *phi = as_a <gphi *> (SLP_TREE_SCALAR_STMTS (phi_node)[0]->stmt);
9058 e = loop_latch_edge (gimple_bb (phi)->loop_father);
9059 gcc_assert (SLP_TREE_VEC_STMTS (phi_node).length ()
9060 == SLP_TREE_VEC_STMTS (slp_node).length ());
9061 for (unsigned j = 0; j < SLP_TREE_VEC_STMTS (phi_node).length (); ++j)
9062 add_phi_arg (as_a <gphi *> (SLP_TREE_VEC_STMTS (phi_node)[j]),
9063 vect_get_slp_vect_def (slp_node, j),
9064 e, gimple_phi_arg_location (phi, e->dest_idx));
9065 }
9066
9067 /* Stub out scalar statements that must not survive vectorization.
9068 Doing this here helps with grouped statements, or statements that
9069 are involved in patterns. */
9070 for (gimple_stmt_iterator gsi = gsi_start_bb (bb);
9071 !gsi_end_p (gsi); gsi_next (&gsi))
9072 {
9073 gcall *call = dyn_cast <gcall *> (gsi_stmt (gsi));
9074 if (call && gimple_call_internal_p (call, IFN_MASK_LOAD))
9075 {
9076 tree lhs = gimple_get_lhs (call);
9077 if (!VECTOR_TYPE_P (TREE_TYPE (lhs)))
9078 {
9079 tree zero = build_zero_cst (TREE_TYPE (lhs));
9080 gimple *new_stmt = gimple_build_assign (lhs, zero);
9081 gsi_replace (&gsi, new_stmt, true);
9082 }
9083 }
9084 }
9085 } /* BBs in loop */
9086
9087 /* The vectorization factor is always > 1, so if we use an IV increment of 1,
9088 a zero NITERS becomes a nonzero NITERS_VECTOR. */
9089 if (integer_onep (step_vector))
9090 niters_no_overflow = true;
9091 vect_set_loop_condition (loop, loop_vinfo, niters_vector, step_vector,
9092 niters_vector_mult_vf, !niters_no_overflow);
9093
9094 unsigned int assumed_vf = vect_vf_for_cost (loop_vinfo);
9095 scale_profile_for_vect_loop (loop, assumed_vf);
9096
9097 /* True if the final iteration might not handle a full vector's
9098 worth of scalar iterations. */
9099 bool final_iter_may_be_partial
9100 = LOOP_VINFO_USING_PARTIAL_VECTORS_P (loop_vinfo);
9101 /* The minimum number of iterations performed by the epilogue. This
9102 is 1 when peeling for gaps because we always need a final scalar
9103 iteration. */
9104 int min_epilogue_iters = LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) ? 1 : 0;
9105 /* +1 to convert latch counts to loop iteration counts,
9106 -min_epilogue_iters to remove iterations that cannot be performed
9107 by the vector code. */
9108 int bias_for_lowest = 1 - min_epilogue_iters;
9109 int bias_for_assumed = bias_for_lowest;
9110 int alignment_npeels = LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo);
9111 if (alignment_npeels && LOOP_VINFO_USING_PARTIAL_VECTORS_P (loop_vinfo))
9112 {
9113 /* When the amount of peeling is known at compile time, the first
9114 iteration will have exactly alignment_npeels active elements.
9115 In the worst case it will have at least one. */
9116 int min_first_active = (alignment_npeels > 0 ? alignment_npeels : 1);
9117 bias_for_lowest += lowest_vf - min_first_active;
9118 bias_for_assumed += assumed_vf - min_first_active;
9119 }
9120 /* In these calculations the "- 1" converts loop iteration counts
9121 back to latch counts. */
9122 if (loop->any_upper_bound)
9123 loop->nb_iterations_upper_bound
9124 = (final_iter_may_be_partial
9125 ? wi::udiv_ceil (loop->nb_iterations_upper_bound + bias_for_lowest,
9126 lowest_vf) - 1
9127 : wi::udiv_floor (loop->nb_iterations_upper_bound + bias_for_lowest,
9128 lowest_vf) - 1);
9129 if (loop->any_likely_upper_bound)
9130 loop->nb_iterations_likely_upper_bound
9131 = (final_iter_may_be_partial
9132 ? wi::udiv_ceil (loop->nb_iterations_likely_upper_bound
9133 + bias_for_lowest, lowest_vf) - 1
9134 : wi::udiv_floor (loop->nb_iterations_likely_upper_bound
9135 + bias_for_lowest, lowest_vf) - 1);
9136 if (loop->any_estimate)
9137 loop->nb_iterations_estimate
9138 = (final_iter_may_be_partial
9139 ? wi::udiv_ceil (loop->nb_iterations_estimate + bias_for_assumed,
9140 assumed_vf) - 1
9141 : wi::udiv_floor (loop->nb_iterations_estimate + bias_for_assumed,
9142 assumed_vf) - 1);
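/* A hypothetical worked example of the bound updates above: with
   LOWEST_VF == 4, no peeling for gaps and no partial vectors
   (BIAS_FOR_LOWEST == 1), a scalar latch bound of 11 (at most 12
   iterations) becomes udiv_floor (11 + 1, 4) - 1 == 2, i.e. at most
   three vector iterations. */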
9143
9144 if (dump_enabled_p ())
9145 {
9146 if (!LOOP_VINFO_EPILOGUE_P (loop_vinfo))
9147 {
9148 dump_printf_loc (MSG_NOTE, vect_location,
9149 "LOOP VECTORIZED\n");
9150 if (loop->inner)
9151 dump_printf_loc (MSG_NOTE, vect_location,
9152 "OUTER LOOP VECTORIZED\n");
9153 dump_printf (MSG_NOTE, "\n");
9154 }
9155 else
9156 dump_printf_loc (MSG_NOTE, vect_location,
9157 "LOOP EPILOGUE VECTORIZED (MODE=%s)\n",
9158 GET_MODE_NAME (loop_vinfo->vector_mode));
9159 }
9160
9161 /* Loops vectorized with a variable factor won't benefit from
9162 unrolling/peeling. */
9163 if (!vf.is_constant ())
9164 {
9165 loop->unroll = 1;
9166 if (dump_enabled_p ())
9167 dump_printf_loc (MSG_NOTE, vect_location, "Disabling unrolling due to"
9168 " variable-length vectorization factor\n");
9169 }
9170 /* Free SLP instances here because otherwise stmt reference counting
9171 won't work. */
9172 slp_instance instance;
9173 FOR_EACH_VEC_ELT (LOOP_VINFO_SLP_INSTANCES (loop_vinfo), i, instance)
9174 vect_free_slp_instance (instance, true);
9175 LOOP_VINFO_SLP_INSTANCES (loop_vinfo).release ();
9176 /* Clear the safelen field since its value is invalid after vectorization:
9177 the vectorized loop can have loop-carried dependencies. */
9178 loop->safelen = 0;
9179
9180 if (epilogue)
9181 {
9182 update_epilogue_loop_vinfo (epilogue, advance);
9183
9184 epilogue->simduid = loop->simduid;
9185 epilogue->force_vectorize = loop->force_vectorize;
9186 epilogue->dont_vectorize = false;
9187 }
9188
9189 return epilogue;
9190 }
9191
9192 /* The code below tries to perform a simple optimization - reverting
9193 if-conversion for masked stores: if the mask of a store is zero,
9194 skip the store and, where possible, the statements that produce the
9195 stored values as well. For example,
9196 for (i=0; i<n; i++)
9197 if (c[i])
9198 {
9199 p1[i] += 1;
9200 p2[i] = p3[i] +2;
9201 }
9202 this transformation will produce the following semi-hammock:
9203
9204 if (!mask__ifc__42.18_165 == { 0, 0, 0, 0, 0, 0, 0, 0 })
9205 {
9206 vect__11.19_170 = MASK_LOAD (vectp_p1.20_168, 0B, mask__ifc__42.18_165);
9207 vect__12.22_172 = vect__11.19_170 + vect_cst__171;
9208 MASK_STORE (vectp_p1.23_175, 0B, mask__ifc__42.18_165, vect__12.22_172);
9209 vect__18.25_182 = MASK_LOAD (vectp_p3.26_180, 0B, mask__ifc__42.18_165);
9210 vect__19.28_184 = vect__18.25_182 + vect_cst__183;
9211 MASK_STORE (vectp_p2.29_187, 0B, mask__ifc__42.18_165, vect__19.28_184);
9212 }
9213 */
9214
9215 void
9216 optimize_mask_stores (class loop *loop)
9217 {
9218 basic_block *bbs = get_loop_body (loop);
9219 unsigned nbbs = loop->num_nodes;
9220 unsigned i;
9221 basic_block bb;
9222 class loop *bb_loop;
9223 gimple_stmt_iterator gsi;
9224 gimple *stmt;
9225 auto_vec<gimple *> worklist;
9226 auto_purge_vect_location sentinel;
9227
9228 vect_location = find_loop_location (loop);
9229 /* Pick up all masked stores in loop if any. */
9230 for (i = 0; i < nbbs; i++)
9231 {
9232 bb = bbs[i];
9233 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi);
9234 gsi_next (&gsi))
9235 {
9236 stmt = gsi_stmt (gsi);
9237 if (gimple_call_internal_p (stmt, IFN_MASK_STORE))
9238 worklist.safe_push (stmt);
9239 }
9240 }
9241
9242 free (bbs);
9243 if (worklist.is_empty ())
9244 return;
9245
9246 /* Loop has masked stores. */
9247 while (!worklist.is_empty ())
9248 {
9249 gimple *last, *last_store;
9250 edge e, efalse;
9251 tree mask;
9252 basic_block store_bb, join_bb;
9253 gimple_stmt_iterator gsi_to;
9254 tree vdef, new_vdef;
9255 gphi *phi;
9256 tree vectype;
9257 tree zero;
9258
9259 last = worklist.pop ();
9260 mask = gimple_call_arg (last, 2);
9261 bb = gimple_bb (last);
9262 /* Create then_bb and the if-then structure in the CFG; then_bb belongs
9263 to the same loop as if_bb. It can differ from LOOP when a two-level
9264 loop nest is vectorized and the mask_store belongs to the inner
9265 one. */
9266 e = split_block (bb, last);
9267 bb_loop = bb->loop_father;
9268 gcc_assert (loop == bb_loop || flow_loop_nested_p (loop, bb_loop));
9269 join_bb = e->dest;
9270 store_bb = create_empty_bb (bb);
9271 add_bb_to_loop (store_bb, bb_loop);
9272 e->flags = EDGE_TRUE_VALUE;
9273 efalse = make_edge (bb, store_bb, EDGE_FALSE_VALUE);
9274 /* Make the edge into STORE_BB unlikely. */
9275 efalse->probability = profile_probability::unlikely ();
9276 store_bb->count = efalse->count ();
9277 make_single_succ_edge (store_bb, join_bb, EDGE_FALLTHRU);
9278 if (dom_info_available_p (CDI_DOMINATORS))
9279 set_immediate_dominator (CDI_DOMINATORS, store_bb, bb);
9280 if (dump_enabled_p ())
9281 dump_printf_loc (MSG_NOTE, vect_location,
9282 "Create new block %d to sink mask stores.",
9283 store_bb->index);
9284 /* Create vector comparison with boolean result. */
9285 vectype = TREE_TYPE (mask);
9286 zero = build_zero_cst (vectype);
9287 stmt = gimple_build_cond (EQ_EXPR, mask, zero, NULL_TREE, NULL_TREE);
9288 gsi = gsi_last_bb (bb);
9289 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
9290 /* Create new PHI node for vdef of the last masked store:
9291 .MEM_2 = VDEF <.MEM_1>
9292 will be converted to
9293 .MEM.3 = VDEF <.MEM_1>
9294 and new PHI node will be created in join bb
9295 .MEM_2 = PHI <.MEM_1, .MEM_3>
9296 */
9297 vdef = gimple_vdef (last);
9298 new_vdef = make_ssa_name (gimple_vop (cfun), last);
9299 gimple_set_vdef (last, new_vdef);
9300 phi = create_phi_node (vdef, join_bb);
9301 add_phi_arg (phi, new_vdef, EDGE_SUCC (store_bb, 0), UNKNOWN_LOCATION);
9302
9303 /* Put all masked stores with the same mask to STORE_BB if possible. */
9304 while (true)
9305 {
9306 gimple_stmt_iterator gsi_from;
9307 gimple *stmt1 = NULL;
9308
9309 /* Move masked store to STORE_BB. */
9310 last_store = last;
9311 gsi = gsi_for_stmt (last);
9312 gsi_from = gsi;
9313 /* Shift GSI to the previous stmt for further traversal. */
9314 gsi_prev (&gsi);
9315 gsi_to = gsi_start_bb (store_bb);
9316 gsi_move_before (&gsi_from, &gsi_to);
9317 /* Set GSI_TO to the start of the (now non-empty) block. */
9318 gsi_to = gsi_start_bb (store_bb);
9319 if (dump_enabled_p ())
9320 dump_printf_loc (MSG_NOTE, vect_location,
9321 "Move stmt to created bb\n%G", last);
9322 /* Move all stored value producers if possible. */
9323 while (!gsi_end_p (gsi))
9324 {
9325 tree lhs;
9326 imm_use_iterator imm_iter;
9327 use_operand_p use_p;
9328 bool res;
9329
9330 /* Skip debug statements. */
9331 if (is_gimple_debug (gsi_stmt (gsi)))
9332 {
9333 gsi_prev (&gsi);
9334 continue;
9335 }
9336 stmt1 = gsi_stmt (gsi);
9337 /* Do not consider statements writing to memory or having
9338 volatile operand. */
9339 if (gimple_vdef (stmt1)
9340 || gimple_has_volatile_ops (stmt1))
9341 break;
9342 gsi_from = gsi;
9343 gsi_prev (&gsi);
9344 lhs = gimple_get_lhs (stmt1);
9345 if (!lhs)
9346 break;
9347
9348 /* LHS of vectorized stmt must be SSA_NAME. */
9349 if (TREE_CODE (lhs) != SSA_NAME)
9350 break;
9351
9352 if (!VECTOR_TYPE_P (TREE_TYPE (lhs)))
9353 {
9354 /* Remove dead scalar statement. */
9355 if (has_zero_uses (lhs))
9356 {
9357 gsi_remove (&gsi_from, true);
9358 continue;
9359 }
9360 }
9361
9362 /* Check that LHS does not have uses outside of STORE_BB. */
9363 res = true;
9364 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
9365 {
9366 gimple *use_stmt;
9367 use_stmt = USE_STMT (use_p);
9368 if (is_gimple_debug (use_stmt))
9369 continue;
9370 if (gimple_bb (use_stmt) != store_bb)
9371 {
9372 res = false;
9373 break;
9374 }
9375 }
9376 if (!res)
9377 break;
9378
9379 if (gimple_vuse (stmt1)
9380 && gimple_vuse (stmt1) != gimple_vuse (last_store))
9381 break;
9382
9383 /* Can move STMT1 to STORE_BB. */
9384 if (dump_enabled_p ())
9385 dump_printf_loc (MSG_NOTE, vect_location,
9386 "Move stmt to created bb\n%G", stmt1);
9387 gsi_move_before (&gsi_from, &gsi_to);
9388 /* Shift GSI_TO for further insertion. */
9389 gsi_prev (&gsi_to);
9390 }
9391 /* Put other masked stores with the same mask to STORE_BB. */
9392 if (worklist.is_empty ()
9393 || gimple_call_arg (worklist.last (), 2) != mask
9394 || worklist.last () != stmt1)
9395 break;
9396 last = worklist.pop ();
9397 }
9398 add_phi_arg (phi, gimple_vuse (last_store), e, UNKNOWN_LOCATION);
9399 }
9400 }
9401
9402 /* Decide whether it is possible to use a zero-based induction variable
9403 when vectorizing LOOP_VINFO with partial vectors. If it is, return
9404 the value that the induction variable must be able to hold in order
9405 to ensure that the rgroups eventually have no active vector elements.
9406 Return -1 otherwise. */
9407
9408 widest_int
9409 vect_iv_limit_for_partial_vectors (loop_vec_info loop_vinfo)
9410 {
9411 tree niters_skip = LOOP_VINFO_MASK_SKIP_NITERS (loop_vinfo);
9412 class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
9413 unsigned HOST_WIDE_INT max_vf = vect_max_vf (loop_vinfo);
9414
9415 /* Calculate the value that the induction variable must be able
9416 to hit in order to ensure that we end the loop with an all-false mask.
9417 This involves adding the maximum number of inactive trailing scalar
9418 iterations. */
9419 widest_int iv_limit = -1;
9420 if (max_loop_iterations (loop, &iv_limit))
9421 {
9422 if (niters_skip)
9423 {
9424 /* Add the maximum number of skipped iterations to the
9425 maximum iteration count. */
9426 if (TREE_CODE (niters_skip) == INTEGER_CST)
9427 iv_limit += wi::to_widest (niters_skip);
9428 else
9429 iv_limit += max_vf - 1;
9430 }
9431 else if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo))
9432 /* Make a conservatively-correct assumption. */
9433 iv_limit += max_vf - 1;
9434
9435 /* IV_LIMIT is the maximum number of latch iterations, which is also
9436 the maximum in-range IV value. Round this value down to the previous
9437 vector alignment boundary and then add an extra full iteration. */
9438 poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
9439 iv_limit = (iv_limit & -(int) known_alignment (vf)) + max_vf;
9440 }
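/* A hypothetical numeric example of the rounding above: with VF == 4
   (so MAX_VF == 4), no skipped iterations, no peeling for alignment and
   a maximum latch count of 10, IV_LIMIT becomes (10 & -4) + 4 == 12. */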
9441 return iv_limit;
9442 }
9443
9444 /* For the given rgroup_controls RGC, check whether an induction variable
9445 would ever hit a value that produces a set of all-false masks or zero
9446 lengths before wrapping around. Return true if it's possible to wrap
9447 around before hitting the desirable value, otherwise return false. */
9448
9449 bool
9450 vect_rgroup_iv_might_wrap_p (loop_vec_info loop_vinfo, rgroup_controls *rgc)
9451 {
9452 widest_int iv_limit = vect_iv_limit_for_partial_vectors (loop_vinfo);
9453
9454 if (iv_limit == -1)
9455 return true;
9456
9457 tree compare_type = LOOP_VINFO_RGROUP_COMPARE_TYPE (loop_vinfo);
9458 unsigned int compare_precision = TYPE_PRECISION (compare_type);
9459 unsigned nitems = rgc->max_nscalars_per_iter * rgc->factor;
9460
9461 if (wi::min_precision (iv_limit * nitems, UNSIGNED) > compare_precision)
9462 return true;
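/* For example (hypothetical numbers): with IV_LIMIT == 1000 and
   NITEMS == 4 the IV must be able to reach 4000, which needs 12 bits;
   an 8-bit compare type could therefore wrap and we return true above. */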
9463
9464 return false;
9465 }