1 /* Loop Vectorization
2 Copyright (C) 2003-2018 Free Software Foundation, Inc.
3 Contributed by Dorit Naishlos <dorit@il.ibm.com> and
4 Ira Rosen <irar@il.ibm.com>
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "backend.h"
26 #include "target.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "gimple.h"
30 #include "cfghooks.h"
31 #include "tree-pass.h"
32 #include "ssa.h"
33 #include "optabs-tree.h"
34 #include "diagnostic-core.h"
35 #include "fold-const.h"
36 #include "stor-layout.h"
37 #include "cfganal.h"
38 #include "gimplify.h"
39 #include "gimple-iterator.h"
40 #include "gimplify-me.h"
41 #include "tree-ssa-loop-ivopts.h"
42 #include "tree-ssa-loop-manip.h"
43 #include "tree-ssa-loop-niter.h"
44 #include "tree-ssa-loop.h"
45 #include "cfgloop.h"
46 #include "params.h"
47 #include "tree-scalar-evolution.h"
48 #include "tree-vectorizer.h"
49 #include "gimple-fold.h"
50 #include "cgraph.h"
51 #include "tree-cfg.h"
52 #include "tree-if-conv.h"
53 #include "internal-fn.h"
54 #include "tree-vector-builder.h"
55 #include "vec-perm-indices.h"
56 #include "tree-eh.h"
57
58 /* Loop Vectorization Pass.
59
60 This pass tries to vectorize loops.
61
62 For example, the vectorizer transforms the following simple loop:
63
64 short a[N]; short b[N]; short c[N]; int i;
65
66 for (i=0; i<N; i++){
67 a[i] = b[i] + c[i];
68 }
69
70 as if it was manually vectorized by rewriting the source code into:
71
72 typedef int __attribute__((mode(V8HI))) v8hi;
73 short a[N]; short b[N]; short c[N]; int i;
74 v8hi *pa = (v8hi*)a, *pb = (v8hi*)b, *pc = (v8hi*)c;
75 v8hi va, vb, vc;
76
77 for (i=0; i<N/8; i++){
78 vb = pb[i];
79 vc = pc[i];
80 va = vb + vc;
81 pa[i] = va;
82 }
83
84 The main entry to this pass is vectorize_loops(), in which
85 the vectorizer applies a set of analyses on a given set of loops,
86 followed by the actual vectorization transformation for the loops that
87 had successfully passed the analysis phase.
88 Throughout this pass we make a distinction between two types of
89 data: scalars (which are represented by SSA_NAMES), and memory references
90 ("data-refs"). These two types of data require different handling both
91 during analysis and transformation. The types of data-refs that the
92 vectorizer currently supports are ARRAY_REFS whose base is an array DECL
93 (not a pointer), and INDIRECT_REFS through pointers; both array and pointer
94 accesses are required to have a simple (consecutive) access pattern.
95
96 Analysis phase:
97 ===============
98 The driver for the analysis phase is vect_analyze_loop().
99 It applies a set of analyses, some of which rely on the scalar evolution
100 analyzer (scev) developed by Sebastian Pop.
101
102 During the analysis phase the vectorizer records some information
103 per stmt in a "stmt_vec_info" struct which is attached to each stmt in the
104 loop, as well as general information about the loop as a whole, which is
105 recorded in a "loop_vec_info" struct attached to each loop.
106
107 Transformation phase:
108 =====================
109 The loop transformation phase scans all the stmts in the loop, and
110 creates a vector stmt (or a sequence of stmts) for each scalar stmt S in
111 the loop that needs to be vectorized. It inserts the vector code sequence
112 just before the scalar stmt S, and records a pointer to the vector code
113 in STMT_VINFO_VEC_STMT (stmt_info) (stmt_info is the stmt_vec_info struct
114 attached to S). This pointer will be used for the vectorization of following
115 stmts which use the def of stmt S. Stmt S is removed if it writes to memory;
116 otherwise, we rely on dead code elimination for removing it.
117
118 For example, say stmt S1 was vectorized into stmt VS1:
119
120 VS1: vb = px[i];
121 S1: b = x[i]; STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
122 S2: a = b;
123
124 To vectorize stmt S2, the vectorizer first finds the stmt that defines
125 the operand 'b' (S1), and gets the relevant vector def 'vb' from the
126 vector stmt VS1 pointed to by STMT_VINFO_VEC_STMT (stmt_info (S1)). The
127 resulting sequence would be:
128
129 VS1: vb = px[i];
130 S1: b = x[i]; STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
131 VS2: va = vb;
132 S2: a = b; STMT_VINFO_VEC_STMT (stmt_info (S2)) = VS2
133
134 Operands that are not SSA_NAMEs are data-refs that appear in
135 load/store operations (like 'x[i]' in S1), and are handled differently.
136
137 Target modeling:
138 =================
139 Currently the only target-specific information that is used is the
140 size of the vector (in bytes) - "TARGET_VECTORIZE_UNITS_PER_SIMD_WORD".
141 Targets that can support different vector sizes will, for now, need
142 to specify one value for "TARGET_VECTORIZE_UNITS_PER_SIMD_WORD". More
143 flexibility will be added in the future.
144
145 Since we only vectorize operations whose vector form can be
146 expressed using existing tree codes, to verify that an operation is
147 supported, the vectorizer checks the relevant optab at the relevant
148 machine_mode (e.g., optab_handler (add_optab, V8HImode)). If
149 the value found is CODE_FOR_nothing, then there's no target support, and
150 we can't vectorize the stmt.
151
152 For additional information on this project see:
153 http://gcc.gnu.org/projects/tree-ssa/vectorization.html
154 */
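/* As an illustrative sketch only (the real checks live in the
   vectorizable_* routines and use the vector type chosen for each
   stmt), the support query described above boils down to:

     machine_mode vmode = TYPE_MODE (vectype);
     if (optab_handler (add_optab, vmode) == CODE_FOR_nothing)
       return false;

   where "vectype" stands for the vector type selected for the scalar
   statement being analyzed; a CODE_FOR_nothing result means the target
   has no instruction for the operation in that mode.  */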
155
156 static void vect_estimate_min_profitable_iters (loop_vec_info, int *, int *);
157
158 /* Subroutine of vect_determine_vf_for_stmt that handles only one
159 statement. VECTYPE_MAYBE_SET_P is true if STMT_VINFO_VECTYPE
160 may already be set for general statements (not just data refs). */
161
162 static bool
163 vect_determine_vf_for_stmt_1 (stmt_vec_info stmt_info,
164 bool vectype_maybe_set_p,
165 poly_uint64 *vf,
166 vec<stmt_vec_info > *mask_producers)
167 {
168 gimple *stmt = stmt_info->stmt;
169
170 if ((!STMT_VINFO_RELEVANT_P (stmt_info)
171 && !STMT_VINFO_LIVE_P (stmt_info))
172 || gimple_clobber_p (stmt))
173 {
174 if (dump_enabled_p ())
175 dump_printf_loc (MSG_NOTE, vect_location, "skip.\n");
176 return true;
177 }
178
179 tree stmt_vectype, nunits_vectype;
180 if (!vect_get_vector_types_for_stmt (stmt_info, &stmt_vectype,
181 &nunits_vectype))
182 return false;
183
184 if (stmt_vectype)
185 {
186 if (STMT_VINFO_VECTYPE (stmt_info))
187 /* The only cases in which a vectype has already been set are for stmts
188 that contain a data ref, or for "pattern-stmts" (stmts generated
189 by the vectorizer to represent/replace a certain idiom). */
190 gcc_assert ((STMT_VINFO_DATA_REF (stmt_info)
191 || vectype_maybe_set_p)
192 && STMT_VINFO_VECTYPE (stmt_info) == stmt_vectype);
193 else if (stmt_vectype == boolean_type_node)
194 mask_producers->safe_push (stmt_info);
195 else
196 STMT_VINFO_VECTYPE (stmt_info) = stmt_vectype;
197 }
198
199 if (nunits_vectype)
200 vect_update_max_nunits (vf, nunits_vectype);
201
202 return true;
203 }
204
205 /* Subroutine of vect_determine_vectorization_factor. Set the vector
206 types of STMT_INFO and all attached pattern statements and update
207 the vectorization factor VF accordingly. If some of the statements
208 produce a mask result whose vector type can only be calculated later,
209 add them to MASK_PRODUCERS. Return true on success or false if
210 something prevented vectorization. */
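/* As an illustrative example (not taken from the sources): in

     _1 = x[i];
     _2 = y[i];
     _3 = _1 < _2;
     z[i] = _3 ? 1 : 0;

   the comparison defining _3 has a boolean result, so its vector (mask)
   type depends on the types being compared and on the final
   vectorization factor.  Such statements are queued on MASK_PRODUCERS
   and get their vectype from vect_get_mask_type_for_stmt once the
   vectorization factor is known.  */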
211
212 static bool
213 vect_determine_vf_for_stmt (stmt_vec_info stmt_info, poly_uint64 *vf,
214 vec<stmt_vec_info > *mask_producers)
215 {
216 vec_info *vinfo = stmt_info->vinfo;
217 if (dump_enabled_p ())
218 {
219 dump_printf_loc (MSG_NOTE, vect_location, "==> examining statement: ");
220 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_info->stmt, 0);
221 }
222 if (!vect_determine_vf_for_stmt_1 (stmt_info, false, vf, mask_producers))
223 return false;
224
225 if (STMT_VINFO_IN_PATTERN_P (stmt_info)
226 && STMT_VINFO_RELATED_STMT (stmt_info))
227 {
228 gimple *pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info);
229 stmt_info = STMT_VINFO_RELATED_STMT (stmt_info);
230
231 /* If a pattern statement has def stmts, analyze them too. */
232 for (gimple_stmt_iterator si = gsi_start (pattern_def_seq);
233 !gsi_end_p (si); gsi_next (&si))
234 {
235 stmt_vec_info def_stmt_info = vinfo->lookup_stmt (gsi_stmt (si));
236 if (dump_enabled_p ())
237 {
238 dump_printf_loc (MSG_NOTE, vect_location,
239 "==> examining pattern def stmt: ");
240 dump_gimple_stmt (MSG_NOTE, TDF_SLIM,
241 def_stmt_info->stmt, 0);
242 }
243 if (!vect_determine_vf_for_stmt_1 (def_stmt_info, true,
244 vf, mask_producers))
245 return false;
246 }
247
248 if (dump_enabled_p ())
249 {
250 dump_printf_loc (MSG_NOTE, vect_location,
251 "==> examining pattern statement: ");
252 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_info->stmt, 0);
253 }
254 if (!vect_determine_vf_for_stmt_1 (stmt_info, true, vf, mask_producers))
255 return false;
256 }
257
258 return true;
259 }
260
261 /* Function vect_determine_vectorization_factor
262
263 Determine the vectorization factor (VF). VF is the number of data elements
264 that are operated upon in parallel in a single iteration of the vectorized
265 loop. For example, when vectorizing a loop that operates on 4-byte elements
266 on a target with a 16-byte vector size (VS), the VF is set to 4, since 4
267 elements can fit in a single vector register.
268
269 We currently support vectorization of loops in which all types operated upon
270 are of the same size. Therefore this function currently sets VF according to
271 the size of the types operated upon, and fails if there are multiple sizes
272 in the loop.
273
274 VF is also the factor by which the loop iterations are strip-mined, e.g.:
275 original loop:
276 for (i=0; i<N; i++){
277 a[i] = b[i] + c[i];
278 }
279
280 vectorized loop:
281 for (i=0; i<N; i+=VF){
282 a[i:VF] = b[i:VF] + c[i:VF];
283 }
284 */
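/* A slightly more concrete sketch of the strip-mining above, assuming
   VF == 4 and ignoring the peeling/versioning decisions made elsewhere:

     for (i = 0; i + 4 <= N; i += 4)
       a[i:4] = b[i:4] + c[i:4];
     for (; i < N; i++)
       a[i] = b[i] + c[i];

   The first loop is the vector loop; the second is the scalar epilogue
   that handles the N % 4 leftover iterations (no epilogue is needed if
   the loop is fully masked).  */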
285
286 static bool
287 vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
288 {
289 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
290 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
291 unsigned nbbs = loop->num_nodes;
292 poly_uint64 vectorization_factor = 1;
293 tree scalar_type = NULL_TREE;
294 gphi *phi;
295 tree vectype;
296 stmt_vec_info stmt_info;
297 unsigned i;
298 auto_vec<stmt_vec_info> mask_producers;
299
300 DUMP_VECT_SCOPE ("vect_determine_vectorization_factor");
301
302 for (i = 0; i < nbbs; i++)
303 {
304 basic_block bb = bbs[i];
305
306 for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
307 gsi_next (&si))
308 {
309 phi = si.phi ();
310 stmt_info = loop_vinfo->lookup_stmt (phi);
311 if (dump_enabled_p ())
312 {
313 dump_printf_loc (MSG_NOTE, vect_location, "==> examining phi: ");
314 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
315 }
316
317 gcc_assert (stmt_info);
318
319 if (STMT_VINFO_RELEVANT_P (stmt_info)
320 || STMT_VINFO_LIVE_P (stmt_info))
321 {
322 gcc_assert (!STMT_VINFO_VECTYPE (stmt_info));
323 scalar_type = TREE_TYPE (PHI_RESULT (phi));
324
325 if (dump_enabled_p ())
326 {
327 dump_printf_loc (MSG_NOTE, vect_location,
328 "get vectype for scalar type: ");
329 dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
330 dump_printf (MSG_NOTE, "\n");
331 }
332
333 vectype = get_vectype_for_scalar_type (scalar_type);
334 if (!vectype)
335 {
336 if (dump_enabled_p ())
337 {
338 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
339 "not vectorized: unsupported "
340 "data-type ");
341 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
342 scalar_type);
343 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
344 }
345 return false;
346 }
347 STMT_VINFO_VECTYPE (stmt_info) = vectype;
348
349 if (dump_enabled_p ())
350 {
351 dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
352 dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype);
353 dump_printf (MSG_NOTE, "\n");
354 }
355
356 if (dump_enabled_p ())
357 {
358 dump_printf_loc (MSG_NOTE, vect_location, "nunits = ");
359 dump_dec (MSG_NOTE, TYPE_VECTOR_SUBPARTS (vectype));
360 dump_printf (MSG_NOTE, "\n");
361 }
362
363 vect_update_max_nunits (&vectorization_factor, vectype);
364 }
365 }
366
367 for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
368 gsi_next (&si))
369 {
370 stmt_info = loop_vinfo->lookup_stmt (gsi_stmt (si));
371 if (!vect_determine_vf_for_stmt (stmt_info, &vectorization_factor,
372 &mask_producers))
373 return false;
374 }
375 }
376
377 /* TODO: Analyze cost. Decide if worth while to vectorize. */
378 if (dump_enabled_p ())
379 {
380 dump_printf_loc (MSG_NOTE, vect_location, "vectorization factor = ");
381 dump_dec (MSG_NOTE, vectorization_factor);
382 dump_printf (MSG_NOTE, "\n");
383 }
384
385 if (known_le (vectorization_factor, 1U))
386 {
387 if (dump_enabled_p ())
388 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
389 "not vectorized: unsupported data-type\n");
390 return false;
391 }
392 LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;
393
394 for (i = 0; i < mask_producers.length (); i++)
395 {
396 stmt_info = mask_producers[i];
397 tree mask_type = vect_get_mask_type_for_stmt (stmt_info);
398 if (!mask_type)
399 return false;
400 STMT_VINFO_VECTYPE (stmt_info) = mask_type;
401 }
402
403 return true;
404 }
405
406
407 /* Function vect_is_simple_iv_evolution.
408
409 FORNOW: The evolution of an induction variable in the loop is
410 considered simple if it is an affine (degree-1) polynomial evolution. */
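/* Illustrative example: for "i" in

     for (i = k; i < n; i += 4)

   scev computes the access function {k, +, 4}_loop.  Its evolution part
   is the loop-invariant step 4, so the evolution is "simple" and *INIT
   and *STEP are set to k and 4.  An update such as "i *= 2" does not
   yield an affine chrec, and the function returns false for it.  */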
411
412 static bool
413 vect_is_simple_iv_evolution (unsigned loop_nb, tree access_fn, tree * init,
414 tree * step)
415 {
416 tree init_expr;
417 tree step_expr;
418 tree evolution_part = evolution_part_in_loop_num (access_fn, loop_nb);
419 basic_block bb;
420
421 /* When there is no evolution in this loop, the evolution function
422 is not "simple". */
423 if (evolution_part == NULL_TREE)
424 return false;
425
426 /* When the evolution is a polynomial of degree >= 2
427 the evolution function is not "simple". */
428 if (tree_is_chrec (evolution_part))
429 return false;
430
431 step_expr = evolution_part;
432 init_expr = unshare_expr (initial_condition_in_loop_num (access_fn, loop_nb));
433
434 if (dump_enabled_p ())
435 {
436 dump_printf_loc (MSG_NOTE, vect_location, "step: ");
437 dump_generic_expr (MSG_NOTE, TDF_SLIM, step_expr);
438 dump_printf (MSG_NOTE, ", init: ");
439 dump_generic_expr (MSG_NOTE, TDF_SLIM, init_expr);
440 dump_printf (MSG_NOTE, "\n");
441 }
442
443 *init = init_expr;
444 *step = step_expr;
445
446 if (TREE_CODE (step_expr) != INTEGER_CST
447 && (TREE_CODE (step_expr) != SSA_NAME
448 || ((bb = gimple_bb (SSA_NAME_DEF_STMT (step_expr)))
449 && flow_bb_inside_loop_p (get_loop (cfun, loop_nb), bb))
450 || (!INTEGRAL_TYPE_P (TREE_TYPE (step_expr))
451 && (!SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr))
452 || !flag_associative_math)))
453 && (TREE_CODE (step_expr) != REAL_CST
454 || !flag_associative_math))
455 {
456 if (dump_enabled_p ())
457 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
458 "step unknown.\n");
459 return false;
460 }
461
462 return true;
463 }
464
465 /* Function vect_analyze_scalar_cycles_1.
466
467 Examine the cross iteration def-use cycles of scalar variables
468 in LOOP. LOOP_VINFO represents the loop that is now being
469 considered for vectorization (can be LOOP, or an outer-loop
470 enclosing LOOP). */
471
472 static void
473 vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, struct loop *loop)
474 {
475 basic_block bb = loop->header;
476 tree init, step;
477 auto_vec<stmt_vec_info, 64> worklist;
478 gphi_iterator gsi;
479 bool double_reduc;
480
481 DUMP_VECT_SCOPE ("vect_analyze_scalar_cycles");
482
483 /* First - identify all inductions. Reduction detection assumes that all the
484 inductions have been identified; therefore, this order must not be
485 changed. */
486 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
487 {
488 gphi *phi = gsi.phi ();
489 tree access_fn = NULL;
490 tree def = PHI_RESULT (phi);
491 stmt_vec_info stmt_vinfo = loop_vinfo->lookup_stmt (phi);
492
493 if (dump_enabled_p ())
494 {
495 dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: ");
496 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
497 }
498
499 /* Skip virtual PHIs. The data dependences that are associated with
500 virtual defs/uses (i.e., memory accesses) are analyzed elsewhere. */
501 if (virtual_operand_p (def))
502 continue;
503
504 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_unknown_def_type;
505
506 /* Analyze the evolution function. */
507 access_fn = analyze_scalar_evolution (loop, def);
508 if (access_fn)
509 {
510 STRIP_NOPS (access_fn);
511 if (dump_enabled_p ())
512 {
513 dump_printf_loc (MSG_NOTE, vect_location,
514 "Access function of PHI: ");
515 dump_generic_expr (MSG_NOTE, TDF_SLIM, access_fn);
516 dump_printf (MSG_NOTE, "\n");
517 }
518 STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (stmt_vinfo)
519 = initial_condition_in_loop_num (access_fn, loop->num);
520 STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo)
521 = evolution_part_in_loop_num (access_fn, loop->num);
522 }
523
524 if (!access_fn
525 || !vect_is_simple_iv_evolution (loop->num, access_fn, &init, &step)
526 || (LOOP_VINFO_LOOP (loop_vinfo) != loop
527 && TREE_CODE (step) != INTEGER_CST))
528 {
529 worklist.safe_push (stmt_vinfo);
530 continue;
531 }
532
533 gcc_assert (STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (stmt_vinfo)
534 != NULL_TREE);
535 gcc_assert (STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo) != NULL_TREE);
536
537 if (dump_enabled_p ())
538 dump_printf_loc (MSG_NOTE, vect_location, "Detected induction.\n");
539 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_induction_def;
540 }
541
542
543 /* Second - identify all reductions and nested cycles. */
544 while (worklist.length () > 0)
545 {
546 stmt_vec_info stmt_vinfo = worklist.pop ();
547 gphi *phi = as_a <gphi *> (stmt_vinfo->stmt);
548 tree def = PHI_RESULT (phi);
549
550 if (dump_enabled_p ())
551 {
552 dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: ");
553 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
554 }
555
556 gcc_assert (!virtual_operand_p (def)
557 && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_unknown_def_type);
558
559 stmt_vec_info reduc_stmt_info
560 = vect_force_simple_reduction (loop_vinfo, stmt_vinfo,
561 &double_reduc, false);
562 if (reduc_stmt_info)
563 {
564 if (double_reduc)
565 {
566 if (dump_enabled_p ())
567 dump_printf_loc (MSG_NOTE, vect_location,
568 "Detected double reduction.\n");
569
570 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_double_reduction_def;
571 STMT_VINFO_DEF_TYPE (reduc_stmt_info)
572 = vect_double_reduction_def;
573 }
574 else
575 {
576 if (loop != LOOP_VINFO_LOOP (loop_vinfo))
577 {
578 if (dump_enabled_p ())
579 dump_printf_loc (MSG_NOTE, vect_location,
580 "Detected vectorizable nested cycle.\n");
581
582 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_nested_cycle;
583 STMT_VINFO_DEF_TYPE (reduc_stmt_info) = vect_nested_cycle;
584 }
585 else
586 {
587 if (dump_enabled_p ())
588 dump_printf_loc (MSG_NOTE, vect_location,
589 "Detected reduction.\n");
590
591 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_reduction_def;
592 STMT_VINFO_DEF_TYPE (reduc_stmt_info) = vect_reduction_def;
593 /* Store the reduction cycles for possible vectorization in
594 loop-aware SLP if it was not detected as reduction
595 chain. */
596 if (! REDUC_GROUP_FIRST_ELEMENT (reduc_stmt_info))
597 LOOP_VINFO_REDUCTIONS (loop_vinfo).safe_push
598 (reduc_stmt_info);
599 }
600 }
601 }
602 else
603 if (dump_enabled_p ())
604 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
605 "Unknown def-use cycle pattern.\n");
606 }
607 }
608
609
610 /* Function vect_analyze_scalar_cycles.
611
612 Examine the cross iteration def-use cycles of scalar variables, by
613 analyzing the loop-header PHIs of scalar variables. Classify each
614 cycle as one of the following: invariant, induction, reduction, unknown.
615 We do that for the loop represented by LOOP_VINFO, and also for its
616 inner loop, if one exists.
617 Examples for scalar cycles:
618
619 Example1: reduction:
620
621 loop1:
622 for (i=0; i<N; i++)
623 sum += a[i];
624
625 Example2: induction:
626
627 loop2:
628 for (i=0; i<N; i++)
629 a[i] = i; */
630
631 static void
632 vect_analyze_scalar_cycles (loop_vec_info loop_vinfo)
633 {
634 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
635
636 vect_analyze_scalar_cycles_1 (loop_vinfo, loop);
637
638 /* When vectorizing an outer-loop, the inner-loop is executed sequentially.
639 Reductions in such an inner loop therefore have different properties from
640 the reductions in the nest that gets vectorized:
641 1. When vectorized, they are executed in the same order as in the original
642 scalar loop, so we can't change the order of computation when
643 vectorizing them.
644 2. FIXME: Inner-loop reductions can be used in the inner-loop, so the
645 current checks are too strict. */
646
647 if (loop->inner)
648 vect_analyze_scalar_cycles_1 (loop_vinfo, loop->inner);
649 }
650
651 /* Transfer group and reduction information from STMT_INFO to its
652 pattern stmt. */
653
654 static void
655 vect_fixup_reduc_chain (stmt_vec_info stmt_info)
656 {
657 stmt_vec_info firstp = STMT_VINFO_RELATED_STMT (stmt_info);
658 stmt_vec_info stmtp;
659 gcc_assert (!REDUC_GROUP_FIRST_ELEMENT (firstp)
660 && REDUC_GROUP_FIRST_ELEMENT (stmt_info));
661 REDUC_GROUP_SIZE (firstp) = REDUC_GROUP_SIZE (stmt_info);
662 do
663 {
664 stmtp = STMT_VINFO_RELATED_STMT (stmt_info);
665 REDUC_GROUP_FIRST_ELEMENT (stmtp) = firstp;
666 stmt_info = REDUC_GROUP_NEXT_ELEMENT (stmt_info);
667 if (stmt_info)
668 REDUC_GROUP_NEXT_ELEMENT (stmtp)
669 = STMT_VINFO_RELATED_STMT (stmt_info);
670 }
671 while (stmt_info);
672 STMT_VINFO_DEF_TYPE (stmtp) = vect_reduction_def;
673 }
674
675 /* Fixup scalar cycles that now have their stmts detected as patterns. */
676
677 static void
678 vect_fixup_scalar_cycles_with_patterns (loop_vec_info loop_vinfo)
679 {
680 stmt_vec_info first;
681 unsigned i;
682
683 FOR_EACH_VEC_ELT (LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo), i, first)
684 if (STMT_VINFO_IN_PATTERN_P (first))
685 {
686 stmt_vec_info next = REDUC_GROUP_NEXT_ELEMENT (first);
687 while (next)
688 {
689 if (! STMT_VINFO_IN_PATTERN_P (next))
690 break;
691 next = REDUC_GROUP_NEXT_ELEMENT (next);
692 }
693 /* If not all stmts in the chain are patterns, try to handle
694 the chain without patterns. */
695 if (! next)
696 {
697 vect_fixup_reduc_chain (first);
698 LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo)[i]
699 = STMT_VINFO_RELATED_STMT (first);
700 }
701 }
702 }
703
704 /* Function vect_get_loop_niters.
705
706 Determine how many iterations the loop executes and place the result
707 in NUMBER_OF_ITERATIONS. Place the number of latch iterations
708 in NUMBER_OF_ITERATIONSM1. Place the condition under which the
709 niter information holds in ASSUMPTIONS.
710
711 Return the loop exit condition. */
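/* Hedged example: for a counted loop in do-while form whose body runs
   N times, the latch edge is taken N - 1 times, so
   NUMBER_OF_ITERATIONSM1 is N - 1 and NUMBER_OF_ITERATIONS, the number
   of header executions, is N.  */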
712
713
714 static gcond *
715 vect_get_loop_niters (struct loop *loop, tree *assumptions,
716 tree *number_of_iterations, tree *number_of_iterationsm1)
717 {
718 edge exit = single_exit (loop);
719 struct tree_niter_desc niter_desc;
720 tree niter_assumptions, niter, may_be_zero;
721 gcond *cond = get_loop_exit_condition (loop);
722
723 *assumptions = boolean_true_node;
724 *number_of_iterationsm1 = chrec_dont_know;
725 *number_of_iterations = chrec_dont_know;
726 DUMP_VECT_SCOPE ("get_loop_niters");
727
728 if (!exit)
729 return cond;
730
731 niter = chrec_dont_know;
732 may_be_zero = NULL_TREE;
733 niter_assumptions = boolean_true_node;
734 if (!number_of_iterations_exit_assumptions (loop, exit, &niter_desc, NULL)
735 || chrec_contains_undetermined (niter_desc.niter))
736 return cond;
737
738 niter_assumptions = niter_desc.assumptions;
739 may_be_zero = niter_desc.may_be_zero;
740 niter = niter_desc.niter;
741
742 if (may_be_zero && integer_zerop (may_be_zero))
743 may_be_zero = NULL_TREE;
744
745 if (may_be_zero)
746 {
747 if (COMPARISON_CLASS_P (may_be_zero))
748 {
749 /* Try to combine may_be_zero with assumptions; this can simplify
750 the computation of the niter expression. */
751 if (niter_assumptions && !integer_nonzerop (niter_assumptions))
752 niter_assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
753 niter_assumptions,
754 fold_build1 (TRUTH_NOT_EXPR,
755 boolean_type_node,
756 may_be_zero));
757 else
758 niter = fold_build3 (COND_EXPR, TREE_TYPE (niter), may_be_zero,
759 build_int_cst (TREE_TYPE (niter), 0),
760 rewrite_to_non_trapping_overflow (niter));
761
762 may_be_zero = NULL_TREE;
763 }
764 else if (integer_nonzerop (may_be_zero))
765 {
766 *number_of_iterationsm1 = build_int_cst (TREE_TYPE (niter), 0);
767 *number_of_iterations = build_int_cst (TREE_TYPE (niter), 1);
768 return cond;
769 }
770 else
771 return cond;
772 }
773
774 *assumptions = niter_assumptions;
775 *number_of_iterationsm1 = niter;
776
777 /* We want the number of loop header executions which is the number
778 of latch executions plus one.
779 ??? For UINT_MAX latch executions this number overflows to zero
780 for loops like do { n++; } while (n != 0); */
781 if (niter && !chrec_contains_undetermined (niter))
782 niter = fold_build2 (PLUS_EXPR, TREE_TYPE (niter), unshare_expr (niter),
783 build_int_cst (TREE_TYPE (niter), 1));
784 *number_of_iterations = niter;
785
786 return cond;
787 }
788
789 /* Function bb_in_loop_p
790
791 Used as predicate for dfs order traversal of the loop bbs. */
792
793 static bool
794 bb_in_loop_p (const_basic_block bb, const void *data)
795 {
796 const struct loop *const loop = (const struct loop *)data;
797 if (flow_bb_inside_loop_p (loop, bb))
798 return true;
799 return false;
800 }
801
802
803 /* Create and initialize a new loop_vec_info struct for LOOP_IN, as well as
804 stmt_vec_info structs for all the stmts in LOOP_IN. */
805
806 _loop_vec_info::_loop_vec_info (struct loop *loop_in, vec_info_shared *shared)
807 : vec_info (vec_info::loop, init_cost (loop_in), shared),
808 loop (loop_in),
809 bbs (XCNEWVEC (basic_block, loop->num_nodes)),
810 num_itersm1 (NULL_TREE),
811 num_iters (NULL_TREE),
812 num_iters_unchanged (NULL_TREE),
813 num_iters_assumptions (NULL_TREE),
814 th (0),
815 versioning_threshold (0),
816 vectorization_factor (0),
817 max_vectorization_factor (0),
818 mask_skip_niters (NULL_TREE),
819 mask_compare_type (NULL_TREE),
820 unaligned_dr (NULL),
821 peeling_for_alignment (0),
822 ptr_mask (0),
823 ivexpr_map (NULL),
824 slp_unrolling_factor (1),
825 single_scalar_iteration_cost (0),
826 vectorizable (false),
827 can_fully_mask_p (true),
828 fully_masked_p (false),
829 peeling_for_gaps (false),
830 peeling_for_niter (false),
831 operands_swapped (false),
832 no_data_dependencies (false),
833 has_mask_store (false),
834 scalar_loop (NULL),
835 orig_loop_info (NULL)
836 {
837 /* Create/Update stmt_info for all stmts in the loop. */
838 basic_block *body = get_loop_body (loop);
839 for (unsigned int i = 0; i < loop->num_nodes; i++)
840 {
841 basic_block bb = body[i];
842 gimple_stmt_iterator si;
843
844 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
845 {
846 gimple *phi = gsi_stmt (si);
847 gimple_set_uid (phi, 0);
848 add_stmt (phi);
849 }
850
851 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
852 {
853 gimple *stmt = gsi_stmt (si);
854 gimple_set_uid (stmt, 0);
855 add_stmt (stmt);
856 }
857 }
858 free (body);
859
860 /* CHECKME: We want to visit all BBs before their successors (except for
861 latch blocks, for which this assertion wouldn't hold). In the simple
862 case of the loop forms we allow, a dfs order of the BBs would be the same
863 as a reversed postorder traversal, so we are safe. */
864
865 unsigned int nbbs = dfs_enumerate_from (loop->header, 0, bb_in_loop_p,
866 bbs, loop->num_nodes, loop);
867 gcc_assert (nbbs == loop->num_nodes);
868 }
869
870 /* Free all levels of MASKS. */
871
872 void
873 release_vec_loop_masks (vec_loop_masks *masks)
874 {
875 rgroup_masks *rgm;
876 unsigned int i;
877 FOR_EACH_VEC_ELT (*masks, i, rgm)
878 rgm->masks.release ();
879 masks->release ();
880 }
881
882 /* Free all memory used by the _loop_vec_info, as well as all the
883 stmt_vec_info structs of all the stmts in the loop. */
884
885 _loop_vec_info::~_loop_vec_info ()
886 {
887 int nbbs;
888 gimple_stmt_iterator si;
889 int j;
890
891 /* ??? We're releasing loop_vinfos en bloc. */
892 set_stmt_vec_info_vec (&stmt_vec_infos);
893 nbbs = loop->num_nodes;
894 for (j = 0; j < nbbs; j++)
895 {
896 basic_block bb = bbs[j];
897 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
898 free_stmt_vec_info (gsi_stmt (si));
899
900 for (si = gsi_start_bb (bb); !gsi_end_p (si); )
901 {
902 gimple *stmt = gsi_stmt (si);
903
904 /* We may have broken canonical form by moving a constant
905 into RHS1 of a commutative op. Fix such occurrences. */
906 if (operands_swapped && is_gimple_assign (stmt))
907 {
908 enum tree_code code = gimple_assign_rhs_code (stmt);
909
910 if ((code == PLUS_EXPR
911 || code == POINTER_PLUS_EXPR
912 || code == MULT_EXPR)
913 && CONSTANT_CLASS_P (gimple_assign_rhs1 (stmt)))
914 swap_ssa_operands (stmt,
915 gimple_assign_rhs1_ptr (stmt),
916 gimple_assign_rhs2_ptr (stmt));
917 else if (code == COND_EXPR
918 && CONSTANT_CLASS_P (gimple_assign_rhs2 (stmt)))
919 {
920 tree cond_expr = gimple_assign_rhs1 (stmt);
921 enum tree_code cond_code = TREE_CODE (cond_expr);
922
923 if (TREE_CODE_CLASS (cond_code) == tcc_comparison)
924 {
925 bool honor_nans = HONOR_NANS (TREE_OPERAND (cond_expr,
926 0));
927 cond_code = invert_tree_comparison (cond_code,
928 honor_nans);
929 if (cond_code != ERROR_MARK)
930 {
931 TREE_SET_CODE (cond_expr, cond_code);
932 swap_ssa_operands (stmt,
933 gimple_assign_rhs2_ptr (stmt),
934 gimple_assign_rhs3_ptr (stmt));
935 }
936 }
937 }
938 }
939
940 /* Free stmt_vec_info. */
941 free_stmt_vec_info (stmt);
942 gsi_next (&si);
943 }
944 }
945
946 free (bbs);
947
948 release_vec_loop_masks (&masks);
949 delete ivexpr_map;
950
951 loop->aux = NULL;
952 }
953
954 /* Return an invariant or register for EXPR and emit necessary
955 computations in the LOOP_VINFO loop preheader. */
956
957 tree
958 cse_and_gimplify_to_preheader (loop_vec_info loop_vinfo, tree expr)
959 {
960 if (is_gimple_reg (expr)
961 || is_gimple_min_invariant (expr))
962 return expr;
963
964 if (! loop_vinfo->ivexpr_map)
965 loop_vinfo->ivexpr_map = new hash_map<tree_operand_hash, tree>;
966 tree &cached = loop_vinfo->ivexpr_map->get_or_insert (expr);
967 if (! cached)
968 {
969 gimple_seq stmts = NULL;
970 cached = force_gimple_operand (unshare_expr (expr),
971 &stmts, true, NULL_TREE);
972 if (stmts)
973 {
974 edge e = loop_preheader_edge (LOOP_VINFO_LOOP (loop_vinfo));
975 gsi_insert_seq_on_edge_immediate (e, stmts);
976 }
977 }
978 return cached;
979 }
980
981 /* Return true if we can use CMP_TYPE as the comparison type to produce
982 all masks required to mask LOOP_VINFO. */
983
984 static bool
985 can_produce_all_loop_masks_p (loop_vec_info loop_vinfo, tree cmp_type)
986 {
987 rgroup_masks *rgm;
988 unsigned int i;
989 FOR_EACH_VEC_ELT (LOOP_VINFO_MASKS (loop_vinfo), i, rgm)
990 if (rgm->mask_type != NULL_TREE
991 && !direct_internal_fn_supported_p (IFN_WHILE_ULT,
992 cmp_type, rgm->mask_type,
993 OPTIMIZE_FOR_SPEED))
994 return false;
995 return true;
996 }
997
998 /* Calculate the maximum number of scalars per iteration over all the
999 rgroups in LOOP_VINFO. */
1000
1001 static unsigned int
1002 vect_get_max_nscalars_per_iter (loop_vec_info loop_vinfo)
1003 {
1004 unsigned int res = 1;
1005 unsigned int i;
1006 rgroup_masks *rgm;
1007 FOR_EACH_VEC_ELT (LOOP_VINFO_MASKS (loop_vinfo), i, rgm)
1008 res = MAX (res, rgm->max_nscalars_per_iter);
1009 return res;
1010 }
1011
1012 /* Each statement in LOOP_VINFO can be masked where necessary. Check
1013 whether we can actually generate the masks required. Return true if so,
1014 storing the type of the scalar IV in LOOP_VINFO_MASK_COMPARE_TYPE. */
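/* A worked example of the width computation below (the numbers are
   illustrative only): if niter analysis bounds the loop at 1000
   iterations and the largest rgroup needs 2 mask bits per scalar
   iteration, the limit is 2000, which requires 11 bits.  The first
   integer mode of at least 11 bits that the target supports and for
   which WHILE_ULT can produce every required mask type (typically
   HImode) becomes a candidate, and the search continues up to Pmode so
   that the WHILE operands remain reusable in address calculations.  */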
1015
1016 static bool
1017 vect_verify_full_masking (loop_vec_info loop_vinfo)
1018 {
1019 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1020 unsigned int min_ni_width;
1021
1022 /* Use a normal loop if there are no statements that need masking.
1023 This only happens in rare degenerate cases: it means that the loop
1024 has no loads, no stores, and no live-out values. */
1025 if (LOOP_VINFO_MASKS (loop_vinfo).is_empty ())
1026 return false;
1027
1028 /* Get the maximum number of iterations that is representable
1029 in the counter type. */
1030 tree ni_type = TREE_TYPE (LOOP_VINFO_NITERSM1 (loop_vinfo));
1031 widest_int max_ni = wi::to_widest (TYPE_MAX_VALUE (ni_type)) + 1;
1032
1033 /* Get a more refined estimate for the number of iterations. */
1034 widest_int max_back_edges;
1035 if (max_loop_iterations (loop, &max_back_edges))
1036 max_ni = wi::smin (max_ni, max_back_edges + 1);
1037
1038 /* Account for rgroup masks, in which each bit is replicated N times. */
1039 max_ni *= vect_get_max_nscalars_per_iter (loop_vinfo);
1040
1041 /* Work out how many bits we need to represent the limit. */
1042 min_ni_width = wi::min_precision (max_ni, UNSIGNED);
1043
1044 /* Find a scalar mode for which WHILE_ULT is supported. */
1045 opt_scalar_int_mode cmp_mode_iter;
1046 tree cmp_type = NULL_TREE;
1047 FOR_EACH_MODE_IN_CLASS (cmp_mode_iter, MODE_INT)
1048 {
1049 unsigned int cmp_bits = GET_MODE_BITSIZE (cmp_mode_iter.require ());
1050 if (cmp_bits >= min_ni_width
1051 && targetm.scalar_mode_supported_p (cmp_mode_iter.require ()))
1052 {
1053 tree this_type = build_nonstandard_integer_type (cmp_bits, true);
1054 if (this_type
1055 && can_produce_all_loop_masks_p (loop_vinfo, this_type))
1056 {
1057 /* Although we could stop as soon as we find a valid mode,
1058 it's often better to continue until we hit Pmode, since the
1059 operands to the WHILE are more likely to be reusable in
1060 address calculations. */
1061 cmp_type = this_type;
1062 if (cmp_bits >= GET_MODE_BITSIZE (Pmode))
1063 break;
1064 }
1065 }
1066 }
1067
1068 if (!cmp_type)
1069 return false;
1070
1071 LOOP_VINFO_MASK_COMPARE_TYPE (loop_vinfo) = cmp_type;
1072 return true;
1073 }
1074
1075 /* Calculate the cost of one scalar iteration of the loop. */
1076 static void
1077 vect_compute_single_scalar_iteration_cost (loop_vec_info loop_vinfo)
1078 {
1079 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1080 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
1081 int nbbs = loop->num_nodes, factor;
1082 int innerloop_iters, i;
1083
1084 /* Gather costs for statements in the scalar loop. */
1085
1086 /* FORNOW. */
1087 innerloop_iters = 1;
1088 if (loop->inner)
1089 innerloop_iters = 50; /* FIXME */
1090
1091 for (i = 0; i < nbbs; i++)
1092 {
1093 gimple_stmt_iterator si;
1094 basic_block bb = bbs[i];
1095
1096 if (bb->loop_father == loop->inner)
1097 factor = innerloop_iters;
1098 else
1099 factor = 1;
1100
1101 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
1102 {
1103 gimple *stmt = gsi_stmt (si);
1104 stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (stmt);
1105
1106 if (!is_gimple_assign (stmt) && !is_gimple_call (stmt))
1107 continue;
1108
1109 /* Skip stmts that are not vectorized inside the loop. */
1110 if (stmt_info
1111 && !STMT_VINFO_RELEVANT_P (stmt_info)
1112 && (!STMT_VINFO_LIVE_P (stmt_info)
1113 || !VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_info)))
1114 && !STMT_VINFO_IN_PATTERN_P (stmt_info))
1115 continue;
1116
1117 vect_cost_for_stmt kind;
1118 if (STMT_VINFO_DATA_REF (stmt_info))
1119 {
1120 if (DR_IS_READ (STMT_VINFO_DATA_REF (stmt_info)))
1121 kind = scalar_load;
1122 else
1123 kind = scalar_store;
1124 }
1125 else
1126 kind = scalar_stmt;
1127
1128 record_stmt_cost (&LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo),
1129 factor, kind, stmt_info, 0, vect_prologue);
1130 }
1131 }
1132
1133 /* Now accumulate cost. */
1134 void *target_cost_data = init_cost (loop);
1135 stmt_info_for_cost *si;
1136 int j;
1137 FOR_EACH_VEC_ELT (LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo),
1138 j, si)
1139 (void) add_stmt_cost (target_cost_data, si->count,
1140 si->kind, si->stmt_info, si->misalign,
1141 vect_body);
1142 unsigned dummy, body_cost = 0;
1143 finish_cost (target_cost_data, &dummy, &body_cost, &dummy);
1144 destroy_cost_data (target_cost_data);
1145 LOOP_VINFO_SINGLE_SCALAR_ITERATION_COST (loop_vinfo) = body_cost;
1146 }
1147
1148
1149 /* Function vect_analyze_loop_form_1.
1150
1151 Verify that certain CFG restrictions hold, including:
1152 - the loop has a pre-header
1153 - the loop has a single entry and exit
1154 - the loop exit condition is simple enough
1155 - the number of iterations can be analyzed, i.e., it is a countable loop.
1156 The niter may only be analyzable under some assumptions. */
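/* Illustrative example: a loop body containing an early exit such as

     if (a[i] == 0)
       break;

   gives the loop two exits and is rejected below, whereas an internal
   if/else with no early exit only violates the two-BB requirement and
   can still be vectorized once if-conversion has flattened the body
   into a single block.  */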
1157
1158 bool
1159 vect_analyze_loop_form_1 (struct loop *loop, gcond **loop_cond,
1160 tree *assumptions, tree *number_of_iterationsm1,
1161 tree *number_of_iterations, gcond **inner_loop_cond)
1162 {
1163 DUMP_VECT_SCOPE ("vect_analyze_loop_form");
1164
1165 /* Different restrictions apply when we are considering an inner-most loop,
1166 vs. an outer (nested) loop.
1167 (FORNOW. May want to relax some of these restrictions in the future). */
1168
1169 if (!loop->inner)
1170 {
1171 /* Inner-most loop. We currently require that the number of BBs is
1172 exactly 2 (the header and latch). Vectorizable inner-most loops
1173 look like this:
1174
1175 (pre-header)
1176 |
1177 header <--------+
1178 | | |
1179 | +--> latch --+
1180 |
1181 (exit-bb) */
1182
1183 if (loop->num_nodes != 2)
1184 {
1185 if (dump_enabled_p ())
1186 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1187 "not vectorized: control flow in loop.\n");
1188 return false;
1189 }
1190
1191 if (empty_block_p (loop->header))
1192 {
1193 if (dump_enabled_p ())
1194 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1195 "not vectorized: empty loop.\n");
1196 return false;
1197 }
1198 }
1199 else
1200 {
1201 struct loop *innerloop = loop->inner;
1202 edge entryedge;
1203
1204 /* Nested loop. We currently require that the loop is doubly-nested,
1205 contains a single inner loop, and the number of BBs is exactly 5.
1206 Vectorizable outer-loops look like this:
1207
1208 (pre-header)
1209 |
1210 header <---+
1211 | |
1212 inner-loop |
1213 | |
1214 tail ------+
1215 |
1216 (exit-bb)
1217
1218 The inner-loop has the properties expected of inner-most loops
1219 as described above. */
1220
1221 if ((loop->inner)->inner || (loop->inner)->next)
1222 {
1223 if (dump_enabled_p ())
1224 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1225 "not vectorized: multiple nested loops.\n");
1226 return false;
1227 }
1228
1229 if (loop->num_nodes != 5)
1230 {
1231 if (dump_enabled_p ())
1232 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1233 "not vectorized: control flow in loop.\n");
1234 return false;
1235 }
1236
1237 entryedge = loop_preheader_edge (innerloop);
1238 if (entryedge->src != loop->header
1239 || !single_exit (innerloop)
1240 || single_exit (innerloop)->dest != EDGE_PRED (loop->latch, 0)->src)
1241 {
1242 if (dump_enabled_p ())
1243 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1244 "not vectorized: unsupported outerloop form.\n");
1245 return false;
1246 }
1247
1248 /* Analyze the inner-loop. */
1249 tree inner_niterm1, inner_niter, inner_assumptions;
1250 if (! vect_analyze_loop_form_1 (loop->inner, inner_loop_cond,
1251 &inner_assumptions, &inner_niterm1,
1252 &inner_niter, NULL)
1253 /* Don't support analyzing niter under assumptions for inner
1254 loop. */
1255 || !integer_onep (inner_assumptions))
1256 {
1257 if (dump_enabled_p ())
1258 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1259 "not vectorized: Bad inner loop.\n");
1260 return false;
1261 }
1262
1263 if (!expr_invariant_in_loop_p (loop, inner_niter))
1264 {
1265 if (dump_enabled_p ())
1266 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1267 "not vectorized: inner-loop count not"
1268 " invariant.\n");
1269 return false;
1270 }
1271
1272 if (dump_enabled_p ())
1273 dump_printf_loc (MSG_NOTE, vect_location,
1274 "Considering outer-loop vectorization.\n");
1275 }
1276
1277 if (!single_exit (loop)
1278 || EDGE_COUNT (loop->header->preds) != 2)
1279 {
1280 if (dump_enabled_p ())
1281 {
1282 if (!single_exit (loop))
1283 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1284 "not vectorized: multiple exits.\n");
1285 else if (EDGE_COUNT (loop->header->preds) != 2)
1286 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1287 "not vectorized: too many incoming edges.\n");
1288 }
1289 return false;
1290 }
1291
1292 /* We assume that the loop exit condition is at the end of the loop, i.e.,
1293 that the loop is represented as a do-while (with a proper if-guard
1294 before the loop if needed), where the loop header contains all the
1295 executable statements, and the latch is empty. */
1296 if (!empty_block_p (loop->latch)
1297 || !gimple_seq_empty_p (phi_nodes (loop->latch)))
1298 {
1299 if (dump_enabled_p ())
1300 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1301 "not vectorized: latch block not empty.\n");
1302 return false;
1303 }
1304
1305 /* Make sure the exit is not abnormal. */
1306 edge e = single_exit (loop);
1307 if (e->flags & EDGE_ABNORMAL)
1308 {
1309 if (dump_enabled_p ())
1310 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1311 "not vectorized: abnormal loop exit edge.\n");
1312 return false;
1313 }
1314
1315 *loop_cond = vect_get_loop_niters (loop, assumptions, number_of_iterations,
1316 number_of_iterationsm1);
1317 if (!*loop_cond)
1318 {
1319 if (dump_enabled_p ())
1320 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1321 "not vectorized: complicated exit condition.\n");
1322 return false;
1323 }
1324
1325 if (integer_zerop (*assumptions)
1326 || !*number_of_iterations
1327 || chrec_contains_undetermined (*number_of_iterations))
1328 {
1329 if (dump_enabled_p ())
1330 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1331 "not vectorized: number of iterations cannot be "
1332 "computed.\n");
1333 return false;
1334 }
1335
1336 if (integer_zerop (*number_of_iterations))
1337 {
1338 if (dump_enabled_p ())
1339 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1340 "not vectorized: number of iterations = 0.\n");
1341 return false;
1342 }
1343
1344 return true;
1345 }
1346
1347 /* Analyze LOOP form and return a loop_vec_info if it is of suitable form. */
1348
1349 loop_vec_info
1350 vect_analyze_loop_form (struct loop *loop, vec_info_shared *shared)
1351 {
1352 tree assumptions, number_of_iterations, number_of_iterationsm1;
1353 gcond *loop_cond, *inner_loop_cond = NULL;
1354
1355 if (! vect_analyze_loop_form_1 (loop, &loop_cond,
1356 &assumptions, &number_of_iterationsm1,
1357 &number_of_iterations, &inner_loop_cond))
1358 return NULL;
1359
1360 loop_vec_info loop_vinfo = new _loop_vec_info (loop, shared);
1361 LOOP_VINFO_NITERSM1 (loop_vinfo) = number_of_iterationsm1;
1362 LOOP_VINFO_NITERS (loop_vinfo) = number_of_iterations;
1363 LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo) = number_of_iterations;
1364 if (!integer_onep (assumptions))
1365 {
1366 /* We consider vectorizing this loop by versioning it under
1367 some assumptions. In order to do this, we need to clear
1368 existing information computed by the scev and niter analyzers. */
1369 scev_reset_htab ();
1370 free_numbers_of_iterations_estimates (loop);
1371 /* Also set flag for this loop so that following scev and niter
1372 analysis are done under the assumptions. */
1373 loop_constraint_set (loop, LOOP_C_FINITE);
1374 /* Also record the assumptions for versioning. */
1375 LOOP_VINFO_NITERS_ASSUMPTIONS (loop_vinfo) = assumptions;
1376 }
1377
1378 if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
1379 {
1380 if (dump_enabled_p ())
1381 {
1382 dump_printf_loc (MSG_NOTE, vect_location,
1383 "Symbolic number of iterations is ");
1384 dump_generic_expr (MSG_NOTE, TDF_DETAILS, number_of_iterations);
1385 dump_printf (MSG_NOTE, "\n");
1386 }
1387 }
1388
1389 stmt_vec_info loop_cond_info = loop_vinfo->lookup_stmt (loop_cond);
1390 STMT_VINFO_TYPE (loop_cond_info) = loop_exit_ctrl_vec_info_type;
1391 if (inner_loop_cond)
1392 {
1393 stmt_vec_info inner_loop_cond_info
1394 = loop_vinfo->lookup_stmt (inner_loop_cond);
1395 STMT_VINFO_TYPE (inner_loop_cond_info) = loop_exit_ctrl_vec_info_type;
1396 }
1397
1398 gcc_assert (!loop->aux);
1399 loop->aux = loop_vinfo;
1400 return loop_vinfo;
1401 }
1402
1403
1404
1405 /* Scan the loop stmts and, depending on whether there are any (non-)SLP
1406 statements, update the vectorization factor. */
1407
1408 static void
1409 vect_update_vf_for_slp (loop_vec_info loop_vinfo)
1410 {
1411 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1412 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
1413 int nbbs = loop->num_nodes;
1414 poly_uint64 vectorization_factor;
1415 int i;
1416
1417 DUMP_VECT_SCOPE ("vect_update_vf_for_slp");
1418
1419 vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1420 gcc_assert (known_ne (vectorization_factor, 0U));
1421
1422 /* If all the stmts in the loop can be SLPed, we perform only SLP, and
1423 the vectorization factor of the loop is the unrolling factor required by
1424 the SLP instances. If that unrolling factor is 1, we say that we
1425 perform pure SLP on the loop - cross-iteration parallelism is not
1426 exploited. */
1427 bool only_slp_in_loop = true;
1428 for (i = 0; i < nbbs; i++)
1429 {
1430 basic_block bb = bbs[i];
1431 for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
1432 gsi_next (&si))
1433 {
1434 stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (gsi_stmt (si));
1435 if (STMT_VINFO_IN_PATTERN_P (stmt_info)
1436 && STMT_VINFO_RELATED_STMT (stmt_info))
1437 stmt_info = STMT_VINFO_RELATED_STMT (stmt_info);
1438 if ((STMT_VINFO_RELEVANT_P (stmt_info)
1439 || VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_info)))
1440 && !PURE_SLP_STMT (stmt_info))
1441 /* STMT needs both SLP and loop-based vectorization. */
1442 only_slp_in_loop = false;
1443 }
1444 }
1445
1446 if (only_slp_in_loop)
1447 {
1448 dump_printf_loc (MSG_NOTE, vect_location,
1449 "Loop contains only SLP stmts\n");
1450 vectorization_factor = LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo);
1451 }
1452 else
1453 {
1454 dump_printf_loc (MSG_NOTE, vect_location,
1455 "Loop contains SLP and non-SLP stmts\n");
1456 /* Both the vectorization factor and unroll factor have the form
1457 current_vector_size * X for some rational X, so they must have
1458 a common multiple. */
1459 vectorization_factor
1460 = force_common_multiple (vectorization_factor,
1461 LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo));
1462 }
1463
1464 LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;
1465 if (dump_enabled_p ())
1466 {
1467 dump_printf_loc (MSG_NOTE, vect_location,
1468 "Updating vectorization factor to ");
1469 dump_dec (MSG_NOTE, vectorization_factor);
1470 dump_printf (MSG_NOTE, ".\n");
1471 }
1472 }
1473
1474 /* Return true if STMT_INFO describes a double reduction phi and if
1475 the other phi in the reduction is also relevant for vectorization.
1476 This rejects cases such as:
1477
1478 outer1:
1479 x_1 = PHI <x_3(outer2), ...>;
1480 ...
1481
1482 inner:
1483 x_2 = ...;
1484 ...
1485
1486 outer2:
1487 x_3 = PHI <x_2(inner)>;
1488
1489 if nothing in x_2 or elsewhere makes x_1 relevant. */
1490
1491 static bool
1492 vect_active_double_reduction_p (stmt_vec_info stmt_info)
1493 {
1494 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_double_reduction_def)
1495 return false;
1496
1497 return STMT_VINFO_RELEVANT_P (STMT_VINFO_REDUC_DEF (stmt_info));
1498 }
1499
1500 /* Function vect_analyze_loop_operations.
1501
1502 Scan the loop stmts and make sure they are all vectorizable. */
1503
1504 static bool
1505 vect_analyze_loop_operations (loop_vec_info loop_vinfo)
1506 {
1507 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1508 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
1509 int nbbs = loop->num_nodes;
1510 int i;
1511 stmt_vec_info stmt_info;
1512 bool need_to_vectorize = false;
1513 bool ok;
1514
1515 DUMP_VECT_SCOPE ("vect_analyze_loop_operations");
1516
1517 stmt_vector_for_cost cost_vec;
1518 cost_vec.create (2);
1519
1520 for (i = 0; i < nbbs; i++)
1521 {
1522 basic_block bb = bbs[i];
1523
1524 for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
1525 gsi_next (&si))
1526 {
1527 gphi *phi = si.phi ();
1528 ok = true;
1529
1530 stmt_info = loop_vinfo->lookup_stmt (phi);
1531 if (dump_enabled_p ())
1532 {
1533 dump_printf_loc (MSG_NOTE, vect_location, "examining phi: ");
1534 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
1535 }
1536 if (virtual_operand_p (gimple_phi_result (phi)))
1537 continue;
1538
1539 /* Inner-loop loop-closed exit phi in outer-loop vectorization
1540 (i.e., a phi in the tail of the outer-loop). */
1541 if (! is_loop_header_bb_p (bb))
1542 {
1543 /* FORNOW: we currently don't support the case that these phis
1544 are not used in the outer loop (unless it is a double reduction,
1545 i.e., this phi is vect_reduction_def), because that case
1546 would require us to actually do something here. */
1547 if (STMT_VINFO_LIVE_P (stmt_info)
1548 && !vect_active_double_reduction_p (stmt_info))
1549 {
1550 if (dump_enabled_p ())
1551 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1552 "Unsupported loop-closed phi in "
1553 "outer-loop.\n");
1554 return false;
1555 }
1556
1557 /* If PHI is used in the outer loop, we check that its operand
1558 is defined in the inner loop. */
1559 if (STMT_VINFO_RELEVANT_P (stmt_info))
1560 {
1561 tree phi_op;
1562
1563 if (gimple_phi_num_args (phi) != 1)
1564 return false;
1565
1566 phi_op = PHI_ARG_DEF (phi, 0);
1567 stmt_vec_info op_def_info = loop_vinfo->lookup_def (phi_op);
1568 if (!op_def_info)
1569 return false;
1570
1571 if (STMT_VINFO_RELEVANT (op_def_info) != vect_used_in_outer
1572 && (STMT_VINFO_RELEVANT (op_def_info)
1573 != vect_used_in_outer_by_reduction))
1574 return false;
1575 }
1576
1577 continue;
1578 }
1579
1580 gcc_assert (stmt_info);
1581
1582 if ((STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_scope
1583 || STMT_VINFO_LIVE_P (stmt_info))
1584 && STMT_VINFO_DEF_TYPE (stmt_info) != vect_induction_def)
1585 {
1586 /* A scalar-dependence cycle that we don't support. */
1587 if (dump_enabled_p ())
1588 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1589 "not vectorized: scalar dependence cycle.\n");
1590 return false;
1591 }
1592
1593 if (STMT_VINFO_RELEVANT_P (stmt_info))
1594 {
1595 need_to_vectorize = true;
1596 if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def
1597 && ! PURE_SLP_STMT (stmt_info))
1598 ok = vectorizable_induction (stmt_info, NULL, NULL, NULL,
1599 &cost_vec);
1600 else if ((STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def
1601 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle)
1602 && ! PURE_SLP_STMT (stmt_info))
1603 ok = vectorizable_reduction (stmt_info, NULL, NULL, NULL, NULL,
1604 &cost_vec);
1605 }
1606
1607 /* SLP PHIs are tested by vect_slp_analyze_node_operations. */
1608 if (ok
1609 && STMT_VINFO_LIVE_P (stmt_info)
1610 && !PURE_SLP_STMT (stmt_info))
1611 ok = vectorizable_live_operation (stmt_info, NULL, NULL, -1, NULL,
1612 &cost_vec);
1613
1614 if (!ok)
1615 {
1616 if (dump_enabled_p ())
1617 {
1618 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1619 "not vectorized: relevant phi not "
1620 "supported: ");
1621 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, phi, 0);
1622 }
1623 return false;
1624 }
1625 }
1626
1627 for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
1628 gsi_next (&si))
1629 {
1630 gimple *stmt = gsi_stmt (si);
1631 if (!gimple_clobber_p (stmt)
1632 && !vect_analyze_stmt (loop_vinfo->lookup_stmt (stmt),
1633 &need_to_vectorize,
1634 NULL, NULL, &cost_vec))
1635 return false;
1636 }
1637 } /* bbs */
1638
1639 add_stmt_costs (loop_vinfo->target_cost_data, &cost_vec);
1640 cost_vec.release ();
1641
1642 /* All operations in the loop are either irrelevant (they deal with loop
1643 control, or are dead), or are only used outside the loop and can be moved
1644 out of the loop (e.g. invariants, inductions). The loop can be
1645 optimized away by scalar optimizations. We're better off not
1646 touching this loop. */
1647 if (!need_to_vectorize)
1648 {
1649 if (dump_enabled_p ())
1650 dump_printf_loc (MSG_NOTE, vect_location,
1651 "All the computation can be taken out of the loop.\n");
1652 if (dump_enabled_p ())
1653 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1654 "not vectorized: redundant loop. no profit to "
1655 "vectorize.\n");
1656 return false;
1657 }
1658
1659 return true;
1660 }
1661
1662 /* Analyze the cost of the loop described by LOOP_VINFO. Decide if it
1663 is worthwhile to vectorize. Return 1 if definitely yes, 0 if
1664 definitely no, or -1 if it's worth retrying. */
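/* A hedged numeric sketch of the threshold logic below: with an assumed
   VF of 4, --param min-vect-loop-bound=2 and a computed
   min_profitable_iters of 10, the threshold becomes MAX (2 * 4, 10)
   = 10, so a loop known to execute fewer than 10 iterations is reported
   as not profitable to vectorize.  */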
1665
1666 static int
1667 vect_analyze_loop_costing (loop_vec_info loop_vinfo)
1668 {
1669 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1670 unsigned int assumed_vf = vect_vf_for_cost (loop_vinfo);
1671
1672 /* Only fully-masked loops can have iteration counts less than the
1673 vectorization factor. */
1674 if (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
1675 {
1676 HOST_WIDE_INT max_niter;
1677
1678 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
1679 max_niter = LOOP_VINFO_INT_NITERS (loop_vinfo);
1680 else
1681 max_niter = max_stmt_executions_int (loop);
1682
1683 if (max_niter != -1
1684 && (unsigned HOST_WIDE_INT) max_niter < assumed_vf)
1685 {
1686 if (dump_enabled_p ())
1687 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1688 "not vectorized: iteration count smaller than "
1689 "vectorization factor.\n");
1690 return 0;
1691 }
1692 }
1693
1694 int min_profitable_iters, min_profitable_estimate;
1695 vect_estimate_min_profitable_iters (loop_vinfo, &min_profitable_iters,
1696 &min_profitable_estimate);
1697
1698 if (min_profitable_iters < 0)
1699 {
1700 if (dump_enabled_p ())
1701 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1702 "not vectorized: vectorization not profitable.\n");
1703 if (dump_enabled_p ())
1704 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1705 "not vectorized: vector version will never be "
1706 "profitable.\n");
1707 return -1;
1708 }
1709
1710 int min_scalar_loop_bound = (PARAM_VALUE (PARAM_MIN_VECT_LOOP_BOUND)
1711 * assumed_vf);
1712
1713 /* Use the cost model only if it is more conservative than the
1714 user-specified threshold. */
1715 unsigned int th = (unsigned) MAX (min_scalar_loop_bound,
1716 min_profitable_iters);
1717
1718 LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo) = th;
1719
1720 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
1721 && LOOP_VINFO_INT_NITERS (loop_vinfo) < th)
1722 {
1723 if (dump_enabled_p ())
1724 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1725 "not vectorized: vectorization not profitable.\n");
1726 if (dump_enabled_p ())
1727 dump_printf_loc (MSG_NOTE, vect_location,
1728 "not vectorized: iteration count smaller than user "
1729 "specified loop bound parameter or minimum profitable "
1730 "iterations (whichever is more conservative).\n");
1731 return 0;
1732 }
1733
1734 HOST_WIDE_INT estimated_niter = estimated_stmt_executions_int (loop);
1735 if (estimated_niter == -1)
1736 estimated_niter = likely_max_stmt_executions_int (loop);
1737 if (estimated_niter != -1
1738 && ((unsigned HOST_WIDE_INT) estimated_niter
1739 < MAX (th, (unsigned) min_profitable_estimate)))
1740 {
1741 if (dump_enabled_p ())
1742 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1743 "not vectorized: estimated iteration count too "
1744 "small.\n");
1745 if (dump_enabled_p ())
1746 dump_printf_loc (MSG_NOTE, vect_location,
1747 "not vectorized: estimated iteration count smaller "
1748 "than specified loop bound parameter or minimum "
1749 "profitable iterations (whichever is more "
1750 "conservative).\n");
1751 return -1;
1752 }
1753
1754 return 1;
1755 }
1756
1757 static bool
1758 vect_get_datarefs_in_loop (loop_p loop, basic_block *bbs,
1759 vec<data_reference_p> *datarefs,
1760 unsigned int *n_stmts)
1761 {
1762 *n_stmts = 0;
1763 for (unsigned i = 0; i < loop->num_nodes; i++)
1764 for (gimple_stmt_iterator gsi = gsi_start_bb (bbs[i]);
1765 !gsi_end_p (gsi); gsi_next (&gsi))
1766 {
1767 gimple *stmt = gsi_stmt (gsi);
1768 if (is_gimple_debug (stmt))
1769 continue;
1770 ++(*n_stmts);
1771 if (!vect_find_stmt_data_reference (loop, stmt, datarefs))
1772 {
1773 if (is_gimple_call (stmt) && loop->safelen)
1774 {
1775 tree fndecl = gimple_call_fndecl (stmt), op;
1776 if (fndecl != NULL_TREE)
1777 {
1778 cgraph_node *node = cgraph_node::get (fndecl);
1779 if (node != NULL && node->simd_clones != NULL)
1780 {
1781 unsigned int j, n = gimple_call_num_args (stmt);
1782 for (j = 0; j < n; j++)
1783 {
1784 op = gimple_call_arg (stmt, j);
1785 if (DECL_P (op)
1786 || (REFERENCE_CLASS_P (op)
1787 && get_base_address (op)))
1788 break;
1789 }
1790 op = gimple_call_lhs (stmt);
1791 /* Ignore #pragma omp declare simd functions
1792 if they don't have data references in the
1793 call stmt itself. */
1794 if (j == n
1795 && !(op
1796 && (DECL_P (op)
1797 || (REFERENCE_CLASS_P (op)
1798 && get_base_address (op)))))
1799 continue;
1800 }
1801 }
1802 }
1803 return false;
1804 }
1805 /* If dependence analysis will give up due to the limit on the
1806 number of datarefs, stop here and fail fatally. */
1807 if (datarefs->length ()
1808 > (unsigned)PARAM_VALUE (PARAM_LOOP_MAX_DATAREFS_FOR_DATADEPS))
1809 return false;
1810 }
1811 return true;
1812 }
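/* For illustration only (hypothetical user code, not taken from GCC):
   a call to a "#pragma omp declare simd" function whose arguments and
   result are plain scalars carries no data reference in the call
   statement itself, so the special case above keeps scanning instead
   of giving up:

     #pragma omp declare simd
     extern int f (int x);

     void g (int *a, int n)
     {
     #pragma omp simd
       for (int i = 0; i < n; i++)
	 a[i] = f (i);
     }

   Here the only data reference in the loop body is the store to a[i];
   the call to f is tolerated because the enclosing simd loop has a
   nonzero safelen and f is expected to have SIMD clones.  */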
1813
1814 /* Function vect_analyze_loop_2.
1815
1816 Apply a set of analyses on LOOP, and create a loop_vec_info struct
1817 for it. The different analyses will record information in the
1818 loop_vec_info struct. */
1819 static bool
1820 vect_analyze_loop_2 (loop_vec_info loop_vinfo, bool &fatal, unsigned *n_stmts)
1821 {
1822 bool ok;
1823 int res;
1824 unsigned int max_vf = MAX_VECTORIZATION_FACTOR;
1825 poly_uint64 min_vf = 2;
1826
1827 /* The first group of checks is independent of the vector size. */
1828 fatal = true;
1829
1830 /* Find all data references in the loop (which correspond to vdefs/vuses)
1831 and analyze their evolution in the loop. */
1832
1833 loop_p loop = LOOP_VINFO_LOOP (loop_vinfo);
1834
1835 /* Gather the data references and count stmts in the loop. */
1836 if (!LOOP_VINFO_DATAREFS (loop_vinfo).exists ())
1837 {
1838 if (!vect_get_datarefs_in_loop (loop, LOOP_VINFO_BBS (loop_vinfo),
1839 &LOOP_VINFO_DATAREFS (loop_vinfo),
1840 n_stmts))
1841 {
1842 if (dump_enabled_p ())
1843 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1844 "not vectorized: loop contains function "
1845 "calls or data references that cannot "
1846 "be analyzed\n");
1847 return false;
1848 }
1849 loop_vinfo->shared->save_datarefs ();
1850 }
1851 else
1852 loop_vinfo->shared->check_datarefs ();
1853
1854 /* Analyze the data references and also adjust the minimal
1855 vectorization factor according to the loads and stores. */
1856
1857 ok = vect_analyze_data_refs (loop_vinfo, &min_vf);
1858 if (!ok)
1859 {
1860 if (dump_enabled_p ())
1861 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1862 "bad data references.\n");
1863 return false;
1864 }
1865
1866 /* Classify all cross-iteration scalar data-flow cycles.
1867 Cross-iteration cycles caused by virtual phis are analyzed separately. */
1868 vect_analyze_scalar_cycles (loop_vinfo);
1869
1870 vect_pattern_recog (loop_vinfo);
1871
1872 vect_fixup_scalar_cycles_with_patterns (loop_vinfo);
1873
1874 /* Analyze the access patterns of the data-refs in the loop (consecutive,
1875 complex, etc.). FORNOW: Only handle consecutive access pattern. */
1876
1877 ok = vect_analyze_data_ref_accesses (loop_vinfo);
1878 if (!ok)
1879 {
1880 if (dump_enabled_p ())
1881 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1882 "bad data access.\n");
1883 return false;
1884 }
1885
1886 /* Data-flow analysis to detect stmts that do not need to be vectorized. */
1887
1888 ok = vect_mark_stmts_to_be_vectorized (loop_vinfo);
1889 if (!ok)
1890 {
1891 if (dump_enabled_p ())
1892 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1893 "unexpected pattern.\n");
1894 return false;
1895 }
1896
1897 /* The rest of the analysis below depends on the vector size, so failures are no longer fatal. */
1898 fatal = false;
1899
1900 /* Analyze data dependences between the data-refs in the loop
1901 and adjust the maximum vectorization factor according to
1902 the dependences.
1903 FORNOW: fail at the first data dependence that we encounter. */
1904
1905 ok = vect_analyze_data_ref_dependences (loop_vinfo, &max_vf);
1906 if (!ok
1907 || (max_vf != MAX_VECTORIZATION_FACTOR
1908 && maybe_lt (max_vf, min_vf)))
1909 {
1910 if (dump_enabled_p ())
1911 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1912 "bad data dependence.\n");
1913 return false;
1914 }
1915 LOOP_VINFO_MAX_VECT_FACTOR (loop_vinfo) = max_vf;
1916
1917 ok = vect_determine_vectorization_factor (loop_vinfo);
1918 if (!ok)
1919 {
1920 if (dump_enabled_p ())
1921 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1922 "can't determine vectorization factor.\n");
1923 return false;
1924 }
1925 if (max_vf != MAX_VECTORIZATION_FACTOR
1926 && maybe_lt (max_vf, LOOP_VINFO_VECT_FACTOR (loop_vinfo)))
1927 {
1928 if (dump_enabled_p ())
1929 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1930 "bad data dependence.\n");
1931 return false;
1932 }
1933
1934 /* Compute the scalar iteration cost. */
1935 vect_compute_single_scalar_iteration_cost (loop_vinfo);
1936
1937 poly_uint64 saved_vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1938 unsigned th;
1939
1940 /* Check the SLP opportunities in the loop, analyze and build SLP trees. */
1941 ok = vect_analyze_slp (loop_vinfo, *n_stmts);
1942 if (!ok)
1943 return false;
1944
1945 /* If there are any SLP instances mark them as pure_slp. */
1946 bool slp = vect_make_slp_decision (loop_vinfo);
1947 if (slp)
1948 {
1949 /* Find stmts that need to be both vectorized and SLPed. */
1950 vect_detect_hybrid_slp (loop_vinfo);
1951
1952 /* Update the vectorization factor based on the SLP decision. */
1953 vect_update_vf_for_slp (loop_vinfo);
1954 }
1955
1956 bool saved_can_fully_mask_p = LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo);
1957
1958 /* We don't expect to have to roll back to anything other than an empty
1959 set of rgroups. */
1960 gcc_assert (LOOP_VINFO_MASKS (loop_vinfo).is_empty ());
1961
1962 /* This is the point where we can re-start analysis with SLP forced off. */
1963 start_over:
1964
1965 /* Now the vectorization factor is final. */
1966 poly_uint64 vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1967 gcc_assert (known_ne (vectorization_factor, 0U));
1968
1969 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo) && dump_enabled_p ())
1970 {
1971 dump_printf_loc (MSG_NOTE, vect_location,
1972 "vectorization_factor = ");
1973 dump_dec (MSG_NOTE, vectorization_factor);
1974 dump_printf (MSG_NOTE, ", niters = " HOST_WIDE_INT_PRINT_DEC "\n",
1975 LOOP_VINFO_INT_NITERS (loop_vinfo));
1976 }
1977
1978 HOST_WIDE_INT max_niter
1979 = likely_max_stmt_executions_int (LOOP_VINFO_LOOP (loop_vinfo));
1980
1981 /* Analyze the alignment of the data-refs in the loop.
1982 Fail if a data reference is found that cannot be vectorized. */
1983
1984 ok = vect_analyze_data_refs_alignment (loop_vinfo);
1985 if (!ok)
1986 {
1987 if (dump_enabled_p ())
1988 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1989 "bad data alignment.\n");
1990 return false;
1991 }
1992
1993 /* Prune the list of ddrs to be tested at run-time by versioning for alias.
1994 It is important to call pruning after vect_analyze_data_ref_accesses,
1995 since we use grouping information gathered by interleaving analysis. */
1996 ok = vect_prune_runtime_alias_test_list (loop_vinfo);
1997 if (!ok)
1998 return false;
1999
2000 /* Do not invoke vect_enhance_data_refs_alignment for epilogue
2001 vectorization. */
2002 if (!LOOP_VINFO_EPILOGUE_P (loop_vinfo))
2003 {
2004 /* This pass will decide on using loop versioning and/or loop peeling in
2005 order to enhance the alignment of data references in the loop. */
2006 ok = vect_enhance_data_refs_alignment (loop_vinfo);
2007 if (!ok)
2008 {
2009 if (dump_enabled_p ())
2010 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2011 "bad data alignment.\n");
2012 return false;
2013 }
2014 }
2015
2016 if (slp)
2017 {
2018 /* Analyze operations in the SLP instances. Note this may
2019 remove unsupported SLP instances, which makes the above
2020 SLP kind detection invalid. */
2021 unsigned old_size = LOOP_VINFO_SLP_INSTANCES (loop_vinfo).length ();
2022 vect_slp_analyze_operations (loop_vinfo);
2023 if (LOOP_VINFO_SLP_INSTANCES (loop_vinfo).length () != old_size)
2024 goto again;
2025 }
2026
2027 /* Scan all the remaining operations in the loop that are not subject
2028 to SLP and make sure they are vectorizable. */
2029 ok = vect_analyze_loop_operations (loop_vinfo);
2030 if (!ok)
2031 {
2032 if (dump_enabled_p ())
2033 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2034 "bad operation or unsupported loop bound.\n");
2035 return false;
2036 }
2037
2038 /* Decide whether to use a fully-masked loop for this vectorization
2039 factor. */
2040 LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
2041 = (LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo)
2042 && vect_verify_full_masking (loop_vinfo));
2043 if (dump_enabled_p ())
2044 {
2045 if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
2046 dump_printf_loc (MSG_NOTE, vect_location,
2047 "using a fully-masked loop.\n");
2048 else
2049 dump_printf_loc (MSG_NOTE, vect_location,
2050 "not using a fully-masked loop.\n");
2051 }
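/* Illustrative sketch of what a fully-masked loop looks like in scalar
   terms (hypothetical, not GCC output).  With a vectorization factor
   of 4, the loop executes whole vector iterations and disables the
   excess lanes of the final iteration with a mask instead of running
   a scalar epilogue:

     for (i = 0; i < n; i += 4)
       {
	 mask = { i + 0 < n, i + 1 < n, i + 2 < n, i + 3 < n };
	 vb = masked_load (&b[i], mask);
	 vc = masked_load (&c[i], mask);
	 masked_store (&a[i], mask, vb + vc);
       }
*/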
2052
2053 /* If an epilog loop is required because of data accesses with gaps,
2054 one additional iteration needs to be peeled.  Check if there are
2055 enough iterations for vectorization. */
2056 if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo)
2057 && LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
2058 && !LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
2059 {
2060 poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2061 tree scalar_niters = LOOP_VINFO_NITERSM1 (loop_vinfo);
2062
2063 if (known_lt (wi::to_widest (scalar_niters), vf))
2064 {
2065 if (dump_enabled_p ())
2066 dump_printf_loc (MSG_NOTE, vect_location,
2067 "loop has no enough iterations to support"
2068 " peeling for gaps.\n");
2069 return false;
2070 }
2071 }
2072
2073 /* Check the costings of the loop make vectorizing worthwhile. */
2074 res = vect_analyze_loop_costing (loop_vinfo);
2075 if (res < 0)
2076 goto again;
2077 if (!res)
2078 {
2079 if (dump_enabled_p ())
2080 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2081 "Loop costings not worthwhile.\n");
2082 return false;
2083 }
2084
2085 /* Decide whether we need to create an epilogue loop to handle
2086 remaining scalar iterations. */
2087 th = LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo);
2088
2089 unsigned HOST_WIDE_INT const_vf;
2090 if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
2091 /* The main loop handles all iterations. */
2092 LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = false;
2093 else if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
2094 && LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) > 0)
2095 {
2096 if (!multiple_p (LOOP_VINFO_INT_NITERS (loop_vinfo)
2097 - LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo),
2098 LOOP_VINFO_VECT_FACTOR (loop_vinfo)))
2099 LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = true;
2100 }
2101 else if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo)
2102 || !LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant (&const_vf)
2103 || ((tree_ctz (LOOP_VINFO_NITERS (loop_vinfo))
2104 < (unsigned) exact_log2 (const_vf))
2105 /* In case of versioning, check if the maximum number of
2106 iterations is greater than th. If they are identical,
2107 the epilogue is unnecessary. */
2108 && (!LOOP_REQUIRES_VERSIONING (loop_vinfo)
2109 || ((unsigned HOST_WIDE_INT) max_niter
2110 > (th / const_vf) * const_vf))))
2111 LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = true;
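/* Worked example for the decision above (hypothetical numbers): with a
   constant vectorization factor of 4 and no peeling for alignment, a
   known iteration count of 10 has only one trailing zero bit while
   exact_log2 (4) == 2, so PEELING_FOR_NITER is set and the two leftover
   scalar iterations go to the epilogue loop.  A count of 12 is a
   multiple of 4 and needs no epilogue for this reason.  */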
2112
2113 /* If an epilogue loop is required make sure we can create one. */
2114 if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo)
2115 || LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo))
2116 {
2117 if (dump_enabled_p ())
2118 dump_printf_loc (MSG_NOTE, vect_location, "epilog loop required\n");
2119 if (!vect_can_advance_ivs_p (loop_vinfo)
2120 || !slpeel_can_duplicate_loop_p (LOOP_VINFO_LOOP (loop_vinfo),
2121 single_exit (LOOP_VINFO_LOOP
2122 (loop_vinfo))))
2123 {
2124 if (dump_enabled_p ())
2125 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2126 "not vectorized: can't create required "
2127 "epilog loop\n");
2128 goto again;
2129 }
2130 }
2131
2132 /* During peeling, we need to check if the number of loop iterations is
2133 enough for both the peeled prolog loop and the vector loop.  This check
2134 can be merged along with threshold check of loop versioning, so
2135 increase threshold for this case if necessary. */
2136 if (LOOP_REQUIRES_VERSIONING (loop_vinfo))
2137 {
2138 poly_uint64 niters_th = 0;
2139
2140 if (!vect_use_loop_mask_for_alignment_p (loop_vinfo))
2141 {
2142 /* Niters for peeled prolog loop. */
2143 if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) < 0)
2144 {
2145 dr_vec_info *dr_info
2146 = DR_VECT_AUX (LOOP_VINFO_UNALIGNED_DR (loop_vinfo));
2147 tree vectype = STMT_VINFO_VECTYPE (dr_info->stmt);
2148 niters_th += TYPE_VECTOR_SUBPARTS (vectype) - 1;
2149 }
2150 else
2151 niters_th += LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo);
2152 }
2153
2154 /* Niters for at least one iteration of vectorized loop. */
2155 if (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
2156 niters_th += LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2157 /* One additional iteration because of peeling for gap. */
2158 if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo))
2159 niters_th += 1;
2160 LOOP_VINFO_VERSIONING_THRESHOLD (loop_vinfo) = niters_th;
2161 }
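/* Worked example for the threshold above (hypothetical numbers):
   vectorization factor 4, peeling for alignment unknown with a
   4-element vector type (so at most 3 prologue iterations), no full
   masking, and peeling for gaps:

     niters_th = 3 + 4 + 1 = 8

   and this value is folded into the runtime versioning check.  */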
2162
2163 gcc_assert (known_eq (vectorization_factor,
2164 LOOP_VINFO_VECT_FACTOR (loop_vinfo)));
2165
2166 /* Ok to vectorize! */
2167 return true;
2168
2169 again:
2170 /* Try again with SLP forced off, but if we didn't do any SLP there is
2171 no point in re-trying. */
2172 if (!slp)
2173 return false;
2174
2175 /* If there are reduction chains re-trying will fail anyway. */
2176 if (! LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo).is_empty ())
2177 return false;
2178
2179 /* Likewise if the grouped loads or stores in the SLP cannot be handled
2180 via interleaving or lane instructions. */
2181 slp_instance instance;
2182 slp_tree node;
2183 unsigned i, j;
2184 FOR_EACH_VEC_ELT (LOOP_VINFO_SLP_INSTANCES (loop_vinfo), i, instance)
2185 {
2186 stmt_vec_info vinfo;
2187 vinfo = SLP_TREE_SCALAR_STMTS (SLP_INSTANCE_TREE (instance))[0];
2188 if (! STMT_VINFO_GROUPED_ACCESS (vinfo))
2189 continue;
2190 vinfo = DR_GROUP_FIRST_ELEMENT (vinfo);
2191 unsigned int size = DR_GROUP_SIZE (vinfo);
2192 tree vectype = STMT_VINFO_VECTYPE (vinfo);
2193 if (! vect_store_lanes_supported (vectype, size, false)
2194 && ! known_eq (TYPE_VECTOR_SUBPARTS (vectype), 1U)
2195 && ! vect_grouped_store_supported (vectype, size))
2196 return false;
2197 FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (instance), j, node)
2198 {
2199 vinfo = SLP_TREE_SCALAR_STMTS (node)[0];
2200 vinfo = DR_GROUP_FIRST_ELEMENT (vinfo);
2201 bool single_element_p = !DR_GROUP_NEXT_ELEMENT (vinfo);
2202 size = DR_GROUP_SIZE (vinfo);
2203 vectype = STMT_VINFO_VECTYPE (vinfo);
2204 if (! vect_load_lanes_supported (vectype, size, false)
2205 && ! vect_grouped_load_supported (vectype, single_element_p,
2206 size))
2207 return false;
2208 }
2209 }
2210
2211 if (dump_enabled_p ())
2212 dump_printf_loc (MSG_NOTE, vect_location,
2213 "re-trying with SLP disabled\n");
2214
2215 /* Roll back state appropriately. No SLP this time. */
2216 slp = false;
2217 /* Restore the vectorization factor as it was without SLP. */
2218 LOOP_VINFO_VECT_FACTOR (loop_vinfo) = saved_vectorization_factor;
2219 /* Free the SLP instances. */
2220 FOR_EACH_VEC_ELT (LOOP_VINFO_SLP_INSTANCES (loop_vinfo), j, instance)
2221 vect_free_slp_instance (instance, false);
2222 LOOP_VINFO_SLP_INSTANCES (loop_vinfo).release ();
2223 /* Reset SLP type to loop_vect on all stmts. */
2224 for (i = 0; i < LOOP_VINFO_LOOP (loop_vinfo)->num_nodes; ++i)
2225 {
2226 basic_block bb = LOOP_VINFO_BBS (loop_vinfo)[i];
2227 for (gimple_stmt_iterator si = gsi_start_phis (bb);
2228 !gsi_end_p (si); gsi_next (&si))
2229 {
2230 stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (gsi_stmt (si));
2231 STMT_SLP_TYPE (stmt_info) = loop_vect;
2232 }
2233 for (gimple_stmt_iterator si = gsi_start_bb (bb);
2234 !gsi_end_p (si); gsi_next (&si))
2235 {
2236 stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (gsi_stmt (si));
2237 STMT_SLP_TYPE (stmt_info) = loop_vect;
2238 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
2239 {
2240 gimple *pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info);
2241 stmt_info = STMT_VINFO_RELATED_STMT (stmt_info);
2242 STMT_SLP_TYPE (stmt_info) = loop_vect;
2243 for (gimple_stmt_iterator pi = gsi_start (pattern_def_seq);
2244 !gsi_end_p (pi); gsi_next (&pi))
2245 STMT_SLP_TYPE (loop_vinfo->lookup_stmt (gsi_stmt (pi)))
2246 = loop_vect;
2247 }
2248 }
2249 }
2250 /* Free optimized alias test DDRS. */
2251 LOOP_VINFO_LOWER_BOUNDS (loop_vinfo).truncate (0);
2252 LOOP_VINFO_COMP_ALIAS_DDRS (loop_vinfo).release ();
2253 LOOP_VINFO_CHECK_UNEQUAL_ADDRS (loop_vinfo).release ();
2254 /* Reset target cost data. */
2255 destroy_cost_data (LOOP_VINFO_TARGET_COST_DATA (loop_vinfo));
2256 LOOP_VINFO_TARGET_COST_DATA (loop_vinfo)
2257 = init_cost (LOOP_VINFO_LOOP (loop_vinfo));
2258 /* Reset accumulated rgroup information. */
2259 release_vec_loop_masks (&LOOP_VINFO_MASKS (loop_vinfo));
2260 /* Reset assorted flags. */
2261 LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = false;
2262 LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = false;
2263 LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo) = 0;
2264 LOOP_VINFO_VERSIONING_THRESHOLD (loop_vinfo) = 0;
2265 LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = saved_can_fully_mask_p;
2266
2267 goto start_over;
2268 }
2269
2270 /* Function vect_analyze_loop.
2271
2272 Apply a set of analyses on LOOP, and create a loop_vec_info struct
2273 for it. The different analyses will record information in the
2274 loop_vec_info struct.  If ORIG_LOOP_VINFO is not NULL, an epilogue
2275 loop based on it must be vectorized. */
2276 loop_vec_info
2277 vect_analyze_loop (struct loop *loop, loop_vec_info orig_loop_vinfo,
2278 vec_info_shared *shared)
2279 {
2280 loop_vec_info loop_vinfo;
2281 auto_vector_sizes vector_sizes;
2282
2283 /* Autodetect first vector size we try. */
2284 current_vector_size = 0;
2285 targetm.vectorize.autovectorize_vector_sizes (&vector_sizes);
2286 unsigned int next_size = 0;
2287
2288 DUMP_VECT_SCOPE ("analyze_loop_nest");
2289
2290 if (loop_outer (loop)
2291 && loop_vec_info_for_loop (loop_outer (loop))
2292 && LOOP_VINFO_VECTORIZABLE_P (loop_vec_info_for_loop (loop_outer (loop))))
2293 {
2294 if (dump_enabled_p ())
2295 dump_printf_loc (MSG_NOTE, vect_location,
2296 "outer-loop already vectorized.\n");
2297 return NULL;
2298 }
2299
2300 if (!find_loop_nest (loop, &shared->loop_nest))
2301 {
2302 if (dump_enabled_p ())
2303 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2304 "not vectorized: loop nest containing two "
2305 "or more consecutive inner loops cannot be "
2306 "vectorized\n");
2307 return NULL;
2308 }
2309
2310 unsigned n_stmts = 0;
2311 poly_uint64 autodetected_vector_size = 0;
2312 while (1)
2313 {
2314 /* Check the CFG characteristics of the loop (nesting, entry/exit). */
2315 loop_vinfo = vect_analyze_loop_form (loop, shared);
2316 if (!loop_vinfo)
2317 {
2318 if (dump_enabled_p ())
2319 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2320 "bad loop form.\n");
2321 return NULL;
2322 }
2323
2324 bool fatal = false;
2325
2326 if (orig_loop_vinfo)
2327 LOOP_VINFO_ORIG_LOOP_INFO (loop_vinfo) = orig_loop_vinfo;
2328
2329 if (vect_analyze_loop_2 (loop_vinfo, fatal, &n_stmts))
2330 {
2331 LOOP_VINFO_VECTORIZABLE_P (loop_vinfo) = 1;
2332
2333 return loop_vinfo;
2334 }
2335
2336 delete loop_vinfo;
2337
2338 if (next_size == 0)
2339 autodetected_vector_size = current_vector_size;
2340
2341 if (next_size < vector_sizes.length ()
2342 && known_eq (vector_sizes[next_size], autodetected_vector_size))
2343 next_size += 1;
2344
2345 if (fatal
2346 || next_size == vector_sizes.length ()
2347 || known_eq (current_vector_size, 0U))
2348 return NULL;
2349
2350 /* Try the next biggest vector size. */
2351 current_vector_size = vector_sizes[next_size++];
2352 if (dump_enabled_p ())
2353 {
2354 dump_printf_loc (MSG_NOTE, vect_location,
2355 "***** Re-trying analysis with "
2356 "vector size ");
2357 dump_dec (MSG_NOTE, current_vector_size);
2358 dump_printf (MSG_NOTE, "\n");
2359 }
2360 }
2361 }
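/* Illustrative example of the retry loop above (hypothetical target):
   if the target advertises 32-byte and 16-byte vectors and the
   autodetected 32-byte analysis fails non-fatally (say because the
   dependence distance only allows a smaller vectorization factor),
   the whole analysis is re-run with current_vector_size set to
   16 bytes before giving up.  */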
2362
2363 /* Return true if there is an in-order reduction function for CODE, storing
2364 it in *REDUC_FN if so. */
2365
2366 static bool
2367 fold_left_reduction_fn (tree_code code, internal_fn *reduc_fn)
2368 {
2369 switch (code)
2370 {
2371 case PLUS_EXPR:
2372 *reduc_fn = IFN_FOLD_LEFT_PLUS;
2373 return true;
2374
2375 default:
2376 return false;
2377 }
2378 }
2379
2380 /* Function reduction_fn_for_scalar_code
2381
2382 Input:
2383 CODE - tree_code of the reduction operation.
2384
2385 Output:
2386 REDUC_FN - the corresponding internal function to be used to reduce the
2387 vector of partial results into a single scalar result, or IFN_LAST
2388 if the operation is a supported reduction operation, but does not have
2389 such an internal function.
2390
2391 Return FALSE if CODE currently cannot be vectorized as a reduction. */
2392
2393 static bool
2394 reduction_fn_for_scalar_code (enum tree_code code, internal_fn *reduc_fn)
2395 {
2396 switch (code)
2397 {
2398 case MAX_EXPR:
2399 *reduc_fn = IFN_REDUC_MAX;
2400 return true;
2401
2402 case MIN_EXPR:
2403 *reduc_fn = IFN_REDUC_MIN;
2404 return true;
2405
2406 case PLUS_EXPR:
2407 *reduc_fn = IFN_REDUC_PLUS;
2408 return true;
2409
2410 case BIT_AND_EXPR:
2411 *reduc_fn = IFN_REDUC_AND;
2412 return true;
2413
2414 case BIT_IOR_EXPR:
2415 *reduc_fn = IFN_REDUC_IOR;
2416 return true;
2417
2418 case BIT_XOR_EXPR:
2419 *reduc_fn = IFN_REDUC_XOR;
2420 return true;
2421
2422 case MULT_EXPR:
2423 case MINUS_EXPR:
2424 *reduc_fn = IFN_LAST;
2425 return true;
2426
2427 default:
2428 return false;
2429 }
2430 }
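/* For example (illustrative only): a loop such as

     int r = 0;
     for (int i = 0; i < n; i++)
       r += a[i];

   accumulates partial sums in a vector and then collapses that vector
   into a single scalar with IFN_REDUC_PLUS, whereas for MULT_EXPR and
   MINUS_EXPR above there is no such internal function (IFN_LAST) and
   the final reduction is emitted as an explicit sequence of
   operations.  */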
2431
2432 /* If there is a neutral value X such that SLP reduction NODE would not
2433 be affected by the introduction of additional X elements, return that X,
2434 otherwise return null. CODE is the code of the reduction. REDUC_CHAIN
2435 is true if the SLP statements perform a single reduction, false if each
2436 statement performs an independent reduction. */
2437
2438 static tree
2439 neutral_op_for_slp_reduction (slp_tree slp_node, tree_code code,
2440 bool reduc_chain)
2441 {
2442 vec<stmt_vec_info> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
2443 stmt_vec_info stmt_vinfo = stmts[0];
2444 tree vector_type = STMT_VINFO_VECTYPE (stmt_vinfo);
2445 tree scalar_type = TREE_TYPE (vector_type);
2446 struct loop *loop = gimple_bb (stmt_vinfo->stmt)->loop_father;
2447 gcc_assert (loop);
2448
2449 switch (code)
2450 {
2451 case WIDEN_SUM_EXPR:
2452 case DOT_PROD_EXPR:
2453 case SAD_EXPR:
2454 case PLUS_EXPR:
2455 case MINUS_EXPR:
2456 case BIT_IOR_EXPR:
2457 case BIT_XOR_EXPR:
2458 return build_zero_cst (scalar_type);
2459
2460 case MULT_EXPR:
2461 return build_one_cst (scalar_type);
2462
2463 case BIT_AND_EXPR:
2464 return build_all_ones_cst (scalar_type);
2465
2466 case MAX_EXPR:
2467 case MIN_EXPR:
2468 /* For MIN/MAX the initial values are neutral. A reduction chain
2469 has only a single initial value, so that value is neutral for
2470 all statements. */
2471 if (reduc_chain)
2472 return PHI_ARG_DEF_FROM_EDGE (stmt_vinfo->stmt,
2473 loop_preheader_edge (loop));
2474 return NULL_TREE;
2475
2476 default:
2477 return NULL_TREE;
2478 }
2479 }
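/* For example (illustrative only): for the SLP reduction

     r1 += a[2 * i];
     r2 += a[2 * i + 1];

   any extra vector elements introduced by the vectorizer can be filled
   with the neutral value 0 for PLUS_EXPR (or 1 for MULT_EXPR, all-ones
   for BIT_AND_EXPR) without changing the final values of r1 and r2.  */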
2480
2481 /* Error reporting helper for vect_is_simple_reduction below. GIMPLE statement
2482 STMT is printed with a message MSG. */
2483
2484 static void
2485 report_vect_op (dump_flags_t msg_type, gimple *stmt, const char *msg)
2486 {
2487 dump_printf_loc (msg_type, vect_location, "%s", msg);
2488 dump_gimple_stmt (msg_type, TDF_SLIM, stmt, 0);
2489 }
2490
2491 /* DEF_STMT_INFO occurs in a loop that contains a potential reduction
2492 operation. Return true if the results of DEF_STMT_INFO are something
2493 that can be accumulated by such a reduction. */
2494
2495 static bool
2496 vect_valid_reduction_input_p (stmt_vec_info def_stmt_info)
2497 {
2498 return (is_gimple_assign (def_stmt_info->stmt)
2499 || is_gimple_call (def_stmt_info->stmt)
2500 || STMT_VINFO_DEF_TYPE (def_stmt_info) == vect_induction_def
2501 || (gimple_code (def_stmt_info->stmt) == GIMPLE_PHI
2502 && STMT_VINFO_DEF_TYPE (def_stmt_info) == vect_internal_def
2503 && !is_loop_header_bb_p (gimple_bb (def_stmt_info->stmt))));
2504 }
2505
2506 /* Detect SLP reduction of the form:
2507
2508 #a1 = phi <a5, a0>
2509 a2 = operation (a1)
2510 a3 = operation (a2)
2511 a4 = operation (a3)
2512 a5 = operation (a4)
2513
2514 #a = phi <a5>
2515
2516 PHI is the reduction phi node (#a1 = phi <a5, a0> above)
2517 FIRST_STMT is the first reduction stmt in the chain
2518 (a2 = operation (a1)).
2519
2520 Return TRUE if a reduction chain was detected. */
2521
2522 static bool
2523 vect_is_slp_reduction (loop_vec_info loop_info, gimple *phi,
2524 gimple *first_stmt)
2525 {
2526 struct loop *loop = (gimple_bb (phi))->loop_father;
2527 struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
2528 enum tree_code code;
2529 gimple *loop_use_stmt = NULL;
2530 stmt_vec_info use_stmt_info, current_stmt_info = NULL;
2531 tree lhs;
2532 imm_use_iterator imm_iter;
2533 use_operand_p use_p;
2534 int nloop_uses, size = 0, n_out_of_loop_uses;
2535 bool found = false;
2536
2537 if (loop != vect_loop)
2538 return false;
2539
2540 lhs = PHI_RESULT (phi);
2541 code = gimple_assign_rhs_code (first_stmt);
2542 while (1)
2543 {
2544 nloop_uses = 0;
2545 n_out_of_loop_uses = 0;
2546 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
2547 {
2548 gimple *use_stmt = USE_STMT (use_p);
2549 if (is_gimple_debug (use_stmt))
2550 continue;
2551
2552 /* Check if we got back to the reduction phi. */
2553 if (use_stmt == phi)
2554 {
2555 loop_use_stmt = use_stmt;
2556 found = true;
2557 break;
2558 }
2559
2560 if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
2561 {
2562 loop_use_stmt = use_stmt;
2563 nloop_uses++;
2564 }
2565 else
2566 n_out_of_loop_uses++;
2567
2568 /* There can be either a single use in the loop or two uses in
2569 phi nodes. */
2570 if (nloop_uses > 1 || (n_out_of_loop_uses && nloop_uses))
2571 return false;
2572 }
2573
2574 if (found)
2575 break;
2576
2577 /* We reached a statement with no loop uses. */
2578 if (nloop_uses == 0)
2579 return false;
2580
2581 /* This is a loop exit phi, and we haven't reached the reduction phi. */
2582 if (gimple_code (loop_use_stmt) == GIMPLE_PHI)
2583 return false;
2584
2585 if (!is_gimple_assign (loop_use_stmt)
2586 || code != gimple_assign_rhs_code (loop_use_stmt)
2587 || !flow_bb_inside_loop_p (loop, gimple_bb (loop_use_stmt)))
2588 return false;
2589
2590 /* Insert USE_STMT into reduction chain. */
2591 use_stmt_info = loop_info->lookup_stmt (loop_use_stmt);
2592 if (current_stmt_info)
2593 {
2594 REDUC_GROUP_NEXT_ELEMENT (current_stmt_info) = use_stmt_info;
2595 REDUC_GROUP_FIRST_ELEMENT (use_stmt_info)
2596 = REDUC_GROUP_FIRST_ELEMENT (current_stmt_info);
2597 }
2598 else
2599 REDUC_GROUP_FIRST_ELEMENT (use_stmt_info) = use_stmt_info;
2600
2601 lhs = gimple_assign_lhs (loop_use_stmt);
2602 current_stmt_info = use_stmt_info;
2603 size++;
2604 }
2605
2606 if (!found || loop_use_stmt != phi || size < 2)
2607 return false;
2608
2609 /* Swap the operands, if needed, to make the reduction operand be the second
2610 operand. */
2611 lhs = PHI_RESULT (phi);
2612 stmt_vec_info next_stmt_info = REDUC_GROUP_FIRST_ELEMENT (current_stmt_info);
2613 while (next_stmt_info)
2614 {
2615 gassign *next_stmt = as_a <gassign *> (next_stmt_info->stmt);
2616 if (gimple_assign_rhs2 (next_stmt) == lhs)
2617 {
2618 tree op = gimple_assign_rhs1 (next_stmt);
2619 stmt_vec_info def_stmt_info = loop_info->lookup_def (op);
2620
2621 /* Check that the other def is either defined in the loop
2622 ("vect_internal_def"), or it's an induction (defined by a
2623 loop-header phi-node). */
2624 if (def_stmt_info
2625 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt_info->stmt))
2626 && vect_valid_reduction_input_p (def_stmt_info))
2627 {
2628 lhs = gimple_assign_lhs (next_stmt);
2629 next_stmt_info = REDUC_GROUP_NEXT_ELEMENT (next_stmt_info);
2630 continue;
2631 }
2632
2633 return false;
2634 }
2635 else
2636 {
2637 tree op = gimple_assign_rhs2 (next_stmt);
2638 stmt_vec_info def_stmt_info = loop_info->lookup_def (op);
2639
2640 /* Check that the other def is either defined in the loop
2641 ("vect_internal_def"), or it's an induction (defined by a
2642 loop-header phi-node). */
2643 if (def_stmt_info
2644 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt_info->stmt))
2645 && vect_valid_reduction_input_p (def_stmt_info))
2646 {
2647 if (dump_enabled_p ())
2648 {
2649 dump_printf_loc (MSG_NOTE, vect_location, "swapping oprnds: ");
2650 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, next_stmt, 0);
2651 }
2652
2653 swap_ssa_operands (next_stmt,
2654 gimple_assign_rhs1_ptr (next_stmt),
2655 gimple_assign_rhs2_ptr (next_stmt));
2656 update_stmt (next_stmt);
2657
2658 if (CONSTANT_CLASS_P (gimple_assign_rhs1 (next_stmt)))
2659 LOOP_VINFO_OPERANDS_SWAPPED (loop_info) = true;
2660 }
2661 else
2662 return false;
2663 }
2664
2665 lhs = gimple_assign_lhs (next_stmt);
2666 next_stmt_info = REDUC_GROUP_NEXT_ELEMENT (next_stmt_info);
2667 }
2668
2669 /* Save the chain for further analysis in SLP detection. */
2670 stmt_vec_info first_stmt_info
2671 = REDUC_GROUP_FIRST_ELEMENT (current_stmt_info);
2672 LOOP_VINFO_REDUCTION_CHAINS (loop_info).safe_push (first_stmt_info);
2673 REDUC_GROUP_SIZE (first_stmt_info) = size;
2674
2675 return true;
2676 }
2677
2678 /* Return true if we need an in-order reduction for operation CODE
2679 on type TYPE. NEED_WRAPPING_INTEGRAL_OVERFLOW is true if integer
2680 overflow must wrap. */
2681
2682 static bool
2683 needs_fold_left_reduction_p (tree type, tree_code code,
2684 bool need_wrapping_integral_overflow)
2685 {
2686 /* CHECKME: check for !flag_finite_math_only too? */
2687 if (SCALAR_FLOAT_TYPE_P (type))
2688 switch (code)
2689 {
2690 case MIN_EXPR:
2691 case MAX_EXPR:
2692 return false;
2693
2694 default:
2695 return !flag_associative_math;
2696 }
2697
2698 if (INTEGRAL_TYPE_P (type))
2699 {
2700 if (!operation_no_trapping_overflow (type, code))
2701 return true;
2702 if (need_wrapping_integral_overflow
2703 && !TYPE_OVERFLOW_WRAPS (type)
2704 && operation_can_overflow (code))
2705 return true;
2706 return false;
2707 }
2708
2709 if (SAT_FIXED_POINT_TYPE_P (type))
2710 return true;
2711
2712 return false;
2713 }
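/* For example (illustrative only): without -fassociative-math the sum

     double s = 0.0;
     for (int i = 0; i < n; i++)
       s += a[i];

   must be evaluated strictly in order, since reassociating
   floating-point additions can change the rounded result, so the
   reduction is only handled as a FOLD_LEFT_REDUCTION.  Likewise
   integer reductions whose overflow could trap must preserve the
   original evaluation order.  */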
2714
2715 /* Return true if the reduction PHI in LOOP with latch arg LOOP_ARG and
2716 reduction operation CODE has a handled computation expression. */
2717
2718 bool
2719 check_reduction_path (dump_user_location_t loc, loop_p loop, gphi *phi,
2720 tree loop_arg, enum tree_code code)
2721 {
2722 auto_vec<std::pair<ssa_op_iter, use_operand_p> > path;
2723 auto_bitmap visited;
2724 tree lookfor = PHI_RESULT (phi);
2725 ssa_op_iter curri;
2726 use_operand_p curr = op_iter_init_phiuse (&curri, phi, SSA_OP_USE);
2727 while (USE_FROM_PTR (curr) != loop_arg)
2728 curr = op_iter_next_use (&curri);
2729 curri.i = curri.numops;
2730 do
2731 {
2732 path.safe_push (std::make_pair (curri, curr));
2733 tree use = USE_FROM_PTR (curr);
2734 if (use == lookfor)
2735 break;
2736 gimple *def = SSA_NAME_DEF_STMT (use);
2737 if (gimple_nop_p (def)
2738 || ! flow_bb_inside_loop_p (loop, gimple_bb (def)))
2739 {
2740 pop:
2741 do
2742 {
2743 std::pair<ssa_op_iter, use_operand_p> x = path.pop ();
2744 curri = x.first;
2745 curr = x.second;
2746 do
2747 curr = op_iter_next_use (&curri);
2748 /* Skip already visited or non-SSA operands (from iterating
2749 over PHI args). */
2750 while (curr != NULL_USE_OPERAND_P
2751 && (TREE_CODE (USE_FROM_PTR (curr)) != SSA_NAME
2752 || ! bitmap_set_bit (visited,
2753 SSA_NAME_VERSION
2754 (USE_FROM_PTR (curr)))));
2755 }
2756 while (curr == NULL_USE_OPERAND_P && ! path.is_empty ());
2757 if (curr == NULL_USE_OPERAND_P)
2758 break;
2759 }
2760 else
2761 {
2762 if (gimple_code (def) == GIMPLE_PHI)
2763 curr = op_iter_init_phiuse (&curri, as_a <gphi *>(def), SSA_OP_USE);
2764 else
2765 curr = op_iter_init_use (&curri, def, SSA_OP_USE);
2766 while (curr != NULL_USE_OPERAND_P
2767 && (TREE_CODE (USE_FROM_PTR (curr)) != SSA_NAME
2768 || ! bitmap_set_bit (visited,
2769 SSA_NAME_VERSION
2770 (USE_FROM_PTR (curr)))))
2771 curr = op_iter_next_use (&curri);
2772 if (curr == NULL_USE_OPERAND_P)
2773 goto pop;
2774 }
2775 }
2776 while (1);
2777 if (dump_file && (dump_flags & TDF_DETAILS))
2778 {
2779 dump_printf_loc (MSG_NOTE, loc, "reduction path: ");
2780 unsigned i;
2781 std::pair<ssa_op_iter, use_operand_p> *x;
2782 FOR_EACH_VEC_ELT (path, i, x)
2783 {
2784 dump_generic_expr (MSG_NOTE, TDF_SLIM, USE_FROM_PTR (x->second));
2785 dump_printf (MSG_NOTE, " ");
2786 }
2787 dump_printf (MSG_NOTE, "\n");
2788 }
2789
2790 /* Check whether the reduction path detected is valid. */
2791 bool fail = path.length () == 0;
2792 bool neg = false;
2793 for (unsigned i = 1; i < path.length (); ++i)
2794 {
2795 gimple *use_stmt = USE_STMT (path[i].second);
2796 tree op = USE_FROM_PTR (path[i].second);
2797 if (! has_single_use (op)
2798 || ! is_gimple_assign (use_stmt))
2799 {
2800 fail = true;
2801 break;
2802 }
2803 if (gimple_assign_rhs_code (use_stmt) != code)
2804 {
2805 if (code == PLUS_EXPR
2806 && gimple_assign_rhs_code (use_stmt) == MINUS_EXPR)
2807 {
2808 /* Track whether we negate the reduction value each iteration. */
2809 if (gimple_assign_rhs2 (use_stmt) == op)
2810 neg = ! neg;
2811 }
2812 else
2813 {
2814 fail = true;
2815 break;
2816 }
2817 }
2818 }
2819 return ! fail && ! neg;
2820 }
2821
2822
2823 /* Function vect_is_simple_reduction
2824
2825 (1) Detect a cross-iteration def-use cycle that represents a simple
2826 reduction computation. We look for the following pattern:
2827
2828 loop_header:
2829 a1 = phi < a0, a2 >
2830 a3 = ...
2831 a2 = operation (a3, a1)
2832
2833 or
2834
2835 a3 = ...
2836 loop_header:
2837 a1 = phi < a0, a2 >
2838 a2 = operation (a3, a1)
2839
2840 such that:
2841 1. operation is commutative and associative and it is safe to
2842 change the order of the computation
2843 2. no uses for a2 in the loop (a2 is used out of the loop)
2844 3. no uses of a1 in the loop besides the reduction operation
2845 4. no uses of a1 outside the loop.
2846
2847 Conditions 1,4 are tested here.
2848 Conditions 2,3 are tested in vect_mark_stmts_to_be_vectorized.
2849
2850 (2) Detect a cross-iteration def-use cycle in nested loops, i.e.,
2851 nested cycles.
2852
2853 (3) Detect cycles of phi nodes in outer-loop vectorization, i.e., double
2854 reductions:
2855
2856 a1 = phi < a0, a2 >
2857 inner loop (def of a3)
2858 a2 = phi < a3 >
2859
2860 (4) Detect condition expressions, i.e.:
2861 for (int i = 0; i < N; i++)
2862 if (a[i] < val)
2863 ret_val = a[i];
2864
2865 */
2866
2867 static stmt_vec_info
2868 vect_is_simple_reduction (loop_vec_info loop_info, stmt_vec_info phi_info,
2869 bool *double_reduc,
2870 bool need_wrapping_integral_overflow,
2871 enum vect_reduction_type *v_reduc_type)
2872 {
2873 gphi *phi = as_a <gphi *> (phi_info->stmt);
2874 struct loop *loop = (gimple_bb (phi))->loop_father;
2875 struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
2876 gimple *phi_use_stmt = NULL;
2877 enum tree_code orig_code, code;
2878 tree op1, op2, op3 = NULL_TREE, op4 = NULL_TREE;
2879 tree type;
2880 int nloop_uses;
2881 tree name;
2882 imm_use_iterator imm_iter;
2883 use_operand_p use_p;
2884 bool phi_def;
2885
2886 *double_reduc = false;
2887 *v_reduc_type = TREE_CODE_REDUCTION;
2888
2889 tree phi_name = PHI_RESULT (phi);
2890 /* ??? If there are no uses of the PHI result the inner loop reduction
2891 won't be detected as possibly double-reduction by vectorizable_reduction
2892 because that tries to walk the PHI arg from the preheader edge which
2893 can be constant. See PR60382. */
2894 if (has_zero_uses (phi_name))
2895 return NULL;
2896 nloop_uses = 0;
2897 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, phi_name)
2898 {
2899 gimple *use_stmt = USE_STMT (use_p);
2900 if (is_gimple_debug (use_stmt))
2901 continue;
2902
2903 if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
2904 {
2905 if (dump_enabled_p ())
2906 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2907 "intermediate value used outside loop.\n");
2908
2909 return NULL;
2910 }
2911
2912 nloop_uses++;
2913 if (nloop_uses > 1)
2914 {
2915 if (dump_enabled_p ())
2916 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2917 "reduction value used in loop.\n");
2918 return NULL;
2919 }
2920
2921 phi_use_stmt = use_stmt;
2922 }
2923
2924 edge latch_e = loop_latch_edge (loop);
2925 tree loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);
2926 if (TREE_CODE (loop_arg) != SSA_NAME)
2927 {
2928 if (dump_enabled_p ())
2929 {
2930 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2931 "reduction: not ssa_name: ");
2932 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, loop_arg);
2933 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
2934 }
2935 return NULL;
2936 }
2937
2938 stmt_vec_info def_stmt_info = loop_info->lookup_def (loop_arg);
2939 if (!def_stmt_info)
2940 return NULL;
2941
2942 if (gassign *def_stmt = dyn_cast <gassign *> (def_stmt_info->stmt))
2943 {
2944 name = gimple_assign_lhs (def_stmt);
2945 phi_def = false;
2946 }
2947 else if (gphi *def_stmt = dyn_cast <gphi *> (def_stmt_info->stmt))
2948 {
2949 name = PHI_RESULT (def_stmt);
2950 phi_def = true;
2951 }
2952 else
2953 {
2954 if (dump_enabled_p ())
2955 {
2956 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2957 "reduction: unhandled reduction operation: ");
2958 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
2959 def_stmt_info->stmt, 0);
2960 }
2961 return NULL;
2962 }
2963
2964 nloop_uses = 0;
2965 auto_vec<gphi *, 3> lcphis;
2966 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, name)
2967 {
2968 gimple *use_stmt = USE_STMT (use_p);
2969 if (is_gimple_debug (use_stmt))
2970 continue;
2971 if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
2972 nloop_uses++;
2973 else
2974 /* We can have more than one loop-closed PHI. */
2975 lcphis.safe_push (as_a <gphi *> (use_stmt));
2976 if (nloop_uses > 1)
2977 {
2978 if (dump_enabled_p ())
2979 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2980 "reduction used in loop.\n");
2981 return NULL;
2982 }
2983 }
2984
2985 /* If DEF_STMT is a phi node itself, we expect it to have a single argument
2986 defined in the inner loop. */
2987 if (phi_def)
2988 {
2989 gphi *def_stmt = as_a <gphi *> (def_stmt_info->stmt);
2990 op1 = PHI_ARG_DEF (def_stmt, 0);
2991
2992 if (gimple_phi_num_args (def_stmt) != 1
2993 || TREE_CODE (op1) != SSA_NAME)
2994 {
2995 if (dump_enabled_p ())
2996 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2997 "unsupported phi node definition.\n");
2998
2999 return NULL;
3000 }
3001
3002 gimple *def1 = SSA_NAME_DEF_STMT (op1);
3003 if (gimple_bb (def1)
3004 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
3005 && loop->inner
3006 && flow_bb_inside_loop_p (loop->inner, gimple_bb (def1))
3007 && is_gimple_assign (def1)
3008 && flow_bb_inside_loop_p (loop->inner, gimple_bb (phi_use_stmt)))
3009 {
3010 if (dump_enabled_p ())
3011 report_vect_op (MSG_NOTE, def_stmt,
3012 "detected double reduction: ");
3013
3014 *double_reduc = true;
3015 return def_stmt_info;
3016 }
3017
3018 return NULL;
3019 }
3020
3021 /* If we are vectorizing an inner reduction, it is executed in the
3022 original order only when we are not dealing with a double
3023 reduction. */
3024 bool check_reduction = true;
3025 if (flow_loop_nested_p (vect_loop, loop))
3026 {
3027 gphi *lcphi;
3028 unsigned i;
3029 check_reduction = false;
3030 FOR_EACH_VEC_ELT (lcphis, i, lcphi)
3031 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, gimple_phi_result (lcphi))
3032 {
3033 gimple *use_stmt = USE_STMT (use_p);
3034 if (is_gimple_debug (use_stmt))
3035 continue;
3036 if (! flow_bb_inside_loop_p (vect_loop, gimple_bb (use_stmt)))
3037 check_reduction = true;
3038 }
3039 }
3040
3041 gassign *def_stmt = as_a <gassign *> (def_stmt_info->stmt);
3042 bool nested_in_vect_loop = flow_loop_nested_p (vect_loop, loop);
3043 code = orig_code = gimple_assign_rhs_code (def_stmt);
3044
3045 /* We can handle "res -= x[i]", which is non-associative, by
3046 rewriting it as "res += -x[i]".  Avoid changing the gimple
3047 instruction for the first simple tests and only do this
3048 if we're allowed to change the code at all. */
3049 if (code == MINUS_EXPR && gimple_assign_rhs2 (def_stmt) != phi_name)
3050 code = PLUS_EXPR;
3051
3052 if (code == COND_EXPR)
3053 {
3054 if (! nested_in_vect_loop)
3055 *v_reduc_type = COND_REDUCTION;
3056
3057 op3 = gimple_assign_rhs1 (def_stmt);
3058 if (COMPARISON_CLASS_P (op3))
3059 {
3060 op4 = TREE_OPERAND (op3, 1);
3061 op3 = TREE_OPERAND (op3, 0);
3062 }
3063 if (op3 == phi_name || op4 == phi_name)
3064 {
3065 if (dump_enabled_p ())
3066 report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
3067 "reduction: condition depends on previous"
3068 " iteration: ");
3069 return NULL;
3070 }
3071
3072 op1 = gimple_assign_rhs2 (def_stmt);
3073 op2 = gimple_assign_rhs3 (def_stmt);
3074 }
3075 else if (!commutative_tree_code (code) || !associative_tree_code (code))
3076 {
3077 if (dump_enabled_p ())
3078 report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
3079 "reduction: not commutative/associative: ");
3080 return NULL;
3081 }
3082 else if (get_gimple_rhs_class (code) == GIMPLE_BINARY_RHS)
3083 {
3084 op1 = gimple_assign_rhs1 (def_stmt);
3085 op2 = gimple_assign_rhs2 (def_stmt);
3086 }
3087 else
3088 {
3089 if (dump_enabled_p ())
3090 report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
3091 "reduction: not handled operation: ");
3092 return NULL;
3093 }
3094
3095 if (TREE_CODE (op1) != SSA_NAME && TREE_CODE (op2) != SSA_NAME)
3096 {
3097 if (dump_enabled_p ())
3098 report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
3099 "reduction: both uses not ssa_names: ");
3100
3101 return NULL;
3102 }
3103
3104 type = TREE_TYPE (gimple_assign_lhs (def_stmt));
3105 if ((TREE_CODE (op1) == SSA_NAME
3106 && !types_compatible_p (type,TREE_TYPE (op1)))
3107 || (TREE_CODE (op2) == SSA_NAME
3108 && !types_compatible_p (type, TREE_TYPE (op2)))
3109 || (op3 && TREE_CODE (op3) == SSA_NAME
3110 && !types_compatible_p (type, TREE_TYPE (op3)))
3111 || (op4 && TREE_CODE (op4) == SSA_NAME
3112 && !types_compatible_p (type, TREE_TYPE (op4))))
3113 {
3114 if (dump_enabled_p ())
3115 {
3116 dump_printf_loc (MSG_NOTE, vect_location,
3117 "reduction: multiple types: operation type: ");
3118 dump_generic_expr (MSG_NOTE, TDF_SLIM, type);
3119 dump_printf (MSG_NOTE, ", operands types: ");
3120 dump_generic_expr (MSG_NOTE, TDF_SLIM,
3121 TREE_TYPE (op1));
3122 dump_printf (MSG_NOTE, ",");
3123 dump_generic_expr (MSG_NOTE, TDF_SLIM,
3124 TREE_TYPE (op2));
3125 if (op3)
3126 {
3127 dump_printf (MSG_NOTE, ",");
3128 dump_generic_expr (MSG_NOTE, TDF_SLIM,
3129 TREE_TYPE (op3));
3130 }
3131
3132 if (op4)
3133 {
3134 dump_printf (MSG_NOTE, ",");
3135 dump_generic_expr (MSG_NOTE, TDF_SLIM,
3136 TREE_TYPE (op4));
3137 }
3138 dump_printf (MSG_NOTE, "\n");
3139 }
3140
3141 return NULL;
3142 }
3143
3144 /* Check whether it's ok to change the order of the computation.
3145 Generally, when vectorizing a reduction we change the order of the
3146 computation. This may change the behavior of the program in some
3147 cases, so we need to check that this is ok. One exception is when
3148 vectorizing an outer-loop: the inner-loop is executed sequentially,
3149 and therefore vectorizing reductions in the inner-loop during
3150 outer-loop vectorization is safe. */
3151 if (check_reduction
3152 && *v_reduc_type == TREE_CODE_REDUCTION
3153 && needs_fold_left_reduction_p (type, code,
3154 need_wrapping_integral_overflow))
3155 *v_reduc_type = FOLD_LEFT_REDUCTION;
3156
3157 /* Reduction is safe. We're dealing with one of the following:
3158 1) integer arithmetic and no trapv
3159 2) floating point arithmetic, and special flags permit this optimization
3160 3) nested cycle (i.e., outer loop vectorization). */
3161 stmt_vec_info def1_info = loop_info->lookup_def (op1);
3162 stmt_vec_info def2_info = loop_info->lookup_def (op2);
3163 if (code != COND_EXPR && !def1_info && !def2_info)
3164 {
3165 if (dump_enabled_p ())
3166 report_vect_op (MSG_NOTE, def_stmt, "reduction: no defs for operands: ");
3167 return NULL;
3168 }
3169
3170 /* Check that one def is the reduction def, defined by PHI,
3171 the other def is either defined in the loop ("vect_internal_def"),
3172 or it's an induction (defined by a loop-header phi-node). */
3173
3174 if (def2_info
3175 && def2_info->stmt == phi
3176 && (code == COND_EXPR
3177 || !def1_info
3178 || vect_valid_reduction_input_p (def1_info)))
3179 {
3180 if (dump_enabled_p ())
3181 report_vect_op (MSG_NOTE, def_stmt, "detected reduction: ");
3182 return def_stmt_info;
3183 }
3184
3185 if (def1_info
3186 && def1_info->stmt == phi
3187 && (code == COND_EXPR
3188 || !def2_info
3189 || vect_valid_reduction_input_p (def2_info)))
3190 {
3191 if (! nested_in_vect_loop && orig_code != MINUS_EXPR)
3192 {
3193 /* Check if we can swap operands (just for simplicity - so that
3194 the rest of the code can assume that the reduction variable
3195 is always the last (second) argument). */
3196 if (code == COND_EXPR)
3197 {
3198 /* Swap cond_expr by inverting the condition. */
3199 tree cond_expr = gimple_assign_rhs1 (def_stmt);
3200 enum tree_code invert_code = ERROR_MARK;
3201 enum tree_code cond_code = TREE_CODE (cond_expr);
3202
3203 if (TREE_CODE_CLASS (cond_code) == tcc_comparison)
3204 {
3205 bool honor_nans = HONOR_NANS (TREE_OPERAND (cond_expr, 0));
3206 invert_code = invert_tree_comparison (cond_code, honor_nans);
3207 }
3208 if (invert_code != ERROR_MARK)
3209 {
3210 TREE_SET_CODE (cond_expr, invert_code);
3211 swap_ssa_operands (def_stmt,
3212 gimple_assign_rhs2_ptr (def_stmt),
3213 gimple_assign_rhs3_ptr (def_stmt));
3214 }
3215 else
3216 {
3217 if (dump_enabled_p ())
3218 report_vect_op (MSG_NOTE, def_stmt,
3219 "detected reduction: cannot swap operands "
3220 "for cond_expr");
3221 return NULL;
3222 }
3223 }
3224 else
3225 swap_ssa_operands (def_stmt, gimple_assign_rhs1_ptr (def_stmt),
3226 gimple_assign_rhs2_ptr (def_stmt));
3227
3228 if (dump_enabled_p ())
3229 report_vect_op (MSG_NOTE, def_stmt,
3230 "detected reduction: need to swap operands: ");
3231
3232 if (CONSTANT_CLASS_P (gimple_assign_rhs1 (def_stmt)))
3233 LOOP_VINFO_OPERANDS_SWAPPED (loop_info) = true;
3234 }
3235 else
3236 {
3237 if (dump_enabled_p ())
3238 report_vect_op (MSG_NOTE, def_stmt, "detected reduction: ");
3239 }
3240
3241 return def_stmt_info;
3242 }
3243
3244 /* Try to find SLP reduction chain. */
3245 if (! nested_in_vect_loop
3246 && code != COND_EXPR
3247 && orig_code != MINUS_EXPR
3248 && vect_is_slp_reduction (loop_info, phi, def_stmt))
3249 {
3250 if (dump_enabled_p ())
3251 report_vect_op (MSG_NOTE, def_stmt,
3252 "reduction: detected reduction chain: ");
3253
3254 return def_stmt_info;
3255 }
3256
3257 /* Dissolve a group possibly half-built by vect_is_slp_reduction. */
3258 stmt_vec_info first = REDUC_GROUP_FIRST_ELEMENT (def_stmt_info);
3259 while (first)
3260 {
3261 stmt_vec_info next = REDUC_GROUP_NEXT_ELEMENT (first);
3262 REDUC_GROUP_FIRST_ELEMENT (first) = NULL;
3263 REDUC_GROUP_NEXT_ELEMENT (first) = NULL;
3264 first = next;
3265 }
3266
3267 /* Look for the expression computing loop_arg from loop PHI result. */
3268 if (check_reduction_path (vect_location, loop, phi, loop_arg, code))
3269 return def_stmt_info;
3270
3271 if (dump_enabled_p ())
3272 {
3273 report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
3274 "reduction: unknown pattern: ");
3275 }
3276
3277 return NULL;
3278 }
3279
3280 /* Wrapper around vect_is_simple_reduction, which will modify code
3281 in-place if it enables detection of more reductions.  The arguments
3282 are the same as for that function. */
3283
3284 stmt_vec_info
3285 vect_force_simple_reduction (loop_vec_info loop_info, stmt_vec_info phi_info,
3286 bool *double_reduc,
3287 bool need_wrapping_integral_overflow)
3288 {
3289 enum vect_reduction_type v_reduc_type;
3290 stmt_vec_info def_info
3291 = vect_is_simple_reduction (loop_info, phi_info, double_reduc,
3292 need_wrapping_integral_overflow,
3293 &v_reduc_type);
3294 if (def_info)
3295 {
3296 STMT_VINFO_REDUC_TYPE (phi_info) = v_reduc_type;
3297 STMT_VINFO_REDUC_DEF (phi_info) = def_info;
3298 STMT_VINFO_REDUC_TYPE (def_info) = v_reduc_type;
3299 STMT_VINFO_REDUC_DEF (def_info) = phi_info;
3300 }
3301 return def_info;
3302 }
3303
3304 /* Calculate cost of peeling the loop PEEL_ITERS_PROLOGUE times. */
3305 int
3306 vect_get_known_peeling_cost (loop_vec_info loop_vinfo, int peel_iters_prologue,
3307 int *peel_iters_epilogue,
3308 stmt_vector_for_cost *scalar_cost_vec,
3309 stmt_vector_for_cost *prologue_cost_vec,
3310 stmt_vector_for_cost *epilogue_cost_vec)
3311 {
3312 int retval = 0;
3313 int assumed_vf = vect_vf_for_cost (loop_vinfo);
3314
3315 if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
3316 {
3317 *peel_iters_epilogue = assumed_vf / 2;
3318 if (dump_enabled_p ())
3319 dump_printf_loc (MSG_NOTE, vect_location,
3320 "cost model: epilogue peel iters set to vf/2 "
3321 "because loop iterations are unknown .\n");
3322
3323 /* If peeled iterations are known but the number of scalar loop
3324 iterations is unknown, count a taken branch per peeled loop. */
3325 retval = record_stmt_cost (prologue_cost_vec, 1, cond_branch_taken,
3326 NULL, 0, vect_prologue);
3327 retval = record_stmt_cost (prologue_cost_vec, 1, cond_branch_taken,
3328 NULL, 0, vect_epilogue);
3329 }
3330 else
3331 {
3332 int niters = LOOP_VINFO_INT_NITERS (loop_vinfo);
3333 peel_iters_prologue = niters < peel_iters_prologue ?
3334 niters : peel_iters_prologue;
3335 *peel_iters_epilogue = (niters - peel_iters_prologue) % assumed_vf;
3336 /* If we need to peel for gaps, but no peeling is required, we have to
3337 peel VF iterations. */
3338 if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) && !*peel_iters_epilogue)
3339 *peel_iters_epilogue = assumed_vf;
3340 }
3341
3342 stmt_info_for_cost *si;
3343 int j;
3344 if (peel_iters_prologue)
3345 FOR_EACH_VEC_ELT (*scalar_cost_vec, j, si)
3346 retval += record_stmt_cost (prologue_cost_vec,
3347 si->count * peel_iters_prologue,
3348 si->kind, si->stmt_info, si->misalign,
3349 vect_prologue);
3350 if (*peel_iters_epilogue)
3351 FOR_EACH_VEC_ELT (*scalar_cost_vec, j, si)
3352 retval += record_stmt_cost (epilogue_cost_vec,
3353 si->count * *peel_iters_epilogue,
3354 si->kind, si->stmt_info, si->misalign,
3355 vect_epilogue);
3356
3357 return retval;
3358 }
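/* Worked example for the known-niters case above (hypothetical
   numbers): with niters = 100, assumed_vf = 8 and a prologue peel of
   3 iterations, the epilogue peels (100 - 3) % 8 = 1 iteration, so the
   scalar iteration cost vector is accumulated 3 times into
   PROLOGUE_COST_VEC and once into EPILOGUE_COST_VEC.  */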
3359
3360 /* Function vect_estimate_min_profitable_iters
3361
3362 Return the number of iterations required for the vector version of the
3363 loop to be profitable relative to the cost of the scalar version of the
3364 loop.
3365
3366 *RET_MIN_PROFITABLE_NITERS is a cost model profitability threshold
3367 of iterations for vectorization.  A value of -1 means loop vectorization
3368 is not profitable.  This returned value may be used for a dynamic
3369 profitability check.
3370
3371 *RET_MIN_PROFITABLE_ESTIMATE is a profitability threshold to be used
3372 for static check against estimated number of iterations. */
3373
3374 static void
3375 vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo,
3376 int *ret_min_profitable_niters,
3377 int *ret_min_profitable_estimate)
3378 {
3379 int min_profitable_iters;
3380 int min_profitable_estimate;
3381 int peel_iters_prologue;
3382 int peel_iters_epilogue;
3383 unsigned vec_inside_cost = 0;
3384 int vec_outside_cost = 0;
3385 unsigned vec_prologue_cost = 0;
3386 unsigned vec_epilogue_cost = 0;
3387 int scalar_single_iter_cost = 0;
3388 int scalar_outside_cost = 0;
3389 int assumed_vf = vect_vf_for_cost (loop_vinfo);
3390 int npeel = LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo);
3391 void *target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
3392
3393 /* Cost model disabled. */
3394 if (unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
3395 {
3396 dump_printf_loc (MSG_NOTE, vect_location, "cost model disabled.\n");
3397 *ret_min_profitable_niters = 0;
3398 *ret_min_profitable_estimate = 0;
3399 return;
3400 }
3401
3402 /* Requires loop versioning tests to handle misalignment. */
3403 if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo))
3404 {
3405 /* FIXME: Make cost depend on complexity of individual check. */
3406 unsigned len = LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).length ();
3407 (void) add_stmt_cost (target_cost_data, len, vector_stmt, NULL, 0,
3408 vect_prologue);
3409 dump_printf (MSG_NOTE,
3410 "cost model: Adding cost of checks for loop "
3411 "versioning to treat misalignment.\n");
3412 }
3413
3414 /* Requires loop versioning with alias checks. */
3415 if (LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
3416 {
3417 /* FIXME: Make cost depend on complexity of individual check. */
3418 unsigned len = LOOP_VINFO_COMP_ALIAS_DDRS (loop_vinfo).length ();
3419 (void) add_stmt_cost (target_cost_data, len, vector_stmt, NULL, 0,
3420 vect_prologue);
3421 len = LOOP_VINFO_CHECK_UNEQUAL_ADDRS (loop_vinfo).length ();
3422 if (len)
3423 /* Count LEN - 1 ANDs and LEN comparisons. */
3424 (void) add_stmt_cost (target_cost_data, len * 2 - 1, scalar_stmt,
3425 NULL, 0, vect_prologue);
3426 len = LOOP_VINFO_LOWER_BOUNDS (loop_vinfo).length ();
3427 if (len)
3428 {
3429 /* Count LEN - 1 ANDs and LEN comparisons. */
3430 unsigned int nstmts = len * 2 - 1;
3431 /* +1 for each bias that needs adding. */
3432 for (unsigned int i = 0; i < len; ++i)
3433 if (!LOOP_VINFO_LOWER_BOUNDS (loop_vinfo)[i].unsigned_p)
3434 nstmts += 1;
3435 (void) add_stmt_cost (target_cost_data, nstmts, scalar_stmt,
3436 NULL, 0, vect_prologue);
3437 }
3438 dump_printf (MSG_NOTE,
3439 "cost model: Adding cost of checks for loop "
3440 "versioning aliasing.\n");
3441 }
3442
3443 /* Requires loop versioning with niter checks. */
3444 if (LOOP_REQUIRES_VERSIONING_FOR_NITERS (loop_vinfo))
3445 {
3446 /* FIXME: Make cost depend on complexity of individual check. */
3447 (void) add_stmt_cost (target_cost_data, 1, vector_stmt, NULL, 0,
3448 vect_prologue);
3449 dump_printf (MSG_NOTE,
3450 "cost model: Adding cost of checks for loop "
3451 "versioning niters.\n");
3452 }
3453
3454 if (LOOP_REQUIRES_VERSIONING (loop_vinfo))
3455 (void) add_stmt_cost (target_cost_data, 1, cond_branch_taken, NULL, 0,
3456 vect_prologue);
3457
3458 /* Count statements in the scalar loop.  Use this as the scalar cost for a
3459 single iteration for now.
3460
3461 TODO: Add outer loop support.
3462
3463 TODO: Consider assigning different costs to different scalar
3464 statements. */
3465
3466 scalar_single_iter_cost
3467 = LOOP_VINFO_SINGLE_SCALAR_ITERATION_COST (loop_vinfo);
3468
3469 /* Add additional cost for the peeled instructions in prologue and epilogue
3470 loop. (For fully-masked loops there will be no peeling.)
3471
3472 FORNOW: If we don't know the value of peel_iters for prologue or epilogue
3473 at compile time, we assume it's vf/2 (the worst would be vf-1).
3474
3475 TODO: Build an expression that represents peel_iters for prologue and
3476 epilogue to be used in a run-time test. */
3477
3478 if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
3479 {
3480 peel_iters_prologue = 0;
3481 peel_iters_epilogue = 0;
3482
3483 if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo))
3484 {
3485 /* We need to peel exactly one iteration. */
3486 peel_iters_epilogue += 1;
3487 stmt_info_for_cost *si;
3488 int j;
3489 FOR_EACH_VEC_ELT (LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo),
3490 j, si)
3491 (void) add_stmt_cost (target_cost_data, si->count,
3492 si->kind, si->stmt_info, si->misalign,
3493 vect_epilogue);
3494 }
3495 }
3496 else if (npeel < 0)
3497 {
3498 peel_iters_prologue = assumed_vf / 2;
3499 dump_printf (MSG_NOTE, "cost model: "
3500 "prologue peel iters set to vf/2.\n");
3501
3502 /* If peeling for alignment is unknown, the loop bound of the main loop
3503 becomes unknown. */
3504 peel_iters_epilogue = assumed_vf / 2;
3505 dump_printf (MSG_NOTE, "cost model: "
3506 "epilogue peel iters set to vf/2 because "
3507 "peeling for alignment is unknown.\n");
3508
3509 /* If peeled iterations are unknown, count a taken branch and a not taken
3510 branch per peeled loop. Even if scalar loop iterations are known,
3511 vector iterations are not known since peeled prologue iterations are
3512 not known. Hence guards remain the same. */
3513 (void) add_stmt_cost (target_cost_data, 1, cond_branch_taken,
3514 NULL, 0, vect_prologue);
3515 (void) add_stmt_cost (target_cost_data, 1, cond_branch_not_taken,
3516 NULL, 0, vect_prologue);
3517 (void) add_stmt_cost (target_cost_data, 1, cond_branch_taken,
3518 NULL, 0, vect_epilogue);
3519 (void) add_stmt_cost (target_cost_data, 1, cond_branch_not_taken,
3520 NULL, 0, vect_epilogue);
3521 stmt_info_for_cost *si;
3522 int j;
3523 FOR_EACH_VEC_ELT (LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo), j, si)
3524 {
3525 (void) add_stmt_cost (target_cost_data,
3526 si->count * peel_iters_prologue,
3527 si->kind, si->stmt_info, si->misalign,
3528 vect_prologue);
3529 (void) add_stmt_cost (target_cost_data,
3530 si->count * peel_iters_epilogue,
3531 si->kind, si->stmt_info, si->misalign,
3532 vect_epilogue);
3533 }
3534 }
3535 else
3536 {
3537 stmt_vector_for_cost prologue_cost_vec, epilogue_cost_vec;
3538 stmt_info_for_cost *si;
3539 int j;
3540 void *data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
3541
3542 prologue_cost_vec.create (2);
3543 epilogue_cost_vec.create (2);
3544 peel_iters_prologue = npeel;
3545
3546 (void) vect_get_known_peeling_cost (loop_vinfo, peel_iters_prologue,
3547 &peel_iters_epilogue,
3548 &LOOP_VINFO_SCALAR_ITERATION_COST
3549 (loop_vinfo),
3550 &prologue_cost_vec,
3551 &epilogue_cost_vec);
3552
3553 FOR_EACH_VEC_ELT (prologue_cost_vec, j, si)
3554 (void) add_stmt_cost (data, si->count, si->kind, si->stmt_info,
3555 si->misalign, vect_prologue);
3556
3557 FOR_EACH_VEC_ELT (epilogue_cost_vec, j, si)
3558 (void) add_stmt_cost (data, si->count, si->kind, si->stmt_info,
3559 si->misalign, vect_epilogue);
3560
3561 prologue_cost_vec.release ();
3562 epilogue_cost_vec.release ();
3563 }
3564
3565 /* FORNOW: The scalar outside cost is incremented in one of the
3566 following ways:
3567
3568 1. The vectorizer checks for alignment and aliasing and generates
3569 a condition that allows dynamic vectorization. A cost model
3570 check is ANDed with the versioning condition. Hence the scalar code
3571 path now has the added cost of the versioning check.
3572
3573 if (cost > th & versioning_check)
3574 jmp to vector code
3575
3576 Hence the run-time scalar cost is incremented by a not-taken branch cost.
3577
3578 2. The vectorizer then checks if a prologue is required. If the
3579 cost model check was not done before during versioning, it has to
3580 be done before the prologue check.
3581
3582 if (cost <= th)
3583 prologue = scalar_iters
3584 if (prologue == 0)
3585 jmp to vector code
3586 else
3587 execute prologue
3588 if (prologue == num_iters)
3589 go to exit
3590
3591 Hence the run-time scalar cost is incremented by a taken branch,
3592 plus a not-taken branch, plus a taken branch cost.
3593
3594 3. The vectorizer then checks if an epilogue is required. If the
3595 cost model check was not done before during the prologue check, it
3596 has to be done with the epilogue check.
3597
3598 if (prologue == 0)
3599 jmp to vector code
3600 else
3601 execute prologue
3602 if (prologue == num_iters)
3603 go to exit
3604 vector code:
3605 if ((cost <= th) | (scalar_iters-prologue-epilogue == 0))
3606 jmp to epilogue
3607
3608 Hence the run-time scalar cost should be incremented by 2 taken
3609 branches.
3610
3611 TODO: The back end may reorder the BBs differently and reverse
3612 conditions/branch directions. Change the estimates below to
3613 something more reasonable. */
3614
3615 /* If the number of iterations is known and we do not do versioning, we can
3616 decide whether to vectorize at compile time. Hence the scalar version
3617 does not carry cost model guard costs. */
3618 if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
3619 || LOOP_REQUIRES_VERSIONING (loop_vinfo))
3620 {
3621 /* Cost model check occurs at versioning. */
3622 if (LOOP_REQUIRES_VERSIONING (loop_vinfo))
3623 scalar_outside_cost += vect_get_stmt_cost (cond_branch_not_taken);
3624 else
3625 {
3626 /* Cost model check occurs at prologue generation. */
3627 if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) < 0)
3628 scalar_outside_cost += 2 * vect_get_stmt_cost (cond_branch_taken)
3629 + vect_get_stmt_cost (cond_branch_not_taken);
3630 /* Cost model check occurs at epilogue generation. */
3631 else
3632 scalar_outside_cost += 2 * vect_get_stmt_cost (cond_branch_taken);
3633 }
3634 }
3635
3636 /* Complete the target-specific cost calculations. */
3637 finish_cost (LOOP_VINFO_TARGET_COST_DATA (loop_vinfo), &vec_prologue_cost,
3638 &vec_inside_cost, &vec_epilogue_cost);
3639
3640 vec_outside_cost = (int)(vec_prologue_cost + vec_epilogue_cost);
3641
3642 if (dump_enabled_p ())
3643 {
3644 dump_printf_loc (MSG_NOTE, vect_location, "Cost model analysis: \n");
3645 dump_printf (MSG_NOTE, " Vector inside of loop cost: %d\n",
3646 vec_inside_cost);
3647 dump_printf (MSG_NOTE, " Vector prologue cost: %d\n",
3648 vec_prologue_cost);
3649 dump_printf (MSG_NOTE, " Vector epilogue cost: %d\n",
3650 vec_epilogue_cost);
3651 dump_printf (MSG_NOTE, " Scalar iteration cost: %d\n",
3652 scalar_single_iter_cost);
3653 dump_printf (MSG_NOTE, " Scalar outside cost: %d\n",
3654 scalar_outside_cost);
3655 dump_printf (MSG_NOTE, " Vector outside cost: %d\n",
3656 vec_outside_cost);
3657 dump_printf (MSG_NOTE, " prologue iterations: %d\n",
3658 peel_iters_prologue);
3659 dump_printf (MSG_NOTE, " epilogue iterations: %d\n",
3660 peel_iters_epilogue);
3661 }
3662
3663 /* Calculate number of iterations required to make the vector version
3664 profitable, relative to the loop bodies only. The following condition
3665 must hold true:
3666 SIC * niters + SOC > VIC * ((niters-PL_ITERS-EP_ITERS)/VF) + VOC
3667 where
3668 SIC = scalar iteration cost, VIC = vector iteration cost,
3669 VOC = vector outside cost, VF = vectorization factor,
3670 PL_ITERS = prologue iterations, EP_ITERS = epilogue iterations
3671 SOC = scalar outside cost for run time cost model check. */
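  /* A purely illustrative example with hypothetical costs (not taken from any
     target): with SIC = 4, VIC = 8, VF = 4, VOC = 20, SOC = 6 and no peeling,
     the computation below gives ((20 - 6) * 4) / (4 * 4 - 8) = 7, which is
     then bumped to 8 because 4 * 4 * 7 == 8 * 7 + (20 - 6) * 4; 8 is
     therefore the smallest iteration count for which the vector loop is
     cheaper.  */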
3672
3673 if ((scalar_single_iter_cost * assumed_vf) > (int) vec_inside_cost)
3674 {
3675 min_profitable_iters = ((vec_outside_cost - scalar_outside_cost)
3676 * assumed_vf
3677 - vec_inside_cost * peel_iters_prologue
3678 - vec_inside_cost * peel_iters_epilogue);
3679 if (min_profitable_iters <= 0)
3680 min_profitable_iters = 0;
3681 else
3682 {
3683 min_profitable_iters /= ((scalar_single_iter_cost * assumed_vf)
3684 - vec_inside_cost);
3685
3686 if ((scalar_single_iter_cost * assumed_vf * min_profitable_iters)
3687 <= (((int) vec_inside_cost * min_profitable_iters)
3688 + (((int) vec_outside_cost - scalar_outside_cost)
3689 * assumed_vf)))
3690 min_profitable_iters++;
3691 }
3692 }
3693 /* vector version will never be profitable. */
3694 else
3695 {
3696 if (LOOP_VINFO_LOOP (loop_vinfo)->force_vectorize)
3697 warning_at (vect_location.get_location_t (), OPT_Wopenmp_simd,
3698 "vectorization did not happen for a simd loop");
3699
3700 if (dump_enabled_p ())
3701 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3702 "cost model: the vector iteration cost = %d "
3703 "divided by the scalar iteration cost = %d "
3704 "is greater or equal to the vectorization factor = %d"
3705 ".\n",
3706 vec_inside_cost, scalar_single_iter_cost, assumed_vf);
3707 *ret_min_profitable_niters = -1;
3708 *ret_min_profitable_estimate = -1;
3709 return;
3710 }
3711
3712 dump_printf (MSG_NOTE,
3713 " Calculated minimum iters for profitability: %d\n",
3714 min_profitable_iters);
3715
3716 if (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
3717 && min_profitable_iters < (assumed_vf + peel_iters_prologue))
3718 /* We want the vectorized loop to execute at least once. */
3719 min_profitable_iters = assumed_vf + peel_iters_prologue;
3720
3721 if (dump_enabled_p ())
3722 dump_printf_loc (MSG_NOTE, vect_location,
3723 " Runtime profitability threshold = %d\n",
3724 min_profitable_iters);
3725
3726 *ret_min_profitable_niters = min_profitable_iters;
3727
3728 /* Calculate number of iterations required to make the vector version
3729 profitable, relative to the loop bodies only.
3730
3731 The non-vectorized variant costs SIC * niters and it must win over the vector
3732 variant on the expected loop trip count. The following condition must hold true:
3733 SIC * niters > VIC * ((niters-PL_ITERS-EP_ITERS)/VF) + VOC + SOC */
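  /* Continuing the illustrative numbers used for the runtime threshold above:
     min_profitable_estimate = ((20 + 6) * 4) / (4 * 4 - 8) = 13; the MAX
     below then ensures the estimate is never below min_profitable_iters.  */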
3734
3735 if (vec_outside_cost <= 0)
3736 min_profitable_estimate = 0;
3737 else
3738 {
3739 min_profitable_estimate = ((vec_outside_cost + scalar_outside_cost)
3740 * assumed_vf
3741 - vec_inside_cost * peel_iters_prologue
3742 - vec_inside_cost * peel_iters_epilogue)
3743 / ((scalar_single_iter_cost * assumed_vf)
3744 - vec_inside_cost);
3745 }
3746 min_profitable_estimate = MAX (min_profitable_estimate, min_profitable_iters);
3747 if (dump_enabled_p ())
3748 dump_printf_loc (MSG_NOTE, vect_location,
3749 " Static estimate profitability threshold = %d\n",
3750 min_profitable_estimate);
3751
3752 *ret_min_profitable_estimate = min_profitable_estimate;
3753 }
3754
3755 /* Writes into SEL a mask for a vec_perm, equivalent to a vec_shr by OFFSET
3756 vector elements (not bits) for a vector with NELT elements. */
3757 static void
3758 calc_vec_perm_mask_for_shift (unsigned int offset, unsigned int nelt,
3759 vec_perm_builder *sel)
3760 {
3761 /* The encoding is a single stepped pattern. Any wrap-around is handled
3762 by vec_perm_indices. */
3763 sel->new_vector (nelt, 1, 3);
3764 for (unsigned int i = 0; i < 3; i++)
3765 sel->quick_push (i + offset);
3766 }
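/* For illustration only, a hypothetical call
     calc_vec_perm_mask_for_shift (2, 8, &sel);
   encodes the single stepped series 2, 3, 4, which vec_perm_indices extends
   to the selector { 2, 3, 4, 5, 6, 7, 8, 9 }, i.e. the concatenation of the
   two input vectors shifted down by two elements.  */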
3767
3768 /* Checks whether the target supports whole-vector shifts for vectors of mode
3769 MODE. This is the case if _either_ the platform handles vec_shr_optab, _or_
3770 it supports vec_perm_const with masks for all necessary shift amounts. */
3771 static bool
3772 have_whole_vector_shift (machine_mode mode)
3773 {
3774 if (optab_handler (vec_shr_optab, mode) != CODE_FOR_nothing)
3775 return true;
3776
3777 /* Variable-length vectors should be handled via the optab. */
3778 unsigned int nelt;
3779 if (!GET_MODE_NUNITS (mode).is_constant (&nelt))
3780 return false;
3781
3782 vec_perm_builder sel;
3783 vec_perm_indices indices;
3784 for (unsigned int i = nelt / 2; i >= 1; i /= 2)
3785 {
3786 calc_vec_perm_mask_for_shift (i, nelt, &sel);
3787 indices.new_vector (sel, 2, nelt);
3788 if (!can_vec_perm_const_p (mode, indices, false))
3789 return false;
3790 }
3791 return true;
3792 }
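/* For example, for a constant-length mode with eight elements the loop above
   queries vec_perm_const support for shifts by 4, 2 and 1 elements; all of
   them must be available for this fallback path to return true.  */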
3793
3794 /* TODO: Close dependency between vect_model_*_cost and vectorizable_*
3795 functions. Design better to avoid maintenance issues. */
3796
3797 /* Function vect_model_reduction_cost.
3798
3799 Models cost for a reduction operation, including the vector ops
3800 generated within the strip-mine loop, the initial definition before
3801 the loop, and the epilogue code that must be generated. */
3802
3803 static void
3804 vect_model_reduction_cost (stmt_vec_info stmt_info, internal_fn reduc_fn,
3805 int ncopies, stmt_vector_for_cost *cost_vec)
3806 {
3807 int prologue_cost = 0, epilogue_cost = 0, inside_cost;
3808 enum tree_code code;
3809 optab optab;
3810 tree vectype;
3811 machine_mode mode;
3812 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3813 struct loop *loop = NULL;
3814
3815 if (loop_vinfo)
3816 loop = LOOP_VINFO_LOOP (loop_vinfo);
3817
3818 /* Condition reductions generate two reductions in the loop. */
3819 vect_reduction_type reduction_type
3820 = STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info);
3821 if (reduction_type == COND_REDUCTION)
3822 ncopies *= 2;
3823
3824 vectype = STMT_VINFO_VECTYPE (stmt_info);
3825 mode = TYPE_MODE (vectype);
3826 stmt_vec_info orig_stmt_info = STMT_VINFO_RELATED_STMT (stmt_info);
3827
3828 if (!orig_stmt_info)
3829 orig_stmt_info = stmt_info;
3830
3831 code = gimple_assign_rhs_code (orig_stmt_info->stmt);
3832
3833 if (reduction_type == EXTRACT_LAST_REDUCTION
3834 || reduction_type == FOLD_LEFT_REDUCTION)
3835 {
3836 /* No extra instructions needed in the prologue. */
3837 prologue_cost = 0;
3838
3839 if (reduction_type == EXTRACT_LAST_REDUCTION || reduc_fn != IFN_LAST)
3840 /* Count one reduction-like operation per vector. */
3841 inside_cost = record_stmt_cost (cost_vec, ncopies, vec_to_scalar,
3842 stmt_info, 0, vect_body);
3843 else
3844 {
3845 /* Use NELEMENTS extracts and NELEMENTS scalar ops. */
3846 unsigned int nelements = ncopies * vect_nunits_for_cost (vectype);
3847 inside_cost = record_stmt_cost (cost_vec, nelements,
3848 vec_to_scalar, stmt_info, 0,
3849 vect_body);
3850 inside_cost += record_stmt_cost (cost_vec, nelements,
3851 scalar_stmt, stmt_info, 0,
3852 vect_body);
3853 }
3854 }
3855 else
3856 {
3857 /* Add in cost for initial definition.
3858 For cond reduction we have four vectors: initial index, step,
3859 initial result of the data reduction, initial value of the index
3860 reduction. */
3861 int prologue_stmts = reduction_type == COND_REDUCTION ? 4 : 1;
3862 prologue_cost += record_stmt_cost (cost_vec, prologue_stmts,
3863 scalar_to_vec, stmt_info, 0,
3864 vect_prologue);
3865
3866 /* Cost of reduction op inside loop. */
3867 inside_cost = record_stmt_cost (cost_vec, ncopies, vector_stmt,
3868 stmt_info, 0, vect_body);
3869 }
3870
3871 /* Determine cost of epilogue code.
3872
3873 We have a reduction operator that will reduce the vector in one statement.
3874 Also requires scalar extract. */
3875
3876 if (!loop || !nested_in_vect_loop_p (loop, orig_stmt_info))
3877 {
3878 if (reduc_fn != IFN_LAST)
3879 {
3880 if (reduction_type == COND_REDUCTION)
3881 {
3882 /* An EQ stmt and a COND_EXPR stmt. */
3883 epilogue_cost += record_stmt_cost (cost_vec, 2,
3884 vector_stmt, stmt_info, 0,
3885 vect_epilogue);
3886 /* Reduction of the max index and a reduction of the found
3887 values. */
3888 epilogue_cost += record_stmt_cost (cost_vec, 2,
3889 vec_to_scalar, stmt_info, 0,
3890 vect_epilogue);
3891 /* A broadcast of the max value. */
3892 epilogue_cost += record_stmt_cost (cost_vec, 1,
3893 scalar_to_vec, stmt_info, 0,
3894 vect_epilogue);
3895 }
3896 else
3897 {
3898 epilogue_cost += record_stmt_cost (cost_vec, 1, vector_stmt,
3899 stmt_info, 0, vect_epilogue);
3900 epilogue_cost += record_stmt_cost (cost_vec, 1,
3901 vec_to_scalar, stmt_info, 0,
3902 vect_epilogue);
3903 }
3904 }
3905 else if (reduction_type == COND_REDUCTION)
3906 {
3907 unsigned estimated_nunits = vect_nunits_for_cost (vectype);
3908 /* Extraction of scalar elements. */
3909 epilogue_cost += record_stmt_cost (cost_vec,
3910 2 * estimated_nunits,
3911 vec_to_scalar, stmt_info, 0,
3912 vect_epilogue);
3913 /* Scalar max reductions via COND_EXPR / MAX_EXPR. */
3914 epilogue_cost += record_stmt_cost (cost_vec,
3915 2 * estimated_nunits - 3,
3916 scalar_stmt, stmt_info, 0,
3917 vect_epilogue);
3918 }
3919 else if (reduction_type == EXTRACT_LAST_REDUCTION
3920 || reduction_type == FOLD_LEFT_REDUCTION)
3921 /* No extra instructions are needed in the epilogue. */
3922 ;
3923 else
3924 {
3925 int vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype));
3926 tree bitsize =
3927 TYPE_SIZE (TREE_TYPE (gimple_assign_lhs (orig_stmt_info->stmt)));
3928 int element_bitsize = tree_to_uhwi (bitsize);
3929 int nelements = vec_size_in_bits / element_bitsize;
3930
3931 if (code == COND_EXPR)
3932 code = MAX_EXPR;
3933
3934 optab = optab_for_tree_code (code, vectype, optab_default);
3935
3936 /* We have a whole vector shift available. */
3937 if (optab != unknown_optab
3938 && VECTOR_MODE_P (mode)
3939 && optab_handler (optab, mode) != CODE_FOR_nothing
3940 && have_whole_vector_shift (mode))
3941 {
3942 /* Final reduction via vector shifts and the reduction operator.
3943 Also requires scalar extract. */
3944 epilogue_cost += record_stmt_cost (cost_vec,
3945 exact_log2 (nelements) * 2,
3946 vector_stmt, stmt_info, 0,
3947 vect_epilogue);
3948 epilogue_cost += record_stmt_cost (cost_vec, 1,
3949 vec_to_scalar, stmt_info, 0,
3950 vect_epilogue);
3951 }
3952 else
3953 /* Use extracts and reduction op for final reduction. For N
3954 elements, we have N extracts and N-1 reduction ops. */
3955 epilogue_cost += record_stmt_cost (cost_vec,
3956 nelements + nelements - 1,
3957 vector_stmt, stmt_info, 0,
3958 vect_epilogue);
3959 }
3960 }
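  /* Illustrative numbers only: for an eight-element vector the shift-based
     scheme above records 2 * log2 (8) = 6 vector statements plus one
     vec_to_scalar extract, whereas the extract-based fallback records
     8 + 7 = 15 vector statements.  */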
3961
3962 if (dump_enabled_p ())
3963 dump_printf (MSG_NOTE,
3964 "vect_model_reduction_cost: inside_cost = %d, "
3965 "prologue_cost = %d, epilogue_cost = %d .\n", inside_cost,
3966 prologue_cost, epilogue_cost);
3967 }
3968
3969
3970 /* Function vect_model_induction_cost.
3971
3972 Models cost for induction operations. */
3973
3974 static void
3975 vect_model_induction_cost (stmt_vec_info stmt_info, int ncopies,
3976 stmt_vector_for_cost *cost_vec)
3977 {
3978 unsigned inside_cost, prologue_cost;
3979
3980 if (PURE_SLP_STMT (stmt_info))
3981 return;
3982
3983 /* loop cost for vec_loop. */
3984 inside_cost = record_stmt_cost (cost_vec, ncopies, vector_stmt,
3985 stmt_info, 0, vect_body);
3986
3987 /* prologue cost for vec_init and vec_step. */
3988 prologue_cost = record_stmt_cost (cost_vec, 2, scalar_to_vec,
3989 stmt_info, 0, vect_prologue);
3990
3991 if (dump_enabled_p ())
3992 dump_printf_loc (MSG_NOTE, vect_location,
3993 "vect_model_induction_cost: inside_cost = %d, "
3994 "prologue_cost = %d .\n", inside_cost, prologue_cost);
3995 }
3996
3997
3998
3999 /* Function get_initial_def_for_reduction
4000
4001 Input:
4002 STMT_VINFO - a stmt that performs a reduction operation in the loop.
4003 INIT_VAL - the initial value of the reduction variable
4004
4005 Output:
4006 ADJUSTMENT_DEF - a tree that holds a value to be added to the final result
4007 of the reduction (used for adjusting the epilog - see below).
4008 Return a vector variable, initialized according to the operation that
4009 STMT_VINFO performs. This vector will be used as the initial value
4010 of the vector of partial results.
4011
4012 Option1 (adjust in epilog): Initialize the vector as follows:
4013 add/bit or/xor: [0,0,...,0,0]
4014 mult/bit and: [1,1,...,1,1]
4015 min/max/cond_expr: [init_val,init_val,..,init_val,init_val]
4016 and when necessary (e.g. add/mult case) let the caller know
4017 that it needs to adjust the result by init_val.
4018
4019 Option2: Initialize the vector as follows:
4020 add/bit or/xor: [init_val,0,0,...,0]
4021 mult/bit and: [init_val,1,1,...,1]
4022 min/max/cond_expr: [init_val,init_val,...,init_val]
4023 and no adjustments are needed.
4024
4025 For example, for the following code:
4026
4027 s = init_val;
4028 for (i=0;i<n;i++)
4029 s = s + a[i];
4030
4031 STMT_VINFO is 's = s + a[i]', and the reduction variable is 's'.
4032 For a vector of 4 units, we want to return either [0,0,0,init_val],
4033 or [0,0,0,0] and let the caller know that it needs to adjust
4034 the result at the end by 'init_val'.
4035
4036 FORNOW, we use the 'adjust in epilog' scheme (Option1) when ADJUSTMENT_DEF
4037 is not NULL, because its initialization vector is simpler (same element
4038 in all entries), and Option2 otherwise.
4039
4040 A cost model should help decide between these two schemes. */
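/* A further illustration with hypothetical values: for a MULT_EXPR reduction
   with init_val = 5 and four-element vectors, Option1 returns [1,1,1,1] and
   sets *ADJUSTMENT_DEF to 5 so the epilogue can fold the initial value back
   in, whereas Option2 returns [5,1,1,1] and needs no adjustment.  */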
4041
4042 tree
4043 get_initial_def_for_reduction (stmt_vec_info stmt_vinfo, tree init_val,
4044 tree *adjustment_def)
4045 {
4046 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
4047 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
4048 tree scalar_type = TREE_TYPE (init_val);
4049 tree vectype = get_vectype_for_scalar_type (scalar_type);
4050 enum tree_code code = gimple_assign_rhs_code (stmt_vinfo->stmt);
4051 tree def_for_init;
4052 tree init_def;
4053 REAL_VALUE_TYPE real_init_val = dconst0;
4054 int int_init_val = 0;
4055 gimple_seq stmts = NULL;
4056
4057 gcc_assert (vectype);
4058
4059 gcc_assert (POINTER_TYPE_P (scalar_type) || INTEGRAL_TYPE_P (scalar_type)
4060 || SCALAR_FLOAT_TYPE_P (scalar_type));
4061
4062 gcc_assert (nested_in_vect_loop_p (loop, stmt_vinfo)
4063 || loop == (gimple_bb (stmt_vinfo->stmt))->loop_father);
4064
4065 vect_reduction_type reduction_type
4066 = STMT_VINFO_VEC_REDUCTION_TYPE (stmt_vinfo);
4067
4068 switch (code)
4069 {
4070 case WIDEN_SUM_EXPR:
4071 case DOT_PROD_EXPR:
4072 case SAD_EXPR:
4073 case PLUS_EXPR:
4074 case MINUS_EXPR:
4075 case BIT_IOR_EXPR:
4076 case BIT_XOR_EXPR:
4077 case MULT_EXPR:
4078 case BIT_AND_EXPR:
4079 {
4080 /* ADJUSTMENT_DEF is NULL when called from
4081 vect_create_epilog_for_reduction to vectorize double reduction. */
4082 if (adjustment_def)
4083 *adjustment_def = init_val;
4084
4085 if (code == MULT_EXPR)
4086 {
4087 real_init_val = dconst1;
4088 int_init_val = 1;
4089 }
4090
4091 if (code == BIT_AND_EXPR)
4092 int_init_val = -1;
4093
4094 if (SCALAR_FLOAT_TYPE_P (scalar_type))
4095 def_for_init = build_real (scalar_type, real_init_val);
4096 else
4097 def_for_init = build_int_cst (scalar_type, int_init_val);
4098
4099 if (adjustment_def)
4100 /* Option1: the first element is '0' or '1' as well. */
4101 init_def = gimple_build_vector_from_val (&stmts, vectype,
4102 def_for_init);
4103 else if (!TYPE_VECTOR_SUBPARTS (vectype).is_constant ())
4104 {
4105 /* Option2 (variable length): the first element is INIT_VAL. */
4106 init_def = gimple_build_vector_from_val (&stmts, vectype,
4107 def_for_init);
4108 init_def = gimple_build (&stmts, CFN_VEC_SHL_INSERT,
4109 vectype, init_def, init_val);
4110 }
4111 else
4112 {
4113 /* Option2: the first element is INIT_VAL. */
4114 tree_vector_builder elts (vectype, 1, 2);
4115 elts.quick_push (init_val);
4116 elts.quick_push (def_for_init);
4117 init_def = gimple_build_vector (&stmts, &elts);
4118 }
4119 }
4120 break;
4121
4122 case MIN_EXPR:
4123 case MAX_EXPR:
4124 case COND_EXPR:
4125 {
4126 if (adjustment_def)
4127 {
4128 *adjustment_def = NULL_TREE;
4129 if (reduction_type != COND_REDUCTION
4130 && reduction_type != EXTRACT_LAST_REDUCTION)
4131 {
4132 init_def = vect_get_vec_def_for_operand (init_val, stmt_vinfo);
4133 break;
4134 }
4135 }
4136 init_val = gimple_convert (&stmts, TREE_TYPE (vectype), init_val);
4137 init_def = gimple_build_vector_from_val (&stmts, vectype, init_val);
4138 }
4139 break;
4140
4141 default:
4142 gcc_unreachable ();
4143 }
4144
4145 if (stmts)
4146 gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
4147 return init_def;
4148 }
4149
4150 /* Get at the initial defs for the reduction PHIs in SLP_NODE.
4151 NUMBER_OF_VECTORS is the number of vector defs to create.
4152 If NEUTRAL_OP is nonnull, introducing extra elements of that
4153 value will not change the result. */
4154
4155 static void
4156 get_initial_defs_for_reduction (slp_tree slp_node,
4157 vec<tree> *vec_oprnds,
4158 unsigned int number_of_vectors,
4159 bool reduc_chain, tree neutral_op)
4160 {
4161 vec<stmt_vec_info> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
4162 stmt_vec_info stmt_vinfo = stmts[0];
4163 unsigned HOST_WIDE_INT nunits;
4164 unsigned j, number_of_places_left_in_vector;
4165 tree vector_type;
4166 tree vop;
4167 int group_size = stmts.length ();
4168 unsigned int vec_num, i;
4169 unsigned number_of_copies = 1;
4170 vec<tree> voprnds;
4171 voprnds.create (number_of_vectors);
4172 struct loop *loop;
4173 auto_vec<tree, 16> permute_results;
4174
4175 vector_type = STMT_VINFO_VECTYPE (stmt_vinfo);
4176
4177 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def);
4178
4179 loop = (gimple_bb (stmt_vinfo->stmt))->loop_father;
4180 gcc_assert (loop);
4181 edge pe = loop_preheader_edge (loop);
4182
4183 gcc_assert (!reduc_chain || neutral_op);
4184
4185 /* NUMBER_OF_COPIES is the number of times we need to use the same values in
4186 created vectors. It is greater than 1 if unrolling is performed.
4187
4188 For example, we have two scalar operands, s1 and s2 (e.g., group of
4189 strided accesses of size two), while NUNITS is four (i.e., four scalars
4190 of this type can be packed in a vector). The output vector will contain
4191 two copies of each scalar operand: {s1, s2, s1, s2}. (NUMBER_OF_COPIES
4192 will be 2).
4193
4194 If REDUC_GROUP_SIZE > NUNITS, the scalars will be split into several
4195 vectors containing the operands.
4196
4197 For example, NUNITS is four as before, and the group size is 8
4198 (s1, s2, ..., s8). We will create two vectors {s1, s2, s3, s4} and
4199 {s5, s6, s7, s8}. */
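  /* Tying those examples to the computation below: NUMBER_OF_COPIES
     = nunits * number_of_vectors / group_size, i.e. 4 * 1 / 2 = 2 copies for
     the first example and 4 * 2 / 8 = 1 for the second.  */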
4200
4201 if (!TYPE_VECTOR_SUBPARTS (vector_type).is_constant (&nunits))
4202 nunits = group_size;
4203
4204 number_of_copies = nunits * number_of_vectors / group_size;
4205
4206 number_of_places_left_in_vector = nunits;
4207 bool constant_p = true;
4208 tree_vector_builder elts (vector_type, nunits, 1);
4209 elts.quick_grow (nunits);
4210 for (j = 0; j < number_of_copies; j++)
4211 {
4212 for (i = group_size - 1; stmts.iterate (i, &stmt_vinfo); i--)
4213 {
4214 tree op;
4215 /* Get the def before the loop. In a reduction chain we have only
4216 one initial value. */
4217 if ((j != (number_of_copies - 1)
4218 || (reduc_chain && i != 0))
4219 && neutral_op)
4220 op = neutral_op;
4221 else
4222 op = PHI_ARG_DEF_FROM_EDGE (stmt_vinfo->stmt, pe);
4223
4224 /* Create 'vect_ = {op0,op1,...,opn}'. */
4225 number_of_places_left_in_vector--;
4226 elts[number_of_places_left_in_vector] = op;
4227 if (!CONSTANT_CLASS_P (op))
4228 constant_p = false;
4229
4230 if (number_of_places_left_in_vector == 0)
4231 {
4232 gimple_seq ctor_seq = NULL;
4233 tree init;
4234 if (constant_p && !neutral_op
4235 ? multiple_p (TYPE_VECTOR_SUBPARTS (vector_type), nunits)
4236 : known_eq (TYPE_VECTOR_SUBPARTS (vector_type), nunits))
4237 /* Build the vector directly from ELTS. */
4238 init = gimple_build_vector (&ctor_seq, &elts);
4239 else if (neutral_op)
4240 {
4241 /* Build a vector of the neutral value and shift the
4242 other elements into place. */
4243 init = gimple_build_vector_from_val (&ctor_seq, vector_type,
4244 neutral_op);
4245 int k = nunits;
4246 while (k > 0 && elts[k - 1] == neutral_op)
4247 k -= 1;
4248 while (k > 0)
4249 {
4250 k -= 1;
4251 init = gimple_build (&ctor_seq, CFN_VEC_SHL_INSERT,
4252 vector_type, init, elts[k]);
4253 }
4254 }
4255 else
4256 {
4257 /* First time round, duplicate ELTS to fill the
4258 required number of vectors, then cherry pick the
4259 appropriate result for each iteration. */
4260 if (vec_oprnds->is_empty ())
4261 duplicate_and_interleave (&ctor_seq, vector_type, elts,
4262 number_of_vectors,
4263 permute_results);
4264 init = permute_results[number_of_vectors - j - 1];
4265 }
4266 if (ctor_seq != NULL)
4267 gsi_insert_seq_on_edge_immediate (pe, ctor_seq);
4268 voprnds.quick_push (init);
4269
4270 number_of_places_left_in_vector = nunits;
4271 elts.new_vector (vector_type, nunits, 1);
4272 elts.quick_grow (nunits);
4273 constant_p = true;
4274 }
4275 }
4276 }
4277
4278 /* Since the vectors are created in reverse order, we should reverse
4279 them here. */
4280 vec_num = voprnds.length ();
4281 for (j = vec_num; j != 0; j--)
4282 {
4283 vop = voprnds[j - 1];
4284 vec_oprnds->quick_push (vop);
4285 }
4286
4287 voprnds.release ();
4288
4289 /* If VF is greater than the unrolling factor needed for the SLP
4290 group of stmts, NUMBER_OF_VECTORS to be created is greater than
4291 NUMBER_OF_SCALARS/NUNITS or NUNITS/NUMBER_OF_SCALARS, and hence we have
4292 to replicate the vectors. */
4293 tree neutral_vec = NULL;
4294 while (number_of_vectors > vec_oprnds->length ())
4295 {
4296 if (neutral_op)
4297 {
4298 if (!neutral_vec)
4299 {
4300 gimple_seq ctor_seq = NULL;
4301 neutral_vec = gimple_build_vector_from_val
4302 (&ctor_seq, vector_type, neutral_op);
4303 if (ctor_seq != NULL)
4304 gsi_insert_seq_on_edge_immediate (pe, ctor_seq);
4305 }
4306 vec_oprnds->quick_push (neutral_vec);
4307 }
4308 else
4309 {
4310 for (i = 0; vec_oprnds->iterate (i, &vop) && i < vec_num; i++)
4311 vec_oprnds->quick_push (vop);
4312 }
4313 }
4314 }
4315
4316
4317 /* Function vect_create_epilog_for_reduction
4318
4319 Create code at the loop-epilog to finalize the result of a reduction
4320 computation.
4321
4322 VECT_DEFS is list of vector of partial results, i.e., the lhs's of vector
4323 reduction statements.
4324 STMT_INFO is the scalar reduction stmt that is being vectorized.
4325 NCOPIES is > 1 in case the vectorization factor (VF) is bigger than the
4326 number of elements that we can fit in a vectype (nunits). In this case
4327 we have to generate more than one vector stmt, i.e., we need to "unroll"
4328 the vector stmt by a factor VF/nunits. For more details see documentation
4329 in vectorizable_operation.
4330 REDUC_FN is the internal function for the epilog reduction.
4331 REDUCTION_PHIS is a list of the phi-nodes that carry the reduction
4332 computation.
4333 REDUC_INDEX is the index of the operand in the right hand side of the
4334 statement that is defined by REDUCTION_PHI.
4335 DOUBLE_REDUC is TRUE if double reduction phi nodes should be handled.
4336 SLP_NODE is an SLP node containing a group of reduction statements. The
4337 first one in this group is STMT_INFO.
4338 INDUC_VAL is for INTEGER_INDUC_COND_REDUCTION the value to use for the case
4339 when the COND_EXPR is never true in the loop. For MAX_EXPR, it needs to
4340 be smaller than any value of the IV in the loop, for MIN_EXPR larger than
4341 any value of the IV in the loop.
4342 INDUC_CODE is the code for epilog reduction if INTEGER_INDUC_COND_REDUCTION.
4343 NEUTRAL_OP is the value given by neutral_op_for_slp_reduction; it is
4344 null if this is not an SLP reduction
4345
4346 This function:
4347 1. Creates the reduction def-use cycles: sets the arguments for
4348 REDUCTION_PHIS:
4349 The loop-entry argument is the vectorized initial-value of the reduction.
4350 The loop-latch argument is taken from VECT_DEFS - the vector of partial
4351 sums.
4352 2. "Reduces" each vector of partial results VECT_DEFS into a single result,
4353 by calling the function specified by REDUC_FN if available, or by
4354 other means (whole-vector shifts or a scalar loop).
4355 The function also creates a new phi node at the loop exit to preserve
4356 loop-closed form, as illustrated below.
4357
4358 The flow at the entry to this function:
4359
4360 loop:
4361 vec_def = phi <null, null> # REDUCTION_PHI
4362 VECT_DEF = vector_stmt # vectorized form of STMT_INFO
4363 s_loop = scalar_stmt # (scalar) STMT_INFO
4364 loop_exit:
4365 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
4366 use <s_out0>
4367 use <s_out0>
4368
4369 The above is transformed by this function into:
4370
4371 loop:
4372 vec_def = phi <vec_init, VECT_DEF> # REDUCTION_PHI
4373 VECT_DEF = vector_stmt # vectorized form of STMT_INFO
4374 s_loop = scalar_stmt # (scalar) STMT_INFO
4375 loop_exit:
4376 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
4377 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
4378 v_out2 = reduce <v_out1>
4379 s_out3 = extract_field <v_out2, 0>
4380 s_out4 = adjust_result <s_out3>
4381 use <s_out4>
4382 use <s_out4>
4383 */
4384
4385 static void
4386 vect_create_epilog_for_reduction (vec<tree> vect_defs,
4387 stmt_vec_info stmt_info,
4388 gimple *reduc_def_stmt,
4389 int ncopies, internal_fn reduc_fn,
4390 vec<stmt_vec_info> reduction_phis,
4391 bool double_reduc,
4392 slp_tree slp_node,
4393 slp_instance slp_node_instance,
4394 tree induc_val, enum tree_code induc_code,
4395 tree neutral_op)
4396 {
4397 stmt_vec_info prev_phi_info;
4398 tree vectype;
4399 machine_mode mode;
4400 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4401 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo), *outer_loop = NULL;
4402 basic_block exit_bb;
4403 tree scalar_dest;
4404 tree scalar_type;
4405 gimple *new_phi = NULL, *phi;
4406 stmt_vec_info phi_info;
4407 gimple_stmt_iterator exit_gsi;
4408 tree vec_dest;
4409 tree new_temp = NULL_TREE, new_dest, new_name, new_scalar_dest;
4410 gimple *epilog_stmt = NULL;
4411 enum tree_code code = gimple_assign_rhs_code (stmt_info->stmt);
4412 gimple *exit_phi;
4413 tree bitsize;
4414 tree adjustment_def = NULL;
4415 tree vec_initial_def = NULL;
4416 tree expr, def, initial_def = NULL;
4417 tree orig_name, scalar_result;
4418 imm_use_iterator imm_iter, phi_imm_iter;
4419 use_operand_p use_p, phi_use_p;
4420 gimple *use_stmt;
4421 stmt_vec_info reduction_phi_info = NULL;
4422 bool nested_in_vect_loop = false;
4423 auto_vec<gimple *> new_phis;
4424 auto_vec<stmt_vec_info> inner_phis;
4425 int j, i;
4426 auto_vec<tree> scalar_results;
4427 unsigned int group_size = 1, k, ratio;
4428 auto_vec<tree> vec_initial_defs;
4429 auto_vec<gimple *> phis;
4430 bool slp_reduc = false;
4431 bool direct_slp_reduc;
4432 tree new_phi_result;
4433 stmt_vec_info inner_phi = NULL;
4434 tree induction_index = NULL_TREE;
4435
4436 if (slp_node)
4437 group_size = SLP_TREE_SCALAR_STMTS (slp_node).length ();
4438
4439 if (nested_in_vect_loop_p (loop, stmt_info))
4440 {
4441 outer_loop = loop;
4442 loop = loop->inner;
4443 nested_in_vect_loop = true;
4444 gcc_assert (!slp_node);
4445 }
4446
4447 vectype = STMT_VINFO_VECTYPE (stmt_info);
4448 gcc_assert (vectype);
4449 mode = TYPE_MODE (vectype);
4450
4451 /* 1. Create the reduction def-use cycle:
4452 Set the arguments of REDUCTION_PHIS, i.e., transform
4453
4454 loop:
4455 vec_def = phi <null, null> # REDUCTION_PHI
4456 VECT_DEF = vector_stmt # vectorized form of STMT
4457 ...
4458
4459 into:
4460
4461 loop:
4462 vec_def = phi <vec_init, VECT_DEF> # REDUCTION_PHI
4463 VECT_DEF = vector_stmt # vectorized form of STMT
4464 ...
4465
4466 (in case of SLP, do it for all the phis). */
4467
4468 /* Get the loop-entry arguments. */
4469 enum vect_def_type initial_def_dt = vect_unknown_def_type;
4470 if (slp_node)
4471 {
4472 unsigned vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
4473 vec_initial_defs.reserve (vec_num);
4474 get_initial_defs_for_reduction (slp_node_instance->reduc_phis,
4475 &vec_initial_defs, vec_num,
4476 REDUC_GROUP_FIRST_ELEMENT (stmt_info),
4477 neutral_op);
4478 }
4479 else
4480 {
4481 /* Get at the scalar def before the loop, that defines the initial value
4482 of the reduction variable. */
4483 initial_def = PHI_ARG_DEF_FROM_EDGE (reduc_def_stmt,
4484 loop_preheader_edge (loop));
4485 /* Optimize: if for REDUC_MAX initial_def is smaller than the base
4486 and we can't use zero for induc_val, use initial_def. Similarly
4487 for REDUC_MIN and initial_def larger than the base. */
4488 if (TREE_CODE (initial_def) == INTEGER_CST
4489 && (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
4490 == INTEGER_INDUC_COND_REDUCTION)
4491 && !integer_zerop (induc_val)
4492 && ((induc_code == MAX_EXPR
4493 && tree_int_cst_lt (initial_def, induc_val))
4494 || (induc_code == MIN_EXPR
4495 && tree_int_cst_lt (induc_val, initial_def))))
4496 induc_val = initial_def;
4497
4498 if (double_reduc)
4499 /* In case of double reduction we only create a vector variable
4500 to be put in the reduction phi node. The actual statement
4501 creation is done later in this function. */
4502 vec_initial_def = vect_create_destination_var (initial_def, vectype);
4503 else if (nested_in_vect_loop)
4504 {
4505 /* Do not use an adjustment def as that case is not supported
4506 correctly if ncopies is not one. */
4507 vect_is_simple_use (initial_def, loop_vinfo, &initial_def_dt);
4508 vec_initial_def = vect_get_vec_def_for_operand (initial_def,
4509 stmt_info);
4510 }
4511 else
4512 vec_initial_def
4513 = get_initial_def_for_reduction (stmt_info, initial_def,
4514 &adjustment_def);
4515 vec_initial_defs.create (1);
4516 vec_initial_defs.quick_push (vec_initial_def);
4517 }
4518
4519 /* Set phi nodes arguments. */
4520 FOR_EACH_VEC_ELT (reduction_phis, i, phi_info)
4521 {
4522 tree vec_init_def = vec_initial_defs[i];
4523 tree def = vect_defs[i];
4524 for (j = 0; j < ncopies; j++)
4525 {
4526 if (j != 0)
4527 {
4528 phi_info = STMT_VINFO_RELATED_STMT (phi_info);
4529 if (nested_in_vect_loop)
4530 vec_init_def
4531 = vect_get_vec_def_for_stmt_copy (loop_vinfo, vec_init_def);
4532 }
4533
4534 /* Set the loop-entry arg of the reduction-phi. */
4535
4536 gphi *phi = as_a <gphi *> (phi_info->stmt);
4537 if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
4538 == INTEGER_INDUC_COND_REDUCTION)
4539 {
4540 /* Initialise the reduction phi to zero. This prevents non-zero
4541 initial values from interfering with the reduction op. */
4542 gcc_assert (ncopies == 1);
4543 gcc_assert (i == 0);
4544
4545 tree vec_init_def_type = TREE_TYPE (vec_init_def);
4546 tree induc_val_vec
4547 = build_vector_from_val (vec_init_def_type, induc_val);
4548
4549 add_phi_arg (phi, induc_val_vec, loop_preheader_edge (loop),
4550 UNKNOWN_LOCATION);
4551 }
4552 else
4553 add_phi_arg (phi, vec_init_def, loop_preheader_edge (loop),
4554 UNKNOWN_LOCATION);
4555
4556 /* Set the loop-latch arg for the reduction-phi. */
4557 if (j > 0)
4558 def = vect_get_vec_def_for_stmt_copy (loop_vinfo, def);
4559
4560 add_phi_arg (phi, def, loop_latch_edge (loop), UNKNOWN_LOCATION);
4561
4562 if (dump_enabled_p ())
4563 {
4564 dump_printf_loc (MSG_NOTE, vect_location,
4565 "transform reduction: created def-use cycle: ");
4566 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
4567 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, SSA_NAME_DEF_STMT (def), 0);
4568 }
4569 }
4570 }
4571
4572 /* For cond reductions we want to create a new vector (INDEX_COND_EXPR)
4573 which is updated with the current index of the loop for every match of
4574 the original loop's cond_expr (VEC_STMT). This results in a vector
4575 containing, for each lane, the index of the last time the condition passed.
4576 The first match will be a 1 to allow 0 to be used for non-matching
4577 indexes. If there are no matches at all then the vector will be all
4578 zeroes. */
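  /* A hypothetical illustration: with VF = 4 and two vector iterations, the
     index IV takes the values {1,2,3,4} and then {5,6,7,8}; if the condition
     matches in lane 2 of the first iteration and in lane 1 of the second,
     the final index vector is {0, 6, 3, 0}, with 0 marking lanes that never
     matched.  */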
4579 if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == COND_REDUCTION)
4580 {
4581 tree indx_before_incr, indx_after_incr;
4582 poly_uint64 nunits_out = TYPE_VECTOR_SUBPARTS (vectype);
4583
4584 gimple *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info)->stmt;
4585 gcc_assert (gimple_assign_rhs_code (vec_stmt) == VEC_COND_EXPR);
4586
4587 int scalar_precision
4588 = GET_MODE_PRECISION (SCALAR_TYPE_MODE (TREE_TYPE (vectype)));
4589 tree cr_index_scalar_type = make_unsigned_type (scalar_precision);
4590 tree cr_index_vector_type = build_vector_type
4591 (cr_index_scalar_type, TYPE_VECTOR_SUBPARTS (vectype));
4592
4593 /* First we create a simple vector induction variable which starts
4594 with the values {1,2,3,...} (SERIES_VECT) and increments by the
4595 vector size (STEP). */
4596
4597 /* Create a {1,2,3,...} vector. */
4598 tree series_vect = build_index_vector (cr_index_vector_type, 1, 1);
4599
4600 /* Create a vector of the step value. */
4601 tree step = build_int_cst (cr_index_scalar_type, nunits_out);
4602 tree vec_step = build_vector_from_val (cr_index_vector_type, step);
4603
4604 /* Create an induction variable. */
4605 gimple_stmt_iterator incr_gsi;
4606 bool insert_after;
4607 standard_iv_increment_position (loop, &incr_gsi, &insert_after);
4608 create_iv (series_vect, vec_step, NULL_TREE, loop, &incr_gsi,
4609 insert_after, &indx_before_incr, &indx_after_incr);
4610
4611 /* Next create a new phi node vector (NEW_PHI_TREE) which starts
4612 filled with zeros (VEC_ZERO). */
4613
4614 /* Create a vector of 0s. */
4615 tree zero = build_zero_cst (cr_index_scalar_type);
4616 tree vec_zero = build_vector_from_val (cr_index_vector_type, zero);
4617
4618 /* Create a vector phi node. */
4619 tree new_phi_tree = make_ssa_name (cr_index_vector_type);
4620 new_phi = create_phi_node (new_phi_tree, loop->header);
4621 loop_vinfo->add_stmt (new_phi);
4622 add_phi_arg (as_a <gphi *> (new_phi), vec_zero,
4623 loop_preheader_edge (loop), UNKNOWN_LOCATION);
4624
4625 /* Now take the condition from the loop's original cond_expr
4626 (VEC_STMT) and produce a new cond_expr (INDEX_COND_EXPR) which for
4627 every match uses values from the induction variable
4628 (INDEX_BEFORE_INCR) otherwise uses values from the phi node
4629 (NEW_PHI_TREE).
4630 Finally, we update the phi (NEW_PHI_TREE) to take the value of
4631 the new cond_expr (INDEX_COND_EXPR). */
4632
4633 /* Duplicate the condition from vec_stmt. */
4634 tree ccompare = unshare_expr (gimple_assign_rhs1 (vec_stmt));
4635
4636 /* Create a conditional, where the condition is taken from vec_stmt
4637 (CCOMPARE), the "then" value is the induction index (INDEX_BEFORE_INCR)
4638 and the "else" value is the phi (NEW_PHI_TREE). */
4639 tree index_cond_expr = build3 (VEC_COND_EXPR, cr_index_vector_type,
4640 ccompare, indx_before_incr,
4641 new_phi_tree);
4642 induction_index = make_ssa_name (cr_index_vector_type);
4643 gimple *index_condition = gimple_build_assign (induction_index,
4644 index_cond_expr);
4645 gsi_insert_before (&incr_gsi, index_condition, GSI_SAME_STMT);
4646 stmt_vec_info index_vec_info = loop_vinfo->add_stmt (index_condition);
4647 STMT_VINFO_VECTYPE (index_vec_info) = cr_index_vector_type;
4648
4649 /* Update the phi with the vec cond. */
4650 add_phi_arg (as_a <gphi *> (new_phi), induction_index,
4651 loop_latch_edge (loop), UNKNOWN_LOCATION);
4652 }
4653
4654 /* 2. Create epilog code.
4655 The reduction epilog code operates across the elements of the vector
4656 of partial results computed by the vectorized loop.
4657 The reduction epilog code consists of:
4658
4659 step 1: compute the scalar result in a vector (v_out2)
4660 step 2: extract the scalar result (s_out3) from the vector (v_out2)
4661 step 3: adjust the scalar result (s_out3) if needed.
4662
4663 Step 1 can be accomplished using one of the following three schemes:
4664 (scheme 1) using reduc_fn, if available.
4665 (scheme 2) using whole-vector shifts, if available.
4666 (scheme 3) using a scalar loop. In this case steps 1+2 above are
4667 combined.
4668
4669 The overall epilog code looks like this:
4670
4671 s_out0 = phi <s_loop> # original EXIT_PHI
4672 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
4673 v_out2 = reduce <v_out1> # step 1
4674 s_out3 = extract_field <v_out2, 0> # step 2
4675 s_out4 = adjust_result <s_out3> # step 3
4676
4677 (step 3 is optional, and steps 1 and 2 may be combined).
4678 Lastly, the uses of s_out0 are replaced by s_out4. */
4679
4680
4681 /* 2.1 Create new loop-exit-phis to preserve loop-closed form:
4682 v_out1 = phi <VECT_DEF>
4683 Store them in NEW_PHIS. */
4684
4685 exit_bb = single_exit (loop)->dest;
4686 prev_phi_info = NULL;
4687 new_phis.create (vect_defs.length ());
4688 FOR_EACH_VEC_ELT (vect_defs, i, def)
4689 {
4690 for (j = 0; j < ncopies; j++)
4691 {
4692 tree new_def = copy_ssa_name (def);
4693 phi = create_phi_node (new_def, exit_bb);
4694 stmt_vec_info phi_info = loop_vinfo->add_stmt (phi);
4695 if (j == 0)
4696 new_phis.quick_push (phi);
4697 else
4698 {
4699 def = vect_get_vec_def_for_stmt_copy (loop_vinfo, def);
4700 STMT_VINFO_RELATED_STMT (prev_phi_info) = phi_info;
4701 }
4702
4703 SET_PHI_ARG_DEF (phi, single_exit (loop)->dest_idx, def);
4704 prev_phi_info = phi_info;
4705 }
4706 }
4707
4708 /* The epilogue is created for the outer-loop, i.e., for the loop being
4709 vectorized. Create exit phis for the outer loop. */
4710 if (double_reduc)
4711 {
4712 loop = outer_loop;
4713 exit_bb = single_exit (loop)->dest;
4714 inner_phis.create (vect_defs.length ());
4715 FOR_EACH_VEC_ELT (new_phis, i, phi)
4716 {
4717 stmt_vec_info phi_info = loop_vinfo->lookup_stmt (phi);
4718 tree new_result = copy_ssa_name (PHI_RESULT (phi));
4719 gphi *outer_phi = create_phi_node (new_result, exit_bb);
4720 SET_PHI_ARG_DEF (outer_phi, single_exit (loop)->dest_idx,
4721 PHI_RESULT (phi));
4722 prev_phi_info = loop_vinfo->add_stmt (outer_phi);
4723 inner_phis.quick_push (phi_info);
4724 new_phis[i] = outer_phi;
4725 while (STMT_VINFO_RELATED_STMT (phi_info))
4726 {
4727 phi_info = STMT_VINFO_RELATED_STMT (phi_info);
4728 new_result = copy_ssa_name (PHI_RESULT (phi_info->stmt));
4729 outer_phi = create_phi_node (new_result, exit_bb);
4730 SET_PHI_ARG_DEF (outer_phi, single_exit (loop)->dest_idx,
4731 PHI_RESULT (phi_info->stmt));
4732 stmt_vec_info outer_phi_info = loop_vinfo->add_stmt (outer_phi);
4733 STMT_VINFO_RELATED_STMT (prev_phi_info) = outer_phi_info;
4734 prev_phi_info = outer_phi_info;
4735 }
4736 }
4737 }
4738
4739 exit_gsi = gsi_after_labels (exit_bb);
4740
4741 /* 2.2 Get the relevant tree-code to use in the epilog for schemes 2,3
4742 (i.e. when reduc_fn is not available) and in the final adjustment
4743 code (if needed). Also get the original scalar reduction variable as
4744 defined in the loop. In case STMT is a "pattern-stmt" (i.e. it
4745 represents a reduction pattern), the tree-code and scalar-def are
4746 taken from the original stmt that the pattern-stmt (STMT) replaces.
4747 Otherwise (it is a regular reduction) - the tree-code and scalar-def
4748 are taken from STMT. */
4749
4750 stmt_vec_info orig_stmt_info = STMT_VINFO_RELATED_STMT (stmt_info);
4751 if (!orig_stmt_info)
4752 {
4753 /* Regular reduction */
4754 orig_stmt_info = stmt_info;
4755 }
4756 else
4757 {
4758 /* Reduction pattern */
4759 gcc_assert (STMT_VINFO_IN_PATTERN_P (orig_stmt_info));
4760 gcc_assert (STMT_VINFO_RELATED_STMT (orig_stmt_info) == stmt_info);
4761 }
4762
4763 code = gimple_assign_rhs_code (orig_stmt_info->stmt);
4764 /* For MINUS_EXPR the initial vector is [init_val,0,...,0], therefore,
4765 partial results are added and not subtracted. */
4766 if (code == MINUS_EXPR)
4767 code = PLUS_EXPR;
4768
4769 scalar_dest = gimple_assign_lhs (orig_stmt_info->stmt);
4770 scalar_type = TREE_TYPE (scalar_dest);
4771 scalar_results.create (group_size);
4772 new_scalar_dest = vect_create_destination_var (scalar_dest, NULL);
4773 bitsize = TYPE_SIZE (scalar_type);
4774
4775 /* In case this is a reduction in an inner-loop while vectorizing an outer
4776 loop - we don't need to extract a single scalar result at the end of the
4777 inner-loop (unless it is a double reduction, i.e., the use of the reduction is
4778 outside the outer-loop). The final vector of partial results will be used
4779 in the vectorized outer-loop, or reduced to a scalar result at the end of
4780 the outer-loop. */
4781 if (nested_in_vect_loop && !double_reduc)
4782 goto vect_finalize_reduction;
4783
4784 /* SLP reduction without reduction chain, e.g.,
4785 # a1 = phi <a2, a0>
4786 # b1 = phi <b2, b0>
4787 a2 = operation (a1)
4788 b2 = operation (b1) */
4789 slp_reduc = (slp_node && !REDUC_GROUP_FIRST_ELEMENT (stmt_info));
4790
4791 /* True if we should implement SLP_REDUC using native reduction operations
4792 instead of scalar operations. */
4793 direct_slp_reduc = (reduc_fn != IFN_LAST
4794 && slp_reduc
4795 && !TYPE_VECTOR_SUBPARTS (vectype).is_constant ());
4796
4797 /* In case of reduction chain, e.g.,
4798 # a1 = phi <a3, a0>
4799 a2 = operation (a1)
4800 a3 = operation (a2),
4801
4802 we may end up with more than one vector result. Here we reduce them to
4803 one vector. */
4804 if (REDUC_GROUP_FIRST_ELEMENT (stmt_info) || direct_slp_reduc)
4805 {
4806 tree first_vect = PHI_RESULT (new_phis[0]);
4807 gassign *new_vec_stmt = NULL;
4808 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4809 for (k = 1; k < new_phis.length (); k++)
4810 {
4811 gimple *next_phi = new_phis[k];
4812 tree second_vect = PHI_RESULT (next_phi);
4813 tree tem = make_ssa_name (vec_dest, new_vec_stmt);
4814 new_vec_stmt = gimple_build_assign (tem, code,
4815 first_vect, second_vect);
4816 gsi_insert_before (&exit_gsi, new_vec_stmt, GSI_SAME_STMT);
4817 first_vect = tem;
4818 }
4819
4820 new_phi_result = first_vect;
4821 if (new_vec_stmt)
4822 {
4823 new_phis.truncate (0);
4824 new_phis.safe_push (new_vec_stmt);
4825 }
4826 }
4827 /* Likewise if we couldn't use a single def-use cycle. */
4828 else if (ncopies > 1)
4829 {
4830 gcc_assert (new_phis.length () == 1);
4831 tree first_vect = PHI_RESULT (new_phis[0]);
4832 gassign *new_vec_stmt = NULL;
4833 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4834 stmt_vec_info next_phi_info = loop_vinfo->lookup_stmt (new_phis[0]);
4835 for (int k = 1; k < ncopies; ++k)
4836 {
4837 next_phi_info = STMT_VINFO_RELATED_STMT (next_phi_info);
4838 tree second_vect = PHI_RESULT (next_phi_info->stmt);
4839 tree tem = make_ssa_name (vec_dest, new_vec_stmt);
4840 new_vec_stmt = gimple_build_assign (tem, code,
4841 first_vect, second_vect);
4842 gsi_insert_before (&exit_gsi, new_vec_stmt, GSI_SAME_STMT);
4843 first_vect = tem;
4844 }
4845 new_phi_result = first_vect;
4846 new_phis.truncate (0);
4847 new_phis.safe_push (new_vec_stmt);
4848 }
4849 else
4850 new_phi_result = PHI_RESULT (new_phis[0]);
4851
4852 if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == COND_REDUCTION
4853 && reduc_fn != IFN_LAST)
4854 {
4855 /* For condition reductions, we have a vector (NEW_PHI_RESULT) containing
4856 various data values where the condition matched and another vector
4857 (INDUCTION_INDEX) containing all the indexes of those matches. We
4858 need to extract the last matching index (which will be the index with
4859 highest value) and use this to index into the data vector.
4860 For the case where there were no matches, the data vector will contain
4861 all default values and the index vector will be all zeros. */
4862
4863 /* Get various versions of the type of the vector of indexes. */
4864 tree index_vec_type = TREE_TYPE (induction_index);
4865 gcc_checking_assert (TYPE_UNSIGNED (index_vec_type));
4866 tree index_scalar_type = TREE_TYPE (index_vec_type);
4867 tree index_vec_cmp_type = build_same_sized_truth_vector_type
4868 (index_vec_type);
4869
4870 /* Get an unsigned integer version of the type of the data vector. */
4871 int scalar_precision
4872 = GET_MODE_PRECISION (SCALAR_TYPE_MODE (scalar_type));
4873 tree scalar_type_unsigned = make_unsigned_type (scalar_precision);
4874 tree vectype_unsigned = build_vector_type
4875 (scalar_type_unsigned, TYPE_VECTOR_SUBPARTS (vectype));
4876
4877 /* First we need to create a vector (ZERO_VEC) of zeros and another
4878 vector (MAX_INDEX_VEC) filled with the last matching index, which we
4879 can create using a MAX reduction and then expanding.
4880 In the case where the loop never made any matches, the max index will
4881 be zero. */
4882
4883 /* Vector of {0, 0, 0,...}. */
4884 tree zero_vec = make_ssa_name (vectype);
4885 tree zero_vec_rhs = build_zero_cst (vectype);
4886 gimple *zero_vec_stmt = gimple_build_assign (zero_vec, zero_vec_rhs);
4887 gsi_insert_before (&exit_gsi, zero_vec_stmt, GSI_SAME_STMT);
4888
4889 /* Find maximum value from the vector of found indexes. */
4890 tree max_index = make_ssa_name (index_scalar_type);
4891 gcall *max_index_stmt = gimple_build_call_internal (IFN_REDUC_MAX,
4892 1, induction_index);
4893 gimple_call_set_lhs (max_index_stmt, max_index);
4894 gsi_insert_before (&exit_gsi, max_index_stmt, GSI_SAME_STMT);
4895
4896 /* Vector of {max_index, max_index, max_index,...}. */
4897 tree max_index_vec = make_ssa_name (index_vec_type);
4898 tree max_index_vec_rhs = build_vector_from_val (index_vec_type,
4899 max_index);
4900 gimple *max_index_vec_stmt = gimple_build_assign (max_index_vec,
4901 max_index_vec_rhs);
4902 gsi_insert_before (&exit_gsi, max_index_vec_stmt, GSI_SAME_STMT);
4903
4904 /* Next we compare the new vector (MAX_INDEX_VEC) full of max indexes
4905 with the vector (INDUCTION_INDEX) of found indexes, choosing values
4906 from the data vector (NEW_PHI_RESULT) for matches, 0 (ZERO_VEC)
4907 otherwise. Only one value should match, resulting in a vector
4908 (VEC_COND) with one data value and the rest zeros.
4909 In the case where the loop never made any matches, every index will
4910 match, resulting in a vector with all data values (which will all be
4911 the default value). */
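      /* Continuing the hypothetical illustration above: with an index vector
	 of {0, 6, 3, 0} and a data vector {d0, d1, d2, d3}, the REDUC_MAX
	 below yields 6, the comparison selects only lane 1, and the final
	 unsigned max reduction therefore extracts d1.  */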
4912
4913 /* Compare the max index vector to the vector of found indexes to find
4914 the position of the max value. */
4915 tree vec_compare = make_ssa_name (index_vec_cmp_type);
4916 gimple *vec_compare_stmt = gimple_build_assign (vec_compare, EQ_EXPR,
4917 induction_index,
4918 max_index_vec);
4919 gsi_insert_before (&exit_gsi, vec_compare_stmt, GSI_SAME_STMT);
4920
4921 /* Use the compare to choose either values from the data vector or
4922 zero. */
4923 tree vec_cond = make_ssa_name (vectype);
4924 gimple *vec_cond_stmt = gimple_build_assign (vec_cond, VEC_COND_EXPR,
4925 vec_compare, new_phi_result,
4926 zero_vec);
4927 gsi_insert_before (&exit_gsi, vec_cond_stmt, GSI_SAME_STMT);
4928
4929 /* Finally we need to extract the data value from the vector (VEC_COND)
4930 into a scalar (MATCHED_DATA_REDUC). Logically we want to do an OR
4931 reduction, but because this doesn't exist, we can use a MAX reduction
4932 instead. The data value might be signed or a float so we need to cast
4933 it first.
4934 In the case where the loop never made any matches, the data values are
4935 all identical, and so will reduce down correctly. */
4936
4937 /* Make the matched data values unsigned. */
4938 tree vec_cond_cast = make_ssa_name (vectype_unsigned);
4939 tree vec_cond_cast_rhs = build1 (VIEW_CONVERT_EXPR, vectype_unsigned,
4940 vec_cond);
4941 gimple *vec_cond_cast_stmt = gimple_build_assign (vec_cond_cast,
4942 VIEW_CONVERT_EXPR,
4943 vec_cond_cast_rhs);
4944 gsi_insert_before (&exit_gsi, vec_cond_cast_stmt, GSI_SAME_STMT);
4945
4946 /* Reduce down to a scalar value. */
4947 tree data_reduc = make_ssa_name (scalar_type_unsigned);
4948 gcall *data_reduc_stmt = gimple_build_call_internal (IFN_REDUC_MAX,
4949 1, vec_cond_cast);
4950 gimple_call_set_lhs (data_reduc_stmt, data_reduc);
4951 gsi_insert_before (&exit_gsi, data_reduc_stmt, GSI_SAME_STMT);
4952
4953 /* Convert the reduced value back to the result type and set as the
4954 result. */
4955 gimple_seq stmts = NULL;
4956 new_temp = gimple_build (&stmts, VIEW_CONVERT_EXPR, scalar_type,
4957 data_reduc);
4958 gsi_insert_seq_before (&exit_gsi, stmts, GSI_SAME_STMT);
4959 scalar_results.safe_push (new_temp);
4960 }
4961 else if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == COND_REDUCTION
4962 && reduc_fn == IFN_LAST)
4963 {
4964 /* Condition reduction without supported IFN_REDUC_MAX. Generate
4965 idx = 0;
4966 idx_val = induction_index[0];
4967 val = data_reduc[0];
4968 for (idx = 0, val = init, i = 0; i < nelts; ++i)
4969 if (induction_index[i] > idx_val)
4970 val = data_reduc[i], idx_val = induction_index[i];
4971 return val; */
4972
4973 tree data_eltype = TREE_TYPE (TREE_TYPE (new_phi_result));
4974 tree idx_eltype = TREE_TYPE (TREE_TYPE (induction_index));
4975 unsigned HOST_WIDE_INT el_size = tree_to_uhwi (TYPE_SIZE (idx_eltype));
4976 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (TREE_TYPE (induction_index));
4977 /* Enforced by vectorizable_reduction, which ensures we have target
4978 support before allowing a conditional reduction on variable-length
4979 vectors. */
4980 unsigned HOST_WIDE_INT v_size = el_size * nunits.to_constant ();
4981 tree idx_val = NULL_TREE, val = NULL_TREE;
4982 for (unsigned HOST_WIDE_INT off = 0; off < v_size; off += el_size)
4983 {
4984 tree old_idx_val = idx_val;
4985 tree old_val = val;
4986 idx_val = make_ssa_name (idx_eltype);
4987 epilog_stmt = gimple_build_assign (idx_val, BIT_FIELD_REF,
4988 build3 (BIT_FIELD_REF, idx_eltype,
4989 induction_index,
4990 bitsize_int (el_size),
4991 bitsize_int (off)));
4992 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
4993 val = make_ssa_name (data_eltype);
4994 epilog_stmt = gimple_build_assign (val, BIT_FIELD_REF,
4995 build3 (BIT_FIELD_REF,
4996 data_eltype,
4997 new_phi_result,
4998 bitsize_int (el_size),
4999 bitsize_int (off)));
5000 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5001 if (off != 0)
5002 {
5003 tree new_idx_val = idx_val;
5004 tree new_val = val;
5005 if (off != v_size - el_size)
5006 {
5007 new_idx_val = make_ssa_name (idx_eltype);
5008 epilog_stmt = gimple_build_assign (new_idx_val,
5009 MAX_EXPR, idx_val,
5010 old_idx_val);
5011 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5012 }
5013 new_val = make_ssa_name (data_eltype);
5014 epilog_stmt = gimple_build_assign (new_val,
5015 COND_EXPR,
5016 build2 (GT_EXPR,
5017 boolean_type_node,
5018 idx_val,
5019 old_idx_val),
5020 val, old_val);
5021 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5022 idx_val = new_idx_val;
5023 val = new_val;
5024 }
5025 }
5026 /* Convert the reduced value back to the result type and set as the
5027 result. */
5028 gimple_seq stmts = NULL;
5029 val = gimple_convert (&stmts, scalar_type, val);
5030 gsi_insert_seq_before (&exit_gsi, stmts, GSI_SAME_STMT);
5031 scalar_results.safe_push (val);
5032 }
5033
5034 /* 2.3 Create the reduction code, using one of the three schemes described
5035 above. In SLP we simply need to extract all the elements from the
5036 vector (without reducing them), so we use scalar shifts. */
5037 else if (reduc_fn != IFN_LAST && !slp_reduc)
5038 {
5039 tree tmp;
5040 tree vec_elem_type;
5041
5042 /* Case 1: Create:
5043 v_out2 = reduc_expr <v_out1> */
5044
5045 if (dump_enabled_p ())
5046 dump_printf_loc (MSG_NOTE, vect_location,
5047 "Reduce using direct vector reduction.\n");
5048
5049 vec_elem_type = TREE_TYPE (TREE_TYPE (new_phi_result));
5050 if (!useless_type_conversion_p (scalar_type, vec_elem_type))
5051 {
5052 tree tmp_dest
5053 = vect_create_destination_var (scalar_dest, vec_elem_type);
5054 epilog_stmt = gimple_build_call_internal (reduc_fn, 1,
5055 new_phi_result);
5056 gimple_set_lhs (epilog_stmt, tmp_dest);
5057 new_temp = make_ssa_name (tmp_dest, epilog_stmt);
5058 gimple_set_lhs (epilog_stmt, new_temp);
5059 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5060
5061 epilog_stmt = gimple_build_assign (new_scalar_dest, NOP_EXPR,
5062 new_temp);
5063 }
5064 else
5065 {
5066 epilog_stmt = gimple_build_call_internal (reduc_fn, 1,
5067 new_phi_result);
5068 gimple_set_lhs (epilog_stmt, new_scalar_dest);
5069 }
5070
5071 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
5072 gimple_set_lhs (epilog_stmt, new_temp);
5073 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5074
5075 if ((STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
5076 == INTEGER_INDUC_COND_REDUCTION)
5077 && !operand_equal_p (initial_def, induc_val, 0))
5078 {
5079 /* Earlier we set the initial value to be a vector of induc_val
5080 values. Check the result and if it is induc_val then replace
5081 it with the original initial value, unless induc_val is
5082 the same as initial_def already. */
5083 tree zcompare = build2 (EQ_EXPR, boolean_type_node, new_temp,
5084 induc_val);
5085
5086 tmp = make_ssa_name (new_scalar_dest);
5087 epilog_stmt = gimple_build_assign (tmp, COND_EXPR, zcompare,
5088 initial_def, new_temp);
5089 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5090 new_temp = tmp;
5091 }
5092
5093 scalar_results.safe_push (new_temp);
5094 }
5095 else if (direct_slp_reduc)
5096 {
5097 /* Here we create one vector for each of the REDUC_GROUP_SIZE results,
5098 with the elements for other SLP statements replaced with the
5099 neutral value. We can then do a normal reduction on each vector. */
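/* Illustrative sketch (assumed layout, not taken verbatim from this file):
   with a hypothetical REDUC_GROUP_SIZE of 2, an accumulator vector
   acc = { a0, b0, a1, b1 } holding interleaved partial results for the two
   SLP statements, and a neutral value N (e.g. 0 for PLUS_EXPR), the code
   below builds
     vec_a = { a0, N, a1, N }   and   vec_b = { N, b0, N, b1 }
   by comparing index & (group_size - 1) against each statement number, and
   then feeds vec_a and vec_b to the normal full-vector reduction. */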
5100
5101 /* Enforced by vectorizable_reduction. */
5102 gcc_assert (new_phis.length () == 1);
5103 gcc_assert (pow2p_hwi (group_size));
5104
5105 slp_tree orig_phis_slp_node = slp_node_instance->reduc_phis;
5106 vec<stmt_vec_info> orig_phis
5107 = SLP_TREE_SCALAR_STMTS (orig_phis_slp_node);
5108 gimple_seq seq = NULL;
5109
5110 /* Build a vector {0, 1, 2, ...}, with the same number of elements
5111 and the same element size as VECTYPE. */
5112 tree index = build_index_vector (vectype, 0, 1);
5113 tree index_type = TREE_TYPE (index);
5114 tree index_elt_type = TREE_TYPE (index_type);
5115 tree mask_type = build_same_sized_truth_vector_type (index_type);
5116
5117 /* Create a vector that, for each element, identifies which of
5118 the REDUC_GROUP_SIZE results should use it. */
5119 tree index_mask = build_int_cst (index_elt_type, group_size - 1);
5120 index = gimple_build (&seq, BIT_AND_EXPR, index_type, index,
5121 build_vector_from_val (index_type, index_mask));
5122
5123 /* Get a neutral vector value. This is simply a splat of the neutral
5124 scalar value if we have one, otherwise the initial scalar value
5125 is itself a neutral value. */
5126 tree vector_identity = NULL_TREE;
5127 if (neutral_op)
5128 vector_identity = gimple_build_vector_from_val (&seq, vectype,
5129 neutral_op);
5130 for (unsigned int i = 0; i < group_size; ++i)
5131 {
5132 /* If there's no universal neutral value, we can use the
5133 initial scalar value from the original PHI. This is used
5134 for MIN and MAX reduction, for example. */
5135 if (!neutral_op)
5136 {
5137 tree scalar_value
5138 = PHI_ARG_DEF_FROM_EDGE (orig_phis[i]->stmt,
5139 loop_preheader_edge (loop));
5140 vector_identity = gimple_build_vector_from_val (&seq, vectype,
5141 scalar_value);
5142 }
5143
5144 /* Calculate the equivalent of:
5145
5146 sel[j] = (index[j] == i);
5147
5148 which selects the elements of NEW_PHI_RESULT that should
5149 be included in the result. */
5150 tree compare_val = build_int_cst (index_elt_type, i);
5151 compare_val = build_vector_from_val (index_type, compare_val);
5152 tree sel = gimple_build (&seq, EQ_EXPR, mask_type,
5153 index, compare_val);
5154
5155 /* Calculate the equivalent of:
5156
5157 vec = seq ? new_phi_result : vector_identity;
5158
5159 VEC is now suitable for a full vector reduction. */
5160 tree vec = gimple_build (&seq, VEC_COND_EXPR, vectype,
5161 sel, new_phi_result, vector_identity);
5162
5163 /* Do the reduction and convert it to the appropriate type. */
5164 tree scalar = gimple_build (&seq, as_combined_fn (reduc_fn),
5165 TREE_TYPE (vectype), vec);
5166 scalar = gimple_convert (&seq, scalar_type, scalar);
5167 scalar_results.safe_push (scalar);
5168 }
5169 gsi_insert_seq_before (&exit_gsi, seq, GSI_SAME_STMT);
5170 }
5171 else
5172 {
5173 bool reduce_with_shift;
5174 tree vec_temp;
5175
5176 /* COND reductions all do the final reduction with MAX_EXPR
5177 or MIN_EXPR. */
5178 if (code == COND_EXPR)
5179 {
5180 if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
5181 == INTEGER_INDUC_COND_REDUCTION)
5182 code = induc_code;
5183 else
5184 code = MAX_EXPR;
5185 }
5186
5187 /* See if the target wants to do the final (shift) reduction
5188 in a vector mode of smaller size and first reduce upper/lower
5189 halves against each other. */
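/* A hedged example of the split (the modes are assumptions for
   illustration, not requirements): with a 256-bit V8SI accumulator and a
   target whose split_reduction hook returns the 128-bit V4SI mode, the
   code below first computes
     lo  = lowpart  (acc)    -- elements 0..3
     hi  = highpart (acc)    -- elements 4..7
     acc = lo OP hi
   and only then performs the shift-based (or scalar) reduction on the
   narrower accumulator. */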
5190 enum machine_mode mode1 = mode;
5191 tree vectype1 = vectype;
5192 unsigned sz = tree_to_uhwi (TYPE_SIZE_UNIT (vectype));
5193 unsigned sz1 = sz;
5194 if (!slp_reduc
5195 && (mode1 = targetm.vectorize.split_reduction (mode)) != mode)
5196 sz1 = GET_MODE_SIZE (mode1).to_constant ();
5197
5198 vectype1 = get_vectype_for_scalar_type_and_size (scalar_type, sz1);
5199 reduce_with_shift = have_whole_vector_shift (mode1);
5200 if (!VECTOR_MODE_P (mode1))
5201 reduce_with_shift = false;
5202 else
5203 {
5204 optab optab = optab_for_tree_code (code, vectype1, optab_default);
5205 if (optab_handler (optab, mode1) == CODE_FOR_nothing)
5206 reduce_with_shift = false;
5207 }
5208
5209 /* First reduce the vector to the vector size we want to perform the
5210 shift reduction on, by combining upper and lower halves. */
5211 new_temp = new_phi_result;
5212 while (sz > sz1)
5213 {
5214 gcc_assert (!slp_reduc);
5215 sz /= 2;
5216 vectype1 = get_vectype_for_scalar_type_and_size (scalar_type, sz);
5217
5218 /* The target has to make sure we support lowpart/highpart
5219 extraction, either via direct vector extract or through
5220 integer mode punning. */
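/* Sketch of the punning fallback with assumed modes: to split a V4SI
   accumulator when no direct V2SI vec_extract exists, the else branch
   below view-converts the V4SI value to V2DI, extracts each DI half with
   a BIT_FIELD_REF, and view-converts the halves back to V2SI. */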
5221 tree dst1, dst2;
5222 if (convert_optab_handler (vec_extract_optab,
5223 TYPE_MODE (TREE_TYPE (new_temp)),
5224 TYPE_MODE (vectype1))
5225 != CODE_FOR_nothing)
5226 {
5227 /* Extract sub-vectors directly once vec_extract becomes
5228 a conversion optab. */
5229 dst1 = make_ssa_name (vectype1);
5230 epilog_stmt
5231 = gimple_build_assign (dst1, BIT_FIELD_REF,
5232 build3 (BIT_FIELD_REF, vectype1,
5233 new_temp, TYPE_SIZE (vectype1),
5234 bitsize_int (0)));
5235 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5236 dst2 = make_ssa_name (vectype1);
5237 epilog_stmt
5238 = gimple_build_assign (dst2, BIT_FIELD_REF,
5239 build3 (BIT_FIELD_REF, vectype1,
5240 new_temp, TYPE_SIZE (vectype1),
5241 bitsize_int (sz * BITS_PER_UNIT)));
5242 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5243 }
5244 else
5245 {
5246 /* Extract via punning to appropriately sized integer mode
5247 vector. */
5248 tree eltype = build_nonstandard_integer_type (sz * BITS_PER_UNIT,
5249 1);
5250 tree etype = build_vector_type (eltype, 2);
5251 gcc_assert (convert_optab_handler (vec_extract_optab,
5252 TYPE_MODE (etype),
5253 TYPE_MODE (eltype))
5254 != CODE_FOR_nothing);
5255 tree tem = make_ssa_name (etype);
5256 epilog_stmt = gimple_build_assign (tem, VIEW_CONVERT_EXPR,
5257 build1 (VIEW_CONVERT_EXPR,
5258 etype, new_temp));
5259 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5260 new_temp = tem;
5261 tem = make_ssa_name (eltype);
5262 epilog_stmt
5263 = gimple_build_assign (tem, BIT_FIELD_REF,
5264 build3 (BIT_FIELD_REF, eltype,
5265 new_temp, TYPE_SIZE (eltype),
5266 bitsize_int (0)));
5267 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5268 dst1 = make_ssa_name (vectype1);
5269 epilog_stmt = gimple_build_assign (dst1, VIEW_CONVERT_EXPR,
5270 build1 (VIEW_CONVERT_EXPR,
5271 vectype1, tem));
5272 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5273 tem = make_ssa_name (eltype);
5274 epilog_stmt
5275 = gimple_build_assign (tem, BIT_FIELD_REF,
5276 build3 (BIT_FIELD_REF, eltype,
5277 new_temp, TYPE_SIZE (eltype),
5278 bitsize_int (sz * BITS_PER_UNIT)));
5279 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5280 dst2 = make_ssa_name (vectype1);
5281 epilog_stmt = gimple_build_assign (dst2, VIEW_CONVERT_EXPR,
5282 build1 (VIEW_CONVERT_EXPR,
5283 vectype1, tem));
5284 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5285 }
5286
5287 new_temp = make_ssa_name (vectype1);
5288 epilog_stmt = gimple_build_assign (new_temp, code, dst1, dst2);
5289 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5290 }
5291
5292 if (reduce_with_shift && !slp_reduc)
5293 {
5294 int element_bitsize = tree_to_uhwi (bitsize);
5295 /* Enforced by vectorizable_reduction, which disallows SLP reductions
5296 for variable-length vectors and also requires direct target support
5297 for loop reductions. */
5298 int vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype1));
5299 int nelements = vec_size_in_bits / element_bitsize;
5300 vec_perm_builder sel;
5301 vec_perm_indices indices;
5302
5303 int elt_offset;
5304
5305 tree zero_vec = build_zero_cst (vectype1);
5306 /* Case 2: Create:
5307 for (offset = nelements/2; offset >= 1; offset/=2)
5308 {
5309 Create: va' = vec_shift <va, offset>
5310 Create: va = vop <va, va'>
5311 } */
5312
5313 tree rhs;
5314
5315 if (dump_enabled_p ())
5316 dump_printf_loc (MSG_NOTE, vect_location,
5317 "Reduce using vector shifts\n");
5318
5319 mode1 = TYPE_MODE (vectype1);
5320 vec_dest = vect_create_destination_var (scalar_dest, vectype1);
5321 for (elt_offset = nelements / 2;
5322 elt_offset >= 1;
5323 elt_offset /= 2)
5324 {
5325 calc_vec_perm_mask_for_shift (elt_offset, nelements, &sel);
5326 indices.new_vector (sel, 2, nelements);
5327 tree mask = vect_gen_perm_mask_any (vectype1, indices);
5328 epilog_stmt = gimple_build_assign (vec_dest, VEC_PERM_EXPR,
5329 new_temp, zero_vec, mask);
5330 new_name = make_ssa_name (vec_dest, epilog_stmt);
5331 gimple_assign_set_lhs (epilog_stmt, new_name);
5332 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5333
5334 epilog_stmt = gimple_build_assign (vec_dest, code, new_name,
5335 new_temp);
5336 new_temp = make_ssa_name (vec_dest, epilog_stmt);
5337 gimple_assign_set_lhs (epilog_stmt, new_temp);
5338 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5339 }
5340
5341 /* 2.4 Extract the final scalar result. Create:
5342 s_out3 = extract_field <v_out2, bitpos> */
5343
5344 if (dump_enabled_p ())
5345 dump_printf_loc (MSG_NOTE, vect_location,
5346 "extract scalar result\n");
5347
5348 rhs = build3 (BIT_FIELD_REF, scalar_type, new_temp,
5349 bitsize, bitsize_zero_node);
5350 epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
5351 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
5352 gimple_assign_set_lhs (epilog_stmt, new_temp);
5353 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5354 scalar_results.safe_push (new_temp);
5355 }
5356 else
5357 {
5358 /* Case 3: Create:
5359 s = extract_field <v_out2, 0>
5360 for (offset = element_size;
5361 offset < vector_size;
5362 offset += element_size;)
5363 {
5364 Create: s' = extract_field <v_out2, offset>
5365 Create: s = op <s, s'> // For non SLP cases
5366 } */
5367
5368 if (dump_enabled_p ())
5369 dump_printf_loc (MSG_NOTE, vect_location,
5370 "Reduce using scalar code.\n");
5371
5372 int vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype1));
5373 int element_bitsize = tree_to_uhwi (bitsize);
5374 FOR_EACH_VEC_ELT (new_phis, i, new_phi)
5375 {
5376 int bit_offset;
5377 if (gimple_code (new_phi) == GIMPLE_PHI)
5378 vec_temp = PHI_RESULT (new_phi);
5379 else
5380 vec_temp = gimple_assign_lhs (new_phi);
5381 tree rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp, bitsize,
5382 bitsize_zero_node);
5383 epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
5384 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
5385 gimple_assign_set_lhs (epilog_stmt, new_temp);
5386 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5387
5388 /* In SLP we don't need to apply the reduction operation, so we just
5389 collect s' values in SCALAR_RESULTS. */
5390 if (slp_reduc)
5391 scalar_results.safe_push (new_temp);
5392
5393 for (bit_offset = element_bitsize;
5394 bit_offset < vec_size_in_bits;
5395 bit_offset += element_bitsize)
5396 {
5397 tree bitpos = bitsize_int (bit_offset);
5398 tree rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp,
5399 bitsize, bitpos);
5400
5401 epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
5402 new_name = make_ssa_name (new_scalar_dest, epilog_stmt);
5403 gimple_assign_set_lhs (epilog_stmt, new_name);
5404 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5405
5406 if (slp_reduc)
5407 {
5408 /* In SLP we don't need to apply the reduction operation, so
5409 we just collect s' values in SCALAR_RESULTS. */
5410 new_temp = new_name;
5411 scalar_results.safe_push (new_name);
5412 }
5413 else
5414 {
5415 epilog_stmt = gimple_build_assign (new_scalar_dest, code,
5416 new_name, new_temp);
5417 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
5418 gimple_assign_set_lhs (epilog_stmt, new_temp);
5419 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5420 }
5421 }
5422 }
5423
5424 /* The only case where we need to reduce scalar results in SLP is
5425 unrolling. If the size of SCALAR_RESULTS is greater than
5426 REDUC_GROUP_SIZE, we reduce them by combining elements modulo
5427 REDUC_GROUP_SIZE. */
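/* Worked example (hypothetical sizes): with REDUC_GROUP_SIZE 2 and an
   unroll factor of 2 there are four scalar results s0..s3, and the loop
   below leaves
     scalar_results[0] = s0 OP s2
     scalar_results[1] = s1 OP s3
   i.e. element j is folded into slot j % REDUC_GROUP_SIZE. */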
5428 if (slp_reduc)
5429 {
5430 tree res, first_res, new_res;
5431 gimple *new_stmt;
5432
5433 /* Reduce multiple scalar results in case of SLP unrolling. */
5434 for (j = group_size; scalar_results.iterate (j, &res);
5435 j++)
5436 {
5437 first_res = scalar_results[j % group_size];
5438 new_stmt = gimple_build_assign (new_scalar_dest, code,
5439 first_res, res);
5440 new_res = make_ssa_name (new_scalar_dest, new_stmt);
5441 gimple_assign_set_lhs (new_stmt, new_res);
5442 gsi_insert_before (&exit_gsi, new_stmt, GSI_SAME_STMT);
5443 scalar_results[j % group_size] = new_res;
5444 }
5445 }
5446 else
5447 /* Not SLP - we have one scalar to keep in SCALAR_RESULTS. */
5448 scalar_results.safe_push (new_temp);
5449 }
5450
5451 if ((STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
5452 == INTEGER_INDUC_COND_REDUCTION)
5453 && !operand_equal_p (initial_def, induc_val, 0))
5454 {
5455 /* Earlier we set the initial value to be a vector of induc_val
5456 values. Check the result and if it is induc_val then replace
5457 it with the original initial value, unless induc_val is
5458 the same as initial_def already. */
5459 tree zcompare = build2 (EQ_EXPR, boolean_type_node, new_temp,
5460 induc_val);
5461
5462 tree tmp = make_ssa_name (new_scalar_dest);
5463 epilog_stmt = gimple_build_assign (tmp, COND_EXPR, zcompare,
5464 initial_def, new_temp);
5465 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5466 scalar_results[0] = tmp;
5467 }
5468 }
5469
5470 vect_finalize_reduction:
5471
5472 if (double_reduc)
5473 loop = loop->inner;
5474
5475 /* 2.5 Adjust the final result by the initial value of the reduction
5476 variable. (When such adjustment is not needed, then
5477 'adjustment_def' is zero). For example, if code is PLUS we create:
5478 new_temp = loop_exit_def + adjustment_def */
5479
5480 if (adjustment_def)
5481 {
5482 gcc_assert (!slp_reduc);
5483 if (nested_in_vect_loop)
5484 {
5485 new_phi = new_phis[0];
5486 gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) == VECTOR_TYPE);
5487 expr = build2 (code, vectype, PHI_RESULT (new_phi), adjustment_def);
5488 new_dest = vect_create_destination_var (scalar_dest, vectype);
5489 }
5490 else
5491 {
5492 new_temp = scalar_results[0];
5493 gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) != VECTOR_TYPE);
5494 expr = build2 (code, scalar_type, new_temp, adjustment_def);
5495 new_dest = vect_create_destination_var (scalar_dest, scalar_type);
5496 }
5497
5498 epilog_stmt = gimple_build_assign (new_dest, expr);
5499 new_temp = make_ssa_name (new_dest, epilog_stmt);
5500 gimple_assign_set_lhs (epilog_stmt, new_temp);
5501 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5502 if (nested_in_vect_loop)
5503 {
5504 stmt_vec_info epilog_stmt_info = loop_vinfo->add_stmt (epilog_stmt);
5505 STMT_VINFO_RELATED_STMT (epilog_stmt_info)
5506 = STMT_VINFO_RELATED_STMT (loop_vinfo->lookup_stmt (new_phi));
5507
5508 if (!double_reduc)
5509 scalar_results.quick_push (new_temp);
5510 else
5511 scalar_results[0] = new_temp;
5512 }
5513 else
5514 scalar_results[0] = new_temp;
5515
5516 new_phis[0] = epilog_stmt;
5517 }
5518
5519 /* 2.6 Handle the loop-exit phis. Replace the uses of scalar loop-exit
5520 phis with new adjusted scalar results, i.e., replace use <s_out0>
5521 with use <s_out4>.
5522
5523 Transform:
5524 loop_exit:
5525 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
5526 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
5527 v_out2 = reduce <v_out1>
5528 s_out3 = extract_field <v_out2, 0>
5529 s_out4 = adjust_result <s_out3>
5530 use <s_out0>
5531 use <s_out0>
5532
5533 into:
5534
5535 loop_exit:
5536 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
5537 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
5538 v_out2 = reduce <v_out1>
5539 s_out3 = extract_field <v_out2, 0>
5540 s_out4 = adjust_result <s_out3>
5541 use <s_out4>
5542 use <s_out4> */
5543
5544
5545 /* In an SLP reduction chain we reduce the vector results into one vector
5546 if necessary, hence here we set REDUC_GROUP_SIZE to 1. SCALAR_DEST is
5547 the LHS of the last stmt in the reduction chain, since we are looking
5548 for the loop exit phi node. */
5549 if (REDUC_GROUP_FIRST_ELEMENT (stmt_info))
5550 {
5551 stmt_vec_info dest_stmt_info
5552 = SLP_TREE_SCALAR_STMTS (slp_node)[group_size - 1];
5553 /* Handle reduction patterns. */
5554 if (STMT_VINFO_RELATED_STMT (dest_stmt_info))
5555 dest_stmt_info = STMT_VINFO_RELATED_STMT (dest_stmt_info);
5556
5557 scalar_dest = gimple_assign_lhs (dest_stmt_info->stmt);
5558 group_size = 1;
5559 }
5560
5561 /* In SLP we may have several statements in NEW_PHIS and REDUCTION_PHIS (in
5562 case REDUC_GROUP_SIZE is greater than the vectorization factor).
5563 Therefore, we need to match SCALAR_RESULTS with corresponding statements.
5564 The first (REDUC_GROUP_SIZE / number of new vector stmts) scalar results
5565 correspond to the first vector stmt, etc.
5566 (RATIO is equal to (REDUC_GROUP_SIZE / number of new vector stmts)). */
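/* For example (hypothetical sizes): with REDUC_GROUP_SIZE 4 and two vector
   statements in NEW_PHIS, RATIO is 2, so scalar results 0-1 are matched
   with the first vector stmt and scalar results 2-3 with the second. */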
5567 if (group_size > new_phis.length ())
5568 {
5569 ratio = group_size / new_phis.length ();
5570 gcc_assert (!(group_size % new_phis.length ()));
5571 }
5572 else
5573 ratio = 1;
5574
5575 stmt_vec_info epilog_stmt_info = NULL;
5576 for (k = 0; k < group_size; k++)
5577 {
5578 if (k % ratio == 0)
5579 {
5580 epilog_stmt_info = loop_vinfo->lookup_stmt (new_phis[k / ratio]);
5581 reduction_phi_info = reduction_phis[k / ratio];
5582 if (double_reduc)
5583 inner_phi = inner_phis[k / ratio];
5584 }
5585
5586 if (slp_reduc)
5587 {
5588 stmt_vec_info scalar_stmt_info = SLP_TREE_SCALAR_STMTS (slp_node)[k];
5589
5590 orig_stmt_info = STMT_VINFO_RELATED_STMT (scalar_stmt_info);
5591 /* SLP statements can't participate in patterns. */
5592 gcc_assert (!orig_stmt_info);
5593 scalar_dest = gimple_assign_lhs (scalar_stmt_info->stmt);
5594 }
5595
5596 phis.create (3);
5597 /* Find the loop-closed-use at the loop exit of the original scalar
5598 result. (The reduction result is expected to have two immediate uses -
5599 one at the latch block, and one at the loop exit). */
5600 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
5601 if (!flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (use_p)))
5602 && !is_gimple_debug (USE_STMT (use_p)))
5603 phis.safe_push (USE_STMT (use_p));
5604
5605 /* While we expect to have found an exit_phi because of loop-closed-ssa
5606 form, we can end up without one if the scalar cycle is dead. */
5607
5608 FOR_EACH_VEC_ELT (phis, i, exit_phi)
5609 {
5610 if (outer_loop)
5611 {
5612 stmt_vec_info exit_phi_vinfo
5613 = loop_vinfo->lookup_stmt (exit_phi);
5614 gphi *vect_phi;
5615
5616 /* FORNOW. Currently not supporting the case where an inner-loop
5617 reduction is not used in the outer-loop (but only outside the
5618 outer-loop), unless it is a double reduction. */
5619 gcc_assert ((STMT_VINFO_RELEVANT_P (exit_phi_vinfo)
5620 && !STMT_VINFO_LIVE_P (exit_phi_vinfo))
5621 || double_reduc);
5622
5623 if (double_reduc)
5624 STMT_VINFO_VEC_STMT (exit_phi_vinfo) = inner_phi;
5625 else
5626 STMT_VINFO_VEC_STMT (exit_phi_vinfo) = epilog_stmt_info;
5627 if (!double_reduc
5628 || STMT_VINFO_DEF_TYPE (exit_phi_vinfo)
5629 != vect_double_reduction_def)
5630 continue;
5631
5632 /* Handle double reduction:
5633
5634 stmt1: s1 = phi <s0, s2> - double reduction phi (outer loop)
5635 stmt2: s3 = phi <s1, s4> - (regular) reduc phi (inner loop)
5636 stmt3: s4 = use (s3) - (regular) reduc stmt (inner loop)
5637 stmt4: s2 = phi <s4> - double reduction stmt (outer loop)
5638
5639 At that point the regular reduction (stmt2 and stmt3) is
5640 already vectorized, as well as the exit phi node, stmt4.
5641 Here we vectorize the phi node of double reduction, stmt1, and
5642 update all relevant statements. */
5643
5644 /* Go through all the uses of s2 to find double reduction phi
5645 node, i.e., stmt1 above. */
5646 orig_name = PHI_RESULT (exit_phi);
5647 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name)
5648 {
5649 stmt_vec_info use_stmt_vinfo;
5650 tree vect_phi_init, preheader_arg, vect_phi_res;
5651 basic_block bb = gimple_bb (use_stmt);
5652
5653 /* Check that USE_STMT is really a double reduction phi
5654 node. */
5655 if (gimple_code (use_stmt) != GIMPLE_PHI
5656 || gimple_phi_num_args (use_stmt) != 2
5657 || bb->loop_father != outer_loop)
5658 continue;
5659 use_stmt_vinfo = loop_vinfo->lookup_stmt (use_stmt);
5660 if (!use_stmt_vinfo
5661 || STMT_VINFO_DEF_TYPE (use_stmt_vinfo)
5662 != vect_double_reduction_def)
5663 continue;
5664
5665 /* Create vector phi node for double reduction:
5666 vs1 = phi <vs0, vs2>
5667 vs1 was created previously in this function by a call to
5668 vect_get_vec_def_for_operand and is stored in
5669 vec_initial_def;
5670 vs2 is defined by INNER_PHI, the vectorized EXIT_PHI;
5671 vs0 is created here. */
5672
5673 /* Create vector phi node. */
5674 vect_phi = create_phi_node (vec_initial_def, bb);
5675 loop_vec_info_for_loop (outer_loop)->add_stmt (vect_phi);
5676
5677 /* Create vs0 - initial def of the double reduction phi. */
5678 preheader_arg = PHI_ARG_DEF_FROM_EDGE (use_stmt,
5679 loop_preheader_edge (outer_loop));
5680 vect_phi_init = get_initial_def_for_reduction
5681 (stmt_info, preheader_arg, NULL);
5682
5683 /* Update phi node arguments with vs0 and vs2. */
5684 add_phi_arg (vect_phi, vect_phi_init,
5685 loop_preheader_edge (outer_loop),
5686 UNKNOWN_LOCATION);
5687 add_phi_arg (vect_phi, PHI_RESULT (inner_phi->stmt),
5688 loop_latch_edge (outer_loop), UNKNOWN_LOCATION);
5689 if (dump_enabled_p ())
5690 {
5691 dump_printf_loc (MSG_NOTE, vect_location,
5692 "created double reduction phi node: ");
5693 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vect_phi, 0);
5694 }
5695
5696 vect_phi_res = PHI_RESULT (vect_phi);
5697
5698 /* Replace the use, i.e., set the correct vs1 in the regular
5699 reduction phi node. FORNOW, NCOPIES is always 1, so the
5700 loop is redundant. */
5701 stmt_vec_info use_info = reduction_phi_info;
5702 for (j = 0; j < ncopies; j++)
5703 {
5704 edge pr_edge = loop_preheader_edge (loop);
5705 SET_PHI_ARG_DEF (as_a <gphi *> (use_info->stmt),
5706 pr_edge->dest_idx, vect_phi_res);
5707 use_info = STMT_VINFO_RELATED_STMT (use_info);
5708 }
5709 }
5710 }
5711 }
5712
5713 phis.release ();
5714 if (nested_in_vect_loop)
5715 {
5716 if (double_reduc)
5717 loop = outer_loop;
5718 else
5719 continue;
5720 }
5721
5722 phis.create (3);
5723 /* Find the loop-closed-use at the loop exit of the original scalar
5724 result. (The reduction result is expected to have two immediate uses,
5725 one at the latch block, and one at the loop exit). For double
5726 reductions we are looking for exit phis of the outer loop. */
5727 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
5728 {
5729 if (!flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (use_p))))
5730 {
5731 if (!is_gimple_debug (USE_STMT (use_p)))
5732 phis.safe_push (USE_STMT (use_p));
5733 }
5734 else
5735 {
5736 if (double_reduc && gimple_code (USE_STMT (use_p)) == GIMPLE_PHI)
5737 {
5738 tree phi_res = PHI_RESULT (USE_STMT (use_p));
5739
5740 FOR_EACH_IMM_USE_FAST (phi_use_p, phi_imm_iter, phi_res)
5741 {
5742 if (!flow_bb_inside_loop_p (loop,
5743 gimple_bb (USE_STMT (phi_use_p)))
5744 && !is_gimple_debug (USE_STMT (phi_use_p)))
5745 phis.safe_push (USE_STMT (phi_use_p));
5746 }
5747 }
5748 }
5749 }
5750
5751 FOR_EACH_VEC_ELT (phis, i, exit_phi)
5752 {
5753 /* Replace the uses: */
5754 orig_name = PHI_RESULT (exit_phi);
5755 scalar_result = scalar_results[k];
5756 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name)
5757 FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
5758 SET_USE (use_p, scalar_result);
5759 }
5760
5761 phis.release ();
5762 }
5763 }
5764
5765 /* Return a vector of type VECTYPE that is equal to the vector select
5766 operation "MASK ? VEC : IDENTITY". Insert the select statements
5767 before GSI. */
5768
5769 static tree
5770 merge_with_identity (gimple_stmt_iterator *gsi, tree mask, tree vectype,
5771 tree vec, tree identity)
5772 {
5773 tree cond = make_temp_ssa_name (vectype, NULL, "cond");
5774 gimple *new_stmt = gimple_build_assign (cond, VEC_COND_EXPR,
5775 mask, vec, identity);
5776 gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
5777 return cond;
5778 }
5779
5780 /* Successively apply CODE to each element of VECTOR_RHS, in left-to-right
5781 order, starting with LHS. Insert the extraction statements before GSI and
5782 associate the new scalar SSA names with variable SCALAR_DEST.
5783 Return the SSA name for the result. */
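/* For instance (a sketch with an assumed element count, not code generated
   verbatim by this function): open-coding a left-to-right PLUS_EXPR fold
   of a 4-element vector V into accumulator LHS corresponds to the scalar
   sequence

     double fold_left_plus_4 (double lhs, const double v[4])
     {
       lhs = lhs + v[0];
       lhs = lhs + v[1];
       lhs = lhs + v[2];
       lhs = lhs + v[3];
       return lhs;
     }

   with each v[i] obtained through a BIT_FIELD_REF extraction. */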
5784
5785 static tree
5786 vect_expand_fold_left (gimple_stmt_iterator *gsi, tree scalar_dest,
5787 tree_code code, tree lhs, tree vector_rhs)
5788 {
5789 tree vectype = TREE_TYPE (vector_rhs);
5790 tree scalar_type = TREE_TYPE (vectype);
5791 tree bitsize = TYPE_SIZE (scalar_type);
5792 unsigned HOST_WIDE_INT vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype));
5793 unsigned HOST_WIDE_INT element_bitsize = tree_to_uhwi (bitsize);
5794
5795 for (unsigned HOST_WIDE_INT bit_offset = 0;
5796 bit_offset < vec_size_in_bits;
5797 bit_offset += element_bitsize)
5798 {
5799 tree bitpos = bitsize_int (bit_offset);
5800 tree rhs = build3 (BIT_FIELD_REF, scalar_type, vector_rhs,
5801 bitsize, bitpos);
5802
5803 gassign *stmt = gimple_build_assign (scalar_dest, rhs);
5804 rhs = make_ssa_name (scalar_dest, stmt);
5805 gimple_assign_set_lhs (stmt, rhs);
5806 gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
5807
5808 stmt = gimple_build_assign (scalar_dest, code, lhs, rhs);
5809 tree new_name = make_ssa_name (scalar_dest, stmt);
5810 gimple_assign_set_lhs (stmt, new_name);
5811 gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
5812 lhs = new_name;
5813 }
5814 return lhs;
5815 }
5816
5817 /* Perform an in-order reduction (FOLD_LEFT_REDUCTION). STMT_INFO is the
5818 statement that sets the live-out value. REDUC_DEF_STMT is the phi
5819 statement. CODE is the operation performed by STMT_INFO and OPS are
5820 its scalar operands. REDUC_INDEX is the index of the operand in
5821 OPS that is set by REDUC_DEF_STMT. REDUC_FN is the function that
5822 implements in-order reduction, or IFN_LAST if we should open-code it.
5823 VECTYPE_IN is the type of the vector input. MASKS specifies the masks
5824 that should be used to control the operation in a fully-masked loop. */
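/* A hedged illustration of why in-order evaluation matters (example loop
   assumed, not taken from this file): for

     double s = 0.0;
     for (int i = 0; i < n; ++i)
       s += a[i];

   compiled without -ffast-math, floating-point additions may not be
   reassociated, so instead of building per-lane partial sums we must fold
   each vector of A into S strictly from left to right, which is what an
   in-order reduction ifn such as IFN_FOLD_LEFT_PLUS (or the open-coded
   expansion above) provides. */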
5825
5826 static bool
5827 vectorize_fold_left_reduction (stmt_vec_info stmt_info,
5828 gimple_stmt_iterator *gsi,
5829 stmt_vec_info *vec_stmt, slp_tree slp_node,
5830 gimple *reduc_def_stmt,
5831 tree_code code, internal_fn reduc_fn,
5832 tree ops[3], tree vectype_in,
5833 int reduc_index, vec_loop_masks *masks)
5834 {
5835 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5836 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
5837 tree vectype_out = STMT_VINFO_VECTYPE (stmt_info);
5838 stmt_vec_info new_stmt_info = NULL;
5839
5840 int ncopies;
5841 if (slp_node)
5842 ncopies = 1;
5843 else
5844 ncopies = vect_get_num_copies (loop_vinfo, vectype_in);
5845
5846 gcc_assert (!nested_in_vect_loop_p (loop, stmt_info));
5847 gcc_assert (ncopies == 1);
5848 gcc_assert (TREE_CODE_LENGTH (code) == binary_op);
5849 gcc_assert (reduc_index == (code == MINUS_EXPR ? 0 : 1));
5850 gcc_assert (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
5851 == FOLD_LEFT_REDUCTION);
5852
5853 if (slp_node)
5854 gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (vectype_out),
5855 TYPE_VECTOR_SUBPARTS (vectype_in)));
5856
5857 tree op0 = ops[1 - reduc_index];
5858
5859 int group_size = 1;
5860 stmt_vec_info scalar_dest_def_info;
5861 auto_vec<tree> vec_oprnds0;
5862 if (slp_node)
5863 {
5864 vect_get_vec_defs (op0, NULL_TREE, stmt_info, &vec_oprnds0, NULL,
5865 slp_node);
5866 group_size = SLP_TREE_SCALAR_STMTS (slp_node).length ();
5867 scalar_dest_def_info = SLP_TREE_SCALAR_STMTS (slp_node)[group_size - 1];
5868 }
5869 else
5870 {
5871 tree loop_vec_def0 = vect_get_vec_def_for_operand (op0, stmt_info);
5872 vec_oprnds0.create (1);
5873 vec_oprnds0.quick_push (loop_vec_def0);
5874 scalar_dest_def_info = stmt_info;
5875 }
5876
5877 tree scalar_dest = gimple_assign_lhs (scalar_dest_def_info->stmt);
5878 tree scalar_type = TREE_TYPE (scalar_dest);
5879 tree reduc_var = gimple_phi_result (reduc_def_stmt);
5880
5881 int vec_num = vec_oprnds0.length ();
5882 gcc_assert (vec_num == 1 || slp_node);
5883 tree vec_elem_type = TREE_TYPE (vectype_out);
5884 gcc_checking_assert (useless_type_conversion_p (scalar_type, vec_elem_type));
5885
5886 tree vector_identity = NULL_TREE;
5887 if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
5888 vector_identity = build_zero_cst (vectype_out);
5889
5890 tree scalar_dest_var = vect_create_destination_var (scalar_dest, NULL);
5891 int i;
5892 tree def0;
5893 FOR_EACH_VEC_ELT (vec_oprnds0, i, def0)
5894 {
5895 gimple *new_stmt;
5896 tree mask = NULL_TREE;
5897 if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
5898 mask = vect_get_loop_mask (gsi, masks, vec_num, vectype_in, i);
5899
5900 /* Handle MINUS by adding the negative. */
5901 if (reduc_fn != IFN_LAST && code == MINUS_EXPR)
5902 {
5903 tree negated = make_ssa_name (vectype_out);
5904 new_stmt = gimple_build_assign (negated, NEGATE_EXPR, def0);
5905 gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
5906 def0 = negated;
5907 }
5908
5909 if (mask)
5910 def0 = merge_with_identity (gsi, mask, vectype_out, def0,
5911 vector_identity);
5912
5913 /* On the first iteration the input is simply the scalar phi
5914 result, and for subsequent iterations it is the output of
5915 the preceding operation. */
5916 if (reduc_fn != IFN_LAST)
5917 {
5918 new_stmt = gimple_build_call_internal (reduc_fn, 2, reduc_var, def0);
5919 /* For chained SLP reductions the output of the previous reduction
5920 operation serves as the input of the next. For the final statement
5921 the output cannot be a temporary - we reuse the original
5922 scalar destination of the last statement. */
5923 if (i != vec_num - 1)
5924 {
5925 gimple_set_lhs (new_stmt, scalar_dest_var);
5926 reduc_var = make_ssa_name (scalar_dest_var, new_stmt);
5927 gimple_set_lhs (new_stmt, reduc_var);
5928 }
5929 }
5930 else
5931 {
5932 reduc_var = vect_expand_fold_left (gsi, scalar_dest_var, code,
5933 reduc_var, def0);
5934 new_stmt = SSA_NAME_DEF_STMT (reduc_var);
5935 /* Remove the statement, so that we can use the same code paths
5936 as for statements that we've just created. */
5937 gimple_stmt_iterator tmp_gsi = gsi_for_stmt (new_stmt);
5938 gsi_remove (&tmp_gsi, false);
5939 }
5940
5941 if (i == vec_num - 1)
5942 {
5943 gimple_set_lhs (new_stmt, scalar_dest);
5944 new_stmt_info = vect_finish_replace_stmt (scalar_dest_def_info,
5945 new_stmt);
5946 }
5947 else
5948 new_stmt_info = vect_finish_stmt_generation (scalar_dest_def_info,
5949 new_stmt, gsi);
5950
5951 if (slp_node)
5952 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
5953 }
5954
5955 if (!slp_node)
5956 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
5957
5958 return true;
5959 }
5960
5961 /* Function is_nonwrapping_integer_induction.
5962
5963 Check if the induction described by STMT_VINFO (which is part of loop
5964 LOOP) both increments and does not cause overflow. */
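/* Worked example with assumed numbers: for a 16-bit unsigned IV with base
   60000, step 3 and at most 3000 iterations, the largest value reached is
   60000 + 3 * 3000 = 69000, which needs 17 bits of precision; since
   17 > TYPE_PRECISION (16) the function below rejects it. */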
5965
5966 static bool
5967 is_nonwrapping_integer_induction (stmt_vec_info stmt_vinfo, struct loop *loop)
5968 {
5969 gphi *phi = as_a <gphi *> (stmt_vinfo->stmt);
5970 tree base = STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (stmt_vinfo);
5971 tree step = STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo);
5972 tree lhs_type = TREE_TYPE (gimple_phi_result (phi));
5973 widest_int ni, max_loop_value, lhs_max;
5974 wi::overflow_type overflow = wi::OVF_NONE;
5975
5976 /* Make sure the loop is integer based. */
5977 if (TREE_CODE (base) != INTEGER_CST
5978 || TREE_CODE (step) != INTEGER_CST)
5979 return false;
5980
5981 /* Check that the max size of the loop will not wrap. */
5982
5983 if (TYPE_OVERFLOW_UNDEFINED (lhs_type))
5984 return true;
5985
5986 if (! max_stmt_executions (loop, &ni))
5987 return false;
5988
5989 max_loop_value = wi::mul (wi::to_widest (step), ni, TYPE_SIGN (lhs_type),
5990 &overflow);
5991 if (overflow)
5992 return false;
5993
5994 max_loop_value = wi::add (wi::to_widest (base), max_loop_value,
5995 TYPE_SIGN (lhs_type), &overflow);
5996 if (overflow)
5997 return false;
5998
5999 return (wi::min_precision (max_loop_value, TYPE_SIGN (lhs_type))
6000 <= TYPE_PRECISION (lhs_type));
6001 }
6002
6003 /* Function vectorizable_reduction.
6004
6005 Check if STMT_INFO performs a reduction operation that can be vectorized.
6006 If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized
6007 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
6008 Return true if STMT_INFO is vectorizable in this way.
6009
6010 This function also handles reduction idioms (patterns) that have been
6011 recognized in advance during vect_pattern_recog. In this case, STMT_INFO
6012 may be of this form:
6013 X = pattern_expr (arg0, arg1, ..., X)
6014 and its STMT_VINFO_RELATED_STMT points to the last stmt in the original
6015 sequence that had been detected and replaced by the pattern-stmt
6016 (STMT_INFO).
6017
6018 This function also handles reduction of condition expressions, for example:
6019 for (int i = 0; i < N; i++)
6020 if (a[i] < value)
6021 last = a[i];
6022 This is handled by vectorising the loop and creating an additional vector
6023 containing the loop indexes for which "a[i] < value" was true. In the
6024 function epilogue this is reduced to a single max value and then used to
6025 index into the vector of results.
6026
6027 In some cases of reduction patterns, the type of the reduction variable X is
6028 different than the type of the other arguments of STMT_INFO.
6029 In such cases, the vectype that is used when transforming STMT_INFO into
6030 a vector stmt is different than the vectype that is used to determine the
6031 vectorization factor, because it consists of a different number of elements
6032 than the actual number of elements that are being operated upon in parallel.
6033
6034 For example, consider an accumulation of shorts into an int accumulator.
6035 On some targets it's possible to vectorize this pattern operating on 8
6036 shorts at a time (hence, the vectype for purposes of determining the
6037 vectorization factor should be V8HI); on the other hand, the vectype that
6038 is used to create the vector form is actually V4SI (the type of the result).
6039
6040 Upon entry to this function, STMT_VINFO_VECTYPE records the vectype that
6041 indicates what is the actual level of parallelism (V8HI in the example), so
6042 that the right vectorization factor would be derived. This vectype
6043 corresponds to the type of arguments to the reduction stmt, and should *NOT*
6044 be used to create the vectorized stmt. The right vectype for the vectorized
6045 stmt is obtained from the type of the result X:
6046 get_vectype_for_scalar_type (TREE_TYPE (X))
6047
6048 This means that, contrary to "regular" reductions (or "regular" stmts in
6049 general), the following equation:
6050 STMT_VINFO_VECTYPE == get_vectype_for_scalar_type (TREE_TYPE (X))
6051 does *NOT* necessarily hold for reduction patterns. */
6052
6053 bool
6054 vectorizable_reduction (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
6055 stmt_vec_info *vec_stmt, slp_tree slp_node,
6056 slp_instance slp_node_instance,
6057 stmt_vector_for_cost *cost_vec)
6058 {
6059 tree vec_dest;
6060 tree scalar_dest;
6061 tree vectype_out = STMT_VINFO_VECTYPE (stmt_info);
6062 tree vectype_in = NULL_TREE;
6063 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
6064 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
6065 enum tree_code code, orig_code;
6066 internal_fn reduc_fn;
6067 machine_mode vec_mode;
6068 int op_type;
6069 optab optab;
6070 tree new_temp = NULL_TREE;
6071 enum vect_def_type dt, cond_reduc_dt = vect_unknown_def_type;
6072 stmt_vec_info cond_stmt_vinfo = NULL;
6073 enum tree_code cond_reduc_op_code = ERROR_MARK;
6074 tree scalar_type;
6075 bool is_simple_use;
6076 int i;
6077 int ncopies;
6078 int epilog_copies;
6079 stmt_vec_info prev_stmt_info, prev_phi_info;
6080 bool single_defuse_cycle = false;
6081 stmt_vec_info new_stmt_info = NULL;
6082 int j;
6083 tree ops[3];
6084 enum vect_def_type dts[3];
6085 bool nested_cycle = false, found_nested_cycle_def = false;
6086 bool double_reduc = false;
6087 basic_block def_bb;
6088 struct loop * def_stmt_loop;
6089 tree def_arg;
6090 auto_vec<tree> vec_oprnds0;
6091 auto_vec<tree> vec_oprnds1;
6092 auto_vec<tree> vec_oprnds2;
6093 auto_vec<tree> vect_defs;
6094 auto_vec<stmt_vec_info> phis;
6095 int vec_num;
6096 tree def0, tem;
6097 tree cr_index_scalar_type = NULL_TREE, cr_index_vector_type = NULL_TREE;
6098 tree cond_reduc_val = NULL_TREE;
6099
6100 /* Make sure it was already recognized as a reduction computation. */
6101 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_reduction_def
6102 && STMT_VINFO_DEF_TYPE (stmt_info) != vect_nested_cycle)
6103 return false;
6104
6105 if (nested_in_vect_loop_p (loop, stmt_info))
6106 {
6107 loop = loop->inner;
6108 nested_cycle = true;
6109 }
6110
6111 if (REDUC_GROUP_FIRST_ELEMENT (stmt_info))
6112 gcc_assert (slp_node
6113 && REDUC_GROUP_FIRST_ELEMENT (stmt_info) == stmt_info);
6114
6115 if (gphi *phi = dyn_cast <gphi *> (stmt_info->stmt))
6116 {
6117 tree phi_result = gimple_phi_result (phi);
6118 /* Analysis is fully done on the reduction stmt invocation. */
6119 if (! vec_stmt)
6120 {
6121 if (slp_node)
6122 slp_node_instance->reduc_phis = slp_node;
6123
6124 STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type;
6125 return true;
6126 }
6127
6128 if (STMT_VINFO_REDUC_TYPE (stmt_info) == FOLD_LEFT_REDUCTION)
6129 /* Leave the scalar phi in place. Note that checking
6130 STMT_VINFO_VEC_REDUCTION_TYPE (as below) only works
6131 for reductions involving a single statement. */
6132 return true;
6133
6134 stmt_vec_info reduc_stmt_info = STMT_VINFO_REDUC_DEF (stmt_info);
6135 if (STMT_VINFO_IN_PATTERN_P (reduc_stmt_info))
6136 reduc_stmt_info = STMT_VINFO_RELATED_STMT (reduc_stmt_info);
6137
6138 if (STMT_VINFO_VEC_REDUCTION_TYPE (reduc_stmt_info)
6139 == EXTRACT_LAST_REDUCTION)
6140 /* Leave the scalar phi in place. */
6141 return true;
6142
6143 gassign *reduc_stmt = as_a <gassign *> (reduc_stmt_info->stmt);
6144 for (unsigned k = 1; k < gimple_num_ops (reduc_stmt); ++k)
6145 {
6146 tree op = gimple_op (reduc_stmt, k);
6147 if (op == phi_result)
6148 continue;
6149 if (k == 1
6150 && gimple_assign_rhs_code (reduc_stmt) == COND_EXPR)
6151 continue;
6152 if (!vectype_in
6153 || (GET_MODE_SIZE (SCALAR_TYPE_MODE (TREE_TYPE (vectype_in)))
6154 < GET_MODE_SIZE (SCALAR_TYPE_MODE (TREE_TYPE (op)))))
6155 vectype_in = get_vectype_for_scalar_type (TREE_TYPE (op));
6156 break;
6157 }
6158 gcc_assert (vectype_in);
6159
6160 if (slp_node)
6161 ncopies = 1;
6162 else
6163 ncopies = vect_get_num_copies (loop_vinfo, vectype_in);
6164
6165 stmt_vec_info use_stmt_info;
6166 if (ncopies > 1
6167 && STMT_VINFO_RELEVANT (reduc_stmt_info) <= vect_used_only_live
6168 && (use_stmt_info = loop_vinfo->lookup_single_use (phi_result))
6169 && (use_stmt_info == reduc_stmt_info
6170 || STMT_VINFO_RELATED_STMT (use_stmt_info) == reduc_stmt_info))
6171 single_defuse_cycle = true;
6172
6173 /* Create the destination vector */
6174 scalar_dest = gimple_assign_lhs (reduc_stmt);
6175 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
6176
6177 if (slp_node)
6178 /* The size vect_schedule_slp_instance computes is off for us. */
6179 vec_num = vect_get_num_vectors
6180 (LOOP_VINFO_VECT_FACTOR (loop_vinfo)
6181 * SLP_TREE_SCALAR_STMTS (slp_node).length (),
6182 vectype_in);
6183 else
6184 vec_num = 1;
6185
6186 /* Generate the reduction PHIs upfront. */
6187 prev_phi_info = NULL;
6188 for (j = 0; j < ncopies; j++)
6189 {
6190 if (j == 0 || !single_defuse_cycle)
6191 {
6192 for (i = 0; i < vec_num; i++)
6193 {
6194 /* Create the reduction-phi that defines the reduction
6195 operand. */
6196 gimple *new_phi = create_phi_node (vec_dest, loop->header);
6197 stmt_vec_info new_phi_info = loop_vinfo->add_stmt (new_phi);
6198
6199 if (slp_node)
6200 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_phi_info);
6201 else
6202 {
6203 if (j == 0)
6204 STMT_VINFO_VEC_STMT (stmt_info)
6205 = *vec_stmt = new_phi_info;
6206 else
6207 STMT_VINFO_RELATED_STMT (prev_phi_info) = new_phi_info;
6208 prev_phi_info = new_phi_info;
6209 }
6210 }
6211 }
6212 }
6213
6214 return true;
6215 }
6216
6217 /* 1. Is vectorizable reduction? */
6218 /* Not supportable if the reduction variable is used in the loop, unless
6219 it's a reduction chain. */
6220 if (STMT_VINFO_RELEVANT (stmt_info) > vect_used_in_outer
6221 && !REDUC_GROUP_FIRST_ELEMENT (stmt_info))
6222 return false;
6223
6224 /* Reductions that are not used even in an enclosing outer-loop
6225 are expected to be "live" (used out of the loop). */
6226 if (STMT_VINFO_RELEVANT (stmt_info) == vect_unused_in_scope
6227 && !STMT_VINFO_LIVE_P (stmt_info))
6228 return false;
6229
6230 /* 2. Has this been recognized as a reduction pattern?
6231
6232 Check if STMT represents a pattern that has been recognized
6233 in earlier analysis stages. For stmts that represent a pattern,
6234 the STMT_VINFO_RELATED_STMT field records the last stmt in
6235 the original sequence that constitutes the pattern. */
6236
6237 stmt_vec_info orig_stmt_info = STMT_VINFO_RELATED_STMT (stmt_info);
6238 if (orig_stmt_info)
6239 {
6240 gcc_assert (STMT_VINFO_IN_PATTERN_P (orig_stmt_info));
6241 gcc_assert (!STMT_VINFO_IN_PATTERN_P (stmt_info));
6242 }
6243
6244 /* 3. Check the operands of the operation. The first operands are defined
6245 inside the loop body. The last operand is the reduction variable,
6246 which is defined by the loop-header-phi. */
6247
6248 gassign *stmt = as_a <gassign *> (stmt_info->stmt);
6249
6250 /* Flatten RHS. */
6251 switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
6252 {
6253 case GIMPLE_BINARY_RHS:
6254 code = gimple_assign_rhs_code (stmt);
6255 op_type = TREE_CODE_LENGTH (code);
6256 gcc_assert (op_type == binary_op);
6257 ops[0] = gimple_assign_rhs1 (stmt);
6258 ops[1] = gimple_assign_rhs2 (stmt);
6259 break;
6260
6261 case GIMPLE_TERNARY_RHS:
6262 code = gimple_assign_rhs_code (stmt);
6263 op_type = TREE_CODE_LENGTH (code);
6264 gcc_assert (op_type == ternary_op);
6265 ops[0] = gimple_assign_rhs1 (stmt);
6266 ops[1] = gimple_assign_rhs2 (stmt);
6267 ops[2] = gimple_assign_rhs3 (stmt);
6268 break;
6269
6270 case GIMPLE_UNARY_RHS:
6271 return false;
6272
6273 default:
6274 gcc_unreachable ();
6275 }
6276
6277 if (code == COND_EXPR && slp_node)
6278 return false;
6279
6280 scalar_dest = gimple_assign_lhs (stmt);
6281 scalar_type = TREE_TYPE (scalar_dest);
6282 if (!POINTER_TYPE_P (scalar_type) && !INTEGRAL_TYPE_P (scalar_type)
6283 && !SCALAR_FLOAT_TYPE_P (scalar_type))
6284 return false;
6285
6286 /* Do not try to vectorize bit-precision reductions. */
6287 if (!type_has_mode_precision_p (scalar_type))
6288 return false;
6289
6290 /* All uses but the last are expected to be defined in the loop.
6291 The last use is the reduction variable. In case of a nested cycle this
6292 assumption is not true: we use reduc_index to record the index of the
6293 reduction variable. */
6294 stmt_vec_info reduc_def_info = NULL;
6295 int reduc_index = -1;
6296 for (i = 0; i < op_type; i++)
6297 {
6298 /* The condition of COND_EXPR is checked in vectorizable_condition(). */
6299 if (i == 0 && code == COND_EXPR)
6300 continue;
6301
6302 stmt_vec_info def_stmt_info;
6303 is_simple_use = vect_is_simple_use (ops[i], loop_vinfo, &dts[i], &tem,
6304 &def_stmt_info);
6305 dt = dts[i];
6306 gcc_assert (is_simple_use);
6307 if (dt == vect_reduction_def)
6308 {
6309 reduc_def_info = def_stmt_info;
6310 reduc_index = i;
6311 continue;
6312 }
6313 else if (tem)
6314 {
6315 /* To properly compute ncopies we are interested in the widest
6316 input type in case we're looking at a widening accumulation. */
6317 if (!vectype_in
6318 || (GET_MODE_SIZE (SCALAR_TYPE_MODE (TREE_TYPE (vectype_in)))
6319 < GET_MODE_SIZE (SCALAR_TYPE_MODE (TREE_TYPE (tem)))))
6320 vectype_in = tem;
6321 }
6322
6323 if (dt != vect_internal_def
6324 && dt != vect_external_def
6325 && dt != vect_constant_def
6326 && dt != vect_induction_def
6327 && !(dt == vect_nested_cycle && nested_cycle))
6328 return false;
6329
6330 if (dt == vect_nested_cycle)
6331 {
6332 found_nested_cycle_def = true;
6333 reduc_def_info = def_stmt_info;
6334 reduc_index = i;
6335 }
6336
6337 if (i == 1 && code == COND_EXPR)
6338 {
6339 /* Record how value of COND_EXPR is defined. */
6340 if (dt == vect_constant_def)
6341 {
6342 cond_reduc_dt = dt;
6343 cond_reduc_val = ops[i];
6344 }
6345 if (dt == vect_induction_def
6346 && def_stmt_info
6347 && is_nonwrapping_integer_induction (def_stmt_info, loop))
6348 {
6349 cond_reduc_dt = dt;
6350 cond_stmt_vinfo = def_stmt_info;
6351 }
6352 }
6353 }
6354
6355 if (!vectype_in)
6356 vectype_in = vectype_out;
6357
6358 /* When vectorizing a reduction chain w/o SLP the reduction PHI is not
6359 directly used in the stmt. */
6360 if (reduc_index == -1)
6361 {
6362 if (STMT_VINFO_REDUC_TYPE (stmt_info) == FOLD_LEFT_REDUCTION)
6363 {
6364 if (dump_enabled_p ())
6365 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6366 "in-order reduction chain without SLP.\n");
6367 return false;
6368 }
6369
6370 if (orig_stmt_info)
6371 reduc_def_info = STMT_VINFO_REDUC_DEF (orig_stmt_info);
6372 else
6373 reduc_def_info = STMT_VINFO_REDUC_DEF (stmt_info);
6374 }
6375
6376 if (! reduc_def_info)
6377 return false;
6378
6379 gphi *reduc_def_phi = dyn_cast <gphi *> (reduc_def_info->stmt);
6380 if (!reduc_def_phi)
6381 return false;
6382
6383 if (!(reduc_index == -1
6384 || dts[reduc_index] == vect_reduction_def
6385 || dts[reduc_index] == vect_nested_cycle
6386 || ((dts[reduc_index] == vect_internal_def
6387 || dts[reduc_index] == vect_external_def
6388 || dts[reduc_index] == vect_constant_def
6389 || dts[reduc_index] == vect_induction_def)
6390 && nested_cycle && found_nested_cycle_def)))
6391 {
6392 /* For pattern recognized stmts, orig_stmt might be a reduction,
6393 but some helper statements for the pattern might not, or
6394 might be COND_EXPRs with reduction uses in the condition. */
6395 gcc_assert (orig_stmt_info);
6396 return false;
6397 }
6398
6399 /* PHIs should not participate in patterns. */
6400 gcc_assert (!STMT_VINFO_RELATED_STMT (reduc_def_info));
6401 enum vect_reduction_type v_reduc_type
6402 = STMT_VINFO_REDUC_TYPE (reduc_def_info);
6403 stmt_vec_info tmp = STMT_VINFO_REDUC_DEF (reduc_def_info);
6404
6405 STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) = v_reduc_type;
6406 /* If we have a condition reduction, see if we can simplify it further. */
6407 if (v_reduc_type == COND_REDUCTION)
6408 {
6409 /* TODO: We can't yet handle reduction chains, since we need to treat
6410 each COND_EXPR in the chain specially, not just the last one.
6411 E.g. for:
6412
6413 x_1 = PHI <x_3, ...>
6414 x_2 = a_2 ? ... : x_1;
6415 x_3 = a_3 ? ... : x_2;
6416
6417 we're interested in the last element in x_3 for which a_2 || a_3
6418 is true, whereas the current reduction chain handling would
6419 vectorize x_2 as a normal VEC_COND_EXPR and only treat x_3
6420 as a reduction operation. */
6421 if (reduc_index == -1)
6422 {
6423 if (dump_enabled_p ())
6424 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6425 "conditional reduction chains not supported\n");
6426 return false;
6427 }
6428
6429 /* vect_is_simple_reduction ensured that operand 2 is the
6430 loop-carried operand. */
6431 gcc_assert (reduc_index == 2);
6432
6433 /* Loop peeling modifies the initial value of the reduction PHI,
6434 which makes the reduction stmt to be transformed differ from the
6435 original stmt that was analyzed. We need to record the reduction
6436 code for a CONST_COND_REDUCTION type reduction at analysis time so
6437 that it can be used directly at transform time. */
6438 if (STMT_VINFO_VEC_CONST_COND_REDUC_CODE (stmt_info) == MAX_EXPR
6439 || STMT_VINFO_VEC_CONST_COND_REDUC_CODE (stmt_info) == MIN_EXPR)
6440 {
6441 /* Also set the reduction type to CONST_COND_REDUCTION. */
6442 gcc_assert (cond_reduc_dt == vect_constant_def);
6443 STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) = CONST_COND_REDUCTION;
6444 }
6445 else if (direct_internal_fn_supported_p (IFN_FOLD_EXTRACT_LAST,
6446 vectype_in, OPTIMIZE_FOR_SPEED))
6447 {
6448 if (dump_enabled_p ())
6449 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6450 "optimizing condition reduction with"
6451 " FOLD_EXTRACT_LAST.\n");
6452 STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) = EXTRACT_LAST_REDUCTION;
6453 }
6454 else if (cond_reduc_dt == vect_induction_def)
6455 {
6456 tree base
6457 = STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (cond_stmt_vinfo);
6458 tree step = STMT_VINFO_LOOP_PHI_EVOLUTION_PART (cond_stmt_vinfo);
6459
6460 gcc_assert (TREE_CODE (base) == INTEGER_CST
6461 && TREE_CODE (step) == INTEGER_CST);
6462 cond_reduc_val = NULL_TREE;
6463 /* Find a suitable value: below base for MAX_EXPR, above base for
6464 MIN_EXPR. For now, punt if base is the minimum value of the type
6465 for MAX_EXPR or the maximum value of the type for MIN_EXPR. */
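/* For instance (values assumed for illustration): an IV that starts at 3
   and increases never drops below 3, so for MAX_EXPR any value below the
   base works as the "no hit" marker (the code below picks 0 for a positive
   base); for an IV that starts at 3 and decreases, MIN_EXPR is used and
   the marker becomes base + 1 = 4. */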
6466 if (tree_int_cst_sgn (step) == -1)
6467 {
6468 cond_reduc_op_code = MIN_EXPR;
6469 if (tree_int_cst_sgn (base) == -1)
6470 cond_reduc_val = build_int_cst (TREE_TYPE (base), 0);
6471 else if (tree_int_cst_lt (base,
6472 TYPE_MAX_VALUE (TREE_TYPE (base))))
6473 cond_reduc_val
6474 = int_const_binop (PLUS_EXPR, base, integer_one_node);
6475 }
6476 else
6477 {
6478 cond_reduc_op_code = MAX_EXPR;
6479 if (tree_int_cst_sgn (base) == 1)
6480 cond_reduc_val = build_int_cst (TREE_TYPE (base), 0);
6481 else if (tree_int_cst_lt (TYPE_MIN_VALUE (TREE_TYPE (base)),
6482 base))
6483 cond_reduc_val
6484 = int_const_binop (MINUS_EXPR, base, integer_one_node);
6485 }
6486 if (cond_reduc_val)
6487 {
6488 if (dump_enabled_p ())
6489 dump_printf_loc (MSG_NOTE, vect_location,
6490 "condition expression based on "
6491 "integer induction.\n");
6492 STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
6493 = INTEGER_INDUC_COND_REDUCTION;
6494 }
6495 }
6496 else if (cond_reduc_dt == vect_constant_def)
6497 {
6498 enum vect_def_type cond_initial_dt;
6499 gimple *def_stmt = SSA_NAME_DEF_STMT (ops[reduc_index]);
6500 tree cond_initial_val
6501 = PHI_ARG_DEF_FROM_EDGE (def_stmt, loop_preheader_edge (loop));
6502
6503 gcc_assert (cond_reduc_val != NULL_TREE);
6504 vect_is_simple_use (cond_initial_val, loop_vinfo, &cond_initial_dt);
6505 if (cond_initial_dt == vect_constant_def
6506 && types_compatible_p (TREE_TYPE (cond_initial_val),
6507 TREE_TYPE (cond_reduc_val)))
6508 {
6509 tree e = fold_binary (LE_EXPR, boolean_type_node,
6510 cond_initial_val, cond_reduc_val);
6511 if (e && (integer_onep (e) || integer_zerop (e)))
6512 {
6513 if (dump_enabled_p ())
6514 dump_printf_loc (MSG_NOTE, vect_location,
6515 "condition expression based on "
6516 "compile time constant.\n");
6517 /* Record reduction code at analysis stage. */
6518 STMT_VINFO_VEC_CONST_COND_REDUC_CODE (stmt_info)
6519 = integer_onep (e) ? MAX_EXPR : MIN_EXPR;
6520 STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
6521 = CONST_COND_REDUCTION;
6522 }
6523 }
6524 }
6525 }
6526
6527 if (orig_stmt_info)
6528 gcc_assert (tmp == orig_stmt_info
6529 || REDUC_GROUP_FIRST_ELEMENT (tmp) == orig_stmt_info);
6530 else
6531 /* We changed STMT to be the first stmt in the reduction chain, hence we
6532 check that in this case the first element in the chain is STMT. */
6533 gcc_assert (tmp == stmt_info
6534 || REDUC_GROUP_FIRST_ELEMENT (tmp) == stmt_info);
6535
6536 if (STMT_VINFO_LIVE_P (reduc_def_info))
6537 return false;
6538
6539 if (slp_node)
6540 ncopies = 1;
6541 else
6542 ncopies = vect_get_num_copies (loop_vinfo, vectype_in);
6543
6544 gcc_assert (ncopies >= 1);
6545
6546 vec_mode = TYPE_MODE (vectype_in);
6547 poly_uint64 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
6548
6549 if (code == COND_EXPR)
6550 {
6551 /* Only call during the analysis stage, otherwise we'll lose
6552 STMT_VINFO_TYPE. */
6553 if (!vec_stmt && !vectorizable_condition (stmt_info, gsi, NULL,
6554 ops[reduc_index], 0, NULL,
6555 cost_vec))
6556 {
6557 if (dump_enabled_p ())
6558 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6559 "unsupported condition in reduction\n");
6560 return false;
6561 }
6562 }
6563 else
6564 {
6565 /* 4. Supportable by target? */
6566
6567 if (code == LSHIFT_EXPR || code == RSHIFT_EXPR
6568 || code == LROTATE_EXPR || code == RROTATE_EXPR)
6569 {
6570 /* Shifts and rotates are only supported by vectorizable_shift,
6571 not vectorizable_reduction. */
6572 if (dump_enabled_p ())
6573 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6574 "unsupported shift or rotation.\n");
6575 return false;
6576 }
6577
6578 /* 4.1. check support for the operation in the loop */
6579 optab = optab_for_tree_code (code, vectype_in, optab_default);
6580 if (!optab)
6581 {
6582 if (dump_enabled_p ())
6583 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6584 "no optab.\n");
6585
6586 return false;
6587 }
6588
6589 if (optab_handler (optab, vec_mode) == CODE_FOR_nothing)
6590 {
6591 if (dump_enabled_p ())
6592 dump_printf (MSG_NOTE, "op not supported by target.\n");
6593
6594 if (maybe_ne (GET_MODE_SIZE (vec_mode), UNITS_PER_WORD)
6595 || !vect_worthwhile_without_simd_p (loop_vinfo, code))
6596 return false;
6597
6598 if (dump_enabled_p ())
6599 dump_printf (MSG_NOTE, "proceeding using word mode.\n");
6600 }
6601
6602 /* Worthwhile without SIMD support? */
6603 if (!VECTOR_MODE_P (TYPE_MODE (vectype_in))
6604 && !vect_worthwhile_without_simd_p (loop_vinfo, code))
6605 {
6606 if (dump_enabled_p ())
6607 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6608 "not worthwhile without SIMD support.\n");
6609
6610 return false;
6611 }
6612 }
6613
6614 /* 4.2. Check support for the epilog operation.
6615
6616 If STMT represents a reduction pattern, then the type of the
6617 reduction variable may be different than the type of the rest
6618 of the arguments. For example, consider the case of accumulation
6619 of shorts into an int accumulator; the original code:
6620 S1: int_a = (int) short_a;
6621 orig_stmt-> S2: int_acc = plus <int_a ,int_acc>;
6622
6623 was replaced with:
6624 STMT: int_acc = widen_sum <short_a, int_acc>
6625
6626 This means that:
6627 1. The tree-code that is used to create the vector operation in the
6628 epilog code (that reduces the partial results) is not the
6629 tree-code of STMT, but is rather the tree-code of the original
6630 stmt from the pattern that STMT is replacing. I.e, in the example
6631 above we want to use 'widen_sum' in the loop, but 'plus' in the
6632 epilog.
6633 2. The type (mode) we use to check available target support
6634 for the vector operation to be created in the *epilog*, is
6635 determined by the type of the reduction variable (in the example
6636 above we'd check this: optab_handler (plus_optab, vect_int_mode])).
6637 However the type (mode) we use to check available target support
6638 for the vector operation to be created *inside the loop*, is
6639 determined by the type of the other arguments to STMT (in the
6640 example we'd check this: optab_handler (widen_sum_optab,
6641 vect_short_mode)).
6642
6643 This is contrary to "regular" reductions, in which the types of all
6644 the arguments are the same as the type of the reduction variable.
6645 For "regular" reductions we can therefore use the same vector type
6646 (and also the same tree-code) when generating the epilog code and
6647 when generating the code inside the loop. */
6648
6649 vect_reduction_type reduction_type
6650 = STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info);
6651 if (orig_stmt_info
6652 && (reduction_type == TREE_CODE_REDUCTION
6653 || reduction_type == FOLD_LEFT_REDUCTION))
6654 {
6655 /* This is a reduction pattern: get the vectype from the type of the
6656 reduction variable, and get the tree-code from orig_stmt. */
6657 orig_code = gimple_assign_rhs_code (orig_stmt_info->stmt);
6658 gcc_assert (vectype_out);
6659 vec_mode = TYPE_MODE (vectype_out);
6660 }
6661 else
6662 {
6663 /* Regular reduction: the same vectype and tree-code that are used for
6664 the vector code inside the loop can also be used for the epilog code. */
6665 orig_code = code;
6666
6667 if (code == MINUS_EXPR)
6668 orig_code = PLUS_EXPR;
6669
6670 /* For simple condition reductions, replace with the actual expression
6671 we want to base our reduction around. */
6672 if (reduction_type == CONST_COND_REDUCTION)
6673 {
6674 orig_code = STMT_VINFO_VEC_CONST_COND_REDUC_CODE (stmt_info);
6675 gcc_assert (orig_code == MAX_EXPR || orig_code == MIN_EXPR);
6676 }
6677 else if (reduction_type == INTEGER_INDUC_COND_REDUCTION)
6678 orig_code = cond_reduc_op_code;
6679 }
6680
6681 if (nested_cycle)
6682 {
6683 def_bb = gimple_bb (reduc_def_phi);
6684 def_stmt_loop = def_bb->loop_father;
6685 def_arg = PHI_ARG_DEF_FROM_EDGE (reduc_def_phi,
6686 loop_preheader_edge (def_stmt_loop));
6687 stmt_vec_info def_arg_stmt_info = loop_vinfo->lookup_def (def_arg);
6688 if (def_arg_stmt_info
6689 && (STMT_VINFO_DEF_TYPE (def_arg_stmt_info)
6690 == vect_double_reduction_def))
6691 double_reduc = true;
6692 }
6693
6694 reduc_fn = IFN_LAST;
6695
6696 if (reduction_type == TREE_CODE_REDUCTION
6697 || reduction_type == FOLD_LEFT_REDUCTION
6698 || reduction_type == INTEGER_INDUC_COND_REDUCTION
6699 || reduction_type == CONST_COND_REDUCTION)
6700 {
6701 if (reduction_type == FOLD_LEFT_REDUCTION
6702 ? fold_left_reduction_fn (orig_code, &reduc_fn)
6703 : reduction_fn_for_scalar_code (orig_code, &reduc_fn))
6704 {
6705 if (reduc_fn != IFN_LAST
6706 && !direct_internal_fn_supported_p (reduc_fn, vectype_out,
6707 OPTIMIZE_FOR_SPEED))
6708 {
6709 if (dump_enabled_p ())
6710 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6711 "reduc op not supported by target.\n");
6712
6713 reduc_fn = IFN_LAST;
6714 }
6715 }
6716 else
6717 {
6718 if (!nested_cycle || double_reduc)
6719 {
6720 if (dump_enabled_p ())
6721 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6722 "no reduc code for scalar code.\n");
6723
6724 return false;
6725 }
6726 }
6727 }
6728 else if (reduction_type == COND_REDUCTION)
6729 {
6730 int scalar_precision
6731 = GET_MODE_PRECISION (SCALAR_TYPE_MODE (scalar_type));
6732 cr_index_scalar_type = make_unsigned_type (scalar_precision);
6733 cr_index_vector_type = build_vector_type (cr_index_scalar_type,
6734 nunits_out);
6735
6736 if (direct_internal_fn_supported_p (IFN_REDUC_MAX, cr_index_vector_type,
6737 OPTIMIZE_FOR_SPEED))
6738 reduc_fn = IFN_REDUC_MAX;
6739 }
6740
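/* An illustrative sketch of a condition reduction (the example is made
   up, not taken from this file):

     last = init;
     for (i = 0; i < n; i++)
       if (a[i] < b[i])
         last = c[i];

   Alongside the data values, the vectorizer maintains a vector of
   1-based iteration indices recording where the condition held; the
   epilog reduces that vector with IFN_REDUC_MAX to find the last
   matching iteration and uses it to select the final value of 'last'.
   Index 0 is reserved for "no match", which is why the iteration
   count must fit in the index type less one (checked further below).  */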
6741 if (reduction_type != EXTRACT_LAST_REDUCTION
6742 && reduc_fn == IFN_LAST
6743 && !nunits_out.is_constant ())
6744 {
6745 if (dump_enabled_p ())
6746 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6747 "missing target support for reduction on"
6748 " variable-length vectors.\n");
6749 return false;
6750 }
6751
6752 if ((double_reduc || reduction_type != TREE_CODE_REDUCTION)
6753 && ncopies > 1)
6754 {
6755 if (dump_enabled_p ())
6756 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6757 "multiple types in double reduction or condition "
6758 "reduction.\n");
6759 return false;
6760 }
6761
6762 /* For SLP reductions, see if there is a neutral value we can use. */
6763 tree neutral_op = NULL_TREE;
6764 if (slp_node)
6765 neutral_op = neutral_op_for_slp_reduction
6766 (slp_node_instance->reduc_phis, code,
6767 REDUC_GROUP_FIRST_ELEMENT (stmt_info) != NULL_STMT_VEC_INFO);
6768
6769 if (double_reduc && reduction_type == FOLD_LEFT_REDUCTION)
6770 {
6771 /* We can't support in-order reductions of code such as this:
6772
6773 for (int i = 0; i < n1; ++i)
6774 for (int j = 0; j < n2; ++j)
6775 l += a[j];
6776
6777 since GCC effectively transforms the loop when vectorizing:
6778
6779 for (int i = 0; i < n1 / VF; ++i)
6780 for (int j = 0; j < n2; ++j)
6781 for (int k = 0; k < VF; ++k)
6782 l += a[j];
6783
6784 which is a reassociation of the original operation. */
6785 if (dump_enabled_p ())
6786 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6787 "in-order double reduction not supported.\n");
6788
6789 return false;
6790 }
6791
6792 if (reduction_type == FOLD_LEFT_REDUCTION
6793 && slp_node
6794 && !REDUC_GROUP_FIRST_ELEMENT (stmt_info))
6795 {
6796 /* We cannot use in-order reductions in this case because there is
6797 an implicit reassociation of the operations involved. */
6798 if (dump_enabled_p ())
6799 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6800 "in-order unchained SLP reductions not supported.\n");
6801 return false;
6802 }
6803
6804 /* For double reductions, and for SLP reductions with a neutral value,
6805 we construct a variable-length initial vector by loading a vector
6806 full of the neutral value and then shift-and-inserting the start
6807 values into the low-numbered elements. */
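/* For example (illustrative only): for a variable-length vector sum
   reduction starting from ACC, the initial vector is a splat of the
   neutral value 0 into which ACC is shift-and-inserted, giving
   { ACC, 0, 0, ... } for whatever the runtime vector length is.  */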
6808 if ((double_reduc || neutral_op)
6809 && !nunits_out.is_constant ()
6810 && !direct_internal_fn_supported_p (IFN_VEC_SHL_INSERT,
6811 vectype_out, OPTIMIZE_FOR_SPEED))
6812 {
6813 if (dump_enabled_p ())
6814 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6815 "reduction on variable-length vectors requires"
6816 " target support for a vector-shift-and-insert"
6817 " operation.\n");
6818 return false;
6819 }
6820
6821 /* Check extra constraints for variable-length unchained SLP reductions. */
6822 if (STMT_SLP_TYPE (stmt_info)
6823 && !REDUC_GROUP_FIRST_ELEMENT (stmt_info)
6824 && !nunits_out.is_constant ())
6825 {
6826 /* We checked above that we could build the initial vector when
6827 there's a neutral element value. Check here for the case in
6828 which each SLP statement has its own initial value and in which
6829 that value needs to be repeated for every instance of the
6830 statement within the initial vector. */
6831 unsigned int group_size = SLP_TREE_SCALAR_STMTS (slp_node).length ();
6832 scalar_mode elt_mode = SCALAR_TYPE_MODE (TREE_TYPE (vectype_out));
6833 if (!neutral_op
6834 && !can_duplicate_and_interleave_p (group_size, elt_mode))
6835 {
6836 if (dump_enabled_p ())
6837 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6838 "unsupported form of SLP reduction for"
6839 " variable-length vectors: cannot build"
6840 " initial vector.\n");
6841 return false;
6842 }
6843 /* The epilogue code relies on the number of elements being a multiple
6844 of the group size. The duplicate-and-interleave approach to setting
6845 up the initial vector does too. */
6846 if (!multiple_p (nunits_out, group_size))
6847 {
6848 if (dump_enabled_p ())
6849 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6850 "unsupported form of SLP reduction for"
6851 " variable-length vectors: the vector size"
6852 " is not a multiple of the number of results.\n");
6853 return false;
6854 }
6855 }
6856
6857 /* In case of widening multiplication by a constant, we update the type
6858 of the constant to be the type of the other operand. We check that the
6859 constant fits the type in the pattern recognition pass. */
6860 if (code == DOT_PROD_EXPR
6861 && !types_compatible_p (TREE_TYPE (ops[0]), TREE_TYPE (ops[1])))
6862 {
6863 if (TREE_CODE (ops[0]) == INTEGER_CST)
6864 ops[0] = fold_convert (TREE_TYPE (ops[1]), ops[0]);
6865 else if (TREE_CODE (ops[1]) == INTEGER_CST)
6866 ops[1] = fold_convert (TREE_TYPE (ops[0]), ops[1]);
6867 else
6868 {
6869 if (dump_enabled_p ())
6870 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6871 "invalid types in dot-prod\n");
6872
6873 return false;
6874 }
6875 }
6876
6877 if (reduction_type == COND_REDUCTION)
6878 {
6879 widest_int ni;
6880
6881 if (! max_loop_iterations (loop, &ni))
6882 {
6883 if (dump_enabled_p ())
6884 dump_printf_loc (MSG_NOTE, vect_location,
6885 "loop count not known, cannot create cond "
6886 "reduction.\n");
6887 return false;
6888 }
6889 /* Convert backedges to iterations. */
6890 ni += 1;
6891
6892 /* The additional index will be the same type as the condition. Check
6893 that the loop iteration count fits into this type less one (the
6894 zero slot is reserved for the case in which there are no matches). */
6895 tree max_index = TYPE_MAX_VALUE (cr_index_scalar_type);
6896 if (wi::geu_p (ni, wi::to_widest (max_index)))
6897 {
6898 if (dump_enabled_p ())
6899 dump_printf_loc (MSG_NOTE, vect_location,
6900 "loop size is greater than data size.\n");
6901 return false;
6902 }
6903 }
6904
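/* For example (illustrative numbers only): if the reduction's scalar
   type is an 8-bit type, the index type built above is an 8-bit
   unsigned type, so only loops known to run fewer than 255 iterations
   are accepted here (index 0 being the "no match" slot); a loop that
   may run 300 times is rejected.  */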
6905 /* In case the vectorization factor (VF) is bigger than the number
6906 of elements that we can fit in a vectype (nunits), we have to generate
6907 more than one vector stmt, i.e. we need to "unroll" the
6908 vector stmt by a factor VF/nunits. For more details see documentation
6909 in vectorizable_operation. */
6910
6911 /* If the reduction is used in an outer loop we need to generate
6912 VF intermediate results, like so (e.g. for ncopies=2):
6913 r0 = phi (init, r0)
6914 r1 = phi (init, r1)
6915 r0 = x0 + r0;
6916 r1 = x1 + r1;
6917 (i.e. we generate VF results in 2 registers).
6918 In this case we have a separate def-use cycle for each copy, and therefore
6919 for each copy we get the vector def for the reduction variable from the
6920 respective phi node created for this copy.
6921
6922 Otherwise (the reduction is unused in the loop nest), we can combine
6923 together intermediate results, like so (e.g. for ncopies=2):
6924 r = phi (init, r)
6925 r = x0 + r;
6926 r = x1 + r;
6927 (i.e. we generate VF/2 results in a single register).
6928 In this case for each copy we get the vector def for the reduction variable
6929 from the vectorized reduction operation generated in the previous iteration.
6930
6931 This only works when we see both the reduction PHI and its only consumer
6932 in vectorizable_reduction and there are no intermediate stmts
6933 participating. */
6934 stmt_vec_info use_stmt_info;
6935 tree reduc_phi_result = gimple_phi_result (reduc_def_phi);
6936 if (ncopies > 1
6937 && (STMT_VINFO_RELEVANT (stmt_info) <= vect_used_only_live)
6938 && (use_stmt_info = loop_vinfo->lookup_single_use (reduc_phi_result))
6939 && (use_stmt_info == stmt_info
6940 || STMT_VINFO_RELATED_STMT (use_stmt_info) == stmt_info))
6941 {
6942 single_defuse_cycle = true;
6943 epilog_copies = 1;
6944 }
6945 else
6946 epilog_copies = ncopies;
6947
6948 /* If the reduction stmt is one of the patterns that have lane
6949 reduction embedded we cannot handle the case of ! single_defuse_cycle. */
6950 if ((ncopies > 1
6951 && ! single_defuse_cycle)
6952 && (code == DOT_PROD_EXPR
6953 || code == WIDEN_SUM_EXPR
6954 || code == SAD_EXPR))
6955 {
6956 if (dump_enabled_p ())
6957 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6958 "multi def-use cycle not possible for lane-reducing "
6959 "reduction operation\n");
6960 return false;
6961 }
6962
6963 if (slp_node)
6964 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
6965 else
6966 vec_num = 1;
6967
6968 internal_fn cond_fn = get_conditional_internal_fn (code);
6969 vec_loop_masks *masks = &LOOP_VINFO_MASKS (loop_vinfo);
6970
6971 if (!vec_stmt) /* transformation not required. */
6972 {
6973 vect_model_reduction_cost (stmt_info, reduc_fn, ncopies, cost_vec);
6974 if (loop_vinfo && LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo))
6975 {
6976 if (reduction_type != FOLD_LEFT_REDUCTION
6977 && (cond_fn == IFN_LAST
6978 || !direct_internal_fn_supported_p (cond_fn, vectype_in,
6979 OPTIMIZE_FOR_SPEED)))
6980 {
6981 if (dump_enabled_p ())
6982 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6983 "can't use a fully-masked loop because no"
6984 " conditional operation is available.\n");
6985 LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
6986 }
6987 else if (reduc_index == -1)
6988 {
6989 if (dump_enabled_p ())
6990 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6991 "can't use a fully-masked loop for chained"
6992 " reductions.\n");
6993 LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
6994 }
6995 else
6996 vect_record_loop_mask (loop_vinfo, masks, ncopies * vec_num,
6997 vectype_in);
6998 }
6999 if (dump_enabled_p ()
7000 && reduction_type == FOLD_LEFT_REDUCTION)
7001 dump_printf_loc (MSG_NOTE, vect_location,
7002 "using an in-order (fold-left) reduction.\n");
7003 STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type;
7004 return true;
7005 }
7006
7007 /* Transform. */
7008
7009 if (dump_enabled_p ())
7010 dump_printf_loc (MSG_NOTE, vect_location, "transform reduction.\n");
7011
7012 /* FORNOW: Multiple types are not supported for condition. */
7013 if (code == COND_EXPR)
7014 gcc_assert (ncopies == 1);
7015
7016 bool masked_loop_p = LOOP_VINFO_FULLY_MASKED_P (loop_vinfo);
7017
7018 if (reduction_type == FOLD_LEFT_REDUCTION)
7019 return vectorize_fold_left_reduction
7020 (stmt_info, gsi, vec_stmt, slp_node, reduc_def_phi, code,
7021 reduc_fn, ops, vectype_in, reduc_index, masks);
7022
7023 if (reduction_type == EXTRACT_LAST_REDUCTION)
7024 {
7025 gcc_assert (!slp_node);
7026 return vectorizable_condition (stmt_info, gsi, vec_stmt,
7027 NULL, reduc_index, NULL, NULL);
7028 }
7029
7030 /* Create the destination vector */
7031 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
7032
7033 prev_stmt_info = NULL;
7034 prev_phi_info = NULL;
7035 if (!slp_node)
7036 {
7037 vec_oprnds0.create (1);
7038 vec_oprnds1.create (1);
7039 if (op_type == ternary_op)
7040 vec_oprnds2.create (1);
7041 }
7042
7043 phis.create (vec_num);
7044 vect_defs.create (vec_num);
7045 if (!slp_node)
7046 vect_defs.quick_push (NULL_TREE);
7047
7048 if (slp_node)
7049 phis.splice (SLP_TREE_VEC_STMTS (slp_node_instance->reduc_phis));
7050 else
7051 phis.quick_push (STMT_VINFO_VEC_STMT (reduc_def_info));
7052
7053 for (j = 0; j < ncopies; j++)
7054 {
7055 if (code == COND_EXPR)
7056 {
7057 gcc_assert (!slp_node);
7058 vectorizable_condition (stmt_info, gsi, vec_stmt,
7059 PHI_RESULT (phis[0]->stmt),
7060 reduc_index, NULL, NULL);
7061 /* Multiple types are not supported for condition. */
7062 break;
7063 }
7064
7065 /* Handle uses. */
7066 if (j == 0)
7067 {
7068 if (slp_node)
7069 {
7070 /* Get vec defs for all the operands except the reduction index,
7071 ensuring the ordering of the ops in the vector is kept. */
7072 auto_vec<tree, 3> slp_ops;
7073 auto_vec<vec<tree>, 3> vec_defs;
7074
7075 slp_ops.quick_push (ops[0]);
7076 slp_ops.quick_push (ops[1]);
7077 if (op_type == ternary_op)
7078 slp_ops.quick_push (ops[2]);
7079
7080 vect_get_slp_defs (slp_ops, slp_node, &vec_defs);
7081
7082 vec_oprnds0.safe_splice (vec_defs[0]);
7083 vec_defs[0].release ();
7084 vec_oprnds1.safe_splice (vec_defs[1]);
7085 vec_defs[1].release ();
7086 if (op_type == ternary_op)
7087 {
7088 vec_oprnds2.safe_splice (vec_defs[2]);
7089 vec_defs[2].release ();
7090 }
7091 }
7092 else
7093 {
7094 vec_oprnds0.quick_push
7095 (vect_get_vec_def_for_operand (ops[0], stmt_info));
7096 vec_oprnds1.quick_push
7097 (vect_get_vec_def_for_operand (ops[1], stmt_info));
7098 if (op_type == ternary_op)
7099 vec_oprnds2.quick_push
7100 (vect_get_vec_def_for_operand (ops[2], stmt_info));
7101 }
7102 }
7103 else
7104 {
7105 if (!slp_node)
7106 {
7107 gcc_assert (reduc_index != -1 || ! single_defuse_cycle);
7108
7109 if (single_defuse_cycle && reduc_index == 0)
7110 vec_oprnds0[0] = gimple_get_lhs (new_stmt_info->stmt);
7111 else
7112 vec_oprnds0[0]
7113 = vect_get_vec_def_for_stmt_copy (loop_vinfo,
7114 vec_oprnds0[0]);
7115 if (single_defuse_cycle && reduc_index == 1)
7116 vec_oprnds1[0] = gimple_get_lhs (new_stmt_info->stmt);
7117 else
7118 vec_oprnds1[0]
7119 = vect_get_vec_def_for_stmt_copy (loop_vinfo,
7120 vec_oprnds1[0]);
7121 if (op_type == ternary_op)
7122 {
7123 if (single_defuse_cycle && reduc_index == 2)
7124 vec_oprnds2[0] = gimple_get_lhs (new_stmt_info->stmt);
7125 else
7126 vec_oprnds2[0]
7127 = vect_get_vec_def_for_stmt_copy (loop_vinfo,
7128 vec_oprnds2[0]);
7129 }
7130 }
7131 }
7132
7133 FOR_EACH_VEC_ELT (vec_oprnds0, i, def0)
7134 {
7135 tree vop[3] = { def0, vec_oprnds1[i], NULL_TREE };
7136 if (masked_loop_p)
7137 {
7138 /* Make sure that the reduction accumulator is vop[0]. */
7139 if (reduc_index == 1)
7140 {
7141 gcc_assert (commutative_tree_code (code));
7142 std::swap (vop[0], vop[1]);
7143 }
7144 tree mask = vect_get_loop_mask (gsi, masks, vec_num * ncopies,
7145 vectype_in, i * ncopies + j);
7146 gcall *call = gimple_build_call_internal (cond_fn, 4, mask,
7147 vop[0], vop[1],
7148 vop[0]);
7149 new_temp = make_ssa_name (vec_dest, call);
7150 gimple_call_set_lhs (call, new_temp);
7151 gimple_call_set_nothrow (call, true);
7152 new_stmt_info
7153 = vect_finish_stmt_generation (stmt_info, call, gsi);
7154 }
7155 else
7156 {
7157 if (op_type == ternary_op)
7158 vop[2] = vec_oprnds2[i];
7159
7160 gassign *new_stmt = gimple_build_assign (vec_dest, code,
7161 vop[0], vop[1], vop[2]);
7162 new_temp = make_ssa_name (vec_dest, new_stmt);
7163 gimple_assign_set_lhs (new_stmt, new_temp);
7164 new_stmt_info
7165 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
7166 }
7167
7168 if (slp_node)
7169 {
7170 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
7171 vect_defs.quick_push (new_temp);
7172 }
7173 else
7174 vect_defs[0] = new_temp;
7175 }
7176
7177 if (slp_node)
7178 continue;
7179
7180 if (j == 0)
7181 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
7182 else
7183 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
7184
7185 prev_stmt_info = new_stmt_info;
7186 }
7187
7188 /* Finalize the reduction-phi (set its arguments) and create the
7189 epilog reduction code. */
7190 if ((!single_defuse_cycle || code == COND_EXPR) && !slp_node)
7191 vect_defs[0] = gimple_get_lhs ((*vec_stmt)->stmt);
7192
7193 vect_create_epilog_for_reduction (vect_defs, stmt_info, reduc_def_phi,
7194 epilog_copies, reduc_fn, phis,
7195 double_reduc, slp_node, slp_node_instance,
7196 cond_reduc_val, cond_reduc_op_code,
7197 neutral_op);
7198
7199 return true;
7200 }
7201
7202 /* Function vect_min_worthwhile_factor.
7203
7204 For a loop where we could vectorize the operation indicated by CODE,
7205 return the minimum vectorization factor that makes it worthwhile
7206 to use generic vectors. */
7207 static unsigned int
7208 vect_min_worthwhile_factor (enum tree_code code)
7209 {
7210 switch (code)
7211 {
7212 case PLUS_EXPR:
7213 case MINUS_EXPR:
7214 case NEGATE_EXPR:
7215 return 4;
7216
7217 case BIT_AND_EXPR:
7218 case BIT_IOR_EXPR:
7219 case BIT_XOR_EXPR:
7220 case BIT_NOT_EXPR:
7221 return 2;
7222
7223 default:
7224 return INT_MAX;
7225 }
7226 }
7227
7228 /* Return true if VINFO indicates we are doing loop vectorization and if
7229 it is worth decomposing CODE operations into scalar operations for
7230 that loop's vectorization factor. */
7231
7232 bool
7233 vect_worthwhile_without_simd_p (vec_info *vinfo, tree_code code)
7234 {
7235 loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
7236 unsigned HOST_WIDE_INT value;
7237 return (loop_vinfo
7238 && LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant (&value)
7239 && value >= vect_min_worthwhile_factor (code));
7240 }
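/* For example (an illustrative sketch): on a 32-bit target with no
   vector support for the operation, the loop

     unsigned char a[N], b[N];
     for (int i = 0; i < N; i++)
       a[i] &= b[i];

   can still be carried out four bytes at a time in plain word mode;
   with a vectorization factor of 4 the BIT_AND_EXPR threshold of 2
   above is met, so the word-mode fallback is considered worthwhile.  */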
7241
7242 /* Function vectorizable_induction
7243
7244 Check if STMT_INFO performs an induction computation that can be vectorized.
7245 If VEC_STMT is also passed, vectorize the induction PHI: create a vectorized
7246 phi to replace it, put it in VEC_STMT, and add it to the same basic block.
7247 Return true if STMT_INFO is vectorizable in this way. */
7248
7249 bool
7250 vectorizable_induction (stmt_vec_info stmt_info,
7251 gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
7252 stmt_vec_info *vec_stmt, slp_tree slp_node,
7253 stmt_vector_for_cost *cost_vec)
7254 {
7255 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
7256 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
7257 unsigned ncopies;
7258 bool nested_in_vect_loop = false;
7259 struct loop *iv_loop;
7260 tree vec_def;
7261 edge pe = loop_preheader_edge (loop);
7262 basic_block new_bb;
7263 tree new_vec, vec_init, vec_step, t;
7264 tree new_name;
7265 gimple *new_stmt;
7266 gphi *induction_phi;
7267 tree induc_def, vec_dest;
7268 tree init_expr, step_expr;
7269 poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
7270 unsigned i;
7271 tree expr;
7272 gimple_seq stmts;
7273 imm_use_iterator imm_iter;
7274 use_operand_p use_p;
7275 gimple *exit_phi;
7276 edge latch_e;
7277 tree loop_arg;
7278 gimple_stmt_iterator si;
7279
7280 gphi *phi = dyn_cast <gphi *> (stmt_info->stmt);
7281 if (!phi)
7282 return false;
7283
7284 if (!STMT_VINFO_RELEVANT_P (stmt_info))
7285 return false;
7286
7287 /* Make sure it was recognized as induction computation. */
7288 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_induction_def)
7289 return false;
7290
7291 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
7292 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
7293
7294 if (slp_node)
7295 ncopies = 1;
7296 else
7297 ncopies = vect_get_num_copies (loop_vinfo, vectype);
7298 gcc_assert (ncopies >= 1);
7299
7300 /* FORNOW. These restrictions should be relaxed. */
7301 if (nested_in_vect_loop_p (loop, stmt_info))
7302 {
7303 imm_use_iterator imm_iter;
7304 use_operand_p use_p;
7305 gimple *exit_phi;
7306 edge latch_e;
7307 tree loop_arg;
7308
7309 if (ncopies > 1)
7310 {
7311 if (dump_enabled_p ())
7312 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7313 "multiple types in nested loop.\n");
7314 return false;
7315 }
7316
7317 /* FORNOW: outer loop induction with SLP not supported. */
7318 if (STMT_SLP_TYPE (stmt_info))
7319 return false;
7320
7321 exit_phi = NULL;
7322 latch_e = loop_latch_edge (loop->inner);
7323 loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);
7324 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, loop_arg)
7325 {
7326 gimple *use_stmt = USE_STMT (use_p);
7327 if (is_gimple_debug (use_stmt))
7328 continue;
7329
7330 if (!flow_bb_inside_loop_p (loop->inner, gimple_bb (use_stmt)))
7331 {
7332 exit_phi = use_stmt;
7333 break;
7334 }
7335 }
7336 if (exit_phi)
7337 {
7338 stmt_vec_info exit_phi_vinfo = loop_vinfo->lookup_stmt (exit_phi);
7339 if (!(STMT_VINFO_RELEVANT_P (exit_phi_vinfo)
7340 && !STMT_VINFO_LIVE_P (exit_phi_vinfo)))
7341 {
7342 if (dump_enabled_p ())
7343 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7344 "inner-loop induction only used outside "
7345 "of the outer vectorized loop.\n");
7346 return false;
7347 }
7348 }
7349
7350 nested_in_vect_loop = true;
7351 iv_loop = loop->inner;
7352 }
7353 else
7354 iv_loop = loop;
7355 gcc_assert (iv_loop == (gimple_bb (phi))->loop_father);
7356
7357 if (slp_node && !nunits.is_constant ())
7358 {
7359 /* The current SLP code creates the initial value element-by-element. */
7360 if (dump_enabled_p ())
7361 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7362 "SLP induction not supported for variable-length"
7363 " vectors.\n");
7364 return false;
7365 }
7366
7367 if (!vec_stmt) /* transformation not required. */
7368 {
7369 STMT_VINFO_TYPE (stmt_info) = induc_vec_info_type;
7370 DUMP_VECT_SCOPE ("vectorizable_induction");
7371 vect_model_induction_cost (stmt_info, ncopies, cost_vec);
7372 return true;
7373 }
7374
7375 /* Transform. */
7376
7377 /* Compute a vector variable, initialized with the first VF values of
7378 the induction variable. E.g., for an iv with IV_PHI='X' and
7379 evolution S, for a vector of 4 units, we want to compute:
7380 [X, X + S, X + 2*S, X + 3*S]. */
7381
7382 if (dump_enabled_p ())
7383 dump_printf_loc (MSG_NOTE, vect_location, "transform induction phi.\n");
7384
7385 latch_e = loop_latch_edge (iv_loop);
7386 loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);
7387
7388 step_expr = STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_info);
7389 gcc_assert (step_expr != NULL_TREE);
7390
7391 pe = loop_preheader_edge (iv_loop);
7392 init_expr = PHI_ARG_DEF_FROM_EDGE (phi,
7393 loop_preheader_edge (iv_loop));
7394
7395 stmts = NULL;
7396 if (!nested_in_vect_loop)
7397 {
7398 /* Convert the initial value to the desired type. */
7399 tree new_type = TREE_TYPE (vectype);
7400 init_expr = gimple_convert (&stmts, new_type, init_expr);
7401
7402 /* If we are using the loop mask to "peel" for alignment then we need
7403 to adjust the start value here. */
7404 tree skip_niters = LOOP_VINFO_MASK_SKIP_NITERS (loop_vinfo);
7405 if (skip_niters != NULL_TREE)
7406 {
7407 if (FLOAT_TYPE_P (vectype))
7408 skip_niters = gimple_build (&stmts, FLOAT_EXPR, new_type,
7409 skip_niters);
7410 else
7411 skip_niters = gimple_convert (&stmts, new_type, skip_niters);
7412 tree skip_step = gimple_build (&stmts, MULT_EXPR, new_type,
7413 skip_niters, step_expr);
7414 init_expr = gimple_build (&stmts, MINUS_EXPR, new_type,
7415 init_expr, skip_step);
7416 }
7417 }
7418
7419 /* Convert the step to the desired type. */
7420 step_expr = gimple_convert (&stmts, TREE_TYPE (vectype), step_expr);
7421
7422 if (stmts)
7423 {
7424 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
7425 gcc_assert (!new_bb);
7426 }
7427
7428 /* Find the first insertion point in the BB. */
7429 basic_block bb = gimple_bb (phi);
7430 si = gsi_after_labels (bb);
7431
7432 /* For SLP induction we have to generate several IVs as for example
7433 with group size 3 we need [i, i, i, i + S] [i + S, i + S, i + 2*S, i + 2*S]
7434 [i + 2*S, i + 3*S, i + 3*S, i + 3*S]. The step is the same uniform
7435 [VF*S, VF*S, VF*S, VF*S] for all. */
7436 if (slp_node)
7437 {
7438 /* Enforced above. */
7439 unsigned int const_nunits = nunits.to_constant ();
7440
7441 /* Generate [VF*S, VF*S, ... ]. */
7442 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)))
7443 {
7444 expr = build_int_cst (integer_type_node, vf);
7445 expr = fold_convert (TREE_TYPE (step_expr), expr);
7446 }
7447 else
7448 expr = build_int_cst (TREE_TYPE (step_expr), vf);
7449 new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
7450 expr, step_expr);
7451 if (! CONSTANT_CLASS_P (new_name))
7452 new_name = vect_init_vector (stmt_info, new_name,
7453 TREE_TYPE (step_expr), NULL);
7454 new_vec = build_vector_from_val (vectype, new_name);
7455 vec_step = vect_init_vector (stmt_info, new_vec, vectype, NULL);
7456
7457 /* Now generate the IVs. */
7458 unsigned group_size = SLP_TREE_SCALAR_STMTS (slp_node).length ();
7459 unsigned nvects = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
7460 unsigned elts = const_nunits * nvects;
7461 unsigned nivs = least_common_multiple (group_size,
7462 const_nunits) / const_nunits;
7463 gcc_assert (elts % group_size == 0);
7464 tree elt = init_expr;
7465 unsigned ivn;
7466 for (ivn = 0; ivn < nivs; ++ivn)
7467 {
7468 tree_vector_builder elts (vectype, const_nunits, 1);
7469 stmts = NULL;
7470 for (unsigned eltn = 0; eltn < const_nunits; ++eltn)
7471 {
7472 if (ivn*const_nunits + eltn >= group_size
7473 && (ivn * const_nunits + eltn) % group_size == 0)
7474 elt = gimple_build (&stmts, PLUS_EXPR, TREE_TYPE (elt),
7475 elt, step_expr);
7476 elts.quick_push (elt);
7477 }
7478 vec_init = gimple_build_vector (&stmts, &elts);
7479 if (stmts)
7480 {
7481 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
7482 gcc_assert (!new_bb);
7483 }
7484
7485 /* Create the induction-phi that defines the induction-operand. */
7486 vec_dest = vect_get_new_vect_var (vectype, vect_simple_var, "vec_iv_");
7487 induction_phi = create_phi_node (vec_dest, iv_loop->header);
7488 stmt_vec_info induction_phi_info
7489 = loop_vinfo->add_stmt (induction_phi);
7490 induc_def = PHI_RESULT (induction_phi);
7491
7492 /* Create the iv update inside the loop */
7493 vec_def = make_ssa_name (vec_dest);
7494 new_stmt = gimple_build_assign (vec_def, PLUS_EXPR, induc_def, vec_step);
7495 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
7496 loop_vinfo->add_stmt (new_stmt);
7497
7498 /* Set the arguments of the phi node: */
7499 add_phi_arg (induction_phi, vec_init, pe, UNKNOWN_LOCATION);
7500 add_phi_arg (induction_phi, vec_def, loop_latch_edge (iv_loop),
7501 UNKNOWN_LOCATION);
7502
7503 SLP_TREE_VEC_STMTS (slp_node).quick_push (induction_phi_info);
7504 }
7505
7506 /* Re-use IVs when we can. */
7507 if (ivn < nvects)
7508 {
7509 unsigned vfp
7510 = least_common_multiple (group_size, const_nunits) / group_size;
7511 /* Generate [VF'*S, VF'*S, ... ]. */
7512 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)))
7513 {
7514 expr = build_int_cst (integer_type_node, vfp);
7515 expr = fold_convert (TREE_TYPE (step_expr), expr);
7516 }
7517 else
7518 expr = build_int_cst (TREE_TYPE (step_expr), vfp);
7519 new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
7520 expr, step_expr);
7521 if (! CONSTANT_CLASS_P (new_name))
7522 new_name = vect_init_vector (stmt_info, new_name,
7523 TREE_TYPE (step_expr), NULL);
7524 new_vec = build_vector_from_val (vectype, new_name);
7525 vec_step = vect_init_vector (stmt_info, new_vec, vectype, NULL);
7526 for (; ivn < nvects; ++ivn)
7527 {
7528 gimple *iv = SLP_TREE_VEC_STMTS (slp_node)[ivn - nivs]->stmt;
7529 tree def;
7530 if (gimple_code (iv) == GIMPLE_PHI)
7531 def = gimple_phi_result (iv);
7532 else
7533 def = gimple_assign_lhs (iv);
7534 new_stmt = gimple_build_assign (make_ssa_name (vectype),
7535 PLUS_EXPR,
7536 def, vec_step);
7537 if (gimple_code (iv) == GIMPLE_PHI)
7538 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
7539 else
7540 {
7541 gimple_stmt_iterator tgsi = gsi_for_stmt (iv);
7542 gsi_insert_after (&tgsi, new_stmt, GSI_CONTINUE_LINKING);
7543 }
7544 SLP_TREE_VEC_STMTS (slp_node).quick_push
7545 (loop_vinfo->add_stmt (new_stmt));
7546 }
7547 }
7548
7549 return true;
7550 }
7551
7552 /* Create the vector that holds the initial_value of the induction. */
7553 if (nested_in_vect_loop)
7554 {
7555 /* iv_loop is nested in the loop to be vectorized. init_expr has already
7556 been created during vectorization of previous stmts. We obtain it
7557 from the STMT_VINFO_VEC_STMT of the defining stmt. */
7558 vec_init = vect_get_vec_def_for_operand (init_expr, stmt_info);
7559 /* If the initial value is not of proper type, convert it. */
7560 if (!useless_type_conversion_p (vectype, TREE_TYPE (vec_init)))
7561 {
7562 new_stmt
7563 = gimple_build_assign (vect_get_new_ssa_name (vectype,
7564 vect_simple_var,
7565 "vec_iv_"),
7566 VIEW_CONVERT_EXPR,
7567 build1 (VIEW_CONVERT_EXPR, vectype,
7568 vec_init));
7569 vec_init = gimple_assign_lhs (new_stmt);
7570 new_bb = gsi_insert_on_edge_immediate (loop_preheader_edge (iv_loop),
7571 new_stmt);
7572 gcc_assert (!new_bb);
7573 loop_vinfo->add_stmt (new_stmt);
7574 }
7575 }
7576 else
7577 {
7578 /* iv_loop is the loop to be vectorized. Create:
7579 vec_init = [X, X+S, X+2*S, X+3*S] (S = step_expr, X = init_expr) */
7580 stmts = NULL;
7581 new_name = gimple_convert (&stmts, TREE_TYPE (vectype), init_expr);
7582
7583 unsigned HOST_WIDE_INT const_nunits;
7584 if (nunits.is_constant (&const_nunits))
7585 {
7586 tree_vector_builder elts (vectype, const_nunits, 1);
7587 elts.quick_push (new_name);
7588 for (i = 1; i < const_nunits; i++)
7589 {
7590 /* Create: new_name_i = new_name + step_expr */
7591 new_name = gimple_build (&stmts, PLUS_EXPR, TREE_TYPE (new_name),
7592 new_name, step_expr);
7593 elts.quick_push (new_name);
7594 }
7595 /* Create a vector from [new_name_0, new_name_1, ...,
7596 new_name_nunits-1] */
7597 vec_init = gimple_build_vector (&stmts, &elts);
7598 }
7599 else if (INTEGRAL_TYPE_P (TREE_TYPE (step_expr)))
7600 /* Build the initial value directly from a VEC_SERIES_EXPR. */
7601 vec_init = gimple_build (&stmts, VEC_SERIES_EXPR, vectype,
7602 new_name, step_expr);
7603 else
7604 {
7605 /* Build:
7606 [base, base, base, ...]
7607 + (vectype) [0, 1, 2, ...] * [step, step, step, ...]. */
7608 gcc_assert (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)));
7609 gcc_assert (flag_associative_math);
7610 tree index = build_index_vector (vectype, 0, 1);
7611 tree base_vec = gimple_build_vector_from_val (&stmts, vectype,
7612 new_name);
7613 tree step_vec = gimple_build_vector_from_val (&stmts, vectype,
7614 step_expr);
7615 vec_init = gimple_build (&stmts, FLOAT_EXPR, vectype, index);
7616 vec_init = gimple_build (&stmts, MULT_EXPR, vectype,
7617 vec_init, step_vec);
7618 vec_init = gimple_build (&stmts, PLUS_EXPR, vectype,
7619 vec_init, base_vec);
7620 }
7621
7622 if (stmts)
7623 {
7624 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
7625 gcc_assert (!new_bb);
7626 }
7627 }
7628
7629
7630 /* Create the vector that holds the step of the induction. */
7631 if (nested_in_vect_loop)
7632 /* iv_loop is nested in the loop to be vectorized. Generate:
7633 vec_step = [S, S, S, S] */
7634 new_name = step_expr;
7635 else
7636 {
7637 /* iv_loop is the loop to be vectorized. Generate:
7638 vec_step = [VF*S, VF*S, VF*S, VF*S] */
7639 gimple_seq seq = NULL;
7640 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)))
7641 {
7642 expr = build_int_cst (integer_type_node, vf);
7643 expr = gimple_build (&seq, FLOAT_EXPR, TREE_TYPE (step_expr), expr);
7644 }
7645 else
7646 expr = build_int_cst (TREE_TYPE (step_expr), vf);
7647 new_name = gimple_build (&seq, MULT_EXPR, TREE_TYPE (step_expr),
7648 expr, step_expr);
7649 if (seq)
7650 {
7651 new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
7652 gcc_assert (!new_bb);
7653 }
7654 }
7655
7656 t = unshare_expr (new_name);
7657 gcc_assert (CONSTANT_CLASS_P (new_name)
7658 || TREE_CODE (new_name) == SSA_NAME);
7659 new_vec = build_vector_from_val (vectype, t);
7660 vec_step = vect_init_vector (stmt_info, new_vec, vectype, NULL);
7661
7662
7663 /* Create the following def-use cycle:
7664 loop prolog:
7665 vec_init = ...
7666 vec_step = ...
7667 loop:
7668 vec_iv = PHI <vec_init, vec_loop>
7669 ...
7670 STMT
7671 ...
7672 vec_loop = vec_iv + vec_step; */
7673
7674 /* Create the induction-phi that defines the induction-operand. */
7675 vec_dest = vect_get_new_vect_var (vectype, vect_simple_var, "vec_iv_");
7676 induction_phi = create_phi_node (vec_dest, iv_loop->header);
7677 stmt_vec_info induction_phi_info = loop_vinfo->add_stmt (induction_phi);
7678 induc_def = PHI_RESULT (induction_phi);
7679
7680 /* Create the iv update inside the loop */
7681 vec_def = make_ssa_name (vec_dest);
7682 new_stmt = gimple_build_assign (vec_def, PLUS_EXPR, induc_def, vec_step);
7683 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
7684 stmt_vec_info new_stmt_info = loop_vinfo->add_stmt (new_stmt);
7685
7686 /* Set the arguments of the phi node: */
7687 add_phi_arg (induction_phi, vec_init, pe, UNKNOWN_LOCATION);
7688 add_phi_arg (induction_phi, vec_def, loop_latch_edge (iv_loop),
7689 UNKNOWN_LOCATION);
7690
7691 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = induction_phi_info;
7692
7693 /* In case the vectorization factor (VF) is bigger than the number
7694 of elements that we can fit in a vectype (nunits), we have to generate
7695 more than one vector stmt, i.e. we need to "unroll" the
7696 vector stmt by a factor VF/nunits. For more details see documentation
7697 in vectorizable_operation. */
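/* For example (illustrative only): with nunits == 4 and ncopies == 2,
   the first copy is the PHI-based IV [X, X+S, X+2*S, X+3*S] and the
   code below adds a splat of 4*S to it to obtain the second copy
   [X+4*S, X+5*S, X+6*S, X+7*S].  */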
7698
7699 if (ncopies > 1)
7700 {
7701 gimple_seq seq = NULL;
7702 stmt_vec_info prev_stmt_vinfo;
7703 /* FORNOW. This restriction should be relaxed. */
7704 gcc_assert (!nested_in_vect_loop);
7705
7706 /* Create the vector that holds the step of the induction. */
7707 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)))
7708 {
7709 expr = build_int_cst (integer_type_node, nunits);
7710 expr = gimple_build (&seq, FLOAT_EXPR, TREE_TYPE (step_expr), expr);
7711 }
7712 else
7713 expr = build_int_cst (TREE_TYPE (step_expr), nunits);
7714 new_name = gimple_build (&seq, MULT_EXPR, TREE_TYPE (step_expr),
7715 expr, step_expr);
7716 if (seq)
7717 {
7718 new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
7719 gcc_assert (!new_bb);
7720 }
7721
7722 t = unshare_expr (new_name);
7723 gcc_assert (CONSTANT_CLASS_P (new_name)
7724 || TREE_CODE (new_name) == SSA_NAME);
7725 new_vec = build_vector_from_val (vectype, t);
7726 vec_step = vect_init_vector (stmt_info, new_vec, vectype, NULL);
7727
7728 vec_def = induc_def;
7729 prev_stmt_vinfo = induction_phi_info;
7730 for (i = 1; i < ncopies; i++)
7731 {
7732 /* vec_i = vec_prev + vec_step */
7733 new_stmt = gimple_build_assign (vec_dest, PLUS_EXPR,
7734 vec_def, vec_step);
7735 vec_def = make_ssa_name (vec_dest, new_stmt);
7736 gimple_assign_set_lhs (new_stmt, vec_def);
7737
7738 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
7739 new_stmt_info = loop_vinfo->add_stmt (new_stmt);
7740 STMT_VINFO_RELATED_STMT (prev_stmt_vinfo) = new_stmt_info;
7741 prev_stmt_vinfo = new_stmt_info;
7742 }
7743 }
7744
7745 if (nested_in_vect_loop)
7746 {
7747 /* Find the loop-closed exit-phi of the induction, and record
7748 the final vector of induction results: */
7749 exit_phi = NULL;
7750 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, loop_arg)
7751 {
7752 gimple *use_stmt = USE_STMT (use_p);
7753 if (is_gimple_debug (use_stmt))
7754 continue;
7755
7756 if (!flow_bb_inside_loop_p (iv_loop, gimple_bb (use_stmt)))
7757 {
7758 exit_phi = use_stmt;
7759 break;
7760 }
7761 }
7762 if (exit_phi)
7763 {
7764 stmt_vec_info stmt_vinfo = loop_vinfo->lookup_stmt (exit_phi);
7765 /* FORNOW. Currently not supporting the case that an inner-loop induction
7766 is not used in the outer-loop (i.e. only outside the outer-loop). */
7767 gcc_assert (STMT_VINFO_RELEVANT_P (stmt_vinfo)
7768 && !STMT_VINFO_LIVE_P (stmt_vinfo));
7769
7770 STMT_VINFO_VEC_STMT (stmt_vinfo) = new_stmt_info;
7771 if (dump_enabled_p ())
7772 {
7773 dump_printf_loc (MSG_NOTE, vect_location,
7774 "vector of inductions after inner-loop:");
7775 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, new_stmt, 0);
7776 }
7777 }
7778 }
7779
7780
7781 if (dump_enabled_p ())
7782 {
7783 dump_printf_loc (MSG_NOTE, vect_location,
7784 "transform induction: created def-use cycle: ");
7785 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, induction_phi, 0);
7786 dump_gimple_stmt (MSG_NOTE, TDF_SLIM,
7787 SSA_NAME_DEF_STMT (vec_def), 0);
7788 }
7789
7790 return true;
7791 }
7792
7793 /* Function vectorizable_live_operation.
7794
7795 STMT_INFO computes a value that is used outside the loop. Check if
7796 it can be supported. */
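/* An illustrative sketch (made-up example, not from this file):

     for (i = 0; i < n; i++)
       last = a[i];
     ... use of 'last' after the loop ...

   Here 'last' is live after the loop; its final value is the last lane
   of the last vector of a[], which the code below extracts with a
   BIT_FIELD_REF or, for fully-masked loops, with EXTRACT_LAST.  */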
7797
7798 bool
7799 vectorizable_live_operation (stmt_vec_info stmt_info,
7800 gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
7801 slp_tree slp_node, int slp_index,
7802 stmt_vec_info *vec_stmt,
7803 stmt_vector_for_cost *)
7804 {
7805 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
7806 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
7807 imm_use_iterator imm_iter;
7808 tree lhs, lhs_type, bitsize, vec_bitsize;
7809 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
7810 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
7811 int ncopies;
7812 gimple *use_stmt;
7813 auto_vec<tree> vec_oprnds;
7814 int vec_entry = 0;
7815 poly_uint64 vec_index = 0;
7816
7817 gcc_assert (STMT_VINFO_LIVE_P (stmt_info));
7818
7819 if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def)
7820 return false;
7821
7822 /* FORNOW. CHECKME. */
7823 if (nested_in_vect_loop_p (loop, stmt_info))
7824 return false;
7825
7826 /* If STMT is not relevant and it is a simple assignment and its inputs are
7827 invariant then it can remain in place, unvectorized. The original last
7828 scalar value that it computes will be used. */
7829 if (!STMT_VINFO_RELEVANT_P (stmt_info))
7830 {
7831 gcc_assert (is_simple_and_all_uses_invariant (stmt_info, loop_vinfo));
7832 if (dump_enabled_p ())
7833 dump_printf_loc (MSG_NOTE, vect_location,
7834 "statement is simple and uses invariant. Leaving in "
7835 "place.\n");
7836 return true;
7837 }
7838
7839 if (slp_node)
7840 ncopies = 1;
7841 else
7842 ncopies = vect_get_num_copies (loop_vinfo, vectype);
7843
7844 if (slp_node)
7845 {
7846 gcc_assert (slp_index >= 0);
7847
7848 int num_scalar = SLP_TREE_SCALAR_STMTS (slp_node).length ();
7849 int num_vec = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
7850
7851 /* Get the last occurrence of the scalar index from the concatenation of
7852 all the slp vectors. Calculate which slp vector it is and the index
7853 within. */
7854 poly_uint64 pos = (num_vec * nunits) - num_scalar + slp_index;
7855
7856 /* Calculate which vector contains the result, and which lane of
7857 that vector we need. */
7858 if (!can_div_trunc_p (pos, nunits, &vec_entry, &vec_index))
7859 {
7860 if (dump_enabled_p ())
7861 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7862 "Cannot determine which vector holds the"
7863 " final result.\n");
7864 return false;
7865 }
7866 }
7867
7868 if (!vec_stmt)
7869 {
7870 /* No transformation required. */
7871 if (LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo))
7872 {
7873 if (!direct_internal_fn_supported_p (IFN_EXTRACT_LAST, vectype,
7874 OPTIMIZE_FOR_SPEED))
7875 {
7876 if (dump_enabled_p ())
7877 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7878 "can't use a fully-masked loop because "
7879 "the target doesn't support extract last "
7880 "reduction.\n");
7881 LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
7882 }
7883 else if (slp_node)
7884 {
7885 if (dump_enabled_p ())
7886 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7887 "can't use a fully-masked loop because an "
7888 "SLP statement is live after the loop.\n");
7889 LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
7890 }
7891 else if (ncopies > 1)
7892 {
7893 if (dump_enabled_p ())
7894 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7895 "can't use a fully-masked loop because"
7896 " ncopies is greater than 1.\n");
7897 LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
7898 }
7899 else
7900 {
7901 gcc_assert (ncopies == 1 && !slp_node);
7902 vect_record_loop_mask (loop_vinfo,
7903 &LOOP_VINFO_MASKS (loop_vinfo),
7904 1, vectype);
7905 }
7906 }
7907 return true;
7908 }
7909
7910 /* If stmt has a related stmt, then use that for getting the lhs. */
7911 gimple *stmt = (is_pattern_stmt_p (stmt_info)
7912 ? STMT_VINFO_RELATED_STMT (stmt_info)->stmt
7913 : stmt_info->stmt);
7914
7915 lhs = (is_a <gphi *> (stmt)) ? gimple_phi_result (stmt)
7916 : gimple_get_lhs (stmt);
7917 lhs_type = TREE_TYPE (lhs);
7918
7919 bitsize = (VECTOR_BOOLEAN_TYPE_P (vectype)
7920 ? bitsize_int (TYPE_PRECISION (TREE_TYPE (vectype)))
7921 : TYPE_SIZE (TREE_TYPE (vectype)));
7922 vec_bitsize = TYPE_SIZE (vectype);
7923
7924 /* Get the vectorized lhs of STMT and the lane to use (counted in bits). */
7925 tree vec_lhs, bitstart;
7926 if (slp_node)
7927 {
7928 gcc_assert (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo));
7929
7930 /* Get the correct slp vectorized stmt. */
7931 gimple *vec_stmt = SLP_TREE_VEC_STMTS (slp_node)[vec_entry]->stmt;
7932 if (gphi *phi = dyn_cast <gphi *> (vec_stmt))
7933 vec_lhs = gimple_phi_result (phi);
7934 else
7935 vec_lhs = gimple_get_lhs (vec_stmt);
7936
7937 /* Get entry to use. */
7938 bitstart = bitsize_int (vec_index);
7939 bitstart = int_const_binop (MULT_EXPR, bitsize, bitstart);
7940 }
7941 else
7942 {
7943 enum vect_def_type dt = STMT_VINFO_DEF_TYPE (stmt_info);
7944 vec_lhs = vect_get_vec_def_for_operand_1 (stmt_info, dt);
7945 gcc_checking_assert (ncopies == 1
7946 || !LOOP_VINFO_FULLY_MASKED_P (loop_vinfo));
7947
7948 /* For multiple copies, get the last copy. */
7949 for (int i = 1; i < ncopies; ++i)
7950 vec_lhs = vect_get_vec_def_for_stmt_copy (loop_vinfo, vec_lhs);
7951
7952 /* Get the last lane in the vector. */
7953 bitstart = int_const_binop (MINUS_EXPR, vec_bitsize, bitsize);
7954 }
7955
7956 gimple_seq stmts = NULL;
7957 tree new_tree;
7958 if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
7959 {
7960 /* Emit:
7961
7962 SCALAR_RES = EXTRACT_LAST <VEC_LHS, MASK>
7963
7964 where VEC_LHS is the vectorized live-out result and MASK is
7965 the loop mask for the final iteration. */
7966 gcc_assert (ncopies == 1 && !slp_node);
7967 tree scalar_type = TREE_TYPE (STMT_VINFO_VECTYPE (stmt_info));
7968 tree mask = vect_get_loop_mask (gsi, &LOOP_VINFO_MASKS (loop_vinfo),
7969 1, vectype, 0);
7970 tree scalar_res = gimple_build (&stmts, CFN_EXTRACT_LAST,
7971 scalar_type, mask, vec_lhs);
7972
7973 /* Convert the extracted vector element to the required scalar type. */
7974 new_tree = gimple_convert (&stmts, lhs_type, scalar_res);
7975 }
7976 else
7977 {
7978 tree bftype = TREE_TYPE (vectype);
7979 if (VECTOR_BOOLEAN_TYPE_P (vectype))
7980 bftype = build_nonstandard_integer_type (tree_to_uhwi (bitsize), 1);
7981 new_tree = build3 (BIT_FIELD_REF, bftype, vec_lhs, bitsize, bitstart);
7982 new_tree = force_gimple_operand (fold_convert (lhs_type, new_tree),
7983 &stmts, true, NULL_TREE);
7984 }
7985
7986 if (stmts)
7987 gsi_insert_seq_on_edge_immediate (single_exit (loop), stmts);
7988
7989 /* Replace the use of lhs with the newly computed result. If the use stmt
7990 is a single-argument PHI, just replace all uses of the PHI result; this
7991 is necessary because the lcssa PHI defining lhs may occur before the newly inserted stmt. */
7992 use_operand_p use_p;
7993 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, lhs)
7994 if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt))
7995 && !is_gimple_debug (use_stmt))
7996 {
7997 if (gimple_code (use_stmt) == GIMPLE_PHI
7998 && gimple_phi_num_args (use_stmt) == 1)
7999 {
8000 replace_uses_by (gimple_phi_result (use_stmt), new_tree);
8001 }
8002 else
8003 {
8004 FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
8005 SET_USE (use_p, new_tree);
8006 }
8007 update_stmt (use_stmt);
8008 }
8009
8010 return true;
8011 }
8012
8013 /* Kill any debug uses outside LOOP of SSA names defined in STMT_INFO. */
8014
8015 static void
8016 vect_loop_kill_debug_uses (struct loop *loop, stmt_vec_info stmt_info)
8017 {
8018 ssa_op_iter op_iter;
8019 imm_use_iterator imm_iter;
8020 def_operand_p def_p;
8021 gimple *ustmt;
8022
8023 FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt_info->stmt, op_iter, SSA_OP_DEF)
8024 {
8025 FOR_EACH_IMM_USE_STMT (ustmt, imm_iter, DEF_FROM_PTR (def_p))
8026 {
8027 basic_block bb;
8028
8029 if (!is_gimple_debug (ustmt))
8030 continue;
8031
8032 bb = gimple_bb (ustmt);
8033
8034 if (!flow_bb_inside_loop_p (loop, bb))
8035 {
8036 if (gimple_debug_bind_p (ustmt))
8037 {
8038 if (dump_enabled_p ())
8039 dump_printf_loc (MSG_NOTE, vect_location,
8040 "killing debug use\n");
8041
8042 gimple_debug_bind_reset_value (ustmt);
8043 update_stmt (ustmt);
8044 }
8045 else
8046 gcc_unreachable ();
8047 }
8048 }
8049 }
8050 }
8051
8052 /* Given loop represented by LOOP_VINFO, return true if computation of
8053 LOOP_VINFO_NITERS (= LOOP_VINFO_NITERSM1 + 1) doesn't overflow, false
8054 otherwise. */
8055
8056 static bool
8057 loop_niters_no_overflow (loop_vec_info loop_vinfo)
8058 {
8059 /* Constant case. */
8060 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
8061 {
8062 tree cst_niters = LOOP_VINFO_NITERS (loop_vinfo);
8063 tree cst_nitersm1 = LOOP_VINFO_NITERSM1 (loop_vinfo);
8064
8065 gcc_assert (TREE_CODE (cst_niters) == INTEGER_CST);
8066 gcc_assert (TREE_CODE (cst_nitersm1) == INTEGER_CST);
8067 if (wi::to_widest (cst_nitersm1) < wi::to_widest (cst_niters))
8068 return true;
8069 }
8070
8071 widest_int max;
8072 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
8073 /* Check the upper bound of loop niters. */
8074 if (get_max_loop_iterations (loop, &max))
8075 {
8076 tree type = TREE_TYPE (LOOP_VINFO_NITERS (loop_vinfo));
8077 signop sgn = TYPE_SIGN (type);
8078 widest_int type_max = widest_int::from (wi::max_value (type), sgn);
8079 if (max < type_max)
8080 return true;
8081 }
8082 return false;
8083 }
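/* For example (an illustrative sketch): if NITERSM1 is computed in a
   32-bit unsigned type and the loop may iterate 2^32 times (latch
   count 0xffffffff), then NITERS = NITERSM1 + 1 wraps to zero; neither
   the constant check nor the maximum-iteration bound above can rule
   that out, so the function returns false.  */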
8084
8085 /* Return a mask type with half as many elements as TYPE. */
8086
8087 tree
8088 vect_halve_mask_nunits (tree type)
8089 {
8090 poly_uint64 nunits = exact_div (TYPE_VECTOR_SUBPARTS (type), 2);
8091 return build_truth_vector_type (nunits, current_vector_size);
8092 }
8093
8094 /* Return a mask type with twice as many elements as TYPE. */
8095
8096 tree
8097 vect_double_mask_nunits (tree type)
8098 {
8099 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (type) * 2;
8100 return build_truth_vector_type (nunits, current_vector_size);
8101 }
8102
8103 /* Record that a fully-masked version of LOOP_VINFO would need MASKS to
8104 contain a sequence of NVECTORS masks that each control a vector of type
8105 VECTYPE. */
8106
8107 void
8108 vect_record_loop_mask (loop_vec_info loop_vinfo, vec_loop_masks *masks,
8109 unsigned int nvectors, tree vectype)
8110 {
8111 gcc_assert (nvectors != 0);
8112 if (masks->length () < nvectors)
8113 masks->safe_grow_cleared (nvectors);
8114 rgroup_masks *rgm = &(*masks)[nvectors - 1];
8115 /* The number of scalars per iteration and the number of vectors are
8116 both compile-time constants. */
8117 unsigned int nscalars_per_iter
8118 = exact_div (nvectors * TYPE_VECTOR_SUBPARTS (vectype),
8119 LOOP_VINFO_VECT_FACTOR (loop_vinfo)).to_constant ();
8120 if (rgm->max_nscalars_per_iter < nscalars_per_iter)
8121 {
8122 rgm->max_nscalars_per_iter = nscalars_per_iter;
8123 rgm->mask_type = build_same_sized_truth_vector_type (vectype);
8124 }
8125 }
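/* For example (illustrative numbers only): with a vectorization factor
   of 16, a call for 2 vectors of 8 elements gives this rgroup
   2 * 8 / 16 == 1 scalar per iteration, while a later call for
   2 vectors of 16 elements raises max_nscalars_per_iter to 2 and
   switches mask_type to a 16-element mask type.  */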
8126
8127 /* Given a complete set of masks MASKS, extract mask number INDEX
8128 for an rgroup that operates on NVECTORS vectors of type VECTYPE,
8129 where 0 <= INDEX < NVECTORS. Insert any set-up statements before GSI.
8130
8131 See the comment above vec_loop_masks for more details about the mask
8132 arrangement. */
8133
8134 tree
8135 vect_get_loop_mask (gimple_stmt_iterator *gsi, vec_loop_masks *masks,
8136 unsigned int nvectors, tree vectype, unsigned int index)
8137 {
8138 rgroup_masks *rgm = &(*masks)[nvectors - 1];
8139 tree mask_type = rgm->mask_type;
8140
8141 /* Populate the rgroup's mask array, if this is the first time we've
8142 used it. */
8143 if (rgm->masks.is_empty ())
8144 {
8145 rgm->masks.safe_grow_cleared (nvectors);
8146 for (unsigned int i = 0; i < nvectors; ++i)
8147 {
8148 tree mask = make_temp_ssa_name (mask_type, NULL, "loop_mask");
8149 /* Provide a dummy definition until the real one is available. */
8150 SSA_NAME_DEF_STMT (mask) = gimple_build_nop ();
8151 rgm->masks[i] = mask;
8152 }
8153 }
8154
8155 tree mask = rgm->masks[index];
8156 if (maybe_ne (TYPE_VECTOR_SUBPARTS (mask_type),
8157 TYPE_VECTOR_SUBPARTS (vectype)))
8158 {
8159 /* A loop mask for data type X can be reused for data type Y
8160 if X has N times more elements than Y and if Y's elements
8161 are N times bigger than X's. In this case each sequence
8162 of N elements in the loop mask will be all-zero or all-one.
8163 We can then view-convert the mask so that each sequence of
8164 N elements is replaced by a single element. */
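/* For example (illustrative only): a mask recorded for vectors of
   16 bytes can serve a statement operating on vectors of 8 halfwords;
   each pair of byte lanes is uniformly zero or one, so the
   VIEW_CONVERT_EXPR below folds every such pair into a single
   halfword lane.  */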
8165 gcc_assert (multiple_p (TYPE_VECTOR_SUBPARTS (mask_type),
8166 TYPE_VECTOR_SUBPARTS (vectype)));
8167 gimple_seq seq = NULL;
8168 mask_type = build_same_sized_truth_vector_type (vectype);
8169 mask = gimple_build (&seq, VIEW_CONVERT_EXPR, mask_type, mask);
8170 if (seq)
8171 gsi_insert_seq_before (gsi, seq, GSI_SAME_STMT);
8172 }
8173 return mask;
8174 }
8175
8176 /* Scale profiling counters by estimation for LOOP which is vectorized
8177 by factor VF. */
8178
8179 static void
8180 scale_profile_for_vect_loop (struct loop *loop, unsigned vf)
8181 {
8182 edge preheader = loop_preheader_edge (loop);
8183 /* Reduce loop iterations by the vectorization factor. */
8184 gcov_type new_est_niter = niter_for_unrolled_loop (loop, vf);
8185 profile_count freq_h = loop->header->count, freq_e = preheader->count ();
8186
8187 if (freq_h.nonzero_p ())
8188 {
8189 profile_probability p;
8190
8191 /* Avoid dropping loop body profile counter to 0 because of zero count
8192 in loop's preheader. */
8193 if (!(freq_e == profile_count::zero ()))
8194 freq_e = freq_e.force_nonzero ();
8195 p = freq_e.apply_scale (new_est_niter + 1, 1).probability_in (freq_h);
8196 scale_loop_frequencies (loop, p);
8197 }
8198
8199 edge exit_e = single_exit (loop);
8200 exit_e->probability = profile_probability::always ()
8201 .apply_scale (1, new_est_niter + 1);
8202
8203 edge exit_l = single_pred_edge (loop->latch);
8204 profile_probability prob = exit_l->probability;
8205 exit_l->probability = exit_e->probability.invert ();
8206 if (prob.initialized_p () && exit_l->probability.initialized_p ())
8207 scale_bbs_frequencies (&loop->latch, 1, exit_l->probability / prob);
8208 }
8209
8210 /* Vectorize STMT_INFO if relevant, inserting any new instructions before GSI.
8211 When vectorizing STMT_INFO as a store, set *SEEN_STORE to its stmt_vec_info.
8212 *SLP_SCHEDULED is a running record of whether we have called
8213 vect_schedule_slp. */
8214
8215 static void
8216 vect_transform_loop_stmt (loop_vec_info loop_vinfo, stmt_vec_info stmt_info,
8217 gimple_stmt_iterator *gsi,
8218 stmt_vec_info *seen_store, bool *slp_scheduled)
8219 {
8220 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
8221 poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
8222
8223 if (dump_enabled_p ())
8224 {
8225 dump_printf_loc (MSG_NOTE, vect_location,
8226 "------>vectorizing statement: ");
8227 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_info->stmt, 0);
8228 }
8229
8230 if (MAY_HAVE_DEBUG_BIND_STMTS && !STMT_VINFO_LIVE_P (stmt_info))
8231 vect_loop_kill_debug_uses (loop, stmt_info);
8232
8233 if (!STMT_VINFO_RELEVANT_P (stmt_info)
8234 && !STMT_VINFO_LIVE_P (stmt_info))
8235 return;
8236
8237 if (STMT_VINFO_VECTYPE (stmt_info))
8238 {
8239 poly_uint64 nunits
8240 = TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
8241 if (!STMT_SLP_TYPE (stmt_info)
8242 && maybe_ne (nunits, vf)
8243 && dump_enabled_p ())
8244 /* For SLP, VF is set according to the unrolling factor, not to the
8245 vector size, hence this diagnostic is not valid for SLP. */
8246 dump_printf_loc (MSG_NOTE, vect_location, "multiple-types.\n");
8247 }
8248
8249 /* SLP. Schedule all the SLP instances when the first SLP stmt is
8250 reached. */
8251 if (slp_vect_type slptype = STMT_SLP_TYPE (stmt_info))
8252 {
8253
8254 if (!*slp_scheduled)
8255 {
8256 *slp_scheduled = true;
8257
8258 DUMP_VECT_SCOPE ("scheduling SLP instances");
8259
8260 vect_schedule_slp (loop_vinfo);
8261 }
8262
8263 /* Hybrid SLP stmts must be vectorized in addition to SLP. */
8264 if (slptype == pure_slp)
8265 return;
8266 }
8267
8268 if (dump_enabled_p ())
8269 dump_printf_loc (MSG_NOTE, vect_location, "transform statement.\n");
8270
8271 bool grouped_store = false;
8272 if (vect_transform_stmt (stmt_info, gsi, &grouped_store, NULL, NULL))
8273 *seen_store = stmt_info;
8274 }
8275
8276 /* Function vect_transform_loop.
8277
8278 The analysis phase has determined that the loop is vectorizable.
8279 Vectorize the loop: create vectorized stmts to replace the scalar
8280 stmts in the loop, and update the loop exit condition.
8281 Returns the scalar epilogue loop, if any. */
8282
8283 struct loop *
8284 vect_transform_loop (loop_vec_info loop_vinfo)
8285 {
8286 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
8287 struct loop *epilogue = NULL;
8288 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
8289 int nbbs = loop->num_nodes;
8290 int i;
8291 tree niters_vector = NULL_TREE;
8292 tree step_vector = NULL_TREE;
8293 tree niters_vector_mult_vf = NULL_TREE;
8294 poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
8295 unsigned int lowest_vf = constant_lower_bound (vf);
8296 bool slp_scheduled = false;
8297 gimple *stmt;
8298 bool check_profitability = false;
8299 unsigned int th;
8300
8301 DUMP_VECT_SCOPE ("vec_transform_loop");
8302
8303 loop_vinfo->shared->check_datarefs ();
8304
8305 /* Use the more conservative vectorization threshold. If the number
8306 of iterations is constant, assume the cost check has been performed
8307 by our caller. If the threshold makes all loops profitable that
8308 run at least the (estimated) vectorization factor number of times,
8309 checking is pointless, too. */
8310 th = LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo);
8311 if (th >= vect_vf_for_cost (loop_vinfo)
8312 && !LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
8313 {
8314 if (dump_enabled_p ())
8315 dump_printf_loc (MSG_NOTE, vect_location,
8316 "Profitability threshold is %d loop iterations.\n",
8317 th);
8318 check_profitability = true;
8319 }
8320
8321 /* Make sure there exists a single-predecessor exit bb. Do this before
8322 versioning. */
8323 edge e = single_exit (loop);
8324 if (! single_pred_p (e->dest))
8325 {
8326 split_loop_exit_edge (e);
8327 if (dump_enabled_p ())
8328 dump_printf (MSG_NOTE, "split exit edge\n");
8329 }
8330
8331 /* Version the loop first, if required, so the profitability check
8332 comes first. */
8333
8334 if (LOOP_REQUIRES_VERSIONING (loop_vinfo))
8335 {
8336 poly_uint64 versioning_threshold
8337 = LOOP_VINFO_VERSIONING_THRESHOLD (loop_vinfo);
8338 if (check_profitability
8339 && ordered_p (poly_uint64 (th), versioning_threshold))
8340 {
8341 versioning_threshold = ordered_max (poly_uint64 (th),
8342 versioning_threshold);
8343 check_profitability = false;
8344 }
8345 vect_loop_versioning (loop_vinfo, th, check_profitability,
8346 versioning_threshold);
8347 check_profitability = false;
8348 }
8349
8350 /* Make sure there exists a single-predecessor exit bb also on the
8351 scalar loop copy. Do this after versioning but before peeling,
8352 so the CFG structure is fine for both the scalar and the if-converted
8353 loop, and so that slpeel_duplicate_current_defs_from_edges sees
8354 matched loop-closed PHI nodes on the exit. */
8355 if (LOOP_VINFO_SCALAR_LOOP (loop_vinfo))
8356 {
8357 e = single_exit (LOOP_VINFO_SCALAR_LOOP (loop_vinfo));
8358 if (! single_pred_p (e->dest))
8359 {
8360 split_loop_exit_edge (e);
8361 if (dump_enabled_p ())
8362 dump_printf (MSG_NOTE, "split exit edge of scalar loop\n");
8363 }
8364 }
8365
8366 tree niters = vect_build_loop_niters (loop_vinfo);
8367 LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo) = niters;
8368 tree nitersm1 = unshare_expr (LOOP_VINFO_NITERSM1 (loop_vinfo));
8369 bool niters_no_overflow = loop_niters_no_overflow (loop_vinfo);
8370 epilogue = vect_do_peeling (loop_vinfo, niters, nitersm1, &niters_vector,
8371 &step_vector, &niters_vector_mult_vf, th,
8372 check_profitability, niters_no_overflow);
8373
8374 if (niters_vector == NULL_TREE)
8375 {
8376 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
8377 && !LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
8378 && known_eq (lowest_vf, vf))
8379 {
8380 niters_vector
8381 = build_int_cst (TREE_TYPE (LOOP_VINFO_NITERS (loop_vinfo)),
8382 LOOP_VINFO_INT_NITERS (loop_vinfo) / lowest_vf);
8383 step_vector = build_one_cst (TREE_TYPE (niters));
8384 }
8385 else
8386 vect_gen_vector_loop_niters (loop_vinfo, niters, &niters_vector,
8387 &step_vector, niters_no_overflow);
8388 }
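/* Worked example for the constant case above, with assumed numbers: for a
   loop with a compile-time iteration count of 16 and a constant
   vectorization factor of 4, NITERS_VECTOR is built as 16 / 4 == 4 and
   STEP_VECTOR as 1, i.e. the vector loop iterates 4 times with a unit IV
   step.  */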
8389
8390 /* 1) Make sure the loop header has exactly two entries
8391 2) Make sure we have a preheader basic block. */
8392
8393 gcc_assert (EDGE_COUNT (loop->header->preds) == 2);
8394
8395 split_edge (loop_preheader_edge (loop));
8396
8397 if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
8398 && vect_use_loop_mask_for_alignment_p (loop_vinfo))
8399 /* This will deal with any possible peeling. */
8400 vect_prepare_for_masked_peels (loop_vinfo);
8401
8402 /* FORNOW: the vectorizer supports only loops whose body consists
8403 of one basic block (header + empty latch). When the vectorizer
8404 supports more involved loop forms, the order in which the BBs are
8405 traversed will need to be reconsidered. */
8406
8407 for (i = 0; i < nbbs; i++)
8408 {
8409 basic_block bb = bbs[i];
8410 stmt_vec_info stmt_info;
8411
8412 for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
8413 gsi_next (&si))
8414 {
8415 gphi *phi = si.phi ();
8416 if (dump_enabled_p ())
8417 {
8418 dump_printf_loc (MSG_NOTE, vect_location,
8419 "------>vectorizing phi: ");
8420 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
8421 }
8422 stmt_info = loop_vinfo->lookup_stmt (phi);
8423 if (!stmt_info)
8424 continue;
8425
8426 if (MAY_HAVE_DEBUG_BIND_STMTS && !STMT_VINFO_LIVE_P (stmt_info))
8427 vect_loop_kill_debug_uses (loop, stmt_info);
8428
8429 if (!STMT_VINFO_RELEVANT_P (stmt_info)
8430 && !STMT_VINFO_LIVE_P (stmt_info))
8431 continue;
8432
8433 if (STMT_VINFO_VECTYPE (stmt_info)
8434 && (maybe_ne
8435 (TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info)), vf))
8436 && dump_enabled_p ())
8437 dump_printf_loc (MSG_NOTE, vect_location, "multiple-types.\n");
8438
8439 if ((STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def
8440 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def
8441 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle)
8442 && ! PURE_SLP_STMT (stmt_info))
8443 {
8444 if (dump_enabled_p ())
8445 dump_printf_loc (MSG_NOTE, vect_location, "transform phi.\n");
8446 vect_transform_stmt (stmt_info, NULL, NULL, NULL, NULL);
8447 }
8448 }
8449
8450 for (gimple_stmt_iterator si = gsi_start_bb (bb);
8451 !gsi_end_p (si);)
8452 {
8453 stmt = gsi_stmt (si);
8454 /* During vectorization remove existing clobber stmts. */
8455 if (gimple_clobber_p (stmt))
8456 {
8457 unlink_stmt_vdef (stmt);
8458 gsi_remove (&si, true);
8459 release_defs (stmt);
8460 }
8461 else
8462 {
8463 stmt_info = loop_vinfo->lookup_stmt (stmt);
8464
8465 /* vector stmts created in the outer-loop during vectorization of
8466 stmts in an inner-loop may not have a stmt_info, and do not
8467 need to be vectorized. */
8468 stmt_vec_info seen_store = NULL;
8469 if (stmt_info)
8470 {
8471 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
8472 {
8473 gimple *def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info);
8474 for (gimple_stmt_iterator subsi = gsi_start (def_seq);
8475 !gsi_end_p (subsi); gsi_next (&subsi))
8476 {
8477 stmt_vec_info pat_stmt_info
8478 = loop_vinfo->lookup_stmt (gsi_stmt (subsi));
8479 vect_transform_loop_stmt (loop_vinfo, pat_stmt_info,
8480 &si, &seen_store,
8481 &slp_scheduled);
8482 }
8483 stmt_vec_info pat_stmt_info
8484 = STMT_VINFO_RELATED_STMT (stmt_info);
8485 vect_transform_loop_stmt (loop_vinfo, pat_stmt_info, &si,
8486 &seen_store, &slp_scheduled);
8487 }
8488 vect_transform_loop_stmt (loop_vinfo, stmt_info, &si,
8489 &seen_store, &slp_scheduled);
8490 }
8491 if (seen_store)
8492 {
8493 if (STMT_VINFO_GROUPED_ACCESS (seen_store))
8494 {
8495 /* Interleaving. The vectorization of the
8496 interleaving chain was completed - free all
8497 the stores in the chain. */
8498 gsi_next (&si);
8499 vect_remove_stores (DR_GROUP_FIRST_ELEMENT (seen_store));
8500 }
8501 else
8502 {
8503 /* Free the attached stmt_vec_info and remove the
8504 stmt. */
8505 free_stmt_vec_info (stmt);
8506 unlink_stmt_vdef (stmt);
8507 gsi_remove (&si, true);
8508 release_defs (stmt);
8509 }
8510 }
8511 else
8512 gsi_next (&si);
8513 }
8514 }
8515
8516 /* Stub out scalar statements that must not survive vectorization.
8517 Doing this here helps with grouped statements, or statements that
8518 are involved in patterns. */
8519 for (gimple_stmt_iterator gsi = gsi_start_bb (bb);
8520 !gsi_end_p (gsi); gsi_next (&gsi))
8521 {
8522 gcall *call = dyn_cast <gcall *> (gsi_stmt (gsi));
8523 if (call && gimple_call_internal_p (call, IFN_MASK_LOAD))
8524 {
8525 tree lhs = gimple_get_lhs (call);
8526 if (!VECTOR_TYPE_P (TREE_TYPE (lhs)))
8527 {
8528 tree zero = build_zero_cst (TREE_TYPE (lhs));
8529 gimple *new_stmt = gimple_build_assign (lhs, zero);
8530 gsi_replace (&gsi, new_stmt, true);
8531 }
8532 }
8533 }
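/* Illustrative example (assumed GIMPLE names): an if-converted scalar
   statement such as

       _5 = .MASK_LOAD (ptr_1, 0B, mask_2);

   whose result is not of vector type must not survive vectorization, so
   it is replaced above by the trivial assignment _5 = 0.  */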
8534 } /* BBs in loop */
8535
8536 /* The vectorization factor is always > 1, so if we use an IV increment of 1,
8537 a zero NITERS becomes a nonzero NITERS_VECTOR. */
8538 if (integer_onep (step_vector))
8539 niters_no_overflow = true;
8540 vect_set_loop_condition (loop, loop_vinfo, niters_vector, step_vector,
8541 niters_vector_mult_vf, !niters_no_overflow);
8542
8543 unsigned int assumed_vf = vect_vf_for_cost (loop_vinfo);
8544 scale_profile_for_vect_loop (loop, assumed_vf);
8545
8546 /* True if the final iteration might not handle a full vector's
8547 worth of scalar iterations. */
8548 bool final_iter_may_be_partial = LOOP_VINFO_FULLY_MASKED_P (loop_vinfo);
8549 /* The minimum number of iterations performed by the epilogue. This
8550 is 1 when peeling for gaps because we always need a final scalar
8551 iteration. */
8552 int min_epilogue_iters = LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) ? 1 : 0;
8553 /* +1 to convert latch counts to loop iteration counts,
8554 -min_epilogue_iters to remove iterations that cannot be performed
8555 by the vector code. */
8556 int bias_for_lowest = 1 - min_epilogue_iters;
8557 int bias_for_assumed = bias_for_lowest;
8558 int alignment_npeels = LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo);
8559 if (alignment_npeels && LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
8560 {
8561 /* When the amount of peeling is known at compile time, the first
8562 iteration will have exactly alignment_npeels active elements.
8563 In the worst case it will have at least one. */
8564 int min_first_active = (alignment_npeels > 0 ? alignment_npeels : 1);
8565 bias_for_lowest += lowest_vf - min_first_active;
8566 bias_for_assumed += assumed_vf - min_first_active;
8567 }
8568 /* In these calculations the "- 1" converts loop iteration counts
8569 back to latch counts. */
8570 if (loop->any_upper_bound)
8571 loop->nb_iterations_upper_bound
8572 = (final_iter_may_be_partial
8573 ? wi::udiv_ceil (loop->nb_iterations_upper_bound + bias_for_lowest,
8574 lowest_vf) - 1
8575 : wi::udiv_floor (loop->nb_iterations_upper_bound + bias_for_lowest,
8576 lowest_vf) - 1);
8577 if (loop->any_likely_upper_bound)
8578 loop->nb_iterations_likely_upper_bound
8579 = (final_iter_may_be_partial
8580 ? wi::udiv_ceil (loop->nb_iterations_likely_upper_bound
8581 + bias_for_lowest, lowest_vf) - 1
8582 : wi::udiv_floor (loop->nb_iterations_likely_upper_bound
8583 + bias_for_lowest, lowest_vf) - 1);
8584 if (loop->any_estimate)
8585 loop->nb_iterations_estimate
8586 = (final_iter_may_be_partial
8587 ? wi::udiv_ceil (loop->nb_iterations_estimate + bias_for_assumed,
8588 assumed_vf) - 1
8589 : wi::udiv_floor (loop->nb_iterations_estimate + bias_for_assumed,
8590 assumed_vf) - 1);
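/* Worked example for the adjustment above, with assumed numbers: take a
   loop whose latch-count upper bound is 15 (at most 16 iterations), a
   constant vectorization factor of 4, no peeling for gaps and no full
   masking.  Then MIN_EPILOGUE_ITERS is 0, BIAS_FOR_LOWEST is 1, and the
   new bound is floor ((15 + 1) / 4) - 1 == 3, i.e. at most 4 iterations
   of the vector loop.  */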
8591
8592 if (dump_enabled_p ())
8593 {
8594 if (!LOOP_VINFO_EPILOGUE_P (loop_vinfo))
8595 {
8596 dump_printf_loc (MSG_NOTE, vect_location,
8597 "LOOP VECTORIZED\n");
8598 if (loop->inner)
8599 dump_printf_loc (MSG_NOTE, vect_location,
8600 "OUTER LOOP VECTORIZED\n");
8601 dump_printf (MSG_NOTE, "\n");
8602 }
8603 else
8604 {
8605 dump_printf_loc (MSG_NOTE, vect_location,
8606 "LOOP EPILOGUE VECTORIZED (VS=");
8607 dump_dec (MSG_NOTE, current_vector_size);
8608 dump_printf (MSG_NOTE, ")\n");
8609 }
8610 }
8611
8612 /* Free SLP instances here because otherwise stmt reference counting
8613 won't work. */
8614 slp_instance instance;
8615 FOR_EACH_VEC_ELT (LOOP_VINFO_SLP_INSTANCES (loop_vinfo), i, instance)
8616 vect_free_slp_instance (instance, true);
8617 LOOP_VINFO_SLP_INSTANCES (loop_vinfo).release ();
8618 /* Clear the safelen field since its value is invalid after vectorization,
8619 as the vectorized loop can now have loop-carried dependencies. */
8620 loop->safelen = 0;
8621
8622 /* Don't vectorize an epilogue of an epilogue loop. */
8623 if (LOOP_VINFO_EPILOGUE_P (loop_vinfo))
8624 epilogue = NULL;
8625
8626 if (!PARAM_VALUE (PARAM_VECT_EPILOGUES_NOMASK))
8627 epilogue = NULL;
8628
8629 if (epilogue)
8630 {
8631 auto_vector_sizes vector_sizes;
8632 targetm.vectorize.autovectorize_vector_sizes (&vector_sizes);
8633 unsigned int next_size = 0;
8634
8635 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
8636 && LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) >= 0
8637 && known_eq (vf, lowest_vf))
8638 {
8639 unsigned int eiters
8640 = (LOOP_VINFO_INT_NITERS (loop_vinfo)
8641 - LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo));
8642 eiters = eiters % lowest_vf;
8643 epilogue->nb_iterations_upper_bound = eiters - 1;
8644
8645 unsigned int ratio;
8646 while (next_size < vector_sizes.length ()
8647 && !(constant_multiple_p (current_vector_size,
8648 vector_sizes[next_size], &ratio)
8649 && eiters >= lowest_vf / ratio))
8650 next_size += 1;
8651 }
8652 else
8653 while (next_size < vector_sizes.length ()
8654 && maybe_lt (current_vector_size, vector_sizes[next_size]))
8655 next_size += 1;
8656
8657 if (next_size == vector_sizes.length ())
8658 epilogue = NULL;
8659 }
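/* Illustrative example, with assumed numbers and vector sizes: with a
   known iteration count of 23, no peeling for alignment and a
   vectorization factor of 4, EITERS is 23 % 4 == 3, so the epilogue runs
   at most 3 times and its latch-count upper bound is set to 2.  If the
   current vector size is 32 bytes and the target also offers 16-byte
   vectors, the 16-byte entry satisfies the check (ratio 2, and
   3 >= 4 / 2), so a usable smaller size exists and the epilogue is kept
   for later vectorization; otherwise it would be dropped here.  */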
8660
8661 if (epilogue)
8662 {
8663 epilogue->force_vectorize = loop->force_vectorize;
8664 epilogue->safelen = loop->safelen;
8665 epilogue->dont_vectorize = false;
8666
8667 /* We may need to if-convert the epilogue to vectorize it. */
8668 if (LOOP_VINFO_SCALAR_LOOP (loop_vinfo))
8669 tree_if_conversion (epilogue);
8670 }
8671
8672 return epilogue;
8673 }
8674
8675 /* The code below performs a simple optimization: it reverts if-conversion
8676 for masked stores, i.e. if the mask of a store is zero, the store is not
8677 performed, and, where possible, neither are the producers of the stored values.
8678 For example,
8679 for (i=0; i<n; i++)
8680 if (c[i])
8681 {
8682 p1[i] += 1;
8683 p2[i] = p3[i] +2;
8684 }
8685 this transformation will produce the following semi-hammock:
8686
8687 if (!mask__ifc__42.18_165 == { 0, 0, 0, 0, 0, 0, 0, 0 })
8688 {
8689 vect__11.19_170 = MASK_LOAD (vectp_p1.20_168, 0B, mask__ifc__42.18_165);
8690 vect__12.22_172 = vect__11.19_170 + vect_cst__171;
8691 MASK_STORE (vectp_p1.23_175, 0B, mask__ifc__42.18_165, vect__12.22_172);
8692 vect__18.25_182 = MASK_LOAD (vectp_p3.26_180, 0B, mask__ifc__42.18_165);
8693 vect__19.28_184 = vect__18.25_182 + vect_cst__183;
8694 MASK_STORE (vectp_p2.29_187, 0B, mask__ifc__42.18_165, vect__19.28_184);
8695 }
8696 */
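/* In CFG terms the transformation sketched above produces, for each group
   of masked stores guarded by the same mask (illustrative layout only):

       bb:
         ...
         if (mask == { 0, ... })  goto join_bb;  else goto store_bb;
       store_bb:
         MASK_STORE (...);  plus any stored-value producers that can be sunk
       join_bb:
         .MEM_2 = PHI <.MEM_1 (bb), .MEM_3 (store_bb)>
         ...

   STORE_BB is the newly created block; the code below shows how the
   virtual operands are rewired.  */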
8697
8698 void
8699 optimize_mask_stores (struct loop *loop)
8700 {
8701 basic_block *bbs = get_loop_body (loop);
8702 unsigned nbbs = loop->num_nodes;
8703 unsigned i;
8704 basic_block bb;
8705 struct loop *bb_loop;
8706 gimple_stmt_iterator gsi;
8707 gimple *stmt;
8708 auto_vec<gimple *> worklist;
8709
8710 vect_location = find_loop_location (loop);
8711 /* Pick up all masked stores in the loop, if any. */
8712 for (i = 0; i < nbbs; i++)
8713 {
8714 bb = bbs[i];
8715 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi);
8716 gsi_next (&gsi))
8717 {
8718 stmt = gsi_stmt (gsi);
8719 if (gimple_call_internal_p (stmt, IFN_MASK_STORE))
8720 worklist.safe_push (stmt);
8721 }
8722 }
8723
8724 free (bbs);
8725 if (worklist.is_empty ())
8726 return;
8727
8728 /* Loop has masked stores. */
8729 while (!worklist.is_empty ())
8730 {
8731 gimple *last, *last_store;
8732 edge e, efalse;
8733 tree mask;
8734 basic_block store_bb, join_bb;
8735 gimple_stmt_iterator gsi_to;
8736 tree vdef, new_vdef;
8737 gphi *phi;
8738 tree vectype;
8739 tree zero;
8740
8741 last = worklist.pop ();
8742 mask = gimple_call_arg (last, 2);
8743 bb = gimple_bb (last);
8744 /* Create then_bb and the if-then structure in the CFG; then_bb belongs
8745 to the same loop as if_bb. That loop can be different from LOOP when a
8746 two-level loop nest is vectorized and the mask_store belongs to the
8747 inner loop. */
8748 e = split_block (bb, last);
8749 bb_loop = bb->loop_father;
8750 gcc_assert (loop == bb_loop || flow_loop_nested_p (loop, bb_loop));
8751 join_bb = e->dest;
8752 store_bb = create_empty_bb (bb);
8753 add_bb_to_loop (store_bb, bb_loop);
8754 e->flags = EDGE_TRUE_VALUE;
8755 efalse = make_edge (bb, store_bb, EDGE_FALSE_VALUE);
8756 /* Give the edge into STORE_BB a low probability. */
8757 efalse->probability = profile_probability::unlikely ();
8758 store_bb->count = efalse->count ();
8759 make_single_succ_edge (store_bb, join_bb, EDGE_FALLTHRU);
8760 if (dom_info_available_p (CDI_DOMINATORS))
8761 set_immediate_dominator (CDI_DOMINATORS, store_bb, bb);
8762 if (dump_enabled_p ())
8763 dump_printf_loc (MSG_NOTE, vect_location,
8764 "Create new block %d to sink mask stores.",
8765 store_bb->index);
8766 /* Create vector comparison with boolean result. */
8767 vectype = TREE_TYPE (mask);
8768 zero = build_zero_cst (vectype);
8769 stmt = gimple_build_cond (EQ_EXPR, mask, zero, NULL_TREE, NULL_TREE);
8770 gsi = gsi_last_bb (bb);
8771 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
8772 /* Create new PHI node for vdef of the last masked store:
8773 .MEM_2 = VDEF <.MEM_1>
8774 will be converted to
8775 .MEM.3 = VDEF <.MEM_1>
8776 and new PHI node will be created in join bb
8777 .MEM_2 = PHI <.MEM_1, .MEM_3>
8778 */
8779 vdef = gimple_vdef (last);
8780 new_vdef = make_ssa_name (gimple_vop (cfun), last);
8781 gimple_set_vdef (last, new_vdef);
8782 phi = create_phi_node (vdef, join_bb);
8783 add_phi_arg (phi, new_vdef, EDGE_SUCC (store_bb, 0), UNKNOWN_LOCATION);
8784
8785 /* Put all masked stores with the same mask to STORE_BB if possible. */
8786 while (true)
8787 {
8788 gimple_stmt_iterator gsi_from;
8789 gimple *stmt1 = NULL;
8790
8791 /* Move masked store to STORE_BB. */
8792 last_store = last;
8793 gsi = gsi_for_stmt (last);
8794 gsi_from = gsi;
8795 /* Shift GSI to the previous stmt for further traversal. */
8796 gsi_prev (&gsi);
8797 gsi_to = gsi_start_bb (store_bb);
8798 gsi_move_before (&gsi_from, &gsi_to);
8799 /* Set GSI_TO to the start of the now non-empty block. */
8800 gsi_to = gsi_start_bb (store_bb);
8801 if (dump_enabled_p ())
8802 {
8803 dump_printf_loc (MSG_NOTE, vect_location,
8804 "Move stmt to created bb\n");
8805 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, last, 0);
8806 }
8807 /* Move all stored value producers if possible. */
8808 while (!gsi_end_p (gsi))
8809 {
8810 tree lhs;
8811 imm_use_iterator imm_iter;
8812 use_operand_p use_p;
8813 bool res;
8814
8815 /* Skip debug statements. */
8816 if (is_gimple_debug (gsi_stmt (gsi)))
8817 {
8818 gsi_prev (&gsi);
8819 continue;
8820 }
8821 stmt1 = gsi_stmt (gsi);
8822 /* Do not consider statements writing to memory or having
8823 a volatile operand. */
8824 if (gimple_vdef (stmt1)
8825 || gimple_has_volatile_ops (stmt1))
8826 break;
8827 gsi_from = gsi;
8828 gsi_prev (&gsi);
8829 lhs = gimple_get_lhs (stmt1);
8830 if (!lhs)
8831 break;
8832
8833 /* LHS of vectorized stmt must be SSA_NAME. */
8834 if (TREE_CODE (lhs) != SSA_NAME)
8835 break;
8836
8837 if (!VECTOR_TYPE_P (TREE_TYPE (lhs)))
8838 {
8839 /* Remove dead scalar statement. */
8840 if (has_zero_uses (lhs))
8841 {
8842 gsi_remove (&gsi_from, true);
8843 continue;
8844 }
8845 }
8846
8847 /* Check that LHS does not have uses outside of STORE_BB. */
8848 res = true;
8849 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
8850 {
8851 gimple *use_stmt;
8852 use_stmt = USE_STMT (use_p);
8853 if (is_gimple_debug (use_stmt))
8854 continue;
8855 if (gimple_bb (use_stmt) != store_bb)
8856 {
8857 res = false;
8858 break;
8859 }
8860 }
8861 if (!res)
8862 break;
8863
8864 if (gimple_vuse (stmt1)
8865 && gimple_vuse (stmt1) != gimple_vuse (last_store))
8866 break;
8867
8868 /* Can move STMT1 to STORE_BB. */
8869 if (dump_enabled_p ())
8870 {
8871 dump_printf_loc (MSG_NOTE, vect_location,
8872 "Move stmt to created bb\n");
8873 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt1, 0);
8874 }
8875 gsi_move_before (&gsi_from, &gsi_to);
8876 /* Shift GSI_TO for further insertion. */
8877 gsi_prev (&gsi_to);
8878 }
8879 /* Put other masked stores with the same mask to STORE_BB. */
8880 if (worklist.is_empty ()
8881 || gimple_call_arg (worklist.last (), 2) != mask
8882 || worklist.last () != stmt1)
8883 break;
8884 last = worklist.pop ();
8885 }
8886 add_phi_arg (phi, gimple_vuse (last_store), e, UNKNOWN_LOCATION);
8887 }
8888 }