gcc/tree-vect-loop.c
1 /* Loop Vectorization
2 Copyright (C) 2003-2018 Free Software Foundation, Inc.
3 Contributed by Dorit Naishlos <dorit@il.ibm.com> and
4 Ira Rosen <irar@il.ibm.com>
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "backend.h"
26 #include "target.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "gimple.h"
30 #include "cfghooks.h"
31 #include "tree-pass.h"
32 #include "ssa.h"
33 #include "optabs-tree.h"
34 #include "diagnostic-core.h"
35 #include "fold-const.h"
36 #include "stor-layout.h"
37 #include "cfganal.h"
38 #include "gimplify.h"
39 #include "gimple-iterator.h"
40 #include "gimplify-me.h"
41 #include "tree-ssa-loop-ivopts.h"
42 #include "tree-ssa-loop-manip.h"
43 #include "tree-ssa-loop-niter.h"
44 #include "tree-ssa-loop.h"
45 #include "cfgloop.h"
46 #include "params.h"
47 #include "tree-scalar-evolution.h"
48 #include "tree-vectorizer.h"
49 #include "gimple-fold.h"
50 #include "cgraph.h"
51 #include "tree-cfg.h"
52 #include "tree-if-conv.h"
53 #include "internal-fn.h"
54 #include "tree-vector-builder.h"
55 #include "vec-perm-indices.h"
56 #include "tree-eh.h"
57
58 /* Loop Vectorization Pass.
59
60 This pass tries to vectorize loops.
61
62 For example, the vectorizer transforms the following simple loop:
63
64 short a[N]; short b[N]; short c[N]; int i;
65
66 for (i=0; i<N; i++){
67 a[i] = b[i] + c[i];
68 }
69
70 as if it were manually vectorized by rewriting the source code into:
71
72 typedef int __attribute__((mode(V8HI))) v8hi;
73 short a[N]; short b[N]; short c[N]; int i;
74 v8hi *pa = (v8hi*)a, *pb = (v8hi*)b, *pc = (v8hi*)c;
75 v8hi va, vb, vc;
76
77 for (i=0; i<N/8; i++){
78 vb = pb[i];
79 vc = pc[i];
80 va = vb + vc;
81 pa[i] = va;
82 }
83
84 The main entry to this pass is vectorize_loops(), in which
85 the vectorizer applies a set of analyses on a given set of loops,
86 followed by the actual vectorization transformation for the loops that
87 had successfully passed the analysis phase.
88 Throughout this pass we make a distinction between two types of
89 data: scalars (which are represented by SSA_NAMES), and memory references
90 ("data-refs"). These two types of data require different handling both
91 during analysis and transformation. The types of data-refs that the
92 vectorizer currently supports are ARRAY_REFS whose base is an array DECL
93 (not a pointer), and INDIRECT_REFS through pointers; both array and pointer
94 accesses are required to have a simple (consecutive) access pattern.
95
96 Analysis phase:
97 ===============
98 The driver for the analysis phase is vect_analyze_loop().
99 It applies a set of analyses, some of which rely on the scalar evolution
100 analyzer (scev) developed by Sebastian Pop.
101
102 During the analysis phase the vectorizer records some information
103 per stmt in a "stmt_vec_info" struct which is attached to each stmt in the
104 loop, as well as general information about the loop as a whole, which is
105 recorded in a "loop_vec_info" struct attached to each loop.
106
107 Transformation phase:
108 =====================
109 The loop transformation phase scans all the stmts in the loop, and
110 creates a vector stmt (or a sequence of stmts) for each scalar stmt S in
111 the loop that needs to be vectorized. It inserts the vector code sequence
112 just before the scalar stmt S, and records a pointer to the vector code
113 in STMT_VINFO_VEC_STMT (stmt_info) (stmt_info is the stmt_vec_info struct
114 attached to S). This pointer will be used for the vectorization of following
115 stmts which use the def of stmt S. Stmt S is removed if it writes to memory;
116 otherwise, we rely on dead code elimination for removing it.
117
118 For example, say stmt S1 was vectorized into stmt VS1:
119
120 VS1: vb = px[i];
121 S1: b = x[i]; STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
122 S2: a = b;
123
124 To vectorize stmt S2, the vectorizer first finds the stmt that defines
125 the operand 'b' (S1), and gets the relevant vector def 'vb' from the
126 vector stmt VS1 pointed to by STMT_VINFO_VEC_STMT (stmt_info (S1)). The
127 resulting sequence would be:
128
129 VS1: vb = px[i];
130 S1: b = x[i]; STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
131 VS2: va = vb;
132 S2: a = b; STMT_VINFO_VEC_STMT (stmt_info (S2)) = VS2
133
134 Operands that are not SSA_NAMEs, are data-refs that appear in
135 load/store operations (like 'x[i]' in S1), and are handled differently.
136
137 Target modeling:
138 =================
139 Currently the only target-specific information that is used is the
140 size of the vector (in bytes): "TARGET_VECTORIZE_UNITS_PER_SIMD_WORD".
141 Targets that can support different sizes of vectors will, for now, need
142 to specify one value for "TARGET_VECTORIZE_UNITS_PER_SIMD_WORD". More
143 flexibility will be added in the future.
144
145 Since we only vectorize operations whose vector form can be
146 expressed using existing tree codes, to verify that an operation is
147 supported, the vectorizer checks the relevant optab at the relevant
148 machine_mode (e.g., optab_handler (add_optab, V8HImode)). If
149 the value found is CODE_FOR_nothing, then there's no target support, and
150 we can't vectorize the stmt.
151
152 For additional information on this project see:
153 http://gcc.gnu.org/projects/tree-ssa/vectorization.html
154 */
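/* Illustration only (not part of this file): a minimal sketch of the
   target-support check described above.  The helper name
   example_target_supports_vector_plus is hypothetical; the real checks
   live in the vectorizable_* routines.  CODE_FOR_nothing means the target
   has no instruction implementing the optab in that mode, so the stmt
   cannot be vectorized this way.

     static bool
     example_target_supports_vector_plus (tree vectype)
     {
       machine_mode vec_mode = TYPE_MODE (vectype);
       return optab_handler (add_optab, vec_mode) != CODE_FOR_nothing;
     }  */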
155
156 static void vect_estimate_min_profitable_iters (loop_vec_info, int *, int *);
157
158 /* Subroutine of vect_determine_vf_for_stmt that handles only one
159 statement. VECTYPE_MAYBE_SET_P is true if STMT_VINFO_VECTYPE
160 may already be set for general statements (not just data refs). */
161
162 static bool
163 vect_determine_vf_for_stmt_1 (stmt_vec_info stmt_info,
164 bool vectype_maybe_set_p,
165 poly_uint64 *vf,
166 vec<stmt_vec_info > *mask_producers)
167 {
168 gimple *stmt = stmt_info->stmt;
169
170 if ((!STMT_VINFO_RELEVANT_P (stmt_info)
171 && !STMT_VINFO_LIVE_P (stmt_info))
172 || gimple_clobber_p (stmt))
173 {
174 if (dump_enabled_p ())
175 dump_printf_loc (MSG_NOTE, vect_location, "skip.\n");
176 return true;
177 }
178
179 tree stmt_vectype, nunits_vectype;
180 if (!vect_get_vector_types_for_stmt (stmt_info, &stmt_vectype,
181 &nunits_vectype))
182 return false;
183
184 if (stmt_vectype)
185 {
186 if (STMT_VINFO_VECTYPE (stmt_info))
187 /* The only case in which a vectype has already been set is for stmts
188 that contain a data ref, or for "pattern-stmts" (stmts generated
189 by the vectorizer to represent/replace a certain idiom). */
190 gcc_assert ((STMT_VINFO_DATA_REF (stmt_info)
191 || vectype_maybe_set_p)
192 && STMT_VINFO_VECTYPE (stmt_info) == stmt_vectype);
193 else if (stmt_vectype == boolean_type_node)
194 mask_producers->safe_push (stmt_info);
195 else
196 STMT_VINFO_VECTYPE (stmt_info) = stmt_vectype;
197 }
198
199 if (nunits_vectype)
200 vect_update_max_nunits (vf, nunits_vectype);
201
202 return true;
203 }
204
205 /* Subroutine of vect_determine_vectorization_factor. Set the vector
206 types of STMT_INFO and all attached pattern statements and update
207 the vectorization factor VF accordingly. If some of the statements
208 produce a mask result whose vector type can only be calculated later,
209 add them to MASK_PRODUCERS. Return true on success or false if
210 something prevented vectorization. */
211
212 static bool
213 vect_determine_vf_for_stmt (stmt_vec_info stmt_info, poly_uint64 *vf,
214 vec<stmt_vec_info > *mask_producers)
215 {
216 vec_info *vinfo = stmt_info->vinfo;
217 if (dump_enabled_p ())
218 {
219 dump_printf_loc (MSG_NOTE, vect_location, "==> examining statement: ");
220 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_info->stmt, 0);
221 }
222 if (!vect_determine_vf_for_stmt_1 (stmt_info, false, vf, mask_producers))
223 return false;
224
225 if (STMT_VINFO_IN_PATTERN_P (stmt_info)
226 && STMT_VINFO_RELATED_STMT (stmt_info))
227 {
228 gimple *pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info);
229 stmt_info = STMT_VINFO_RELATED_STMT (stmt_info);
230
231 /* If a pattern statement has def stmts, analyze them too. */
232 for (gimple_stmt_iterator si = gsi_start (pattern_def_seq);
233 !gsi_end_p (si); gsi_next (&si))
234 {
235 stmt_vec_info def_stmt_info = vinfo->lookup_stmt (gsi_stmt (si));
236 if (dump_enabled_p ())
237 {
238 dump_printf_loc (MSG_NOTE, vect_location,
239 "==> examining pattern def stmt: ");
240 dump_gimple_stmt (MSG_NOTE, TDF_SLIM,
241 def_stmt_info->stmt, 0);
242 }
243 if (!vect_determine_vf_for_stmt_1 (def_stmt_info, true,
244 vf, mask_producers))
245 return false;
246 }
247
248 if (dump_enabled_p ())
249 {
250 dump_printf_loc (MSG_NOTE, vect_location,
251 "==> examining pattern statement: ");
252 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_info->stmt, 0);
253 }
254 if (!vect_determine_vf_for_stmt_1 (stmt_info, true, vf, mask_producers))
255 return false;
256 }
257
258 return true;
259 }
260
261 /* Function vect_determine_vectorization_factor
262
263 Determine the vectorization factor (VF). VF is the number of data elements
264 that are operated upon in parallel in a single iteration of the vectorized
265 loop. For example, when vectorizing a loop that operates on 4byte elements,
266 loop. For example, when vectorizing a loop that operates on 4-byte elements,
267 on a target with a vector size (VS) of 16 bytes, the VF is set to 4, since 4
268
269 We currently support vectorization of loops in which all types operated upon
270 are of the same size. Therefore this function currently sets VF according to
271 the size of the types operated upon, and fails if there are multiple sizes
272 in the loop.
273
274 VF is also the factor by which the loop iterations are strip-mined, e.g.:
275 original loop:
276 for (i=0; i<N; i++){
277 a[i] = b[i] + c[i];
278 }
279
280 vectorized loop:
281 for (i=0; i<N; i+=VF){
282 a[i:VF] = b[i:VF] + c[i:VF];
283 }
284 */
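/* Illustration only: when N is not known to be a multiple of VF, the
   strip-mined loop is followed by a scalar epilogue that handles the
   remaining iterations (cf. peeling_for_niter below).  Sketch assuming
   VF == 4:

     for (i = 0; i + 4 <= N; i += 4)
       a[i:4] = b[i:4] + c[i:4];      // vectorized body
     for (; i < N; i++)
       a[i] = b[i] + c[i];            // scalar epilogue
*/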
285
286 static bool
287 vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
288 {
289 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
290 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
291 unsigned nbbs = loop->num_nodes;
292 poly_uint64 vectorization_factor = 1;
293 tree scalar_type = NULL_TREE;
294 gphi *phi;
295 tree vectype;
296 stmt_vec_info stmt_info;
297 unsigned i;
298 auto_vec<stmt_vec_info> mask_producers;
299
300 DUMP_VECT_SCOPE ("vect_determine_vectorization_factor");
301
302 for (i = 0; i < nbbs; i++)
303 {
304 basic_block bb = bbs[i];
305
306 for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
307 gsi_next (&si))
308 {
309 phi = si.phi ();
310 stmt_info = loop_vinfo->lookup_stmt (phi);
311 if (dump_enabled_p ())
312 {
313 dump_printf_loc (MSG_NOTE, vect_location, "==> examining phi: ");
314 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
315 }
316
317 gcc_assert (stmt_info);
318
319 if (STMT_VINFO_RELEVANT_P (stmt_info)
320 || STMT_VINFO_LIVE_P (stmt_info))
321 {
322 gcc_assert (!STMT_VINFO_VECTYPE (stmt_info));
323 scalar_type = TREE_TYPE (PHI_RESULT (phi));
324
325 if (dump_enabled_p ())
326 {
327 dump_printf_loc (MSG_NOTE, vect_location,
328 "get vectype for scalar type: ");
329 dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
330 dump_printf (MSG_NOTE, "\n");
331 }
332
333 vectype = get_vectype_for_scalar_type (scalar_type);
334 if (!vectype)
335 {
336 if (dump_enabled_p ())
337 {
338 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
339 "not vectorized: unsupported "
340 "data-type ");
341 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
342 scalar_type);
343 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
344 }
345 return false;
346 }
347 STMT_VINFO_VECTYPE (stmt_info) = vectype;
348
349 if (dump_enabled_p ())
350 {
351 dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
352 dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype);
353 dump_printf (MSG_NOTE, "\n");
354 }
355
356 if (dump_enabled_p ())
357 {
358 dump_printf_loc (MSG_NOTE, vect_location, "nunits = ");
359 dump_dec (MSG_NOTE, TYPE_VECTOR_SUBPARTS (vectype));
360 dump_printf (MSG_NOTE, "\n");
361 }
362
363 vect_update_max_nunits (&vectorization_factor, vectype);
364 }
365 }
366
367 for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
368 gsi_next (&si))
369 {
370 stmt_info = loop_vinfo->lookup_stmt (gsi_stmt (si));
371 if (!vect_determine_vf_for_stmt (stmt_info, &vectorization_factor,
372 &mask_producers))
373 return false;
374 }
375 }
376
377 /* TODO: Analyze cost. Decide if worth while to vectorize. */
378 if (dump_enabled_p ())
379 {
380 dump_printf_loc (MSG_NOTE, vect_location, "vectorization factor = ");
381 dump_dec (MSG_NOTE, vectorization_factor);
382 dump_printf (MSG_NOTE, "\n");
383 }
384
385 if (known_le (vectorization_factor, 1U))
386 {
387 if (dump_enabled_p ())
388 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
389 "not vectorized: unsupported data-type\n");
390 return false;
391 }
392 LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;
393
394 for (i = 0; i < mask_producers.length (); i++)
395 {
396 stmt_info = mask_producers[i];
397 tree mask_type = vect_get_mask_type_for_stmt (stmt_info);
398 if (!mask_type)
399 return false;
400 STMT_VINFO_VECTYPE (stmt_info) = mask_type;
401 }
402
403 return true;
404 }
405
406
407 /* Function vect_is_simple_iv_evolution.
408
409 FORNOW: A simple evolution of an induction variable in the loop is
410 considered a polynomial evolution. */
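/* Example (illustrative): for

     for (i = 0; i < n; i++)
       x = x + 3;

   the access function of the loop-header PHI of 'x' is the chrec
   {x_0, +, 3}_1 (loop number assumed): initial_condition_in_loop_num
   returns x_0 and evolution_part_in_loop_num returns the step 3.  A step
   that is itself a chrec (a polynomial of degree >= 2) is rejected
   below.  */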
411
412 static bool
413 vect_is_simple_iv_evolution (unsigned loop_nb, tree access_fn, tree * init,
414 tree * step)
415 {
416 tree init_expr;
417 tree step_expr;
418 tree evolution_part = evolution_part_in_loop_num (access_fn, loop_nb);
419 basic_block bb;
420
421 /* When there is no evolution in this loop, the evolution function
422 is not "simple". */
423 if (evolution_part == NULL_TREE)
424 return false;
425
426 /* When the evolution is a polynomial of degree >= 2
427 the evolution function is not "simple". */
428 if (tree_is_chrec (evolution_part))
429 return false;
430
431 step_expr = evolution_part;
432 init_expr = unshare_expr (initial_condition_in_loop_num (access_fn, loop_nb));
433
434 if (dump_enabled_p ())
435 {
436 dump_printf_loc (MSG_NOTE, vect_location, "step: ");
437 dump_generic_expr (MSG_NOTE, TDF_SLIM, step_expr);
438 dump_printf (MSG_NOTE, ", init: ");
439 dump_generic_expr (MSG_NOTE, TDF_SLIM, init_expr);
440 dump_printf (MSG_NOTE, "\n");
441 }
442
443 *init = init_expr;
444 *step = step_expr;
445
446 if (TREE_CODE (step_expr) != INTEGER_CST
447 && (TREE_CODE (step_expr) != SSA_NAME
448 || ((bb = gimple_bb (SSA_NAME_DEF_STMT (step_expr)))
449 && flow_bb_inside_loop_p (get_loop (cfun, loop_nb), bb))
450 || (!INTEGRAL_TYPE_P (TREE_TYPE (step_expr))
451 && (!SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr))
452 || !flag_associative_math)))
453 && (TREE_CODE (step_expr) != REAL_CST
454 || !flag_associative_math))
455 {
456 if (dump_enabled_p ())
457 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
458 "step unknown.\n");
459 return false;
460 }
461
462 return true;
463 }
464
465 /* Function vect_analyze_scalar_cycles_1.
466
467 Examine the cross iteration def-use cycles of scalar variables
468 in LOOP. LOOP_VINFO represents the loop that is now being
469 considered for vectorization (can be LOOP, or an outer-loop
470 enclosing LOOP). */
471
472 static void
473 vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, struct loop *loop)
474 {
475 basic_block bb = loop->header;
476 tree init, step;
477 auto_vec<stmt_vec_info, 64> worklist;
478 gphi_iterator gsi;
479 bool double_reduc;
480
481 DUMP_VECT_SCOPE ("vect_analyze_scalar_cycles");
482
483 /* First - identify all inductions. Reduction detection assumes that all the
484 inductions have been identified; therefore, this order must not be
485 changed. */
486 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
487 {
488 gphi *phi = gsi.phi ();
489 tree access_fn = NULL;
490 tree def = PHI_RESULT (phi);
491 stmt_vec_info stmt_vinfo = loop_vinfo->lookup_stmt (phi);
492
493 if (dump_enabled_p ())
494 {
495 dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: ");
496 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
497 }
498
499 /* Skip virtual phi's. The data dependences that are associated with
500 virtual defs/uses (i.e., memory accesses) are analyzed elsewhere. */
501 if (virtual_operand_p (def))
502 continue;
503
504 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_unknown_def_type;
505
506 /* Analyze the evolution function. */
507 access_fn = analyze_scalar_evolution (loop, def);
508 if (access_fn)
509 {
510 STRIP_NOPS (access_fn);
511 if (dump_enabled_p ())
512 {
513 dump_printf_loc (MSG_NOTE, vect_location,
514 "Access function of PHI: ");
515 dump_generic_expr (MSG_NOTE, TDF_SLIM, access_fn);
516 dump_printf (MSG_NOTE, "\n");
517 }
518 STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (stmt_vinfo)
519 = initial_condition_in_loop_num (access_fn, loop->num);
520 STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo)
521 = evolution_part_in_loop_num (access_fn, loop->num);
522 }
523
524 if (!access_fn
525 || !vect_is_simple_iv_evolution (loop->num, access_fn, &init, &step)
526 || (LOOP_VINFO_LOOP (loop_vinfo) != loop
527 && TREE_CODE (step) != INTEGER_CST))
528 {
529 worklist.safe_push (stmt_vinfo);
530 continue;
531 }
532
533 gcc_assert (STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (stmt_vinfo)
534 != NULL_TREE);
535 gcc_assert (STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo) != NULL_TREE);
536
537 if (dump_enabled_p ())
538 dump_printf_loc (MSG_NOTE, vect_location, "Detected induction.\n");
539 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_induction_def;
540 }
541
542
543 /* Second - identify all reductions and nested cycles. */
544 while (worklist.length () > 0)
545 {
546 stmt_vec_info stmt_vinfo = worklist.pop ();
547 gphi *phi = as_a <gphi *> (stmt_vinfo->stmt);
548 tree def = PHI_RESULT (phi);
549
550 if (dump_enabled_p ())
551 {
552 dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: ");
553 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
554 }
555
556 gcc_assert (!virtual_operand_p (def)
557 && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_unknown_def_type);
558
559 stmt_vec_info reduc_stmt_info
560 = vect_force_simple_reduction (loop_vinfo, stmt_vinfo,
561 &double_reduc, false);
562 if (reduc_stmt_info)
563 {
564 if (double_reduc)
565 {
566 if (dump_enabled_p ())
567 dump_printf_loc (MSG_NOTE, vect_location,
568 "Detected double reduction.\n");
569
570 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_double_reduction_def;
571 STMT_VINFO_DEF_TYPE (reduc_stmt_info)
572 = vect_double_reduction_def;
573 }
574 else
575 {
576 if (loop != LOOP_VINFO_LOOP (loop_vinfo))
577 {
578 if (dump_enabled_p ())
579 dump_printf_loc (MSG_NOTE, vect_location,
580 "Detected vectorizable nested cycle.\n");
581
582 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_nested_cycle;
583 STMT_VINFO_DEF_TYPE (reduc_stmt_info) = vect_nested_cycle;
584 }
585 else
586 {
587 if (dump_enabled_p ())
588 dump_printf_loc (MSG_NOTE, vect_location,
589 "Detected reduction.\n");
590
591 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_reduction_def;
592 STMT_VINFO_DEF_TYPE (reduc_stmt_info) = vect_reduction_def;
593 /* Store the reduction cycles for possible vectorization in
594 loop-aware SLP if it was not detected as reduction
595 chain. */
596 if (! REDUC_GROUP_FIRST_ELEMENT (reduc_stmt_info))
597 LOOP_VINFO_REDUCTIONS (loop_vinfo).safe_push
598 (reduc_stmt_info);
599 }
600 }
601 }
602 else
603 if (dump_enabled_p ())
604 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
605 "Unknown def-use cycle pattern.\n");
606 }
607 }
608
609
610 /* Function vect_analyze_scalar_cycles.
611
612 Examine the cross iteration def-use cycles of scalar variables, by
613 analyzing the loop-header PHIs of scalar variables. Classify each
614 cycle as one of the following: invariant, induction, reduction, unknown.
615 We do that for the loop represented by LOOP_VINFO, and also for its
616 inner-loop, if it exists.
617 Examples for scalar cycles:
618
619 Example1: reduction:
620
621 loop1:
622 for (i=0; i<N; i++)
623 sum += a[i];
624
625 Example2: induction:
626
627 loop2:
628 for (i=0; i<N; i++)
629 a[i] = i; */
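/* Example3 (illustrative): nested cycle, seen when considering
   outer-loop vectorization:

     loop3:
     for (i=0; i<N; i++)
       {
         x = a[i];
         for (j=0; j<M; j++)
           x = x + b[j];
         c[i] = x;
       }

   The cycle formed by the inner-loop PHI of 'x' is not a reduction of the
   outer loop; when the outer loop is the one being vectorized it is
   classified as vect_nested_cycle.  */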
630
631 static void
632 vect_analyze_scalar_cycles (loop_vec_info loop_vinfo)
633 {
634 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
635
636 vect_analyze_scalar_cycles_1 (loop_vinfo, loop);
637
638 /* When vectorizing an outer-loop, the inner-loop is executed sequentially.
639 Reductions in such an inner-loop therefore have different properties than
640 the reductions in the nest that gets vectorized:
641 1. When vectorized, they are executed in the same order as in the original
642 scalar loop, so we can't change the order of computation when
643 vectorizing them.
644 2. FIXME: Inner-loop reductions can be used in the inner-loop, so the
645 current checks are too strict. */
646
647 if (loop->inner)
648 vect_analyze_scalar_cycles_1 (loop_vinfo, loop->inner);
649 }
650
651 /* Transfer group and reduction information from STMT_INFO to its
652 pattern stmt. */
653
654 static void
655 vect_fixup_reduc_chain (stmt_vec_info stmt_info)
656 {
657 stmt_vec_info firstp = STMT_VINFO_RELATED_STMT (stmt_info);
658 stmt_vec_info stmtp;
659 gcc_assert (!REDUC_GROUP_FIRST_ELEMENT (firstp)
660 && REDUC_GROUP_FIRST_ELEMENT (stmt_info));
661 REDUC_GROUP_SIZE (firstp) = REDUC_GROUP_SIZE (stmt_info);
662 do
663 {
664 stmtp = STMT_VINFO_RELATED_STMT (stmt_info);
665 REDUC_GROUP_FIRST_ELEMENT (stmtp) = firstp;
666 stmt_info = REDUC_GROUP_NEXT_ELEMENT (stmt_info);
667 if (stmt_info)
668 REDUC_GROUP_NEXT_ELEMENT (stmtp)
669 = STMT_VINFO_RELATED_STMT (stmt_info);
670 }
671 while (stmt_info);
672 STMT_VINFO_DEF_TYPE (stmtp) = vect_reduction_def;
673 }
674
675 /* Fixup scalar cycles that now have their stmts detected as patterns. */
676
677 static void
678 vect_fixup_scalar_cycles_with_patterns (loop_vec_info loop_vinfo)
679 {
680 stmt_vec_info first;
681 unsigned i;
682
683 FOR_EACH_VEC_ELT (LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo), i, first)
684 if (STMT_VINFO_IN_PATTERN_P (first))
685 {
686 stmt_vec_info next = REDUC_GROUP_NEXT_ELEMENT (first);
687 while (next)
688 {
689 if (! STMT_VINFO_IN_PATTERN_P (next))
690 break;
691 next = REDUC_GROUP_NEXT_ELEMENT (next);
692 }
693 /* If not all stmts in the chain are patterns, try to handle
694 the chain without patterns. */
695 if (! next)
696 {
697 vect_fixup_reduc_chain (first);
698 LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo)[i]
699 = STMT_VINFO_RELATED_STMT (first);
700 }
701 }
702 }
703
704 /* Function vect_get_loop_niters.
705
706 Determine how many iterations the loop executes and place the result
707 in NUMBER_OF_ITERATIONS. Place the number of latch iterations
708 in NUMBER_OF_ITERATIONSM1. Place the condition under which the
709 niter information holds in ASSUMPTIONS.
710
711 Return the loop exit condition. */
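/* Example (illustrative): for

     i = 0;
     do { a[i] = 0; i++; } while (i < n);     (with n >= 1)

   the latch runs n - 1 times, so NUMBER_OF_ITERATIONSM1 is n - 1 and
   NUMBER_OF_ITERATIONS, the number of header executions, is n.  */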
712
713
714 static gcond *
715 vect_get_loop_niters (struct loop *loop, tree *assumptions,
716 tree *number_of_iterations, tree *number_of_iterationsm1)
717 {
718 edge exit = single_exit (loop);
719 struct tree_niter_desc niter_desc;
720 tree niter_assumptions, niter, may_be_zero;
721 gcond *cond = get_loop_exit_condition (loop);
722
723 *assumptions = boolean_true_node;
724 *number_of_iterationsm1 = chrec_dont_know;
725 *number_of_iterations = chrec_dont_know;
726 DUMP_VECT_SCOPE ("get_loop_niters");
727
728 if (!exit)
729 return cond;
730
731 niter = chrec_dont_know;
732 may_be_zero = NULL_TREE;
733 niter_assumptions = boolean_true_node;
734 if (!number_of_iterations_exit_assumptions (loop, exit, &niter_desc, NULL)
735 || chrec_contains_undetermined (niter_desc.niter))
736 return cond;
737
738 niter_assumptions = niter_desc.assumptions;
739 may_be_zero = niter_desc.may_be_zero;
740 niter = niter_desc.niter;
741
742 if (may_be_zero && integer_zerop (may_be_zero))
743 may_be_zero = NULL_TREE;
744
745 if (may_be_zero)
746 {
747 if (COMPARISON_CLASS_P (may_be_zero))
748 {
749 /* Try to combine may_be_zero with assumptions; this can simplify
750 the computation of the niter expression. */
751 if (niter_assumptions && !integer_nonzerop (niter_assumptions))
752 niter_assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
753 niter_assumptions,
754 fold_build1 (TRUTH_NOT_EXPR,
755 boolean_type_node,
756 may_be_zero));
757 else
758 niter = fold_build3 (COND_EXPR, TREE_TYPE (niter), may_be_zero,
759 build_int_cst (TREE_TYPE (niter), 0),
760 rewrite_to_non_trapping_overflow (niter));
761
762 may_be_zero = NULL_TREE;
763 }
764 else if (integer_nonzerop (may_be_zero))
765 {
766 *number_of_iterationsm1 = build_int_cst (TREE_TYPE (niter), 0);
767 *number_of_iterations = build_int_cst (TREE_TYPE (niter), 1);
768 return cond;
769 }
770 else
771 return cond;
772 }
773
774 *assumptions = niter_assumptions;
775 *number_of_iterationsm1 = niter;
776
777 /* We want the number of loop header executions, which is the number
778 of latch executions plus one.
779 ??? For UINT_MAX latch executions this number overflows to zero
780 for loops like do { n++; } while (n != 0); */
781 if (niter && !chrec_contains_undetermined (niter))
782 niter = fold_build2 (PLUS_EXPR, TREE_TYPE (niter), unshare_expr (niter),
783 build_int_cst (TREE_TYPE (niter), 1));
784 *number_of_iterations = niter;
785
786 return cond;
787 }
788
789 /* Function bb_in_loop_p
790
791 Used as predicate for dfs order traversal of the loop bbs. */
792
793 static bool
794 bb_in_loop_p (const_basic_block bb, const void *data)
795 {
796 const struct loop *const loop = (const struct loop *)data;
797 if (flow_bb_inside_loop_p (loop, bb))
798 return true;
799 return false;
800 }
801
802
803 /* Create and initialize a new loop_vec_info struct for LOOP_IN, as well as
804 stmt_vec_info structs for all the stmts in LOOP_IN. */
805
806 _loop_vec_info::_loop_vec_info (struct loop *loop_in, vec_info_shared *shared)
807 : vec_info (vec_info::loop, init_cost (loop_in), shared),
808 loop (loop_in),
809 bbs (XCNEWVEC (basic_block, loop->num_nodes)),
810 num_itersm1 (NULL_TREE),
811 num_iters (NULL_TREE),
812 num_iters_unchanged (NULL_TREE),
813 num_iters_assumptions (NULL_TREE),
814 th (0),
815 versioning_threshold (0),
816 vectorization_factor (0),
817 max_vectorization_factor (0),
818 mask_skip_niters (NULL_TREE),
819 mask_compare_type (NULL_TREE),
820 unaligned_dr (NULL),
821 peeling_for_alignment (0),
822 ptr_mask (0),
823 ivexpr_map (NULL),
824 slp_unrolling_factor (1),
825 single_scalar_iteration_cost (0),
826 vectorizable (false),
827 can_fully_mask_p (true),
828 fully_masked_p (false),
829 peeling_for_gaps (false),
830 peeling_for_niter (false),
831 operands_swapped (false),
832 no_data_dependencies (false),
833 has_mask_store (false),
834 scalar_loop (NULL),
835 orig_loop_info (NULL)
836 {
837 /* CHECKME: We want to visit all BBs before their successors (except for
838 latch blocks, for which this assertion wouldn't hold). In the simple
839 case of the loop forms we allow, a dfs order of the BBs would be the same
840 as reversed postorder traversal, so we are safe. */
841
842 unsigned int nbbs = dfs_enumerate_from (loop->header, 0, bb_in_loop_p,
843 bbs, loop->num_nodes, loop);
844 gcc_assert (nbbs == loop->num_nodes);
845
846 for (unsigned int i = 0; i < nbbs; i++)
847 {
848 basic_block bb = bbs[i];
849 gimple_stmt_iterator si;
850
851 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
852 {
853 gimple *phi = gsi_stmt (si);
854 gimple_set_uid (phi, 0);
855 add_stmt (phi);
856 }
857
858 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
859 {
860 gimple *stmt = gsi_stmt (si);
861 gimple_set_uid (stmt, 0);
862 add_stmt (stmt);
863 }
864 }
865 }
866
867 /* Free all levels of MASKS. */
868
869 void
870 release_vec_loop_masks (vec_loop_masks *masks)
871 {
872 rgroup_masks *rgm;
873 unsigned int i;
874 FOR_EACH_VEC_ELT (*masks, i, rgm)
875 rgm->masks.release ();
876 masks->release ();
877 }
878
879 /* Free all memory used by the _loop_vec_info, as well as all the
880 stmt_vec_info structs of all the stmts in the loop. */
881
882 _loop_vec_info::~_loop_vec_info ()
883 {
884 int nbbs;
885 gimple_stmt_iterator si;
886 int j;
887
888 nbbs = loop->num_nodes;
889 for (j = 0; j < nbbs; j++)
890 {
891 basic_block bb = bbs[j];
892 for (si = gsi_start_bb (bb); !gsi_end_p (si); )
893 {
894 gimple *stmt = gsi_stmt (si);
895
896 /* We may have broken canonical form by moving a constant
897 into RHS1 of a commutative op. Fix such occurrences. */
898 if (operands_swapped && is_gimple_assign (stmt))
899 {
900 enum tree_code code = gimple_assign_rhs_code (stmt);
901
902 if ((code == PLUS_EXPR
903 || code == POINTER_PLUS_EXPR
904 || code == MULT_EXPR)
905 && CONSTANT_CLASS_P (gimple_assign_rhs1 (stmt)))
906 swap_ssa_operands (stmt,
907 gimple_assign_rhs1_ptr (stmt),
908 gimple_assign_rhs2_ptr (stmt));
909 else if (code == COND_EXPR
910 && CONSTANT_CLASS_P (gimple_assign_rhs2 (stmt)))
911 {
912 tree cond_expr = gimple_assign_rhs1 (stmt);
913 enum tree_code cond_code = TREE_CODE (cond_expr);
914
915 if (TREE_CODE_CLASS (cond_code) == tcc_comparison)
916 {
917 bool honor_nans = HONOR_NANS (TREE_OPERAND (cond_expr,
918 0));
919 cond_code = invert_tree_comparison (cond_code,
920 honor_nans);
921 if (cond_code != ERROR_MARK)
922 {
923 TREE_SET_CODE (cond_expr, cond_code);
924 swap_ssa_operands (stmt,
925 gimple_assign_rhs2_ptr (stmt),
926 gimple_assign_rhs3_ptr (stmt));
927 }
928 }
929 }
930 }
931 gsi_next (&si);
932 }
933 }
934
935 free (bbs);
936
937 release_vec_loop_masks (&masks);
938 delete ivexpr_map;
939
940 loop->aux = NULL;
941 }
942
943 /* Return an invariant or register for EXPR and emit necessary
944 computations in the LOOP_VINFO loop preheader. */
945
946 tree
947 cse_and_gimplify_to_preheader (loop_vec_info loop_vinfo, tree expr)
948 {
949 if (is_gimple_reg (expr)
950 || is_gimple_min_invariant (expr))
951 return expr;
952
953 if (! loop_vinfo->ivexpr_map)
954 loop_vinfo->ivexpr_map = new hash_map<tree_operand_hash, tree>;
955 tree &cached = loop_vinfo->ivexpr_map->get_or_insert (expr);
956 if (! cached)
957 {
958 gimple_seq stmts = NULL;
959 cached = force_gimple_operand (unshare_expr (expr),
960 &stmts, true, NULL_TREE);
961 if (stmts)
962 {
963 edge e = loop_preheader_edge (LOOP_VINFO_LOOP (loop_vinfo));
964 gsi_insert_seq_on_edge_immediate (e, stmts);
965 }
966 }
967 return cached;
968 }
969
970 /* Return true if we can use CMP_TYPE as the comparison type to produce
971 all masks required to mask LOOP_VINFO. */
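/* Illustrative note (assumed semantics): IFN_WHILE_ULT (index, limit) is
   expected to yield a mask whose element I is (index + I < limit), which
   is what a fully-masked loop uses to disable the excess lanes in the
   final iteration(s).  */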
972
973 static bool
974 can_produce_all_loop_masks_p (loop_vec_info loop_vinfo, tree cmp_type)
975 {
976 rgroup_masks *rgm;
977 unsigned int i;
978 FOR_EACH_VEC_ELT (LOOP_VINFO_MASKS (loop_vinfo), i, rgm)
979 if (rgm->mask_type != NULL_TREE
980 && !direct_internal_fn_supported_p (IFN_WHILE_ULT,
981 cmp_type, rgm->mask_type,
982 OPTIMIZE_FOR_SPEED))
983 return false;
984 return true;
985 }
986
987 /* Calculate the maximum number of scalars per iteration over all the
988 rgroups in LOOP_VINFO. */
989
990 static unsigned int
991 vect_get_max_nscalars_per_iter (loop_vec_info loop_vinfo)
992 {
993 unsigned int res = 1;
994 unsigned int i;
995 rgroup_masks *rgm;
996 FOR_EACH_VEC_ELT (LOOP_VINFO_MASKS (loop_vinfo), i, rgm)
997 res = MAX (res, rgm->max_nscalars_per_iter);
998 return res;
999 }
1000
1001 /* Each statement in LOOP_VINFO can be masked where necessary. Check
1002 whether we can actually generate the masks required. Return true if so,
1003 storing the type of the scalar IV in LOOP_VINFO_MASK_COMPARE_TYPE. */
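/* Worked example (illustrative): if the niter analysis bounds the number
   of latch executions by 999, max_ni is 1000 header executions; with an
   rgroup that needs 2 mask bits per scalar iteration the limit to
   represent becomes 2000, which needs wi::min_precision (2000, UNSIGNED)
   = 11 bits.  Any integer mode of at least that width whose WHILE_ULT is
   supported for every required mask type can then serve as the comparison
   type, preferring Pmode-sized modes as noted below.  */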
1004
1005 static bool
1006 vect_verify_full_masking (loop_vec_info loop_vinfo)
1007 {
1008 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1009 unsigned int min_ni_width;
1010
1011 /* Use a normal loop if there are no statements that need masking.
1012 This only happens in rare degenerate cases: it means that the loop
1013 has no loads, no stores, and no live-out values. */
1014 if (LOOP_VINFO_MASKS (loop_vinfo).is_empty ())
1015 return false;
1016
1017 /* Get the maximum number of iterations that is representable
1018 in the counter type. */
1019 tree ni_type = TREE_TYPE (LOOP_VINFO_NITERSM1 (loop_vinfo));
1020 widest_int max_ni = wi::to_widest (TYPE_MAX_VALUE (ni_type)) + 1;
1021
1022 /* Get a more refined estimate for the number of iterations. */
1023 widest_int max_back_edges;
1024 if (max_loop_iterations (loop, &max_back_edges))
1025 max_ni = wi::smin (max_ni, max_back_edges + 1);
1026
1027 /* Account for rgroup masks, in which each bit is replicated N times. */
1028 max_ni *= vect_get_max_nscalars_per_iter (loop_vinfo);
1029
1030 /* Work out how many bits we need to represent the limit. */
1031 min_ni_width = wi::min_precision (max_ni, UNSIGNED);
1032
1033 /* Find a scalar mode for which WHILE_ULT is supported. */
1034 opt_scalar_int_mode cmp_mode_iter;
1035 tree cmp_type = NULL_TREE;
1036 FOR_EACH_MODE_IN_CLASS (cmp_mode_iter, MODE_INT)
1037 {
1038 unsigned int cmp_bits = GET_MODE_BITSIZE (cmp_mode_iter.require ());
1039 if (cmp_bits >= min_ni_width
1040 && targetm.scalar_mode_supported_p (cmp_mode_iter.require ()))
1041 {
1042 tree this_type = build_nonstandard_integer_type (cmp_bits, true);
1043 if (this_type
1044 && can_produce_all_loop_masks_p (loop_vinfo, this_type))
1045 {
1046 /* Although we could stop as soon as we find a valid mode,
1047 it's often better to continue until we hit Pmode, since the
1048 operands to the WHILE are more likely to be reusable in
1049 address calculations. */
1050 cmp_type = this_type;
1051 if (cmp_bits >= GET_MODE_BITSIZE (Pmode))
1052 break;
1053 }
1054 }
1055 }
1056
1057 if (!cmp_type)
1058 return false;
1059
1060 LOOP_VINFO_MASK_COMPARE_TYPE (loop_vinfo) = cmp_type;
1061 return true;
1062 }
1063
1064 /* Calculate the cost of one scalar iteration of the loop. */
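/* Worked example (illustrative): for a body consisting of
   a[i] = b[i] + c[i], one scalar iteration is costed as two scalar_load
   stmts, one scalar_stmt for the addition and one scalar_store, each
   weighted by FACTOR (50 for stmts inside the inner loop of a nest,
   1 otherwise).  */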
1065 static void
1066 vect_compute_single_scalar_iteration_cost (loop_vec_info loop_vinfo)
1067 {
1068 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1069 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
1070 int nbbs = loop->num_nodes, factor;
1071 int innerloop_iters, i;
1072
1073 /* Gather costs for statements in the scalar loop. */
1074
1075 /* FORNOW. */
1076 innerloop_iters = 1;
1077 if (loop->inner)
1078 innerloop_iters = 50; /* FIXME */
1079
1080 for (i = 0; i < nbbs; i++)
1081 {
1082 gimple_stmt_iterator si;
1083 basic_block bb = bbs[i];
1084
1085 if (bb->loop_father == loop->inner)
1086 factor = innerloop_iters;
1087 else
1088 factor = 1;
1089
1090 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
1091 {
1092 gimple *stmt = gsi_stmt (si);
1093 stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (stmt);
1094
1095 if (!is_gimple_assign (stmt) && !is_gimple_call (stmt))
1096 continue;
1097
1098 /* Skip stmts that are not vectorized inside the loop. */
1099 if (stmt_info
1100 && !STMT_VINFO_RELEVANT_P (stmt_info)
1101 && (!STMT_VINFO_LIVE_P (stmt_info)
1102 || !VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_info)))
1103 && !STMT_VINFO_IN_PATTERN_P (stmt_info))
1104 continue;
1105
1106 vect_cost_for_stmt kind;
1107 if (STMT_VINFO_DATA_REF (stmt_info))
1108 {
1109 if (DR_IS_READ (STMT_VINFO_DATA_REF (stmt_info)))
1110 kind = scalar_load;
1111 else
1112 kind = scalar_store;
1113 }
1114 else
1115 kind = scalar_stmt;
1116
1117 record_stmt_cost (&LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo),
1118 factor, kind, stmt_info, 0, vect_prologue);
1119 }
1120 }
1121
1122 /* Now accumulate cost. */
1123 void *target_cost_data = init_cost (loop);
1124 stmt_info_for_cost *si;
1125 int j;
1126 FOR_EACH_VEC_ELT (LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo),
1127 j, si)
1128 (void) add_stmt_cost (target_cost_data, si->count,
1129 si->kind, si->stmt_info, si->misalign,
1130 vect_body);
1131 unsigned dummy, body_cost = 0;
1132 finish_cost (target_cost_data, &dummy, &body_cost, &dummy);
1133 destroy_cost_data (target_cost_data);
1134 LOOP_VINFO_SINGLE_SCALAR_ITERATION_COST (loop_vinfo) = body_cost;
1135 }
1136
1137
1138 /* Function vect_analyze_loop_form_1.
1139
1140 Verify that certain CFG restrictions hold, including:
1141 - the loop has a pre-header
1142 - the loop has a single entry and exit
1143 - the loop exit condition is simple enough
1144 - the number of iterations can be analyzed, i.e., a countable loop. The
1145 niter could be analyzed under some assumptions. */
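/* Example (illustrative): the canonical form checked below is a loop whose
   exit test is the last statement of the single body block and whose latch
   is empty, e.g. the do-while form

     i = 0;
     do
       {
         a[i] = b[i] + c[i];
         i++;
       }
     while (i < n);

   with a single exit edge and exactly two predecessors of the header
   (the preheader and the latch).  */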
1146
1147 bool
1148 vect_analyze_loop_form_1 (struct loop *loop, gcond **loop_cond,
1149 tree *assumptions, tree *number_of_iterationsm1,
1150 tree *number_of_iterations, gcond **inner_loop_cond)
1151 {
1152 DUMP_VECT_SCOPE ("vect_analyze_loop_form");
1153
1154 /* Different restrictions apply when we are considering an inner-most loop,
1155 vs. an outer (nested) loop.
1156 (FORNOW. May want to relax some of these restrictions in the future). */
1157
1158 if (!loop->inner)
1159 {
1160 /* Inner-most loop. We currently require that the number of BBs is
1161 exactly 2 (the header and latch). Vectorizable inner-most loops
1162 look like this:
1163
1164 (pre-header)
1165 |
1166 header <--------+
1167 | | |
1168 | +--> latch --+
1169 |
1170 (exit-bb) */
1171
1172 if (loop->num_nodes != 2)
1173 {
1174 if (dump_enabled_p ())
1175 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1176 "not vectorized: control flow in loop.\n");
1177 return false;
1178 }
1179
1180 if (empty_block_p (loop->header))
1181 {
1182 if (dump_enabled_p ())
1183 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1184 "not vectorized: empty loop.\n");
1185 return false;
1186 }
1187 }
1188 else
1189 {
1190 struct loop *innerloop = loop->inner;
1191 edge entryedge;
1192
1193 /* Nested loop. We currently require that the loop is doubly-nested,
1194 contains a single inner loop, and the number of BBs is exactly 5.
1195 Vectorizable outer-loops look like this:
1196
1197 (pre-header)
1198 |
1199 header <---+
1200 | |
1201 inner-loop |
1202 | |
1203 tail ------+
1204 |
1205 (exit-bb)
1206
1207 The inner-loop has the properties expected of inner-most loops
1208 as described above. */
1209
1210 if ((loop->inner)->inner || (loop->inner)->next)
1211 {
1212 if (dump_enabled_p ())
1213 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1214 "not vectorized: multiple nested loops.\n");
1215 return false;
1216 }
1217
1218 if (loop->num_nodes != 5)
1219 {
1220 if (dump_enabled_p ())
1221 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1222 "not vectorized: control flow in loop.\n");
1223 return false;
1224 }
1225
1226 entryedge = loop_preheader_edge (innerloop);
1227 if (entryedge->src != loop->header
1228 || !single_exit (innerloop)
1229 || single_exit (innerloop)->dest != EDGE_PRED (loop->latch, 0)->src)
1230 {
1231 if (dump_enabled_p ())
1232 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1233 "not vectorized: unsupported outerloop form.\n");
1234 return false;
1235 }
1236
1237 /* Analyze the inner-loop. */
1238 tree inner_niterm1, inner_niter, inner_assumptions;
1239 if (! vect_analyze_loop_form_1 (loop->inner, inner_loop_cond,
1240 &inner_assumptions, &inner_niterm1,
1241 &inner_niter, NULL)
1242 /* Don't support analyzing niter under assumptions for inner
1243 loop. */
1244 || !integer_onep (inner_assumptions))
1245 {
1246 if (dump_enabled_p ())
1247 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1248 "not vectorized: Bad inner loop.\n");
1249 return false;
1250 }
1251
1252 if (!expr_invariant_in_loop_p (loop, inner_niter))
1253 {
1254 if (dump_enabled_p ())
1255 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1256 "not vectorized: inner-loop count not"
1257 " invariant.\n");
1258 return false;
1259 }
1260
1261 if (dump_enabled_p ())
1262 dump_printf_loc (MSG_NOTE, vect_location,
1263 "Considering outer-loop vectorization.\n");
1264 }
1265
1266 if (!single_exit (loop)
1267 || EDGE_COUNT (loop->header->preds) != 2)
1268 {
1269 if (dump_enabled_p ())
1270 {
1271 if (!single_exit (loop))
1272 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1273 "not vectorized: multiple exits.\n");
1274 else if (EDGE_COUNT (loop->header->preds) != 2)
1275 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1276 "not vectorized: too many incoming edges.\n");
1277 }
1278 return false;
1279 }
1280
1281 /* We assume that the loop exit condition is at the end of the loop, i.e.,
1282 that the loop is represented as a do-while (with a proper if-guard
1283 before the loop if needed), where the loop header contains all the
1284 executable statements, and the latch is empty. */
1285 if (!empty_block_p (loop->latch)
1286 || !gimple_seq_empty_p (phi_nodes (loop->latch)))
1287 {
1288 if (dump_enabled_p ())
1289 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1290 "not vectorized: latch block not empty.\n");
1291 return false;
1292 }
1293
1294 /* Make sure the exit is not abnormal. */
1295 edge e = single_exit (loop);
1296 if (e->flags & EDGE_ABNORMAL)
1297 {
1298 if (dump_enabled_p ())
1299 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1300 "not vectorized: abnormal loop exit edge.\n");
1301 return false;
1302 }
1303
1304 *loop_cond = vect_get_loop_niters (loop, assumptions, number_of_iterations,
1305 number_of_iterationsm1);
1306 if (!*loop_cond)
1307 {
1308 if (dump_enabled_p ())
1309 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1310 "not vectorized: complicated exit condition.\n");
1311 return false;
1312 }
1313
1314 if (integer_zerop (*assumptions)
1315 || !*number_of_iterations
1316 || chrec_contains_undetermined (*number_of_iterations))
1317 {
1318 if (dump_enabled_p ())
1319 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1320 "not vectorized: number of iterations cannot be "
1321 "computed.\n");
1322 return false;
1323 }
1324
1325 if (integer_zerop (*number_of_iterations))
1326 {
1327 if (dump_enabled_p ())
1328 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1329 "not vectorized: number of iterations = 0.\n");
1330 return false;
1331 }
1332
1333 return true;
1334 }
1335
1336 /* Analyze LOOP form and return a loop_vec_info if it is of suitable form. */
1337
1338 loop_vec_info
1339 vect_analyze_loop_form (struct loop *loop, vec_info_shared *shared)
1340 {
1341 tree assumptions, number_of_iterations, number_of_iterationsm1;
1342 gcond *loop_cond, *inner_loop_cond = NULL;
1343
1344 if (! vect_analyze_loop_form_1 (loop, &loop_cond,
1345 &assumptions, &number_of_iterationsm1,
1346 &number_of_iterations, &inner_loop_cond))
1347 return NULL;
1348
1349 loop_vec_info loop_vinfo = new _loop_vec_info (loop, shared);
1350 LOOP_VINFO_NITERSM1 (loop_vinfo) = number_of_iterationsm1;
1351 LOOP_VINFO_NITERS (loop_vinfo) = number_of_iterations;
1352 LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo) = number_of_iterations;
1353 if (!integer_onep (assumptions))
1354 {
1355 /* We consider vectorizing this loop by versioning it under
1356 some assumptions. In order to do this, we need to clear
1357 existing information computed by scev and niter analyzer. */
1358 scev_reset_htab ();
1359 free_numbers_of_iterations_estimates (loop);
1360 /* Also set a flag for this loop so that the following scev and niter
1361 analyses are done under the assumptions. */
1362 loop_constraint_set (loop, LOOP_C_FINITE);
1363 /* Also record the assumptions for versioning. */
1364 LOOP_VINFO_NITERS_ASSUMPTIONS (loop_vinfo) = assumptions;
1365 }
1366
1367 if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
1368 {
1369 if (dump_enabled_p ())
1370 {
1371 dump_printf_loc (MSG_NOTE, vect_location,
1372 "Symbolic number of iterations is ");
1373 dump_generic_expr (MSG_NOTE, TDF_DETAILS, number_of_iterations);
1374 dump_printf (MSG_NOTE, "\n");
1375 }
1376 }
1377
1378 stmt_vec_info loop_cond_info = loop_vinfo->lookup_stmt (loop_cond);
1379 STMT_VINFO_TYPE (loop_cond_info) = loop_exit_ctrl_vec_info_type;
1380 if (inner_loop_cond)
1381 {
1382 stmt_vec_info inner_loop_cond_info
1383 = loop_vinfo->lookup_stmt (inner_loop_cond);
1384 STMT_VINFO_TYPE (inner_loop_cond_info) = loop_exit_ctrl_vec_info_type;
1385 }
1386
1387 gcc_assert (!loop->aux);
1388 loop->aux = loop_vinfo;
1389 return loop_vinfo;
1390 }
1391
1392
1393
1394 /* Scan the loop stmts and, depending on whether there are any (non-)SLP
1395 statements, update the vectorization factor. */
1396
1397 static void
1398 vect_update_vf_for_slp (loop_vec_info loop_vinfo)
1399 {
1400 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1401 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
1402 int nbbs = loop->num_nodes;
1403 poly_uint64 vectorization_factor;
1404 int i;
1405
1406 DUMP_VECT_SCOPE ("vect_update_vf_for_slp");
1407
1408 vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1409 gcc_assert (known_ne (vectorization_factor, 0U));
1410
1411 /* If all the stmts in the loop can be SLPed, we perform only SLP, and
1412 the vectorization factor of the loop is the unrolling factor required by
1413 the SLP instances. If that unrolling factor is 1, we say that we
1414 perform pure SLP on the loop; cross-iteration parallelism is not
1415 exploited. */
1416 bool only_slp_in_loop = true;
1417 for (i = 0; i < nbbs; i++)
1418 {
1419 basic_block bb = bbs[i];
1420 for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
1421 gsi_next (&si))
1422 {
1423 stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (gsi_stmt (si));
1424 stmt_info = vect_stmt_to_vectorize (stmt_info);
1425 if ((STMT_VINFO_RELEVANT_P (stmt_info)
1426 || VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_info)))
1427 && !PURE_SLP_STMT (stmt_info))
1428 /* STMT needs both SLP and loop-based vectorization. */
1429 only_slp_in_loop = false;
1430 }
1431 }
1432
1433 if (only_slp_in_loop)
1434 {
1435 dump_printf_loc (MSG_NOTE, vect_location,
1436 "Loop contains only SLP stmts\n");
1437 vectorization_factor = LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo);
1438 }
1439 else
1440 {
1441 dump_printf_loc (MSG_NOTE, vect_location,
1442 "Loop contains SLP and non-SLP stmts\n");
1443 /* Both the vectorization factor and unroll factor have the form
1444 current_vector_size * X for some rational X, so they must have
1445 a common multiple. */
1446 vectorization_factor
1447 = force_common_multiple (vectorization_factor,
1448 LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo));
1449 }
1450
1451 LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;
1452 if (dump_enabled_p ())
1453 {
1454 dump_printf_loc (MSG_NOTE, vect_location,
1455 "Updating vectorization factor to ");
1456 dump_dec (MSG_NOTE, vectorization_factor);
1457 dump_printf (MSG_NOTE, ".\n");
1458 }
1459 }
1460
1461 /* Return true if STMT_INFO describes a double reduction phi and if
1462 the other phi in the reduction is also relevant for vectorization.
1463 This rejects cases such as:
1464
1465 outer1:
1466 x_1 = PHI <x_3(outer2), ...>;
1467 ...
1468
1469 inner:
1470 x_2 = ...;
1471 ...
1472
1473 outer2:
1474 x_3 = PHI <x_2(inner)>;
1475
1476 if nothing in x_2 or elsewhere makes x_1 relevant. */
1477
1478 static bool
1479 vect_active_double_reduction_p (stmt_vec_info stmt_info)
1480 {
1481 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_double_reduction_def)
1482 return false;
1483
1484 return STMT_VINFO_RELEVANT_P (STMT_VINFO_REDUC_DEF (stmt_info));
1485 }
1486
1487 /* Function vect_analyze_loop_operations.
1488
1489 Scan the loop stmts and make sure they are all vectorizable. */
1490
1491 static bool
1492 vect_analyze_loop_operations (loop_vec_info loop_vinfo)
1493 {
1494 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1495 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
1496 int nbbs = loop->num_nodes;
1497 int i;
1498 stmt_vec_info stmt_info;
1499 bool need_to_vectorize = false;
1500 bool ok;
1501
1502 DUMP_VECT_SCOPE ("vect_analyze_loop_operations");
1503
1504 stmt_vector_for_cost cost_vec;
1505 cost_vec.create (2);
1506
1507 for (i = 0; i < nbbs; i++)
1508 {
1509 basic_block bb = bbs[i];
1510
1511 for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
1512 gsi_next (&si))
1513 {
1514 gphi *phi = si.phi ();
1515 ok = true;
1516
1517 stmt_info = loop_vinfo->lookup_stmt (phi);
1518 if (dump_enabled_p ())
1519 {
1520 dump_printf_loc (MSG_NOTE, vect_location, "examining phi: ");
1521 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
1522 }
1523 if (virtual_operand_p (gimple_phi_result (phi)))
1524 continue;
1525
1526 /* Inner-loop loop-closed exit phi in outer-loop vectorization
1527 (i.e., a phi in the tail of the outer-loop). */
1528 if (! is_loop_header_bb_p (bb))
1529 {
1530 /* FORNOW: we currently don't support the case that these phis
1531 are not used in the outer loop (unless it is a double reduction,
1532 i.e., this phi is vect_reduction_def), because this case
1533 would require us to actually do something here. */
1534 if (STMT_VINFO_LIVE_P (stmt_info)
1535 && !vect_active_double_reduction_p (stmt_info))
1536 {
1537 if (dump_enabled_p ())
1538 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1539 "Unsupported loop-closed phi in "
1540 "outer-loop.\n");
1541 return false;
1542 }
1543
1544 /* If PHI is used in the outer loop, we check that its operand
1545 is defined in the inner loop. */
1546 if (STMT_VINFO_RELEVANT_P (stmt_info))
1547 {
1548 tree phi_op;
1549
1550 if (gimple_phi_num_args (phi) != 1)
1551 return false;
1552
1553 phi_op = PHI_ARG_DEF (phi, 0);
1554 stmt_vec_info op_def_info = loop_vinfo->lookup_def (phi_op);
1555 if (!op_def_info)
1556 return false;
1557
1558 if (STMT_VINFO_RELEVANT (op_def_info) != vect_used_in_outer
1559 && (STMT_VINFO_RELEVANT (op_def_info)
1560 != vect_used_in_outer_by_reduction))
1561 return false;
1562 }
1563
1564 continue;
1565 }
1566
1567 gcc_assert (stmt_info);
1568
1569 if ((STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_scope
1570 || STMT_VINFO_LIVE_P (stmt_info))
1571 && STMT_VINFO_DEF_TYPE (stmt_info) != vect_induction_def)
1572 {
1573 /* A scalar-dependence cycle that we don't support. */
1574 if (dump_enabled_p ())
1575 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1576 "not vectorized: scalar dependence cycle.\n");
1577 return false;
1578 }
1579
1580 if (STMT_VINFO_RELEVANT_P (stmt_info))
1581 {
1582 need_to_vectorize = true;
1583 if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def
1584 && ! PURE_SLP_STMT (stmt_info))
1585 ok = vectorizable_induction (stmt_info, NULL, NULL, NULL,
1586 &cost_vec);
1587 else if ((STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def
1588 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle)
1589 && ! PURE_SLP_STMT (stmt_info))
1590 ok = vectorizable_reduction (stmt_info, NULL, NULL, NULL, NULL,
1591 &cost_vec);
1592 }
1593
1594 /* SLP PHIs are tested by vect_slp_analyze_node_operations. */
1595 if (ok
1596 && STMT_VINFO_LIVE_P (stmt_info)
1597 && !PURE_SLP_STMT (stmt_info))
1598 ok = vectorizable_live_operation (stmt_info, NULL, NULL, -1, NULL,
1599 &cost_vec);
1600
1601 if (!ok)
1602 {
1603 if (dump_enabled_p ())
1604 {
1605 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1606 "not vectorized: relevant phi not "
1607 "supported: ");
1608 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, phi, 0);
1609 }
1610 return false;
1611 }
1612 }
1613
1614 for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
1615 gsi_next (&si))
1616 {
1617 gimple *stmt = gsi_stmt (si);
1618 if (!gimple_clobber_p (stmt)
1619 && !vect_analyze_stmt (loop_vinfo->lookup_stmt (stmt),
1620 &need_to_vectorize,
1621 NULL, NULL, &cost_vec))
1622 return false;
1623 }
1624 } /* bbs */
1625
1626 add_stmt_costs (loop_vinfo->target_cost_data, &cost_vec);
1627 cost_vec.release ();
1628
1629 /* All operations in the loop are either irrelevant (they deal with loop
1630 control, or are dead), or are only used outside the loop and can be moved
1631 out of the loop (e.g. invariants, inductions). The loop can be
1632 optimized away by scalar optimizations. We're better off not
1633 touching this loop. */
1634 if (!need_to_vectorize)
1635 {
1636 if (dump_enabled_p ())
1637 dump_printf_loc (MSG_NOTE, vect_location,
1638 "All the computation can be taken out of the loop.\n");
1639 if (dump_enabled_p ())
1640 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1641 "not vectorized: redundant loop. no profit to "
1642 "vectorize.\n");
1643 return false;
1644 }
1645
1646 return true;
1647 }
1648
1649 /* Analyze the cost of the loop described by LOOP_VINFO. Decide if it
1650 is worthwhile to vectorize. Return 1 if definitely yes, 0 if
1651 definitely no, or -1 if it's worth retrying. */
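/* Worked example (illustrative, made-up numbers): if the cost model
   reports min_profitable_iters = 10, --param min-vect-loop-bound is 2 and
   the assumed VF is 4, then min_scalar_loop_bound is 8 and the threshold
   th becomes MAX (8, 10) = 10; a loop whose iteration count is known to
   be 8 is then rejected as not profitable.  */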
1652
1653 static int
1654 vect_analyze_loop_costing (loop_vec_info loop_vinfo)
1655 {
1656 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1657 unsigned int assumed_vf = vect_vf_for_cost (loop_vinfo);
1658
1659 /* Only fully-masked loops can have iteration counts less than the
1660 vectorization factor. */
1661 if (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
1662 {
1663 HOST_WIDE_INT max_niter;
1664
1665 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
1666 max_niter = LOOP_VINFO_INT_NITERS (loop_vinfo);
1667 else
1668 max_niter = max_stmt_executions_int (loop);
1669
1670 if (max_niter != -1
1671 && (unsigned HOST_WIDE_INT) max_niter < assumed_vf)
1672 {
1673 if (dump_enabled_p ())
1674 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1675 "not vectorized: iteration count smaller than "
1676 "vectorization factor.\n");
1677 return 0;
1678 }
1679 }
1680
1681 int min_profitable_iters, min_profitable_estimate;
1682 vect_estimate_min_profitable_iters (loop_vinfo, &min_profitable_iters,
1683 &min_profitable_estimate);
1684
1685 if (min_profitable_iters < 0)
1686 {
1687 if (dump_enabled_p ())
1688 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1689 "not vectorized: vectorization not profitable.\n");
1690 if (dump_enabled_p ())
1691 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1692 "not vectorized: vector version will never be "
1693 "profitable.\n");
1694 return -1;
1695 }
1696
1697 int min_scalar_loop_bound = (PARAM_VALUE (PARAM_MIN_VECT_LOOP_BOUND)
1698 * assumed_vf);
1699
1700 /* Use the cost model only if it is more conservative than the user-specified
1701 threshold. */
1702 unsigned int th = (unsigned) MAX (min_scalar_loop_bound,
1703 min_profitable_iters);
1704
1705 LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo) = th;
1706
1707 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
1708 && LOOP_VINFO_INT_NITERS (loop_vinfo) < th)
1709 {
1710 if (dump_enabled_p ())
1711 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1712 "not vectorized: vectorization not profitable.\n");
1713 if (dump_enabled_p ())
1714 dump_printf_loc (MSG_NOTE, vect_location,
1715 "not vectorized: iteration count smaller than user "
1716 "specified loop bound parameter or minimum profitable "
1717 "iterations (whichever is more conservative).\n");
1718 return 0;
1719 }
1720
1721 HOST_WIDE_INT estimated_niter = estimated_stmt_executions_int (loop);
1722 if (estimated_niter == -1)
1723 estimated_niter = likely_max_stmt_executions_int (loop);
1724 if (estimated_niter != -1
1725 && ((unsigned HOST_WIDE_INT) estimated_niter
1726 < MAX (th, (unsigned) min_profitable_estimate)))
1727 {
1728 if (dump_enabled_p ())
1729 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1730 "not vectorized: estimated iteration count too "
1731 "small.\n");
1732 if (dump_enabled_p ())
1733 dump_printf_loc (MSG_NOTE, vect_location,
1734 "not vectorized: estimated iteration count smaller "
1735 "than specified loop bound parameter or minimum "
1736 "profitable iterations (whichever is more "
1737 "conservative).\n");
1738 return -1;
1739 }
1740
1741 return 1;
1742 }
1743
1744 static bool
1745 vect_get_datarefs_in_loop (loop_p loop, basic_block *bbs,
1746 vec<data_reference_p> *datarefs,
1747 unsigned int *n_stmts)
1748 {
1749 *n_stmts = 0;
1750 for (unsigned i = 0; i < loop->num_nodes; i++)
1751 for (gimple_stmt_iterator gsi = gsi_start_bb (bbs[i]);
1752 !gsi_end_p (gsi); gsi_next (&gsi))
1753 {
1754 gimple *stmt = gsi_stmt (gsi);
1755 if (is_gimple_debug (stmt))
1756 continue;
1757 ++(*n_stmts);
1758 if (!vect_find_stmt_data_reference (loop, stmt, datarefs))
1759 {
1760 if (is_gimple_call (stmt) && loop->safelen)
1761 {
1762 tree fndecl = gimple_call_fndecl (stmt), op;
1763 if (fndecl != NULL_TREE)
1764 {
1765 cgraph_node *node = cgraph_node::get (fndecl);
1766 if (node != NULL && node->simd_clones != NULL)
1767 {
1768 unsigned int j, n = gimple_call_num_args (stmt);
1769 for (j = 0; j < n; j++)
1770 {
1771 op = gimple_call_arg (stmt, j);
1772 if (DECL_P (op)
1773 || (REFERENCE_CLASS_P (op)
1774 && get_base_address (op)))
1775 break;
1776 }
1777 op = gimple_call_lhs (stmt);
1778 /* Ignore #pragma omp declare simd functions
1779 if they don't have data references in the
1780 call stmt itself. */
1781 if (j == n
1782 && !(op
1783 && (DECL_P (op)
1784 || (REFERENCE_CLASS_P (op)
1785 && get_base_address (op)))))
1786 continue;
1787 }
1788 }
1789 }
1790 return false;
1791 }
1792 /* If dependence analysis will give up due to the limit on the
1793 	   number of datarefs, stop here and fail fatally.  */
1794 if (datarefs->length ()
1795 > (unsigned)PARAM_VALUE (PARAM_LOOP_MAX_DATAREFS_FOR_DATADEPS))
1796 return false;
1797 }
1798 return true;
1799 }
1800
1801 /* Function vect_analyze_loop_2.
1802
1803 Apply a set of analyses on LOOP, and create a loop_vec_info struct
1804 for it. The different analyses will record information in the
1805 loop_vec_info struct. */
1806 static bool
1807 vect_analyze_loop_2 (loop_vec_info loop_vinfo, bool &fatal, unsigned *n_stmts)
1808 {
1809 bool ok;
1810 int res;
1811 unsigned int max_vf = MAX_VECTORIZATION_FACTOR;
1812 poly_uint64 min_vf = 2;
1813
1814 /* The first group of checks is independent of the vector size. */
1815 fatal = true;
1816
1817 /* Find all data references in the loop (which correspond to vdefs/vuses)
1818 and analyze their evolution in the loop. */
1819
1820 loop_p loop = LOOP_VINFO_LOOP (loop_vinfo);
1821
1822 /* Gather the data references and count stmts in the loop. */
1823 if (!LOOP_VINFO_DATAREFS (loop_vinfo).exists ())
1824 {
1825 if (!vect_get_datarefs_in_loop (loop, LOOP_VINFO_BBS (loop_vinfo),
1826 &LOOP_VINFO_DATAREFS (loop_vinfo),
1827 n_stmts))
1828 {
1829 if (dump_enabled_p ())
1830 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1831 "not vectorized: loop contains function "
1832 "calls or data references that cannot "
1833 "be analyzed\n");
1834 return false;
1835 }
1836 loop_vinfo->shared->save_datarefs ();
1837 }
1838 else
1839 loop_vinfo->shared->check_datarefs ();
1840
1841 /* Analyze the data references and also adjust the minimal
1842 vectorization factor according to the loads and stores. */
1843
1844 ok = vect_analyze_data_refs (loop_vinfo, &min_vf);
1845 if (!ok)
1846 {
1847 if (dump_enabled_p ())
1848 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1849 "bad data references.\n");
1850 return false;
1851 }
1852
1853 /* Classify all cross-iteration scalar data-flow cycles.
1854 Cross-iteration cycles caused by virtual phis are analyzed separately. */
1855 vect_analyze_scalar_cycles (loop_vinfo);
1856
1857 vect_pattern_recog (loop_vinfo);
1858
1859 vect_fixup_scalar_cycles_with_patterns (loop_vinfo);
1860
1861 /* Analyze the access patterns of the data-refs in the loop (consecutive,
1862 complex, etc.). FORNOW: Only handle consecutive access pattern. */
1863
1864 ok = vect_analyze_data_ref_accesses (loop_vinfo);
1865 if (!ok)
1866 {
1867 if (dump_enabled_p ())
1868 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1869 "bad data access.\n");
1870 return false;
1871 }
1872
1873 /* Data-flow analysis to detect stmts that do not need to be vectorized. */
1874
1875 ok = vect_mark_stmts_to_be_vectorized (loop_vinfo);
1876 if (!ok)
1877 {
1878 if (dump_enabled_p ())
1879 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1880 "unexpected pattern.\n");
1881 return false;
1882 }
1883
1884   /* The rest of the analysis below depends on the vector size in some way, so a failure from here on is not fatal; we can retry with a different vector size.  */
1885 fatal = false;
1886
1887 /* Analyze data dependences between the data-refs in the loop
1888 and adjust the maximum vectorization factor according to
1889 the dependences.
1890 FORNOW: fail at the first data dependence that we encounter. */
1891
1892 ok = vect_analyze_data_ref_dependences (loop_vinfo, &max_vf);
1893 if (!ok
1894 || (max_vf != MAX_VECTORIZATION_FACTOR
1895 && maybe_lt (max_vf, min_vf)))
1896 {
1897 if (dump_enabled_p ())
1898 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1899 "bad data dependence.\n");
1900 return false;
1901 }
1902 LOOP_VINFO_MAX_VECT_FACTOR (loop_vinfo) = max_vf;
1903
1904 ok = vect_determine_vectorization_factor (loop_vinfo);
1905 if (!ok)
1906 {
1907 if (dump_enabled_p ())
1908 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1909 "can't determine vectorization factor.\n");
1910 return false;
1911 }
1912 if (max_vf != MAX_VECTORIZATION_FACTOR
1913 && maybe_lt (max_vf, LOOP_VINFO_VECT_FACTOR (loop_vinfo)))
1914 {
1915 if (dump_enabled_p ())
1916 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1917 "bad data dependence.\n");
1918 return false;
1919 }
1920
1921 /* Compute the scalar iteration cost. */
1922 vect_compute_single_scalar_iteration_cost (loop_vinfo);
1923
1924 poly_uint64 saved_vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1925 unsigned th;
1926
1927 /* Check the SLP opportunities in the loop, analyze and build SLP trees. */
1928 ok = vect_analyze_slp (loop_vinfo, *n_stmts);
1929 if (!ok)
1930 return false;
1931
1932 /* If there are any SLP instances mark them as pure_slp. */
1933 bool slp = vect_make_slp_decision (loop_vinfo);
1934 if (slp)
1935 {
1936 /* Find stmts that need to be both vectorized and SLPed. */
1937 vect_detect_hybrid_slp (loop_vinfo);
1938
1939 /* Update the vectorization factor based on the SLP decision. */
1940 vect_update_vf_for_slp (loop_vinfo);
1941 }
1942
1943 bool saved_can_fully_mask_p = LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo);
1944
1945 /* We don't expect to have to roll back to anything other than an empty
1946 set of rgroups. */
1947 gcc_assert (LOOP_VINFO_MASKS (loop_vinfo).is_empty ());
1948
1949 /* This is the point where we can re-start analysis with SLP forced off. */
1950 start_over:
1951
1952 /* Now the vectorization factor is final. */
1953 poly_uint64 vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1954 gcc_assert (known_ne (vectorization_factor, 0U));
1955
1956 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo) && dump_enabled_p ())
1957 {
1958 dump_printf_loc (MSG_NOTE, vect_location,
1959 "vectorization_factor = ");
1960 dump_dec (MSG_NOTE, vectorization_factor);
1961 dump_printf (MSG_NOTE, ", niters = " HOST_WIDE_INT_PRINT_DEC "\n",
1962 LOOP_VINFO_INT_NITERS (loop_vinfo));
1963 }
1964
1965 HOST_WIDE_INT max_niter
1966 = likely_max_stmt_executions_int (LOOP_VINFO_LOOP (loop_vinfo));
1967
1968 /* Analyze the alignment of the data-refs in the loop.
1969 Fail if a data reference is found that cannot be vectorized. */
1970
1971 ok = vect_analyze_data_refs_alignment (loop_vinfo);
1972 if (!ok)
1973 {
1974 if (dump_enabled_p ())
1975 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1976 "bad data alignment.\n");
1977 return false;
1978 }
1979
1980 /* Prune the list of ddrs to be tested at run-time by versioning for alias.
1981 It is important to call pruning after vect_analyze_data_ref_accesses,
1982 since we use grouping information gathered by interleaving analysis. */
1983 ok = vect_prune_runtime_alias_test_list (loop_vinfo);
1984 if (!ok)
1985 return false;
1986
1987   /* Do not invoke vect_enhance_data_refs_alignment for epilogue
1988 vectorization. */
1989 if (!LOOP_VINFO_EPILOGUE_P (loop_vinfo))
1990 {
1991 /* This pass will decide on using loop versioning and/or loop peeling in
1992 order to enhance the alignment of data references in the loop. */
1993 ok = vect_enhance_data_refs_alignment (loop_vinfo);
1994 if (!ok)
1995 {
1996 if (dump_enabled_p ())
1997 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1998 "bad data alignment.\n");
1999 return false;
2000 }
2001 }
2002
2003 if (slp)
2004 {
2005 /* Analyze operations in the SLP instances. Note this may
2006 remove unsupported SLP instances which makes the above
2007 SLP kind detection invalid. */
2008 unsigned old_size = LOOP_VINFO_SLP_INSTANCES (loop_vinfo).length ();
2009 vect_slp_analyze_operations (loop_vinfo);
2010 if (LOOP_VINFO_SLP_INSTANCES (loop_vinfo).length () != old_size)
2011 goto again;
2012 }
2013
2014 /* Scan all the remaining operations in the loop that are not subject
2015 to SLP and make sure they are vectorizable. */
2016 ok = vect_analyze_loop_operations (loop_vinfo);
2017 if (!ok)
2018 {
2019 if (dump_enabled_p ())
2020 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2021 "bad operation or unsupported loop bound.\n");
2022 return false;
2023 }
2024
2025 /* Decide whether to use a fully-masked loop for this vectorization
2026 factor. */
2027 LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
2028 = (LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo)
2029 && vect_verify_full_masking (loop_vinfo));
2030 if (dump_enabled_p ())
2031 {
2032 if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
2033 dump_printf_loc (MSG_NOTE, vect_location,
2034 "using a fully-masked loop.\n");
2035 else
2036 dump_printf_loc (MSG_NOTE, vect_location,
2037 "not using a fully-masked loop.\n");
2038 }
2039
2040   /* If an epilog loop is required because of data accesses with gaps,
2041      one additional iteration needs to be peeled.  Check if there are
2042      enough iterations for vectorization.  */
2043 if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo)
2044 && LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
2045 && !LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
2046 {
2047 poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2048 tree scalar_niters = LOOP_VINFO_NITERSM1 (loop_vinfo);
2049
2050 if (known_lt (wi::to_widest (scalar_niters), vf))
2051 {
2052 if (dump_enabled_p ())
2053 dump_printf_loc (MSG_NOTE, vect_location,
2054 			     "loop does not have enough iterations to support"
2055 " peeling for gaps.\n");
2056 return false;
2057 }
2058 }
2059
2060   /* Check that the costings of the loop make vectorizing worthwhile.  */
2061 res = vect_analyze_loop_costing (loop_vinfo);
2062 if (res < 0)
2063 goto again;
2064 if (!res)
2065 {
2066 if (dump_enabled_p ())
2067 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2068 "Loop costings not worthwhile.\n");
2069 return false;
2070 }
2071
2072 /* Decide whether we need to create an epilogue loop to handle
2073 remaining scalar iterations. */
2074 th = LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo);
2075
2076 unsigned HOST_WIDE_INT const_vf;
2077 if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
2078 /* The main loop handles all iterations. */
2079 LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = false;
2080 else if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
2081 && LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) > 0)
2082 {
2083 if (!multiple_p (LOOP_VINFO_INT_NITERS (loop_vinfo)
2084 - LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo),
2085 LOOP_VINFO_VECT_FACTOR (loop_vinfo)))
2086 LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = true;
2087 }
2088 else if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo)
2089 || !LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant (&const_vf)
2090 || ((tree_ctz (LOOP_VINFO_NITERS (loop_vinfo))
2091 < (unsigned) exact_log2 (const_vf))
2092 /* In case of versioning, check if the maximum number of
2093 iterations is greater than th. If they are identical,
2094 the epilogue is unnecessary. */
2095 && (!LOOP_REQUIRES_VERSIONING (loop_vinfo)
2096 || ((unsigned HOST_WIDE_INT) max_niter
2097 > (th / const_vf) * const_vf))))
2098 LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = true;
2099
2100 /* If an epilogue loop is required make sure we can create one. */
2101 if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo)
2102 || LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo))
2103 {
2104 if (dump_enabled_p ())
2105 dump_printf_loc (MSG_NOTE, vect_location, "epilog loop required\n");
2106 if (!vect_can_advance_ivs_p (loop_vinfo)
2107 || !slpeel_can_duplicate_loop_p (LOOP_VINFO_LOOP (loop_vinfo),
2108 single_exit (LOOP_VINFO_LOOP
2109 (loop_vinfo))))
2110 {
2111 if (dump_enabled_p ())
2112 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2113 "not vectorized: can't create required "
2114 "epilog loop\n");
2115 goto again;
2116 }
2117 }
2118
2119   /* During peeling, we need to check if the number of loop iterations is
2120      enough for both the peeled prolog loop and the vector loop.  This check
2121      can be merged with the threshold check of loop versioning, so
2122      increase the threshold for this case if necessary.  */
2123 if (LOOP_REQUIRES_VERSIONING (loop_vinfo))
2124 {
2125 poly_uint64 niters_th = 0;
2126
2127 if (!vect_use_loop_mask_for_alignment_p (loop_vinfo))
2128 {
2129 /* Niters for peeled prolog loop. */
2130 if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) < 0)
2131 {
2132 dr_vec_info *dr_info = LOOP_VINFO_UNALIGNED_DR (loop_vinfo);
2133 tree vectype = STMT_VINFO_VECTYPE (dr_info->stmt);
2134 niters_th += TYPE_VECTOR_SUBPARTS (vectype) - 1;
2135 }
2136 else
2137 niters_th += LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo);
2138 }
2139
2140 /* Niters for at least one iteration of vectorized loop. */
2141 if (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
2142 niters_th += LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2143 /* One additional iteration because of peeling for gap. */
2144 if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo))
2145 niters_th += 1;
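      /* Illustrative numbers only: with a vectorization factor of 4, an
	 unknown misalignment whose vector type has 4 lanes (at most 4 - 1
	 prologue iterations) and peeling for gaps, NITERS_TH becomes
	 3 + 4 + 1 = 8.  */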
2146 LOOP_VINFO_VERSIONING_THRESHOLD (loop_vinfo) = niters_th;
2147 }
2148
2149 gcc_assert (known_eq (vectorization_factor,
2150 LOOP_VINFO_VECT_FACTOR (loop_vinfo)));
2151
2152 /* Ok to vectorize! */
2153 return true;
2154
2155 again:
2156   /* Try again with SLP forced off, but if we didn't do any SLP there is
2157 no point in re-trying. */
2158 if (!slp)
2159 return false;
2160
2161 /* If there are reduction chains re-trying will fail anyway. */
2162 if (! LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo).is_empty ())
2163 return false;
2164
2165 /* Likewise if the grouped loads or stores in the SLP cannot be handled
2166 via interleaving or lane instructions. */
2167 slp_instance instance;
2168 slp_tree node;
2169 unsigned i, j;
2170 FOR_EACH_VEC_ELT (LOOP_VINFO_SLP_INSTANCES (loop_vinfo), i, instance)
2171 {
2172 stmt_vec_info vinfo;
2173 vinfo = SLP_TREE_SCALAR_STMTS (SLP_INSTANCE_TREE (instance))[0];
2174 if (! STMT_VINFO_GROUPED_ACCESS (vinfo))
2175 continue;
2176 vinfo = DR_GROUP_FIRST_ELEMENT (vinfo);
2177 unsigned int size = DR_GROUP_SIZE (vinfo);
2178 tree vectype = STMT_VINFO_VECTYPE (vinfo);
2179 if (! vect_store_lanes_supported (vectype, size, false)
2180 && ! known_eq (TYPE_VECTOR_SUBPARTS (vectype), 1U)
2181 && ! vect_grouped_store_supported (vectype, size))
2182 return false;
2183 FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (instance), j, node)
2184 {
2185 vinfo = SLP_TREE_SCALAR_STMTS (node)[0];
2186 vinfo = DR_GROUP_FIRST_ELEMENT (vinfo);
2187 bool single_element_p = !DR_GROUP_NEXT_ELEMENT (vinfo);
2188 size = DR_GROUP_SIZE (vinfo);
2189 vectype = STMT_VINFO_VECTYPE (vinfo);
2190 if (! vect_load_lanes_supported (vectype, size, false)
2191 && ! vect_grouped_load_supported (vectype, single_element_p,
2192 size))
2193 return false;
2194 }
2195 }
2196
2197 if (dump_enabled_p ())
2198 dump_printf_loc (MSG_NOTE, vect_location,
2199 "re-trying with SLP disabled\n");
2200
2201 /* Roll back state appropriately. No SLP this time. */
2202 slp = false;
2203   /* Restore the vectorization factor to its value before the SLP adjustment.  */
2204 LOOP_VINFO_VECT_FACTOR (loop_vinfo) = saved_vectorization_factor;
2205 /* Free the SLP instances. */
2206 FOR_EACH_VEC_ELT (LOOP_VINFO_SLP_INSTANCES (loop_vinfo), j, instance)
2207 vect_free_slp_instance (instance, false);
2208 LOOP_VINFO_SLP_INSTANCES (loop_vinfo).release ();
2209 /* Reset SLP type to loop_vect on all stmts. */
2210 for (i = 0; i < LOOP_VINFO_LOOP (loop_vinfo)->num_nodes; ++i)
2211 {
2212 basic_block bb = LOOP_VINFO_BBS (loop_vinfo)[i];
2213 for (gimple_stmt_iterator si = gsi_start_phis (bb);
2214 !gsi_end_p (si); gsi_next (&si))
2215 {
2216 stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (gsi_stmt (si));
2217 STMT_SLP_TYPE (stmt_info) = loop_vect;
2218 }
2219 for (gimple_stmt_iterator si = gsi_start_bb (bb);
2220 !gsi_end_p (si); gsi_next (&si))
2221 {
2222 stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (gsi_stmt (si));
2223 STMT_SLP_TYPE (stmt_info) = loop_vect;
2224 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
2225 {
2226 gimple *pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info);
2227 stmt_info = STMT_VINFO_RELATED_STMT (stmt_info);
2228 STMT_SLP_TYPE (stmt_info) = loop_vect;
2229 for (gimple_stmt_iterator pi = gsi_start (pattern_def_seq);
2230 !gsi_end_p (pi); gsi_next (&pi))
2231 STMT_SLP_TYPE (loop_vinfo->lookup_stmt (gsi_stmt (pi)))
2232 = loop_vect;
2233 }
2234 }
2235 }
2236 /* Free optimized alias test DDRS. */
2237 LOOP_VINFO_LOWER_BOUNDS (loop_vinfo).truncate (0);
2238 LOOP_VINFO_COMP_ALIAS_DDRS (loop_vinfo).release ();
2239 LOOP_VINFO_CHECK_UNEQUAL_ADDRS (loop_vinfo).release ();
2240 /* Reset target cost data. */
2241 destroy_cost_data (LOOP_VINFO_TARGET_COST_DATA (loop_vinfo));
2242 LOOP_VINFO_TARGET_COST_DATA (loop_vinfo)
2243 = init_cost (LOOP_VINFO_LOOP (loop_vinfo));
2244 /* Reset accumulated rgroup information. */
2245 release_vec_loop_masks (&LOOP_VINFO_MASKS (loop_vinfo));
2246 /* Reset assorted flags. */
2247 LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = false;
2248 LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = false;
2249 LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo) = 0;
2250 LOOP_VINFO_VERSIONING_THRESHOLD (loop_vinfo) = 0;
2251 LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = saved_can_fully_mask_p;
2252
2253 goto start_over;
2254 }
2255
2256 /* Function vect_analyze_loop.
2257
2258 Apply a set of analyses on LOOP, and create a loop_vec_info struct
2259 for it. The different analyses will record information in the
2260 loop_vec_info struct. If ORIG_LOOP_VINFO is not NULL epilogue must
2261    loop_vec_info struct.  If ORIG_LOOP_VINFO is not NULL, LOOP is the epilogue
2262    of the loop described by ORIG_LOOP_VINFO and must itself be vectorized.  */
2263 vect_analyze_loop (struct loop *loop, loop_vec_info orig_loop_vinfo,
2264 vec_info_shared *shared)
2265 {
2266 loop_vec_info loop_vinfo;
2267 auto_vector_sizes vector_sizes;
2268
2269   /* Autodetect the first vector size we try.  */
2270 current_vector_size = 0;
2271 targetm.vectorize.autovectorize_vector_sizes (&vector_sizes);
2272 unsigned int next_size = 0;
2273
2274 DUMP_VECT_SCOPE ("analyze_loop_nest");
2275
2276 if (loop_outer (loop)
2277 && loop_vec_info_for_loop (loop_outer (loop))
2278 && LOOP_VINFO_VECTORIZABLE_P (loop_vec_info_for_loop (loop_outer (loop))))
2279 {
2280 if (dump_enabled_p ())
2281 dump_printf_loc (MSG_NOTE, vect_location,
2282 "outer-loop already vectorized.\n");
2283 return NULL;
2284 }
2285
2286 if (!find_loop_nest (loop, &shared->loop_nest))
2287 {
2288 if (dump_enabled_p ())
2289 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2290 "not vectorized: loop nest containing two "
2291 "or more consecutive inner loops cannot be "
2292 "vectorized\n");
2293 return NULL;
2294 }
2295
2296 unsigned n_stmts = 0;
2297 poly_uint64 autodetected_vector_size = 0;
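  /* Try the autodetected vector size first; if analysis fails and the
     failure is not fatal, retry with each further size reported by the
     target until one succeeds or the sizes are exhausted.  */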
2298 while (1)
2299 {
2300 /* Check the CFG characteristics of the loop (nesting, entry/exit). */
2301 loop_vinfo = vect_analyze_loop_form (loop, shared);
2302 if (!loop_vinfo)
2303 {
2304 if (dump_enabled_p ())
2305 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2306 "bad loop form.\n");
2307 return NULL;
2308 }
2309
2310 bool fatal = false;
2311
2312 if (orig_loop_vinfo)
2313 LOOP_VINFO_ORIG_LOOP_INFO (loop_vinfo) = orig_loop_vinfo;
2314
2315 if (vect_analyze_loop_2 (loop_vinfo, fatal, &n_stmts))
2316 {
2317 LOOP_VINFO_VECTORIZABLE_P (loop_vinfo) = 1;
2318
2319 return loop_vinfo;
2320 }
2321
2322 delete loop_vinfo;
2323
2324 if (next_size == 0)
2325 autodetected_vector_size = current_vector_size;
2326
2327 if (next_size < vector_sizes.length ()
2328 && known_eq (vector_sizes[next_size], autodetected_vector_size))
2329 next_size += 1;
2330
2331 if (fatal
2332 || next_size == vector_sizes.length ()
2333 || known_eq (current_vector_size, 0U))
2334 return NULL;
2335
2336 /* Try the next biggest vector size. */
2337 current_vector_size = vector_sizes[next_size++];
2338 if (dump_enabled_p ())
2339 {
2340 dump_printf_loc (MSG_NOTE, vect_location,
2341 "***** Re-trying analysis with "
2342 "vector size ");
2343 dump_dec (MSG_NOTE, current_vector_size);
2344 dump_printf (MSG_NOTE, "\n");
2345 }
2346 }
2347 }
2348
2349 /* Return true if there is an in-order reduction function for CODE, storing
2350 it in *REDUC_FN if so. */
2351
2352 static bool
2353 fold_left_reduction_fn (tree_code code, internal_fn *reduc_fn)
2354 {
2355 switch (code)
2356 {
2357 case PLUS_EXPR:
2358 *reduc_fn = IFN_FOLD_LEFT_PLUS;
2359 return true;
2360
2361 default:
2362 return false;
2363 }
2364 }
2365
2366 /* Function reduction_fn_for_scalar_code
2367
2368 Input:
2369    CODE - tree_code of a reduction operation.
2370
2371 Output:
2372 REDUC_FN - the corresponding internal function to be used to reduce the
2373 vector of partial results into a single scalar result, or IFN_LAST
2374 if the operation is a supported reduction operation, but does not have
2375 such an internal function.
2376
2377    Return FALSE if CODE currently cannot be vectorized as a reduction.  */
2378
2379 static bool
2380 reduction_fn_for_scalar_code (enum tree_code code, internal_fn *reduc_fn)
2381 {
2382 switch (code)
2383 {
2384 case MAX_EXPR:
2385 *reduc_fn = IFN_REDUC_MAX;
2386 return true;
2387
2388 case MIN_EXPR:
2389 *reduc_fn = IFN_REDUC_MIN;
2390 return true;
2391
2392 case PLUS_EXPR:
2393 *reduc_fn = IFN_REDUC_PLUS;
2394 return true;
2395
2396 case BIT_AND_EXPR:
2397 *reduc_fn = IFN_REDUC_AND;
2398 return true;
2399
2400 case BIT_IOR_EXPR:
2401 *reduc_fn = IFN_REDUC_IOR;
2402 return true;
2403
2404 case BIT_XOR_EXPR:
2405 *reduc_fn = IFN_REDUC_XOR;
2406 return true;
2407
2408 case MULT_EXPR:
2409 case MINUS_EXPR:
2410 *reduc_fn = IFN_LAST;
2411 return true;
2412
2413 default:
2414 return false;
2415 }
2416 }
2417
2418 /* If there is a neutral value X such that SLP reduction NODE would not
2419 be affected by the introduction of additional X elements, return that X,
2420 otherwise return null. CODE is the code of the reduction. REDUC_CHAIN
2421 is true if the SLP statements perform a single reduction, false if each
2422 statement performs an independent reduction. */
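/* For example, additional zero elements leave a PLUS_EXPR or BIT_IOR_EXPR
   reduction unchanged, additional one elements leave a MULT_EXPR reduction
   unchanged, and additional all-ones elements leave a BIT_AND_EXPR
   reduction unchanged.  */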
2423
2424 static tree
2425 neutral_op_for_slp_reduction (slp_tree slp_node, tree_code code,
2426 bool reduc_chain)
2427 {
2428 vec<stmt_vec_info> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
2429 stmt_vec_info stmt_vinfo = stmts[0];
2430 tree vector_type = STMT_VINFO_VECTYPE (stmt_vinfo);
2431 tree scalar_type = TREE_TYPE (vector_type);
2432 struct loop *loop = gimple_bb (stmt_vinfo->stmt)->loop_father;
2433 gcc_assert (loop);
2434
2435 switch (code)
2436 {
2437 case WIDEN_SUM_EXPR:
2438 case DOT_PROD_EXPR:
2439 case SAD_EXPR:
2440 case PLUS_EXPR:
2441 case MINUS_EXPR:
2442 case BIT_IOR_EXPR:
2443 case BIT_XOR_EXPR:
2444 return build_zero_cst (scalar_type);
2445
2446 case MULT_EXPR:
2447 return build_one_cst (scalar_type);
2448
2449 case BIT_AND_EXPR:
2450 return build_all_ones_cst (scalar_type);
2451
2452 case MAX_EXPR:
2453 case MIN_EXPR:
2454 /* For MIN/MAX the initial values are neutral. A reduction chain
2455 has only a single initial value, so that value is neutral for
2456 all statements. */
2457 if (reduc_chain)
2458 return PHI_ARG_DEF_FROM_EDGE (stmt_vinfo->stmt,
2459 loop_preheader_edge (loop));
2460 return NULL_TREE;
2461
2462 default:
2463 return NULL_TREE;
2464 }
2465 }
2466
2467 /* Error reporting helper for vect_is_simple_reduction below. GIMPLE statement
2468 STMT is printed with a message MSG. */
2469
2470 static void
2471 report_vect_op (dump_flags_t msg_type, gimple *stmt, const char *msg)
2472 {
2473 dump_printf_loc (msg_type, vect_location, "%s", msg);
2474 dump_gimple_stmt (msg_type, TDF_SLIM, stmt, 0);
2475 }
2476
2477 /* DEF_STMT_INFO occurs in a loop that contains a potential reduction
2478 operation. Return true if the results of DEF_STMT_INFO are something
2479 that can be accumulated by such a reduction. */
2480
2481 static bool
2482 vect_valid_reduction_input_p (stmt_vec_info def_stmt_info)
2483 {
2484 return (is_gimple_assign (def_stmt_info->stmt)
2485 || is_gimple_call (def_stmt_info->stmt)
2486 || STMT_VINFO_DEF_TYPE (def_stmt_info) == vect_induction_def
2487 || (gimple_code (def_stmt_info->stmt) == GIMPLE_PHI
2488 && STMT_VINFO_DEF_TYPE (def_stmt_info) == vect_internal_def
2489 && !is_loop_header_bb_p (gimple_bb (def_stmt_info->stmt))));
2490 }
2491
2492 /* Detect SLP reduction of the form:
2493
2494 #a1 = phi <a5, a0>
2495 a2 = operation (a1)
2496 a3 = operation (a2)
2497 a4 = operation (a3)
2498 a5 = operation (a4)
2499
2500 #a = phi <a5>
2501
2502 PHI is the reduction phi node (#a1 = phi <a5, a0> above)
2503 FIRST_STMT is the first reduction stmt in the chain
2504 (a2 = operation (a1)).
2505
2506 Return TRUE if a reduction chain was detected. */
2507
2508 static bool
2509 vect_is_slp_reduction (loop_vec_info loop_info, gimple *phi,
2510 gimple *first_stmt)
2511 {
2512 struct loop *loop = (gimple_bb (phi))->loop_father;
2513 struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
2514 enum tree_code code;
2515 gimple *loop_use_stmt = NULL;
2516 stmt_vec_info use_stmt_info, current_stmt_info = NULL;
2517 tree lhs;
2518 imm_use_iterator imm_iter;
2519 use_operand_p use_p;
2520 int nloop_uses, size = 0, n_out_of_loop_uses;
2521 bool found = false;
2522
2523 if (loop != vect_loop)
2524 return false;
2525
2526 lhs = PHI_RESULT (phi);
2527 code = gimple_assign_rhs_code (first_stmt);
2528 while (1)
2529 {
2530 nloop_uses = 0;
2531 n_out_of_loop_uses = 0;
2532 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
2533 {
2534 gimple *use_stmt = USE_STMT (use_p);
2535 if (is_gimple_debug (use_stmt))
2536 continue;
2537
2538 /* Check if we got back to the reduction phi. */
2539 if (use_stmt == phi)
2540 {
2541 loop_use_stmt = use_stmt;
2542 found = true;
2543 break;
2544 }
2545
2546 if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
2547 {
2548 loop_use_stmt = use_stmt;
2549 nloop_uses++;
2550 }
2551 else
2552 n_out_of_loop_uses++;
2553
2554 	  /* There can be either a single use in the loop or two uses in
2555 phi nodes. */
2556 if (nloop_uses > 1 || (n_out_of_loop_uses && nloop_uses))
2557 return false;
2558 }
2559
2560 if (found)
2561 break;
2562
2563 /* We reached a statement with no loop uses. */
2564 if (nloop_uses == 0)
2565 return false;
2566
2567 /* This is a loop exit phi, and we haven't reached the reduction phi. */
2568 if (gimple_code (loop_use_stmt) == GIMPLE_PHI)
2569 return false;
2570
2571 if (!is_gimple_assign (loop_use_stmt)
2572 || code != gimple_assign_rhs_code (loop_use_stmt)
2573 || !flow_bb_inside_loop_p (loop, gimple_bb (loop_use_stmt)))
2574 return false;
2575
2576 /* Insert USE_STMT into reduction chain. */
2577 use_stmt_info = loop_info->lookup_stmt (loop_use_stmt);
2578 if (current_stmt_info)
2579 {
2580 REDUC_GROUP_NEXT_ELEMENT (current_stmt_info) = use_stmt_info;
2581 REDUC_GROUP_FIRST_ELEMENT (use_stmt_info)
2582 = REDUC_GROUP_FIRST_ELEMENT (current_stmt_info);
2583 }
2584 else
2585 REDUC_GROUP_FIRST_ELEMENT (use_stmt_info) = use_stmt_info;
2586
2587 lhs = gimple_assign_lhs (loop_use_stmt);
2588 current_stmt_info = use_stmt_info;
2589 size++;
2590 }
2591
2592 if (!found || loop_use_stmt != phi || size < 2)
2593 return false;
2594
2595 /* Swap the operands, if needed, to make the reduction operand be the second
2596 operand. */
2597 lhs = PHI_RESULT (phi);
2598 stmt_vec_info next_stmt_info = REDUC_GROUP_FIRST_ELEMENT (current_stmt_info);
2599 while (next_stmt_info)
2600 {
2601 gassign *next_stmt = as_a <gassign *> (next_stmt_info->stmt);
2602 if (gimple_assign_rhs2 (next_stmt) == lhs)
2603 {
2604 tree op = gimple_assign_rhs1 (next_stmt);
2605 stmt_vec_info def_stmt_info = loop_info->lookup_def (op);
2606
2607 /* Check that the other def is either defined in the loop
2608 ("vect_internal_def"), or it's an induction (defined by a
2609 loop-header phi-node). */
2610 if (def_stmt_info
2611 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt_info->stmt))
2612 && vect_valid_reduction_input_p (def_stmt_info))
2613 {
2614 lhs = gimple_assign_lhs (next_stmt);
2615 next_stmt_info = REDUC_GROUP_NEXT_ELEMENT (next_stmt_info);
2616 continue;
2617 }
2618
2619 return false;
2620 }
2621 else
2622 {
2623 tree op = gimple_assign_rhs2 (next_stmt);
2624 stmt_vec_info def_stmt_info = loop_info->lookup_def (op);
2625
2626 /* Check that the other def is either defined in the loop
2627 ("vect_internal_def"), or it's an induction (defined by a
2628 loop-header phi-node). */
2629 if (def_stmt_info
2630 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt_info->stmt))
2631 && vect_valid_reduction_input_p (def_stmt_info))
2632 {
2633 if (dump_enabled_p ())
2634 {
2635 dump_printf_loc (MSG_NOTE, vect_location, "swapping oprnds: ");
2636 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, next_stmt, 0);
2637 }
2638
2639 swap_ssa_operands (next_stmt,
2640 gimple_assign_rhs1_ptr (next_stmt),
2641 gimple_assign_rhs2_ptr (next_stmt));
2642 update_stmt (next_stmt);
2643
2644 if (CONSTANT_CLASS_P (gimple_assign_rhs1 (next_stmt)))
2645 LOOP_VINFO_OPERANDS_SWAPPED (loop_info) = true;
2646 }
2647 else
2648 return false;
2649 }
2650
2651 lhs = gimple_assign_lhs (next_stmt);
2652 next_stmt_info = REDUC_GROUP_NEXT_ELEMENT (next_stmt_info);
2653 }
2654
2655 /* Save the chain for further analysis in SLP detection. */
2656 stmt_vec_info first_stmt_info
2657 = REDUC_GROUP_FIRST_ELEMENT (current_stmt_info);
2658 LOOP_VINFO_REDUCTION_CHAINS (loop_info).safe_push (first_stmt_info);
2659 REDUC_GROUP_SIZE (first_stmt_info) = size;
2660
2661 return true;
2662 }
2663
2664 /* Return true if we need an in-order reduction for operation CODE
2665 on type TYPE. NEED_WRAPPING_INTEGRAL_OVERFLOW is true if integer
2666 overflow must wrap. */
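/* For example, a float PLUS_EXPR reduction has to be done in order unless
   -fassociative-math is in effect, whereas float MIN_EXPR and MAX_EXPR
   reductions never need an in-order reduction.  */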
2667
2668 static bool
2669 needs_fold_left_reduction_p (tree type, tree_code code,
2670 bool need_wrapping_integral_overflow)
2671 {
2672 /* CHECKME: check for !flag_finite_math_only too? */
2673 if (SCALAR_FLOAT_TYPE_P (type))
2674 switch (code)
2675 {
2676 case MIN_EXPR:
2677 case MAX_EXPR:
2678 return false;
2679
2680 default:
2681 return !flag_associative_math;
2682 }
2683
2684 if (INTEGRAL_TYPE_P (type))
2685 {
2686 if (!operation_no_trapping_overflow (type, code))
2687 return true;
2688 if (need_wrapping_integral_overflow
2689 && !TYPE_OVERFLOW_WRAPS (type)
2690 && operation_can_overflow (code))
2691 return true;
2692 return false;
2693 }
2694
2695 if (SAT_FIXED_POINT_TYPE_P (type))
2696 return true;
2697
2698 return false;
2699 }
2700
2701 /* Return true if the reduction PHI in LOOP with latch arg LOOP_ARG and
2702 reduction operation CODE has a handled computation expression. */
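/* The search below performs a depth-first walk from LOOP_ARG through the
   defining statement of each operand until the PHI result is reached again,
   recording the chain of uses in PATH and backtracking whenever a definition
   falls outside LOOP.  The path is then rejected if any intermediate value
   has more than one use or is computed by an operation other than CODE
   (a MINUS_EXPR is accepted for a PLUS_EXPR reduction, provided the
   accumulated value is not negated an odd number of times overall).  */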
2703
2704 bool
2705 check_reduction_path (dump_user_location_t loc, loop_p loop, gphi *phi,
2706 tree loop_arg, enum tree_code code)
2707 {
2708 auto_vec<std::pair<ssa_op_iter, use_operand_p> > path;
2709 auto_bitmap visited;
2710 tree lookfor = PHI_RESULT (phi);
2711 ssa_op_iter curri;
2712 use_operand_p curr = op_iter_init_phiuse (&curri, phi, SSA_OP_USE);
2713 while (USE_FROM_PTR (curr) != loop_arg)
2714 curr = op_iter_next_use (&curri);
2715 curri.i = curri.numops;
2716 do
2717 {
2718 path.safe_push (std::make_pair (curri, curr));
2719 tree use = USE_FROM_PTR (curr);
2720 if (use == lookfor)
2721 break;
2722 gimple *def = SSA_NAME_DEF_STMT (use);
2723 if (gimple_nop_p (def)
2724 || ! flow_bb_inside_loop_p (loop, gimple_bb (def)))
2725 {
2726 pop:
2727 do
2728 {
2729 std::pair<ssa_op_iter, use_operand_p> x = path.pop ();
2730 curri = x.first;
2731 curr = x.second;
2732 do
2733 curr = op_iter_next_use (&curri);
2734 /* Skip already visited or non-SSA operands (from iterating
2735 over PHI args). */
2736 while (curr != NULL_USE_OPERAND_P
2737 && (TREE_CODE (USE_FROM_PTR (curr)) != SSA_NAME
2738 || ! bitmap_set_bit (visited,
2739 SSA_NAME_VERSION
2740 (USE_FROM_PTR (curr)))));
2741 }
2742 while (curr == NULL_USE_OPERAND_P && ! path.is_empty ());
2743 if (curr == NULL_USE_OPERAND_P)
2744 break;
2745 }
2746 else
2747 {
2748 if (gimple_code (def) == GIMPLE_PHI)
2749 curr = op_iter_init_phiuse (&curri, as_a <gphi *>(def), SSA_OP_USE);
2750 else
2751 curr = op_iter_init_use (&curri, def, SSA_OP_USE);
2752 while (curr != NULL_USE_OPERAND_P
2753 && (TREE_CODE (USE_FROM_PTR (curr)) != SSA_NAME
2754 || ! bitmap_set_bit (visited,
2755 SSA_NAME_VERSION
2756 (USE_FROM_PTR (curr)))))
2757 curr = op_iter_next_use (&curri);
2758 if (curr == NULL_USE_OPERAND_P)
2759 goto pop;
2760 }
2761 }
2762 while (1);
2763 if (dump_file && (dump_flags & TDF_DETAILS))
2764 {
2765 dump_printf_loc (MSG_NOTE, loc, "reduction path: ");
2766 unsigned i;
2767 std::pair<ssa_op_iter, use_operand_p> *x;
2768 FOR_EACH_VEC_ELT (path, i, x)
2769 {
2770 dump_generic_expr (MSG_NOTE, TDF_SLIM, USE_FROM_PTR (x->second));
2771 dump_printf (MSG_NOTE, " ");
2772 }
2773 dump_printf (MSG_NOTE, "\n");
2774 }
2775
2776 /* Check whether the reduction path detected is valid. */
2777 bool fail = path.length () == 0;
2778 bool neg = false;
2779 for (unsigned i = 1; i < path.length (); ++i)
2780 {
2781 gimple *use_stmt = USE_STMT (path[i].second);
2782 tree op = USE_FROM_PTR (path[i].second);
2783 if (! has_single_use (op)
2784 || ! is_gimple_assign (use_stmt))
2785 {
2786 fail = true;
2787 break;
2788 }
2789 if (gimple_assign_rhs_code (use_stmt) != code)
2790 {
2791 if (code == PLUS_EXPR
2792 && gimple_assign_rhs_code (use_stmt) == MINUS_EXPR)
2793 {
2794 /* Track whether we negate the reduction value each iteration. */
2795 if (gimple_assign_rhs2 (use_stmt) == op)
2796 neg = ! neg;
2797 }
2798 else
2799 {
2800 fail = true;
2801 break;
2802 }
2803 }
2804 }
2805 return ! fail && ! neg;
2806 }
2807
2808
2809 /* Function vect_is_simple_reduction
2810
2811 (1) Detect a cross-iteration def-use cycle that represents a simple
2812 reduction computation. We look for the following pattern:
2813
2814 loop_header:
2815 a1 = phi < a0, a2 >
2816 a3 = ...
2817 a2 = operation (a3, a1)
2818
2819 or
2820
2821 a3 = ...
2822 loop_header:
2823 a1 = phi < a0, a2 >
2824 a2 = operation (a3, a1)
2825
2826 such that:
2827 1. operation is commutative and associative and it is safe to
2828 change the order of the computation
2829 2. no uses for a2 in the loop (a2 is used out of the loop)
2830 3. no uses of a1 in the loop besides the reduction operation
2831 4. no uses of a1 outside the loop.
2832
2833 Conditions 1,4 are tested here.
2834 Conditions 2,3 are tested in vect_mark_stmts_to_be_vectorized.
2835
2836 (2) Detect a cross-iteration def-use cycle in nested loops, i.e.,
2837 nested cycles.
2838
2839 (3) Detect cycles of phi nodes in outer-loop vectorization, i.e., double
2840 reductions:
2841
2842 a1 = phi < a0, a2 >
2843 inner loop (def of a3)
2844 a2 = phi < a3 >
2845
2846    (4) Detect condition expressions, i.e.:
2847 for (int i = 0; i < N; i++)
2848 if (a[i] < val)
2849 ret_val = a[i];
2850
2851 */
2852
2853 static stmt_vec_info
2854 vect_is_simple_reduction (loop_vec_info loop_info, stmt_vec_info phi_info,
2855 bool *double_reduc,
2856 bool need_wrapping_integral_overflow,
2857 enum vect_reduction_type *v_reduc_type)
2858 {
2859 gphi *phi = as_a <gphi *> (phi_info->stmt);
2860 struct loop *loop = (gimple_bb (phi))->loop_father;
2861 struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
2862 gimple *phi_use_stmt = NULL;
2863 enum tree_code orig_code, code;
2864 tree op1, op2, op3 = NULL_TREE, op4 = NULL_TREE;
2865 tree type;
2866 int nloop_uses;
2867 tree name;
2868 imm_use_iterator imm_iter;
2869 use_operand_p use_p;
2870 bool phi_def;
2871
2872 *double_reduc = false;
2873 *v_reduc_type = TREE_CODE_REDUCTION;
2874
2875 tree phi_name = PHI_RESULT (phi);
2876 /* ??? If there are no uses of the PHI result the inner loop reduction
2877 won't be detected as possibly double-reduction by vectorizable_reduction
2878 because that tries to walk the PHI arg from the preheader edge which
2879 can be constant. See PR60382. */
2880 if (has_zero_uses (phi_name))
2881 return NULL;
2882 nloop_uses = 0;
2883 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, phi_name)
2884 {
2885 gimple *use_stmt = USE_STMT (use_p);
2886 if (is_gimple_debug (use_stmt))
2887 continue;
2888
2889 if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
2890 {
2891 if (dump_enabled_p ())
2892 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2893 "intermediate value used outside loop.\n");
2894
2895 return NULL;
2896 }
2897
2898 nloop_uses++;
2899 if (nloop_uses > 1)
2900 {
2901 if (dump_enabled_p ())
2902 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2903 "reduction value used in loop.\n");
2904 return NULL;
2905 }
2906
2907 phi_use_stmt = use_stmt;
2908 }
2909
2910 edge latch_e = loop_latch_edge (loop);
2911 tree loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);
2912 if (TREE_CODE (loop_arg) != SSA_NAME)
2913 {
2914 if (dump_enabled_p ())
2915 {
2916 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2917 "reduction: not ssa_name: ");
2918 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, loop_arg);
2919 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
2920 }
2921 return NULL;
2922 }
2923
2924 stmt_vec_info def_stmt_info = loop_info->lookup_def (loop_arg);
2925 if (!def_stmt_info)
2926 return NULL;
2927
2928 if (gassign *def_stmt = dyn_cast <gassign *> (def_stmt_info->stmt))
2929 {
2930 name = gimple_assign_lhs (def_stmt);
2931 phi_def = false;
2932 }
2933 else if (gphi *def_stmt = dyn_cast <gphi *> (def_stmt_info->stmt))
2934 {
2935 name = PHI_RESULT (def_stmt);
2936 phi_def = true;
2937 }
2938 else
2939 {
2940 if (dump_enabled_p ())
2941 {
2942 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2943 "reduction: unhandled reduction operation: ");
2944 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
2945 def_stmt_info->stmt, 0);
2946 }
2947 return NULL;
2948 }
2949
2950 nloop_uses = 0;
2951 auto_vec<gphi *, 3> lcphis;
2952 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, name)
2953 {
2954 gimple *use_stmt = USE_STMT (use_p);
2955 if (is_gimple_debug (use_stmt))
2956 continue;
2957 if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
2958 nloop_uses++;
2959 else
2960 /* We can have more than one loop-closed PHI. */
2961 lcphis.safe_push (as_a <gphi *> (use_stmt));
2962 if (nloop_uses > 1)
2963 {
2964 if (dump_enabled_p ())
2965 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2966 "reduction used in loop.\n");
2967 return NULL;
2968 }
2969 }
2970
2971 /* If DEF_STMT is a phi node itself, we expect it to have a single argument
2972 defined in the inner loop. */
2973 if (phi_def)
2974 {
2975 gphi *def_stmt = as_a <gphi *> (def_stmt_info->stmt);
2976 op1 = PHI_ARG_DEF (def_stmt, 0);
2977
2978 if (gimple_phi_num_args (def_stmt) != 1
2979 || TREE_CODE (op1) != SSA_NAME)
2980 {
2981 if (dump_enabled_p ())
2982 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2983 "unsupported phi node definition.\n");
2984
2985 return NULL;
2986 }
2987
2988 gimple *def1 = SSA_NAME_DEF_STMT (op1);
2989 if (gimple_bb (def1)
2990 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
2991 && loop->inner
2992 && flow_bb_inside_loop_p (loop->inner, gimple_bb (def1))
2993 && is_gimple_assign (def1)
2994 && flow_bb_inside_loop_p (loop->inner, gimple_bb (phi_use_stmt)))
2995 {
2996 if (dump_enabled_p ())
2997 report_vect_op (MSG_NOTE, def_stmt,
2998 "detected double reduction: ");
2999
3000 *double_reduc = true;
3001 return def_stmt_info;
3002 }
3003
3004 return NULL;
3005 }
3006
3007   /* If we are vectorizing an inner reduction, it is executed in the
3008      original order only when we are not dealing with a double
3009      reduction.  */
3010 bool check_reduction = true;
3011 if (flow_loop_nested_p (vect_loop, loop))
3012 {
3013 gphi *lcphi;
3014 unsigned i;
3015 check_reduction = false;
3016 FOR_EACH_VEC_ELT (lcphis, i, lcphi)
3017 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, gimple_phi_result (lcphi))
3018 {
3019 gimple *use_stmt = USE_STMT (use_p);
3020 if (is_gimple_debug (use_stmt))
3021 continue;
3022 if (! flow_bb_inside_loop_p (vect_loop, gimple_bb (use_stmt)))
3023 check_reduction = true;
3024 }
3025 }
3026
3027 gassign *def_stmt = as_a <gassign *> (def_stmt_info->stmt);
3028 bool nested_in_vect_loop = flow_loop_nested_p (vect_loop, loop);
3029 code = orig_code = gimple_assign_rhs_code (def_stmt);
3030
3031   /* We can handle "res -= x[i]", which is non-associative, by
3032      simply rewriting it into "res += -x[i]".  Avoid changing the
3033      gimple instruction for the first simple tests and only do this
3034 if we're allowed to change code at all. */
3035 if (code == MINUS_EXPR && gimple_assign_rhs2 (def_stmt) != phi_name)
3036 code = PLUS_EXPR;
3037
3038 if (code == COND_EXPR)
3039 {
3040 if (! nested_in_vect_loop)
3041 *v_reduc_type = COND_REDUCTION;
3042
3043 op3 = gimple_assign_rhs1 (def_stmt);
3044 if (COMPARISON_CLASS_P (op3))
3045 {
3046 op4 = TREE_OPERAND (op3, 1);
3047 op3 = TREE_OPERAND (op3, 0);
3048 }
3049 if (op3 == phi_name || op4 == phi_name)
3050 {
3051 if (dump_enabled_p ())
3052 report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
3053 "reduction: condition depends on previous"
3054 " iteration: ");
3055 return NULL;
3056 }
3057
3058 op1 = gimple_assign_rhs2 (def_stmt);
3059 op2 = gimple_assign_rhs3 (def_stmt);
3060 }
3061 else if (!commutative_tree_code (code) || !associative_tree_code (code))
3062 {
3063 if (dump_enabled_p ())
3064 report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
3065 "reduction: not commutative/associative: ");
3066 return NULL;
3067 }
3068 else if (get_gimple_rhs_class (code) == GIMPLE_BINARY_RHS)
3069 {
3070 op1 = gimple_assign_rhs1 (def_stmt);
3071 op2 = gimple_assign_rhs2 (def_stmt);
3072 }
3073 else
3074 {
3075 if (dump_enabled_p ())
3076 report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
3077 "reduction: not handled operation: ");
3078 return NULL;
3079 }
3080
3081 if (TREE_CODE (op1) != SSA_NAME && TREE_CODE (op2) != SSA_NAME)
3082 {
3083 if (dump_enabled_p ())
3084 report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
3085 "reduction: both uses not ssa_names: ");
3086
3087 return NULL;
3088 }
3089
3090 type = TREE_TYPE (gimple_assign_lhs (def_stmt));
3091 if ((TREE_CODE (op1) == SSA_NAME
3092 && !types_compatible_p (type,TREE_TYPE (op1)))
3093 || (TREE_CODE (op2) == SSA_NAME
3094 && !types_compatible_p (type, TREE_TYPE (op2)))
3095 || (op3 && TREE_CODE (op3) == SSA_NAME
3096 && !types_compatible_p (type, TREE_TYPE (op3)))
3097 || (op4 && TREE_CODE (op4) == SSA_NAME
3098 && !types_compatible_p (type, TREE_TYPE (op4))))
3099 {
3100 if (dump_enabled_p ())
3101 {
3102 dump_printf_loc (MSG_NOTE, vect_location,
3103 "reduction: multiple types: operation type: ");
3104 dump_generic_expr (MSG_NOTE, TDF_SLIM, type);
3105 dump_printf (MSG_NOTE, ", operands types: ");
3106 dump_generic_expr (MSG_NOTE, TDF_SLIM,
3107 TREE_TYPE (op1));
3108 dump_printf (MSG_NOTE, ",");
3109 dump_generic_expr (MSG_NOTE, TDF_SLIM,
3110 TREE_TYPE (op2));
3111 if (op3)
3112 {
3113 dump_printf (MSG_NOTE, ",");
3114 dump_generic_expr (MSG_NOTE, TDF_SLIM,
3115 TREE_TYPE (op3));
3116 }
3117
3118 if (op4)
3119 {
3120 dump_printf (MSG_NOTE, ",");
3121 dump_generic_expr (MSG_NOTE, TDF_SLIM,
3122 TREE_TYPE (op4));
3123 }
3124 dump_printf (MSG_NOTE, "\n");
3125 }
3126
3127 return NULL;
3128 }
3129
3130 /* Check whether it's ok to change the order of the computation.
3131 Generally, when vectorizing a reduction we change the order of the
3132 computation. This may change the behavior of the program in some
3133 cases, so we need to check that this is ok. One exception is when
3134 vectorizing an outer-loop: the inner-loop is executed sequentially,
3135 and therefore vectorizing reductions in the inner-loop during
3136 outer-loop vectorization is safe. */
3137 if (check_reduction
3138 && *v_reduc_type == TREE_CODE_REDUCTION
3139 && needs_fold_left_reduction_p (type, code,
3140 need_wrapping_integral_overflow))
3141 *v_reduc_type = FOLD_LEFT_REDUCTION;
3142
3143 /* Reduction is safe. We're dealing with one of the following:
3144 1) integer arithmetic and no trapv
3145 2) floating point arithmetic, and special flags permit this optimization
3146 3) nested cycle (i.e., outer loop vectorization). */
3147 stmt_vec_info def1_info = loop_info->lookup_def (op1);
3148 stmt_vec_info def2_info = loop_info->lookup_def (op2);
3149 if (code != COND_EXPR && !def1_info && !def2_info)
3150 {
3151 if (dump_enabled_p ())
3152 report_vect_op (MSG_NOTE, def_stmt, "reduction: no defs for operands: ");
3153 return NULL;
3154 }
3155
3156 /* Check that one def is the reduction def, defined by PHI,
3157 the other def is either defined in the loop ("vect_internal_def"),
3158 or it's an induction (defined by a loop-header phi-node). */
3159
3160 if (def2_info
3161 && def2_info->stmt == phi
3162 && (code == COND_EXPR
3163 || !def1_info
3164 || vect_valid_reduction_input_p (def1_info)))
3165 {
3166 if (dump_enabled_p ())
3167 report_vect_op (MSG_NOTE, def_stmt, "detected reduction: ");
3168 return def_stmt_info;
3169 }
3170
3171 if (def1_info
3172 && def1_info->stmt == phi
3173 && (code == COND_EXPR
3174 || !def2_info
3175 || vect_valid_reduction_input_p (def2_info)))
3176 {
3177 if (! nested_in_vect_loop && orig_code != MINUS_EXPR)
3178 {
3179 /* Check if we can swap operands (just for simplicity - so that
3180 the rest of the code can assume that the reduction variable
3181 is always the last (second) argument). */
3182 if (code == COND_EXPR)
3183 {
3184 /* Swap cond_expr by inverting the condition. */
3185 tree cond_expr = gimple_assign_rhs1 (def_stmt);
3186 enum tree_code invert_code = ERROR_MARK;
3187 enum tree_code cond_code = TREE_CODE (cond_expr);
3188
3189 if (TREE_CODE_CLASS (cond_code) == tcc_comparison)
3190 {
3191 bool honor_nans = HONOR_NANS (TREE_OPERAND (cond_expr, 0));
3192 invert_code = invert_tree_comparison (cond_code, honor_nans);
3193 }
3194 if (invert_code != ERROR_MARK)
3195 {
3196 TREE_SET_CODE (cond_expr, invert_code);
3197 swap_ssa_operands (def_stmt,
3198 gimple_assign_rhs2_ptr (def_stmt),
3199 gimple_assign_rhs3_ptr (def_stmt));
3200 }
3201 else
3202 {
3203 if (dump_enabled_p ())
3204 report_vect_op (MSG_NOTE, def_stmt,
3205 "detected reduction: cannot swap operands "
3206 "for cond_expr");
3207 return NULL;
3208 }
3209 }
3210 else
3211 swap_ssa_operands (def_stmt, gimple_assign_rhs1_ptr (def_stmt),
3212 gimple_assign_rhs2_ptr (def_stmt));
3213
3214 if (dump_enabled_p ())
3215 report_vect_op (MSG_NOTE, def_stmt,
3216 "detected reduction: need to swap operands: ");
3217
3218 if (CONSTANT_CLASS_P (gimple_assign_rhs1 (def_stmt)))
3219 LOOP_VINFO_OPERANDS_SWAPPED (loop_info) = true;
3220 }
3221 else
3222 {
3223 if (dump_enabled_p ())
3224 report_vect_op (MSG_NOTE, def_stmt, "detected reduction: ");
3225 }
3226
3227 return def_stmt_info;
3228 }
3229
3230 /* Try to find SLP reduction chain. */
3231 if (! nested_in_vect_loop
3232 && code != COND_EXPR
3233 && orig_code != MINUS_EXPR
3234 && vect_is_slp_reduction (loop_info, phi, def_stmt))
3235 {
3236 if (dump_enabled_p ())
3237 report_vect_op (MSG_NOTE, def_stmt,
3238 "reduction: detected reduction chain: ");
3239
3240 return def_stmt_info;
3241 }
3242
3243   /* Dissolve any group that vect_is_slp_reduction may have half-built.  */
3244 stmt_vec_info first = REDUC_GROUP_FIRST_ELEMENT (def_stmt_info);
3245 while (first)
3246 {
3247 stmt_vec_info next = REDUC_GROUP_NEXT_ELEMENT (first);
3248 REDUC_GROUP_FIRST_ELEMENT (first) = NULL;
3249 REDUC_GROUP_NEXT_ELEMENT (first) = NULL;
3250 first = next;
3251 }
3252
3253 /* Look for the expression computing loop_arg from loop PHI result. */
3254 if (check_reduction_path (vect_location, loop, phi, loop_arg, code))
3255 return def_stmt_info;
3256
3257 if (dump_enabled_p ())
3258 {
3259 report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
3260 "reduction: unknown pattern: ");
3261 }
3262
3263 return NULL;
3264 }
3265
3266 /* Wrapper around vect_is_simple_reduction, which will modify code
3267    in-place if it enables detection of more reductions.  The arguments
3268    are the same as for vect_is_simple_reduction.  */
3269
3270 stmt_vec_info
3271 vect_force_simple_reduction (loop_vec_info loop_info, stmt_vec_info phi_info,
3272 bool *double_reduc,
3273 bool need_wrapping_integral_overflow)
3274 {
3275 enum vect_reduction_type v_reduc_type;
3276 stmt_vec_info def_info
3277 = vect_is_simple_reduction (loop_info, phi_info, double_reduc,
3278 need_wrapping_integral_overflow,
3279 &v_reduc_type);
3280 if (def_info)
3281 {
3282 STMT_VINFO_REDUC_TYPE (phi_info) = v_reduc_type;
3283 STMT_VINFO_REDUC_DEF (phi_info) = def_info;
3284 STMT_VINFO_REDUC_TYPE (def_info) = v_reduc_type;
3285 STMT_VINFO_REDUC_DEF (def_info) = phi_info;
3286 }
3287 return def_info;
3288 }
3289
3290 /* Calculate cost of peeling the loop PEEL_ITERS_PROLOGUE times. */
3291 int
3292 vect_get_known_peeling_cost (loop_vec_info loop_vinfo, int peel_iters_prologue,
3293 int *peel_iters_epilogue,
3294 stmt_vector_for_cost *scalar_cost_vec,
3295 stmt_vector_for_cost *prologue_cost_vec,
3296 stmt_vector_for_cost *epilogue_cost_vec)
3297 {
3298 int retval = 0;
3299 int assumed_vf = vect_vf_for_cost (loop_vinfo);
3300
3301 if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
3302 {
3303 *peel_iters_epilogue = assumed_vf / 2;
3304 if (dump_enabled_p ())
3305 dump_printf_loc (MSG_NOTE, vect_location,
3306 "cost model: epilogue peel iters set to vf/2 "
3307 			 "because loop iterations are unknown.\n");
3308
3309       /* If peeled iterations are known but the number of scalar loop
3310 	 iterations is unknown, count a taken branch per peeled loop.  */
3311 retval = record_stmt_cost (prologue_cost_vec, 1, cond_branch_taken,
3312 NULL, 0, vect_prologue);
3313 retval = record_stmt_cost (prologue_cost_vec, 1, cond_branch_taken,
3314 NULL, 0, vect_epilogue);
3315 }
3316 else
3317 {
3318 int niters = LOOP_VINFO_INT_NITERS (loop_vinfo);
3319 peel_iters_prologue = niters < peel_iters_prologue ?
3320 niters : peel_iters_prologue;
3321 *peel_iters_epilogue = (niters - peel_iters_prologue) % assumed_vf;
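      /* Illustrative numbers only: with 103 known iterations, 2 prologue
	 iterations and an assumed VF of 4, the epilogue peels
	 (103 - 2) % 4 = 1 iteration.  */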
3322       /* If we need to peel for gaps but the computed epilogue peel count
3323 	 is zero, we have to peel VF iterations.  */
3324 if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) && !*peel_iters_epilogue)
3325 *peel_iters_epilogue = assumed_vf;
3326 }
3327
3328 stmt_info_for_cost *si;
3329 int j;
3330 if (peel_iters_prologue)
3331 FOR_EACH_VEC_ELT (*scalar_cost_vec, j, si)
3332 retval += record_stmt_cost (prologue_cost_vec,
3333 si->count * peel_iters_prologue,
3334 si->kind, si->stmt_info, si->misalign,
3335 vect_prologue);
3336 if (*peel_iters_epilogue)
3337 FOR_EACH_VEC_ELT (*scalar_cost_vec, j, si)
3338 retval += record_stmt_cost (epilogue_cost_vec,
3339 si->count * *peel_iters_epilogue,
3340 si->kind, si->stmt_info, si->misalign,
3341 vect_epilogue);
3342
3343 return retval;
3344 }
3345
3346 /* Function vect_estimate_min_profitable_iters
3347
3348 Return the number of iterations required for the vector version of the
3349 loop to be profitable relative to the cost of the scalar version of the
3350 loop.
3351
3352 *RET_MIN_PROFITABLE_NITERS is a cost model profitability threshold
3353    of iterations for vectorization.  A value of -1 means loop vectorization
3354 is not profitable. This returned value may be used for dynamic
3355 profitability check.
3356
3357 *RET_MIN_PROFITABLE_ESTIMATE is a profitability threshold to be used
3358 for static check against estimated number of iterations. */
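/* Both thresholds are scalar iteration counts, which is what the callers
   compare against the known or estimated number of scalar loop
   iterations.  */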
3359
3360 static void
3361 vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo,
3362 int *ret_min_profitable_niters,
3363 int *ret_min_profitable_estimate)
3364 {
3365 int min_profitable_iters;
3366 int min_profitable_estimate;
3367 int peel_iters_prologue;
3368 int peel_iters_epilogue;
3369 unsigned vec_inside_cost = 0;
3370 int vec_outside_cost = 0;
3371 unsigned vec_prologue_cost = 0;
3372 unsigned vec_epilogue_cost = 0;
3373 int scalar_single_iter_cost = 0;
3374 int scalar_outside_cost = 0;
3375 int assumed_vf = vect_vf_for_cost (loop_vinfo);
3376 int npeel = LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo);
3377 void *target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
3378
3379 /* Cost model disabled. */
3380 if (unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
3381 {
3382 dump_printf_loc (MSG_NOTE, vect_location, "cost model disabled.\n");
3383 *ret_min_profitable_niters = 0;
3384 *ret_min_profitable_estimate = 0;
3385 return;
3386 }
3387
3388 /* Requires loop versioning tests to handle misalignment. */
3389 if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo))
3390 {
3391 /* FIXME: Make cost depend on complexity of individual check. */
3392 unsigned len = LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).length ();
3393 (void) add_stmt_cost (target_cost_data, len, vector_stmt, NULL, 0,
3394 vect_prologue);
3395 dump_printf (MSG_NOTE,
3396 "cost model: Adding cost of checks for loop "
3397 "versioning to treat misalignment.\n");
3398 }
3399
3400 /* Requires loop versioning with alias checks. */
3401 if (LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
3402 {
3403 /* FIXME: Make cost depend on complexity of individual check. */
3404 unsigned len = LOOP_VINFO_COMP_ALIAS_DDRS (loop_vinfo).length ();
3405 (void) add_stmt_cost (target_cost_data, len, vector_stmt, NULL, 0,
3406 vect_prologue);
3407 len = LOOP_VINFO_CHECK_UNEQUAL_ADDRS (loop_vinfo).length ();
3408 if (len)
3409 /* Count LEN - 1 ANDs and LEN comparisons. */
3410 (void) add_stmt_cost (target_cost_data, len * 2 - 1, scalar_stmt,
3411 NULL, 0, vect_prologue);
3412 len = LOOP_VINFO_LOWER_BOUNDS (loop_vinfo).length ();
3413 if (len)
3414 {
3415 /* Count LEN - 1 ANDs and LEN comparisons. */
3416 unsigned int nstmts = len * 2 - 1;
3417 /* +1 for each bias that needs adding. */
3418 for (unsigned int i = 0; i < len; ++i)
3419 if (!LOOP_VINFO_LOWER_BOUNDS (loop_vinfo)[i].unsigned_p)
3420 nstmts += 1;
3421 (void) add_stmt_cost (target_cost_data, nstmts, scalar_stmt,
3422 NULL, 0, vect_prologue);
3423 }
3424 dump_printf (MSG_NOTE,
3425 "cost model: Adding cost of checks for loop "
3426 "versioning aliasing.\n");
3427 }
3428
3429 /* Requires loop versioning with niter checks. */
3430 if (LOOP_REQUIRES_VERSIONING_FOR_NITERS (loop_vinfo))
3431 {
3432 /* FIXME: Make cost depend on complexity of individual check. */
3433 (void) add_stmt_cost (target_cost_data, 1, vector_stmt, NULL, 0,
3434 vect_prologue);
3435 dump_printf (MSG_NOTE,
3436 "cost model: Adding cost of checks for loop "
3437 "versioning niters.\n");
3438 }
3439
3440 if (LOOP_REQUIRES_VERSIONING (loop_vinfo))
3441 (void) add_stmt_cost (target_cost_data, 1, cond_branch_taken, NULL, 0,
3442 vect_prologue);
3443
3444 /* Count statements in scalar loop. Using this as scalar cost for a single
3445 iteration for now.
3446
3447 TODO: Add outer loop support.
3448
3449 TODO: Consider assigning different costs to different scalar
3450 statements. */
3451
3452 scalar_single_iter_cost
3453 = LOOP_VINFO_SINGLE_SCALAR_ITERATION_COST (loop_vinfo);
3454
3455 /* Add additional cost for the peeled instructions in prologue and epilogue
3456 loop. (For fully-masked loops there will be no peeling.)
3457
3458 FORNOW: If we don't know the value of peel_iters for prologue or epilogue
3459 at compile-time - we assume it's vf/2 (the worst would be vf-1).
3460
3461 TODO: Build an expression that represents peel_iters for prologue and
3462 epilogue to be used in a run-time test. */
3463
3464 if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
3465 {
3466 peel_iters_prologue = 0;
3467 peel_iters_epilogue = 0;
3468
3469 if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo))
3470 {
3471 /* We need to peel exactly one iteration. */
3472 peel_iters_epilogue += 1;
3473 stmt_info_for_cost *si;
3474 int j;
3475 FOR_EACH_VEC_ELT (LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo),
3476 j, si)
3477 (void) add_stmt_cost (target_cost_data, si->count,
3478 si->kind, si->stmt_info, si->misalign,
3479 vect_epilogue);
3480 }
3481 }
3482 else if (npeel < 0)
3483 {
3484 peel_iters_prologue = assumed_vf / 2;
3485 dump_printf (MSG_NOTE, "cost model: "
3486 "prologue peel iters set to vf/2.\n");
3487
3488       /* If peeling for alignment is unknown, the loop bound of the main loop
3489 	 becomes unknown.  */
3490 peel_iters_epilogue = assumed_vf / 2;
3491 dump_printf (MSG_NOTE, "cost model: "
3492 "epilogue peel iters set to vf/2 because "
3493 "peeling for alignment is unknown.\n");
3494
3495 /* If peeled iterations are unknown, count a taken branch and a not taken
3496 branch per peeled loop. Even if scalar loop iterations are known,
3497 vector iterations are not known since peeled prologue iterations are
3498 not known. Hence guards remain the same. */
3499 (void) add_stmt_cost (target_cost_data, 1, cond_branch_taken,
3500 NULL, 0, vect_prologue);
3501 (void) add_stmt_cost (target_cost_data, 1, cond_branch_not_taken,
3502 NULL, 0, vect_prologue);
3503 (void) add_stmt_cost (target_cost_data, 1, cond_branch_taken,
3504 NULL, 0, vect_epilogue);
3505 (void) add_stmt_cost (target_cost_data, 1, cond_branch_not_taken,
3506 NULL, 0, vect_epilogue);
3507 stmt_info_for_cost *si;
3508 int j;
3509 FOR_EACH_VEC_ELT (LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo), j, si)
3510 {
3511 (void) add_stmt_cost (target_cost_data,
3512 si->count * peel_iters_prologue,
3513 si->kind, si->stmt_info, si->misalign,
3514 vect_prologue);
3515 (void) add_stmt_cost (target_cost_data,
3516 si->count * peel_iters_epilogue,
3517 si->kind, si->stmt_info, si->misalign,
3518 vect_epilogue);
3519 }
3520 }
3521 else
3522 {
3523 stmt_vector_for_cost prologue_cost_vec, epilogue_cost_vec;
3524 stmt_info_for_cost *si;
3525 int j;
3526 void *data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
3527
3528 prologue_cost_vec.create (2);
3529 epilogue_cost_vec.create (2);
3530 peel_iters_prologue = npeel;
3531
3532 (void) vect_get_known_peeling_cost (loop_vinfo, peel_iters_prologue,
3533 &peel_iters_epilogue,
3534 &LOOP_VINFO_SCALAR_ITERATION_COST
3535 (loop_vinfo),
3536 &prologue_cost_vec,
3537 &epilogue_cost_vec);
3538
3539 FOR_EACH_VEC_ELT (prologue_cost_vec, j, si)
3540 (void) add_stmt_cost (data, si->count, si->kind, si->stmt_info,
3541 si->misalign, vect_prologue);
3542
3543 FOR_EACH_VEC_ELT (epilogue_cost_vec, j, si)
3544 (void) add_stmt_cost (data, si->count, si->kind, si->stmt_info,
3545 si->misalign, vect_epilogue);
3546
3547 prologue_cost_vec.release ();
3548 epilogue_cost_vec.release ();
3549 }
3550
3551 /* FORNOW: The scalar outside cost is incremented in one of the
3552 following ways:
3553
3554 1. The vectorizer checks for alignment and aliasing and generates
3555 a condition that allows dynamic vectorization. A cost model
3556 check is ANDed with the versioning condition. Hence the scalar code
3557 path now has the added cost of the versioning check.
3558
3559 if (cost > th & versioning_check)
3560 jmp to vector code
3561
3562 Hence the run-time scalar cost is incremented by a not-taken branch cost.
3563
3564 2. The vectorizer then checks if a prologue is required. If the
3565 cost model check was not done before during versioning, it has to
3566 be done before the prologue check.
3567
3568 if (cost <= th)
3569 prologue = scalar_iters
3570 if (prologue == 0)
3571 jmp to vector code
3572 else
3573 execute prologue
3574 if (prologue == num_iters)
3575 go to exit
3576
3577 Hence the run-time scalar cost is incremented by a taken branch,
3578 plus a not-taken branch, plus a taken branch cost.
3579
3580 3. The vectorizer then checks if an epilogue is required. If the
3581 cost model check was not done before during prologue check, it
3582 has to be done with the epilogue check.
3583
3584 if (prologue == 0)
3585 jmp to vector code
3586 else
3587 execute prologue
3588 if (prologue == num_iters)
3589 go to exit
3590 vector code:
3591 if ((cost <= th) | (scalar_iters-prologue-epilogue == 0))
3592 jmp to epilogue
3593
3594 Hence the run-time scalar cost should be incremented by 2 taken
3595 branches.
3596
3597 TODO: The back end may reorder the BBs differently and reverse
3598 conditions/branch directions. Change the estimates below to
3599 something more reasonable. */
3600
3601 /* If the number of iterations is known and we do not do versioning, we can
3602 decide whether to vectorize at compile time. Hence the scalar version
3603 does not carry cost model guard costs. */
3604 if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
3605 || LOOP_REQUIRES_VERSIONING (loop_vinfo))
3606 {
3607 /* Cost model check occurs at versioning. */
3608 if (LOOP_REQUIRES_VERSIONING (loop_vinfo))
3609 scalar_outside_cost += vect_get_stmt_cost (cond_branch_not_taken);
3610 else
3611 {
3612 /* Cost model check occurs at prologue generation. */
3613 if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) < 0)
3614 scalar_outside_cost += 2 * vect_get_stmt_cost (cond_branch_taken)
3615 + vect_get_stmt_cost (cond_branch_not_taken);
3616 /* Cost model check occurs at epilogue generation. */
3617 else
3618 scalar_outside_cost += 2 * vect_get_stmt_cost (cond_branch_taken);
3619 }
3620 }
3621
3622 /* Complete the target-specific cost calculations. */
3623 finish_cost (LOOP_VINFO_TARGET_COST_DATA (loop_vinfo), &vec_prologue_cost,
3624 &vec_inside_cost, &vec_epilogue_cost);
3625
3626 vec_outside_cost = (int)(vec_prologue_cost + vec_epilogue_cost);
3627
3628 if (dump_enabled_p ())
3629 {
3630 dump_printf_loc (MSG_NOTE, vect_location, "Cost model analysis: \n");
3631 dump_printf (MSG_NOTE, " Vector inside of loop cost: %d\n",
3632 vec_inside_cost);
3633 dump_printf (MSG_NOTE, " Vector prologue cost: %d\n",
3634 vec_prologue_cost);
3635 dump_printf (MSG_NOTE, " Vector epilogue cost: %d\n",
3636 vec_epilogue_cost);
3637 dump_printf (MSG_NOTE, " Scalar iteration cost: %d\n",
3638 scalar_single_iter_cost);
3639 dump_printf (MSG_NOTE, " Scalar outside cost: %d\n",
3640 scalar_outside_cost);
3641 dump_printf (MSG_NOTE, " Vector outside cost: %d\n",
3642 vec_outside_cost);
3643 dump_printf (MSG_NOTE, " prologue iterations: %d\n",
3644 peel_iters_prologue);
3645 dump_printf (MSG_NOTE, " epilogue iterations: %d\n",
3646 peel_iters_epilogue);
3647 }
3648
3649 /* Calculate number of iterations required to make the vector version
3650 profitable, relative to the loop bodies only. The following condition
3651 must hold true:
3652 SIC * niters + SOC > VIC * ((niters-PL_ITERS-EP_ITERS)/VF) + VOC
3653 where
3654 SIC = scalar iteration cost, VIC = vector iteration cost,
3655 VOC = vector outside cost, VF = vectorization factor,
3656 PL_ITERS = prologue iterations, EP_ITERS= epilogue iterations
3657 SOC = scalar outside cost for run time cost model check. */
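
/* As an illustrative sketch (the numbers are invented, not taken from any
   particular target): with SIC = 4, VIC = 6, VOC = 14, SOC = 2,
   VF (assumed_vf) = 4 and PL_ITERS = EP_ITERS = 0, the computation below
   first gives ((14 - 2) * 4) / (4 * 4 - 6) = 48 / 10 = 4 with integer
   division, and is then bumped to 5, because at 4 iterations the scaled
   scalar cost (4 * 4 * 4 = 64) does not yet exceed the corresponding
   vector cost (6 * 4 + (14 - 2) * 4 = 72).  */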
3658
3659 if ((scalar_single_iter_cost * assumed_vf) > (int) vec_inside_cost)
3660 {
3661 min_profitable_iters = ((vec_outside_cost - scalar_outside_cost)
3662 * assumed_vf
3663 - vec_inside_cost * peel_iters_prologue
3664 - vec_inside_cost * peel_iters_epilogue);
3665 if (min_profitable_iters <= 0)
3666 min_profitable_iters = 0;
3667 else
3668 {
3669 min_profitable_iters /= ((scalar_single_iter_cost * assumed_vf)
3670 - vec_inside_cost);
3671
3672 if ((scalar_single_iter_cost * assumed_vf * min_profitable_iters)
3673 <= (((int) vec_inside_cost * min_profitable_iters)
3674 + (((int) vec_outside_cost - scalar_outside_cost)
3675 * assumed_vf)))
3676 min_profitable_iters++;
3677 }
3678 }
3679 /* The vector version will never be profitable. */
3680 else
3681 {
3682 if (LOOP_VINFO_LOOP (loop_vinfo)->force_vectorize)
3683 warning_at (vect_location.get_location_t (), OPT_Wopenmp_simd,
3684 "vectorization did not happen for a simd loop");
3685
3686 if (dump_enabled_p ())
3687 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3688 "cost model: the vector iteration cost = %d "
3689 "divided by the scalar iteration cost = %d "
3690 "is greater or equal to the vectorization factor = %d"
3691 ".\n",
3692 vec_inside_cost, scalar_single_iter_cost, assumed_vf);
3693 *ret_min_profitable_niters = -1;
3694 *ret_min_profitable_estimate = -1;
3695 return;
3696 }
3697
3698 dump_printf (MSG_NOTE,
3699 " Calculated minimum iters for profitability: %d\n",
3700 min_profitable_iters);
3701
3702 if (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
3703 && min_profitable_iters < (assumed_vf + peel_iters_prologue))
3704 /* We want the vectorized loop to execute at least once. */
3705 min_profitable_iters = assumed_vf + peel_iters_prologue;
3706
3707 if (dump_enabled_p ())
3708 dump_printf_loc (MSG_NOTE, vect_location,
3709 " Runtime profitability threshold = %d\n",
3710 min_profitable_iters);
3711
3712 *ret_min_profitable_niters = min_profitable_iters;
3713
3714 /* Calculate number of iterations required to make the vector version
3715 profitable, relative to the loop bodies only.
3716
3717 The cost of the non-vectorized variant is SIC * niters, and the vector
3718 variant must win over it on the expected loop trip count, i.e.:
3719 SIC * niters > VIC * ((niters-PL_ITERS-EP_ITERS)/VF) + VOC + SOC */
3720
3721 if (vec_outside_cost <= 0)
3722 min_profitable_estimate = 0;
3723 else
3724 {
3725 min_profitable_estimate = ((vec_outside_cost + scalar_outside_cost)
3726 * assumed_vf
3727 - vec_inside_cost * peel_iters_prologue
3728 - vec_inside_cost * peel_iters_epilogue)
3729 / ((scalar_single_iter_cost * assumed_vf)
3730 - vec_inside_cost);
3731 }
3732 min_profitable_estimate = MAX (min_profitable_estimate, min_profitable_iters);
3733 if (dump_enabled_p ())
3734 dump_printf_loc (MSG_NOTE, vect_location,
3735 " Static estimate profitability threshold = %d\n",
3736 min_profitable_estimate);
3737
3738 *ret_min_profitable_estimate = min_profitable_estimate;
3739 }
3740
3741 /* Writes into SEL a mask for a vec_perm, equivalent to a vec_shr by OFFSET
3742 vector elements (not bits) for a vector with NELT elements. */
3743 static void
3744 calc_vec_perm_mask_for_shift (unsigned int offset, unsigned int nelt,
3745 vec_perm_builder *sel)
3746 {
3747 /* The encoding is a single stepped pattern. Any wrap-around is handled
3748 by vec_perm_indices. */
3749 sel->new_vector (nelt, 1, 3);
3750 for (unsigned int i = 0; i < 3; i++)
3751 sel->quick_push (i + offset);
3752 }
3753
3754 /* Checks whether the target supports whole-vector shifts for vectors of mode
3755 MODE. This is the case if _either_ the platform handles vec_shr_optab, _or_
3756 it supports vec_perm_const with masks for all necessary shift amounts. */
3757 static bool
3758 have_whole_vector_shift (machine_mode mode)
3759 {
3760 if (optab_handler (vec_shr_optab, mode) != CODE_FOR_nothing)
3761 return true;
3762
3763 /* Variable-length vectors should be handled via the optab. */
3764 unsigned int nelt;
3765 if (!GET_MODE_NUNITS (mode).is_constant (&nelt))
3766 return false;
3767
3768 vec_perm_builder sel;
3769 vec_perm_indices indices;
3770 for (unsigned int i = nelt / 2; i >= 1; i /= 2)
3771 {
3772 calc_vec_perm_mask_for_shift (i, nelt, &sel);
3773 indices.new_vector (sel, 2, nelt);
3774 if (!can_vec_perm_const_p (mode, indices, false))
3775 return false;
3776 }
3777 return true;
3778 }
3779
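/* A minimal illustrative sketch, assuming a constant-length vector with
   nelt == 8 and no vec_shr pattern: the loop above probes the offsets 4,
   2 and 1.  For offset == 4, calc_vec_perm_mask_for_shift encodes the
   stepped selector {4, 5, 6, ...}, i.e. the permutation that takes
   elements 4..7 of the first vec_perm operand followed by elements 0..3
   of the second operand.  Only if all such constant permutations are
   supported do we report that whole-vector shifts are available.  */
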
3780 /* TODO: Close dependency between vect_model_*_cost and vectorizable_*
3781 functions. Design better to avoid maintenance issues. */
3782
3783 /* Function vect_model_reduction_cost.
3784
3785 Models cost for a reduction operation, including the vector ops
3786 generated within the strip-mine loop, the initial definition before
3787 the loop, and the epilogue code that must be generated. */
3788
3789 static void
3790 vect_model_reduction_cost (stmt_vec_info stmt_info, internal_fn reduc_fn,
3791 int ncopies, stmt_vector_for_cost *cost_vec)
3792 {
3793 int prologue_cost = 0, epilogue_cost = 0, inside_cost;
3794 enum tree_code code;
3795 optab optab;
3796 tree vectype;
3797 machine_mode mode;
3798 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3799 struct loop *loop = NULL;
3800
3801 if (loop_vinfo)
3802 loop = LOOP_VINFO_LOOP (loop_vinfo);
3803
3804 /* Condition reductions generate two reductions in the loop. */
3805 vect_reduction_type reduction_type
3806 = STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info);
3807 if (reduction_type == COND_REDUCTION)
3808 ncopies *= 2;
3809
3810 vectype = STMT_VINFO_VECTYPE (stmt_info);
3811 mode = TYPE_MODE (vectype);
3812 stmt_vec_info orig_stmt_info = vect_orig_stmt (stmt_info);
3813
3814 code = gimple_assign_rhs_code (orig_stmt_info->stmt);
3815
3816 if (reduction_type == EXTRACT_LAST_REDUCTION
3817 || reduction_type == FOLD_LEFT_REDUCTION)
3818 {
3819 /* No extra instructions needed in the prologue. */
3820 prologue_cost = 0;
3821
3822 if (reduction_type == EXTRACT_LAST_REDUCTION || reduc_fn != IFN_LAST)
3823 /* Count one reduction-like operation per vector. */
3824 inside_cost = record_stmt_cost (cost_vec, ncopies, vec_to_scalar,
3825 stmt_info, 0, vect_body);
3826 else
3827 {
3828 /* Use NELEMENTS extracts and NELEMENTS scalar ops. */
3829 unsigned int nelements = ncopies * vect_nunits_for_cost (vectype);
3830 inside_cost = record_stmt_cost (cost_vec, nelements,
3831 vec_to_scalar, stmt_info, 0,
3832 vect_body);
3833 inside_cost += record_stmt_cost (cost_vec, nelements,
3834 scalar_stmt, stmt_info, 0,
3835 vect_body);
3836 }
3837 }
3838 else
3839 {
3840 /* Add in cost for initial definition.
3841 For cond reduction we have four vectors: initial index, step,
3842 initial result of the data reduction, initial value of the index
3843 reduction. */
3844 int prologue_stmts = reduction_type == COND_REDUCTION ? 4 : 1;
3845 prologue_cost += record_stmt_cost (cost_vec, prologue_stmts,
3846 scalar_to_vec, stmt_info, 0,
3847 vect_prologue);
3848
3849 /* Cost of reduction op inside loop. */
3850 inside_cost = record_stmt_cost (cost_vec, ncopies, vector_stmt,
3851 stmt_info, 0, vect_body);
3852 }
3853
3854 /* Determine cost of epilogue code.
3855
3856 We have a reduction operator that will reduce the vector in one statement.
3857 Also requires scalar extract. */
3858
3859 if (!loop || !nested_in_vect_loop_p (loop, orig_stmt_info))
3860 {
3861 if (reduc_fn != IFN_LAST)
3862 {
3863 if (reduction_type == COND_REDUCTION)
3864 {
3865 /* An EQ stmt and a COND_EXPR stmt. */
3866 epilogue_cost += record_stmt_cost (cost_vec, 2,
3867 vector_stmt, stmt_info, 0,
3868 vect_epilogue);
3869 /* Reduction of the max index and a reduction of the found
3870 values. */
3871 epilogue_cost += record_stmt_cost (cost_vec, 2,
3872 vec_to_scalar, stmt_info, 0,
3873 vect_epilogue);
3874 /* A broadcast of the max value. */
3875 epilogue_cost += record_stmt_cost (cost_vec, 1,
3876 scalar_to_vec, stmt_info, 0,
3877 vect_epilogue);
3878 }
3879 else
3880 {
3881 epilogue_cost += record_stmt_cost (cost_vec, 1, vector_stmt,
3882 stmt_info, 0, vect_epilogue);
3883 epilogue_cost += record_stmt_cost (cost_vec, 1,
3884 vec_to_scalar, stmt_info, 0,
3885 vect_epilogue);
3886 }
3887 }
3888 else if (reduction_type == COND_REDUCTION)
3889 {
3890 unsigned estimated_nunits = vect_nunits_for_cost (vectype);
3891 /* Extraction of scalar elements. */
3892 epilogue_cost += record_stmt_cost (cost_vec,
3893 2 * estimated_nunits,
3894 vec_to_scalar, stmt_info, 0,
3895 vect_epilogue);
3896 /* Scalar max reductions via COND_EXPR / MAX_EXPR. */
3897 epilogue_cost += record_stmt_cost (cost_vec,
3898 2 * estimated_nunits - 3,
3899 scalar_stmt, stmt_info, 0,
3900 vect_epilogue);
3901 }
3902 else if (reduction_type == EXTRACT_LAST_REDUCTION
3903 || reduction_type == FOLD_LEFT_REDUCTION)
3904 /* No extra instructions are needed in the epilogue. */
3905 ;
3906 else
3907 {
3908 int vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype));
3909 tree bitsize =
3910 TYPE_SIZE (TREE_TYPE (gimple_assign_lhs (orig_stmt_info->stmt)));
3911 int element_bitsize = tree_to_uhwi (bitsize);
3912 int nelements = vec_size_in_bits / element_bitsize;
3913
3914 if (code == COND_EXPR)
3915 code = MAX_EXPR;
3916
3917 optab = optab_for_tree_code (code, vectype, optab_default);
3918
3919 /* We have a whole vector shift available. */
3920 if (optab != unknown_optab
3921 && VECTOR_MODE_P (mode)
3922 && optab_handler (optab, mode) != CODE_FOR_nothing
3923 && have_whole_vector_shift (mode))
3924 {
3925 /* Final reduction via vector shifts and the reduction operator.
3926 Also requires scalar extract. */
3927 epilogue_cost += record_stmt_cost (cost_vec,
3928 exact_log2 (nelements) * 2,
3929 vector_stmt, stmt_info, 0,
3930 vect_epilogue);
3931 epilogue_cost += record_stmt_cost (cost_vec, 1,
3932 vec_to_scalar, stmt_info, 0,
3933 vect_epilogue);
3934 }
3935 else
3936 /* Use extracts and reduction op for final reduction. For N
3937 elements, we have N extracts and N-1 reduction ops. */
3938 epilogue_cost += record_stmt_cost (cost_vec,
3939 nelements + nelements - 1,
3940 vector_stmt, stmt_info, 0,
3941 vect_epilogue);
3942 }
3943 }
3944
3945 if (dump_enabled_p ())
3946 dump_printf (MSG_NOTE,
3947 "vect_model_reduction_cost: inside_cost = %d, "
3948 "prologue_cost = %d, epilogue_cost = %d .\n", inside_cost,
3949 prologue_cost, epilogue_cost);
3950 }
3951
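/* A hand-worked reading of the accounting above (not target-specific
   numbers): a plain sum reduction with ncopies == 1, a supported reduc_fn,
   no COND_REDUCTION and no nesting is charged one scalar_to_vec in the
   prologue for the initial definition, one vector_stmt per copy inside the
   loop, and one vector_stmt plus one vec_to_scalar in the epilogue for the
   final reduction and the scalar extract.  */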
3952
3953 /* Function vect_model_induction_cost.
3954
3955 Models cost for induction operations. */
3956
3957 static void
3958 vect_model_induction_cost (stmt_vec_info stmt_info, int ncopies,
3959 stmt_vector_for_cost *cost_vec)
3960 {
3961 unsigned inside_cost, prologue_cost;
3962
3963 if (PURE_SLP_STMT (stmt_info))
3964 return;
3965
3966 /* loop cost for vec_loop. */
3967 inside_cost = record_stmt_cost (cost_vec, ncopies, vector_stmt,
3968 stmt_info, 0, vect_body);
3969
3970 /* prologue cost for vec_init and vec_step. */
3971 prologue_cost = record_stmt_cost (cost_vec, 2, scalar_to_vec,
3972 stmt_info, 0, vect_prologue);
3973
3974 if (dump_enabled_p ())
3975 dump_printf_loc (MSG_NOTE, vect_location,
3976 "vect_model_induction_cost: inside_cost = %d, "
3977 "prologue_cost = %d .\n", inside_cost, prologue_cost);
3978 }
3979
3980
3981
3982 /* Function get_initial_def_for_reduction
3983
3984 Input:
3985 STMT_VINFO - a stmt that performs a reduction operation in the loop.
3986 INIT_VAL - the initial value of the reduction variable
3987
3988 Output:
3989 ADJUSTMENT_DEF - a tree that holds a value to be added to the final result
3990 of the reduction (used for adjusting the epilog - see below).
3991 Return a vector variable, initialized according to the operation that
3992 STMT_VINFO performs. This vector will be used as the initial value
3993 of the vector of partial results.
3994
3995 Option1 (adjust in epilog): Initialize the vector as follows:
3996 add/bit or/xor: [0,0,...,0,0]
3997 mult/bit and: [1,1,...,1,1]
3998 min/max/cond_expr: [init_val,init_val,..,init_val,init_val]
3999 and when necessary (e.g. add/mult case) let the caller know
4000 that it needs to adjust the result by init_val.
4001
4002 Option2: Initialize the vector as follows:
4003 add/bit or/xor: [init_val,0,0,...,0]
4004 mult/bit and: [init_val,1,1,...,1]
4005 min/max/cond_expr: [init_val,init_val,...,init_val]
4006 and no adjustments are needed.
4007
4008 For example, for the following code:
4009
4010 s = init_val;
4011 for (i=0;i<n;i++)
4012 s = s + a[i];
4013
4014 STMT_VINFO is 's = s + a[i]', and the reduction variable is 's'.
4015 For a vector of 4 units, we want to return either [0,0,0,init_val],
4016 or [0,0,0,0] and let the caller know that it needs to adjust
4017 the result at the end by 'init_val'.
4018
4019 FORNOW, we use the 'adjust in epilog' scheme (Option1) when ADJUSTMENT_DEF
4020 is not NULL, because its initialization vector is simpler (the same
4021 element in all entries); otherwise we use Option2.
4022
4023 A cost model should help decide between these two schemes. */
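
/* To make the choice concrete for a product reduction (s *= a[i], with
   init_val 5 and a 4-element vector, both chosen only for illustration):
   Option1 starts from [1,1,1,1] and asks the caller to adjust the reduced
   product by the original 5 afterwards, while Option2 starts from
   [5,1,1,1] and needs no adjustment.  */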
4024
4025 tree
4026 get_initial_def_for_reduction (stmt_vec_info stmt_vinfo, tree init_val,
4027 tree *adjustment_def)
4028 {
4029 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
4030 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
4031 tree scalar_type = TREE_TYPE (init_val);
4032 tree vectype = get_vectype_for_scalar_type (scalar_type);
4033 enum tree_code code = gimple_assign_rhs_code (stmt_vinfo->stmt);
4034 tree def_for_init;
4035 tree init_def;
4036 REAL_VALUE_TYPE real_init_val = dconst0;
4037 int int_init_val = 0;
4038 gimple_seq stmts = NULL;
4039
4040 gcc_assert (vectype);
4041
4042 gcc_assert (POINTER_TYPE_P (scalar_type) || INTEGRAL_TYPE_P (scalar_type)
4043 || SCALAR_FLOAT_TYPE_P (scalar_type));
4044
4045 gcc_assert (nested_in_vect_loop_p (loop, stmt_vinfo)
4046 || loop == (gimple_bb (stmt_vinfo->stmt))->loop_father);
4047
4048 vect_reduction_type reduction_type
4049 = STMT_VINFO_VEC_REDUCTION_TYPE (stmt_vinfo);
4050
4051 switch (code)
4052 {
4053 case WIDEN_SUM_EXPR:
4054 case DOT_PROD_EXPR:
4055 case SAD_EXPR:
4056 case PLUS_EXPR:
4057 case MINUS_EXPR:
4058 case BIT_IOR_EXPR:
4059 case BIT_XOR_EXPR:
4060 case MULT_EXPR:
4061 case BIT_AND_EXPR:
4062 {
4063 /* ADJUSTMENT_DEF is NULL when called from
4064 vect_create_epilog_for_reduction to vectorize double reduction. */
4065 if (adjustment_def)
4066 *adjustment_def = init_val;
4067
4068 if (code == MULT_EXPR)
4069 {
4070 real_init_val = dconst1;
4071 int_init_val = 1;
4072 }
4073
4074 if (code == BIT_AND_EXPR)
4075 int_init_val = -1;
4076
4077 if (SCALAR_FLOAT_TYPE_P (scalar_type))
4078 def_for_init = build_real (scalar_type, real_init_val);
4079 else
4080 def_for_init = build_int_cst (scalar_type, int_init_val);
4081
4082 if (adjustment_def)
4083 /* Option1: the first element is '0' or '1' as well. */
4084 init_def = gimple_build_vector_from_val (&stmts, vectype,
4085 def_for_init);
4086 else if (!TYPE_VECTOR_SUBPARTS (vectype).is_constant ())
4087 {
4088 /* Option2 (variable length): the first element is INIT_VAL. */
4089 init_def = gimple_build_vector_from_val (&stmts, vectype,
4090 def_for_init);
4091 init_def = gimple_build (&stmts, CFN_VEC_SHL_INSERT,
4092 vectype, init_def, init_val);
4093 }
4094 else
4095 {
4096 /* Option2: the first element is INIT_VAL. */
4097 tree_vector_builder elts (vectype, 1, 2);
4098 elts.quick_push (init_val);
4099 elts.quick_push (def_for_init);
4100 init_def = gimple_build_vector (&stmts, &elts);
4101 }
4102 }
4103 break;
4104
4105 case MIN_EXPR:
4106 case MAX_EXPR:
4107 case COND_EXPR:
4108 {
4109 if (adjustment_def)
4110 {
4111 *adjustment_def = NULL_TREE;
4112 if (reduction_type != COND_REDUCTION
4113 && reduction_type != EXTRACT_LAST_REDUCTION)
4114 {
4115 init_def = vect_get_vec_def_for_operand (init_val, stmt_vinfo);
4116 break;
4117 }
4118 }
4119 init_val = gimple_convert (&stmts, TREE_TYPE (vectype), init_val);
4120 init_def = gimple_build_vector_from_val (&stmts, vectype, init_val);
4121 }
4122 break;
4123
4124 default:
4125 gcc_unreachable ();
4126 }
4127
4128 if (stmts)
4129 gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
4130 return init_def;
4131 }
4132
4133 /* Get at the initial defs for the reduction PHIs in SLP_NODE.
4134 NUMBER_OF_VECTORS is the number of vector defs to create.
4135 If NEUTRAL_OP is nonnull, introducing extra elements of that
4136 value will not change the result. */
4137
4138 static void
4139 get_initial_defs_for_reduction (slp_tree slp_node,
4140 vec<tree> *vec_oprnds,
4141 unsigned int number_of_vectors,
4142 bool reduc_chain, tree neutral_op)
4143 {
4144 vec<stmt_vec_info> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
4145 stmt_vec_info stmt_vinfo = stmts[0];
4146 unsigned HOST_WIDE_INT nunits;
4147 unsigned j, number_of_places_left_in_vector;
4148 tree vector_type;
4149 tree vop;
4150 int group_size = stmts.length ();
4151 unsigned int vec_num, i;
4152 unsigned number_of_copies = 1;
4153 vec<tree> voprnds;
4154 voprnds.create (number_of_vectors);
4155 struct loop *loop;
4156 auto_vec<tree, 16> permute_results;
4157
4158 vector_type = STMT_VINFO_VECTYPE (stmt_vinfo);
4159
4160 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def);
4161
4162 loop = (gimple_bb (stmt_vinfo->stmt))->loop_father;
4163 gcc_assert (loop);
4164 edge pe = loop_preheader_edge (loop);
4165
4166 gcc_assert (!reduc_chain || neutral_op);
4167
4168 /* NUMBER_OF_COPIES is the number of times we need to use the same values in
4169 created vectors. It is greater than 1 if unrolling is performed.
4170
4171 For example, we have two scalar operands, s1 and s2 (e.g., group of
4172 strided accesses of size two), while NUNITS is four (i.e., four scalars
4173 of this type can be packed in a vector). The output vector will contain
4174 two copies of each scalar operand: {s1, s2, s1, s2}. (NUMBER_OF_COPIES
4175 will be 2).
4176
4177 If REDUC_GROUP_SIZE > NUNITS, the scalars will be split into several
4178 vectors containing the operands.
4179
4180 For example, NUNITS is four as before, and the group size is 8
4181 (s1, s2, ..., s8). We will create two vectors {s1, s2, s3, s4} and
4182 {s5, s6, s7, s8}. */
4183
4184 if (!TYPE_VECTOR_SUBPARTS (vector_type).is_constant (&nunits))
4185 nunits = group_size;
4186
4187 number_of_copies = nunits * number_of_vectors / group_size;
4188
4189 number_of_places_left_in_vector = nunits;
4190 bool constant_p = true;
4191 tree_vector_builder elts (vector_type, nunits, 1);
4192 elts.quick_grow (nunits);
4193 for (j = 0; j < number_of_copies; j++)
4194 {
4195 for (i = group_size - 1; stmts.iterate (i, &stmt_vinfo); i--)
4196 {
4197 tree op;
4198 /* Get the def before the loop. In a reduction chain we have only
4199 one initial value. */
4200 if ((j != (number_of_copies - 1)
4201 || (reduc_chain && i != 0))
4202 && neutral_op)
4203 op = neutral_op;
4204 else
4205 op = PHI_ARG_DEF_FROM_EDGE (stmt_vinfo->stmt, pe);
4206
4207 /* Create 'vect_ = {op0,op1,...,opn}'. */
4208 number_of_places_left_in_vector--;
4209 elts[number_of_places_left_in_vector] = op;
4210 if (!CONSTANT_CLASS_P (op))
4211 constant_p = false;
4212
4213 if (number_of_places_left_in_vector == 0)
4214 {
4215 gimple_seq ctor_seq = NULL;
4216 tree init;
4217 if (constant_p && !neutral_op
4218 ? multiple_p (TYPE_VECTOR_SUBPARTS (vector_type), nunits)
4219 : known_eq (TYPE_VECTOR_SUBPARTS (vector_type), nunits))
4220 /* Build the vector directly from ELTS. */
4221 init = gimple_build_vector (&ctor_seq, &elts);
4222 else if (neutral_op)
4223 {
4224 /* Build a vector of the neutral value and shift the
4225 other elements into place. */
4226 init = gimple_build_vector_from_val (&ctor_seq, vector_type,
4227 neutral_op);
4228 int k = nunits;
4229 while (k > 0 && elts[k - 1] == neutral_op)
4230 k -= 1;
4231 while (k > 0)
4232 {
4233 k -= 1;
4234 init = gimple_build (&ctor_seq, CFN_VEC_SHL_INSERT,
4235 vector_type, init, elts[k]);
4236 }
4237 }
4238 else
4239 {
4240 /* First time round, duplicate ELTS to fill the
4241 required number of vectors, then cherry pick the
4242 appropriate result for each iteration. */
4243 if (vec_oprnds->is_empty ())
4244 duplicate_and_interleave (&ctor_seq, vector_type, elts,
4245 number_of_vectors,
4246 permute_results);
4247 init = permute_results[number_of_vectors - j - 1];
4248 }
4249 if (ctor_seq != NULL)
4250 gsi_insert_seq_on_edge_immediate (pe, ctor_seq);
4251 voprnds.quick_push (init);
4252
4253 number_of_places_left_in_vector = nunits;
4254 elts.new_vector (vector_type, nunits, 1);
4255 elts.quick_grow (nunits);
4256 constant_p = true;
4257 }
4258 }
4259 }
4260
4261 /* Since the vectors are created in the reverse order, we should reverse
4262 them. */
4263 vec_num = voprnds.length ();
4264 for (j = vec_num; j != 0; j--)
4265 {
4266 vop = voprnds[j - 1];
4267 vec_oprnds->quick_push (vop);
4268 }
4269
4270 voprnds.release ();
4271
4272 /* In case VF is greater than the unrolling factor needed for the SLP
4273 group of stmts, NUMBER_OF_VECTORS to be created is greater than
4274 NUMBER_OF_SCALARS/NUNITS or NUNITS/NUMBER_OF_SCALARS, and hence we have
4275 to replicate the vectors. */
4276 tree neutral_vec = NULL;
4277 while (number_of_vectors > vec_oprnds->length ())
4278 {
4279 if (neutral_op)
4280 {
4281 if (!neutral_vec)
4282 {
4283 gimple_seq ctor_seq = NULL;
4284 neutral_vec = gimple_build_vector_from_val
4285 (&ctor_seq, vector_type, neutral_op);
4286 if (ctor_seq != NULL)
4287 gsi_insert_seq_on_edge_immediate (pe, ctor_seq);
4288 }
4289 vec_oprnds->quick_push (neutral_vec);
4290 }
4291 else
4292 {
4293 for (i = 0; vec_oprnds->iterate (i, &vop) && i < vec_num; i++)
4294 vec_oprnds->quick_push (vop);
4295 }
4296 }
4297 }
4298
4299
4300 /* Function vect_create_epilog_for_reduction
4301
4302 Create code at the loop-epilog to finalize the result of a reduction
4303 computation.
4304
4305 VECT_DEFS is list of vector of partial results, i.e., the lhs's of vector
4306 reduction statements.
4307 STMT_INFO is the scalar reduction stmt that is being vectorized.
4308 NCOPIES is > 1 in case the vectorization factor (VF) is bigger than the
4309 number of elements that we can fit in a vectype (nunits). In this case
4310 we have to generate more than one vector stmt, i.e., we need to "unroll"
4311 the vector stmt by a factor VF/nunits. For more details see documentation
4312 in vectorizable_operation.
4313 REDUC_FN is the internal function for the epilog reduction.
4314 REDUCTION_PHIS is a list of the phi-nodes that carry the reduction
4315 computation.
4316 REDUC_INDEX is the index of the operand in the right hand side of the
4317 statement that is defined by REDUCTION_PHI.
4318 DOUBLE_REDUC is TRUE if double reduction phi nodes should be handled.
4319 SLP_NODE is an SLP node containing a group of reduction statements. The
4320 first one in this group is STMT_INFO.
4321 INDUC_VAL is for INTEGER_INDUC_COND_REDUCTION the value to use for the case
4322 when the COND_EXPR is never true in the loop. For MAX_EXPR, it needs to
4323 be smaller than any value of the IV in the loop, for MIN_EXPR larger than
4324 any value of the IV in the loop.
4325 INDUC_CODE is the code for epilog reduction if INTEGER_INDUC_COND_REDUCTION.
4326 NEUTRAL_OP is the value given by neutral_op_for_slp_reduction; it is
4327 null if this is not an SLP reduction
4328
4329 This function:
4330 1. Creates the reduction def-use cycles: sets the arguments for
4331 REDUCTION_PHIS:
4332 The loop-entry argument is the vectorized initial-value of the reduction.
4333 The loop-latch argument is taken from VECT_DEFS - the vector of partial
4334 sums.
4335 2. "Reduces" each vector of partial results VECT_DEFS into a single result,
4336 by calling the function specified by REDUC_FN if available, or by
4337 other means (whole-vector shifts or a scalar loop).
4338 The function also creates a new phi node at the loop exit to preserve
4339 loop-closed form, as illustrated below.
4340
4341 The flow at the entry to this function:
4342
4343 loop:
4344 vec_def = phi <null, null> # REDUCTION_PHI
4345 VECT_DEF = vector_stmt # vectorized form of STMT_INFO
4346 s_loop = scalar_stmt # (scalar) STMT_INFO
4347 loop_exit:
4348 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
4349 use <s_out0>
4350 use <s_out0>
4351
4352 The above is transformed by this function into:
4353
4354 loop:
4355 vec_def = phi <vec_init, VECT_DEF> # REDUCTION_PHI
4356 VECT_DEF = vector_stmt # vectorized form of STMT_INFO
4357 s_loop = scalar_stmt # (scalar) STMT_INFO
4358 loop_exit:
4359 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
4360 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
4361 v_out2 = reduce <v_out1>
4362 s_out3 = extract_field <v_out2, 0>
4363 s_out4 = adjust_result <s_out3>
4364 use <s_out4>
4365 use <s_out4>
4366 */
4367
4368 static void
4369 vect_create_epilog_for_reduction (vec<tree> vect_defs,
4370 stmt_vec_info stmt_info,
4371 gimple *reduc_def_stmt,
4372 int ncopies, internal_fn reduc_fn,
4373 vec<stmt_vec_info> reduction_phis,
4374 bool double_reduc,
4375 slp_tree slp_node,
4376 slp_instance slp_node_instance,
4377 tree induc_val, enum tree_code induc_code,
4378 tree neutral_op)
4379 {
4380 stmt_vec_info prev_phi_info;
4381 tree vectype;
4382 machine_mode mode;
4383 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4384 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo), *outer_loop = NULL;
4385 basic_block exit_bb;
4386 tree scalar_dest;
4387 tree scalar_type;
4388 gimple *new_phi = NULL, *phi;
4389 stmt_vec_info phi_info;
4390 gimple_stmt_iterator exit_gsi;
4391 tree vec_dest;
4392 tree new_temp = NULL_TREE, new_dest, new_name, new_scalar_dest;
4393 gimple *epilog_stmt = NULL;
4394 enum tree_code code = gimple_assign_rhs_code (stmt_info->stmt);
4395 gimple *exit_phi;
4396 tree bitsize;
4397 tree adjustment_def = NULL;
4398 tree vec_initial_def = NULL;
4399 tree expr, def, initial_def = NULL;
4400 tree orig_name, scalar_result;
4401 imm_use_iterator imm_iter, phi_imm_iter;
4402 use_operand_p use_p, phi_use_p;
4403 gimple *use_stmt;
4404 stmt_vec_info reduction_phi_info = NULL;
4405 bool nested_in_vect_loop = false;
4406 auto_vec<gimple *> new_phis;
4407 auto_vec<stmt_vec_info> inner_phis;
4408 int j, i;
4409 auto_vec<tree> scalar_results;
4410 unsigned int group_size = 1, k, ratio;
4411 auto_vec<tree> vec_initial_defs;
4412 auto_vec<gimple *> phis;
4413 bool slp_reduc = false;
4414 bool direct_slp_reduc;
4415 tree new_phi_result;
4416 stmt_vec_info inner_phi = NULL;
4417 tree induction_index = NULL_TREE;
4418
4419 if (slp_node)
4420 group_size = SLP_TREE_SCALAR_STMTS (slp_node).length ();
4421
4422 if (nested_in_vect_loop_p (loop, stmt_info))
4423 {
4424 outer_loop = loop;
4425 loop = loop->inner;
4426 nested_in_vect_loop = true;
4427 gcc_assert (!slp_node);
4428 }
4429
4430 vectype = STMT_VINFO_VECTYPE (stmt_info);
4431 gcc_assert (vectype);
4432 mode = TYPE_MODE (vectype);
4433
4434 /* 1. Create the reduction def-use cycle:
4435 Set the arguments of REDUCTION_PHIS, i.e., transform
4436
4437 loop:
4438 vec_def = phi <null, null> # REDUCTION_PHI
4439 VECT_DEF = vector_stmt # vectorized form of STMT
4440 ...
4441
4442 into:
4443
4444 loop:
4445 vec_def = phi <vec_init, VECT_DEF> # REDUCTION_PHI
4446 VECT_DEF = vector_stmt # vectorized form of STMT
4447 ...
4448
4449 (in case of SLP, do it for all the phis). */
4450
4451 /* Get the loop-entry arguments. */
4452 enum vect_def_type initial_def_dt = vect_unknown_def_type;
4453 if (slp_node)
4454 {
4455 unsigned vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
4456 vec_initial_defs.reserve (vec_num);
4457 get_initial_defs_for_reduction (slp_node_instance->reduc_phis,
4458 &vec_initial_defs, vec_num,
4459 REDUC_GROUP_FIRST_ELEMENT (stmt_info),
4460 neutral_op);
4461 }
4462 else
4463 {
4464 /* Get at the scalar def before the loop, that defines the initial value
4465 of the reduction variable. */
4466 initial_def = PHI_ARG_DEF_FROM_EDGE (reduc_def_stmt,
4467 loop_preheader_edge (loop));
4468 /* Optimize: if, for REDUC_MAX, initial_def is smaller than the base
4469 and we can't use zero for induc_val, use initial_def. Similarly
4470 for REDUC_MIN and initial_def larger than the base. */
4471 if (TREE_CODE (initial_def) == INTEGER_CST
4472 && (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
4473 == INTEGER_INDUC_COND_REDUCTION)
4474 && !integer_zerop (induc_val)
4475 && ((induc_code == MAX_EXPR
4476 && tree_int_cst_lt (initial_def, induc_val))
4477 || (induc_code == MIN_EXPR
4478 && tree_int_cst_lt (induc_val, initial_def))))
4479 induc_val = initial_def;
4480
4481 if (double_reduc)
4482 /* In case of double reduction we only create a vector variable
4483 to be put in the reduction phi node. The actual statement
4484 creation is done later in this function. */
4485 vec_initial_def = vect_create_destination_var (initial_def, vectype);
4486 else if (nested_in_vect_loop)
4487 {
4488 /* Do not use an adjustment def as that case is not supported
4489 correctly if ncopies is not one. */
4490 vect_is_simple_use (initial_def, loop_vinfo, &initial_def_dt);
4491 vec_initial_def = vect_get_vec_def_for_operand (initial_def,
4492 stmt_info);
4493 }
4494 else
4495 vec_initial_def
4496 = get_initial_def_for_reduction (stmt_info, initial_def,
4497 &adjustment_def);
4498 vec_initial_defs.create (1);
4499 vec_initial_defs.quick_push (vec_initial_def);
4500 }
4501
4502 /* Set phi nodes arguments. */
4503 FOR_EACH_VEC_ELT (reduction_phis, i, phi_info)
4504 {
4505 tree vec_init_def = vec_initial_defs[i];
4506 tree def = vect_defs[i];
4507 for (j = 0; j < ncopies; j++)
4508 {
4509 if (j != 0)
4510 {
4511 phi_info = STMT_VINFO_RELATED_STMT (phi_info);
4512 if (nested_in_vect_loop)
4513 vec_init_def
4514 = vect_get_vec_def_for_stmt_copy (loop_vinfo, vec_init_def);
4515 }
4516
4517 /* Set the loop-entry arg of the reduction-phi. */
4518
4519 gphi *phi = as_a <gphi *> (phi_info->stmt);
4520 if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
4521 == INTEGER_INDUC_COND_REDUCTION)
4522 {
4523 /* Initialise the reduction phi to zero. This prevents non-zero
4524 initial values from interfering with the reduction op. */
4525 gcc_assert (ncopies == 1);
4526 gcc_assert (i == 0);
4527
4528 tree vec_init_def_type = TREE_TYPE (vec_init_def);
4529 tree induc_val_vec
4530 = build_vector_from_val (vec_init_def_type, induc_val);
4531
4532 add_phi_arg (phi, induc_val_vec, loop_preheader_edge (loop),
4533 UNKNOWN_LOCATION);
4534 }
4535 else
4536 add_phi_arg (phi, vec_init_def, loop_preheader_edge (loop),
4537 UNKNOWN_LOCATION);
4538
4539 /* Set the loop-latch arg for the reduction-phi. */
4540 if (j > 0)
4541 def = vect_get_vec_def_for_stmt_copy (loop_vinfo, def);
4542
4543 add_phi_arg (phi, def, loop_latch_edge (loop), UNKNOWN_LOCATION);
4544
4545 if (dump_enabled_p ())
4546 {
4547 dump_printf_loc (MSG_NOTE, vect_location,
4548 "transform reduction: created def-use cycle: ");
4549 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
4550 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, SSA_NAME_DEF_STMT (def), 0);
4551 }
4552 }
4553 }
4554
4555 /* For cond reductions we want to create a new vector (INDEX_COND_EXPR)
4556 which is updated with the current index of the loop for every match of
4557 the original loop's cond_expr (VEC_STMT). This results in a vector
4558 containing the last time the condition passed for that vector lane.
4559 The first match will be a 1 to allow 0 to be used for non-matching
4560 indexes. If there are no matches at all then the vector will be all
4561 zeroes. */
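/* An illustrative sketch, assuming four lanes (VF == 4) and that the
   condition matches in lane 1 of the first vector iteration and in lane 3
   of the third: the index vector starts as {1,2,3,4} and steps by 4 each
   vector iteration, so the tracking vector ends up as {0, 2, 0, 12} and
   the overall last match is the lane holding the maximum value, 12.  */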
4562 if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == COND_REDUCTION)
4563 {
4564 tree indx_before_incr, indx_after_incr;
4565 poly_uint64 nunits_out = TYPE_VECTOR_SUBPARTS (vectype);
4566
4567 gimple *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info)->stmt;
4568 gcc_assert (gimple_assign_rhs_code (vec_stmt) == VEC_COND_EXPR);
4569
4570 int scalar_precision
4571 = GET_MODE_PRECISION (SCALAR_TYPE_MODE (TREE_TYPE (vectype)));
4572 tree cr_index_scalar_type = make_unsigned_type (scalar_precision);
4573 tree cr_index_vector_type = build_vector_type
4574 (cr_index_scalar_type, TYPE_VECTOR_SUBPARTS (vectype));
4575
4576 /* First we create a simple vector induction variable which starts
4577 with the values {1,2,3,...} (SERIES_VECT) and increments by the
4578 vector size (STEP). */
4579
4580 /* Create a {1,2,3,...} vector. */
4581 tree series_vect = build_index_vector (cr_index_vector_type, 1, 1);
4582
4583 /* Create a vector of the step value. */
4584 tree step = build_int_cst (cr_index_scalar_type, nunits_out);
4585 tree vec_step = build_vector_from_val (cr_index_vector_type, step);
4586
4587 /* Create an induction variable. */
4588 gimple_stmt_iterator incr_gsi;
4589 bool insert_after;
4590 standard_iv_increment_position (loop, &incr_gsi, &insert_after);
4591 create_iv (series_vect, vec_step, NULL_TREE, loop, &incr_gsi,
4592 insert_after, &indx_before_incr, &indx_after_incr);
4593
4594 /* Next create a new phi node vector (NEW_PHI_TREE) which starts
4595 filled with zeros (VEC_ZERO). */
4596
4597 /* Create a vector of 0s. */
4598 tree zero = build_zero_cst (cr_index_scalar_type);
4599 tree vec_zero = build_vector_from_val (cr_index_vector_type, zero);
4600
4601 /* Create a vector phi node. */
4602 tree new_phi_tree = make_ssa_name (cr_index_vector_type);
4603 new_phi = create_phi_node (new_phi_tree, loop->header);
4604 loop_vinfo->add_stmt (new_phi);
4605 add_phi_arg (as_a <gphi *> (new_phi), vec_zero,
4606 loop_preheader_edge (loop), UNKNOWN_LOCATION);
4607
4608 /* Now take the condition from the loop's original cond_expr
4609 (VEC_STMT) and produce a new cond_expr (INDEX_COND_EXPR) which for
4610 every match uses values from the induction variable
4611 (INDEX_BEFORE_INCR) otherwise uses values from the phi node
4612 (NEW_PHI_TREE).
4613 Finally, we update the phi (NEW_PHI_TREE) to take the value of
4614 the new cond_expr (INDEX_COND_EXPR). */
4615
4616 /* Duplicate the condition from vec_stmt. */
4617 tree ccompare = unshare_expr (gimple_assign_rhs1 (vec_stmt));
4618
4619 /* Create a conditional, where the condition is taken from vec_stmt
4620 (CCOMPARE), the 'then' value is the induction index (INDEX_BEFORE_INCR)
4621 and the 'else' value is the phi (NEW_PHI_TREE). */
4622 tree index_cond_expr = build3 (VEC_COND_EXPR, cr_index_vector_type,
4623 ccompare, indx_before_incr,
4624 new_phi_tree);
4625 induction_index = make_ssa_name (cr_index_vector_type);
4626 gimple *index_condition = gimple_build_assign (induction_index,
4627 index_cond_expr);
4628 gsi_insert_before (&incr_gsi, index_condition, GSI_SAME_STMT);
4629 stmt_vec_info index_vec_info = loop_vinfo->add_stmt (index_condition);
4630 STMT_VINFO_VECTYPE (index_vec_info) = cr_index_vector_type;
4631
4632 /* Update the phi with the vec cond. */
4633 add_phi_arg (as_a <gphi *> (new_phi), induction_index,
4634 loop_latch_edge (loop), UNKNOWN_LOCATION);
4635 }
4636
4637 /* 2. Create epilog code.
4638 The reduction epilog code operates across the elements of the vector
4639 of partial results computed by the vectorized loop.
4640 The reduction epilog code consists of:
4641
4642 step 1: compute the scalar result in a vector (v_out2)
4643 step 2: extract the scalar result (s_out3) from the vector (v_out2)
4644 step 3: adjust the scalar result (s_out3) if needed.
4645
4646 Step 1 can be accomplished using one of the following three schemes:
4647 (scheme 1) using reduc_fn, if available.
4648 (scheme 2) using whole-vector shifts, if available.
4649 (scheme 3) using a scalar loop. In this case steps 1+2 above are
4650 combined.
4651
4652 The overall epilog code looks like this:
4653
4654 s_out0 = phi <s_loop> # original EXIT_PHI
4655 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
4656 v_out2 = reduce <v_out1> # step 1
4657 s_out3 = extract_field <v_out2, 0> # step 2
4658 s_out4 = adjust_result <s_out3> # step 3
4659
4660 (step 3 is optional, and steps 1 and 2 may be combined).
4661 Lastly, the uses of s_out0 are replaced by s_out4. */
4662
4663
4664 /* 2.1 Create new loop-exit-phis to preserve loop-closed form:
4665 v_out1 = phi <VECT_DEF>
4666 Store them in NEW_PHIS. */
4667
4668 exit_bb = single_exit (loop)->dest;
4669 prev_phi_info = NULL;
4670 new_phis.create (vect_defs.length ());
4671 FOR_EACH_VEC_ELT (vect_defs, i, def)
4672 {
4673 for (j = 0; j < ncopies; j++)
4674 {
4675 tree new_def = copy_ssa_name (def);
4676 phi = create_phi_node (new_def, exit_bb);
4677 stmt_vec_info phi_info = loop_vinfo->add_stmt (phi);
4678 if (j == 0)
4679 new_phis.quick_push (phi);
4680 else
4681 {
4682 def = vect_get_vec_def_for_stmt_copy (loop_vinfo, def);
4683 STMT_VINFO_RELATED_STMT (prev_phi_info) = phi_info;
4684 }
4685
4686 SET_PHI_ARG_DEF (phi, single_exit (loop)->dest_idx, def);
4687 prev_phi_info = phi_info;
4688 }
4689 }
4690
4691 /* The epilogue is created for the outer-loop, i.e., for the loop being
4692 vectorized. Create exit phis for the outer loop. */
4693 if (double_reduc)
4694 {
4695 loop = outer_loop;
4696 exit_bb = single_exit (loop)->dest;
4697 inner_phis.create (vect_defs.length ());
4698 FOR_EACH_VEC_ELT (new_phis, i, phi)
4699 {
4700 stmt_vec_info phi_info = loop_vinfo->lookup_stmt (phi);
4701 tree new_result = copy_ssa_name (PHI_RESULT (phi));
4702 gphi *outer_phi = create_phi_node (new_result, exit_bb);
4703 SET_PHI_ARG_DEF (outer_phi, single_exit (loop)->dest_idx,
4704 PHI_RESULT (phi));
4705 prev_phi_info = loop_vinfo->add_stmt (outer_phi);
4706 inner_phis.quick_push (phi_info);
4707 new_phis[i] = outer_phi;
4708 while (STMT_VINFO_RELATED_STMT (phi_info))
4709 {
4710 phi_info = STMT_VINFO_RELATED_STMT (phi_info);
4711 new_result = copy_ssa_name (PHI_RESULT (phi_info->stmt));
4712 outer_phi = create_phi_node (new_result, exit_bb);
4713 SET_PHI_ARG_DEF (outer_phi, single_exit (loop)->dest_idx,
4714 PHI_RESULT (phi_info->stmt));
4715 stmt_vec_info outer_phi_info = loop_vinfo->add_stmt (outer_phi);
4716 STMT_VINFO_RELATED_STMT (prev_phi_info) = outer_phi_info;
4717 prev_phi_info = outer_phi_info;
4718 }
4719 }
4720 }
4721
4722 exit_gsi = gsi_after_labels (exit_bb);
4723
4724 /* 2.2 Get the relevant tree-code to use in the epilog for schemes 2,3
4725 (i.e. when reduc_fn is not available) and in the final adjustment
4726 code (if needed). Also get the original scalar reduction variable as
4727 defined in the loop. In case STMT is a "pattern-stmt" (i.e., it
4728 represents a reduction pattern), the tree-code and scalar-def are
4729 taken from the original stmt that the pattern-stmt (STMT) replaces.
4730 Otherwise (it is a regular reduction) - the tree-code and scalar-def
4731 are taken from STMT. */
4732
4733 stmt_vec_info orig_stmt_info = vect_orig_stmt (stmt_info);
4734 if (orig_stmt_info != stmt_info)
4735 {
4736 /* Reduction pattern */
4737 gcc_assert (STMT_VINFO_IN_PATTERN_P (orig_stmt_info));
4738 gcc_assert (STMT_VINFO_RELATED_STMT (orig_stmt_info) == stmt_info);
4739 }
4740
4741 code = gimple_assign_rhs_code (orig_stmt_info->stmt);
4742 /* For MINUS_EXPR the initial vector is [init_val,0,...,0], therefore,
4743 partial results are added and not subtracted. */
4744 if (code == MINUS_EXPR)
4745 code = PLUS_EXPR;
4746
4747 scalar_dest = gimple_assign_lhs (orig_stmt_info->stmt);
4748 scalar_type = TREE_TYPE (scalar_dest);
4749 scalar_results.create (group_size);
4750 new_scalar_dest = vect_create_destination_var (scalar_dest, NULL);
4751 bitsize = TYPE_SIZE (scalar_type);
4752
4753 /* In case this is a reduction in an inner-loop while vectorizing an outer
4754 loop - we don't need to extract a single scalar result at the end of the
4755 inner-loop (unless it is double reduction, i.e., the use of reduction is
4756 outside the outer-loop). The final vector of partial results will be used
4757 in the vectorized outer-loop, or reduced to a scalar result at the end of
4758 the outer-loop. */
4759 if (nested_in_vect_loop && !double_reduc)
4760 goto vect_finalize_reduction;
4761
4762 /* SLP reduction without reduction chain, e.g.,
4763 # a1 = phi <a2, a0>
4764 # b1 = phi <b2, b0>
4765 a2 = operation (a1)
4766 b2 = operation (b1) */
4767 slp_reduc = (slp_node && !REDUC_GROUP_FIRST_ELEMENT (stmt_info));
4768
4769 /* True if we should implement SLP_REDUC using native reduction operations
4770 instead of scalar operations. */
4771 direct_slp_reduc = (reduc_fn != IFN_LAST
4772 && slp_reduc
4773 && !TYPE_VECTOR_SUBPARTS (vectype).is_constant ());
4774
4775 /* In case of reduction chain, e.g.,
4776 # a1 = phi <a3, a0>
4777 a2 = operation (a1)
4778 a3 = operation (a2),
4779
4780 we may end up with more than one vector result. Here we reduce them to
4781 one vector. */
4782 if (REDUC_GROUP_FIRST_ELEMENT (stmt_info) || direct_slp_reduc)
4783 {
4784 tree first_vect = PHI_RESULT (new_phis[0]);
4785 gassign *new_vec_stmt = NULL;
4786 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4787 for (k = 1; k < new_phis.length (); k++)
4788 {
4789 gimple *next_phi = new_phis[k];
4790 tree second_vect = PHI_RESULT (next_phi);
4791 tree tem = make_ssa_name (vec_dest, new_vec_stmt);
4792 new_vec_stmt = gimple_build_assign (tem, code,
4793 first_vect, second_vect);
4794 gsi_insert_before (&exit_gsi, new_vec_stmt, GSI_SAME_STMT);
4795 first_vect = tem;
4796 }
4797
4798 new_phi_result = first_vect;
4799 if (new_vec_stmt)
4800 {
4801 new_phis.truncate (0);
4802 new_phis.safe_push (new_vec_stmt);
4803 }
4804 }
4805 /* Likewise if we couldn't use a single def-use cycle. */
4806 else if (ncopies > 1)
4807 {
4808 gcc_assert (new_phis.length () == 1);
4809 tree first_vect = PHI_RESULT (new_phis[0]);
4810 gassign *new_vec_stmt = NULL;
4811 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4812 stmt_vec_info next_phi_info = loop_vinfo->lookup_stmt (new_phis[0]);
4813 for (int k = 1; k < ncopies; ++k)
4814 {
4815 next_phi_info = STMT_VINFO_RELATED_STMT (next_phi_info);
4816 tree second_vect = PHI_RESULT (next_phi_info->stmt);
4817 tree tem = make_ssa_name (vec_dest, new_vec_stmt);
4818 new_vec_stmt = gimple_build_assign (tem, code,
4819 first_vect, second_vect);
4820 gsi_insert_before (&exit_gsi, new_vec_stmt, GSI_SAME_STMT);
4821 first_vect = tem;
4822 }
4823 new_phi_result = first_vect;
4824 new_phis.truncate (0);
4825 new_phis.safe_push (new_vec_stmt);
4826 }
4827 else
4828 new_phi_result = PHI_RESULT (new_phis[0]);
4829
4830 if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == COND_REDUCTION
4831 && reduc_fn != IFN_LAST)
4832 {
4833 /* For condition reductions, we have a vector (NEW_PHI_RESULT) containing
4834 various data values where the condition matched and another vector
4835 (INDUCTION_INDEX) containing all the indexes of those matches. We
4836 need to extract the last matching index (which will be the index with
4837 highest value) and use this to index into the data vector.
4838 For the case where there were no matches, the data vector will contain
4839 all default values and the index vector will be all zeros. */
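
/* An illustrative sketch, assuming four lanes: with INDUCTION_INDEX =
   {0, 2, 0, 12} and a data vector {d0, d1, d2, d3}, the max index is 12,
   the comparison selects lane 3 only, the VEC_COND yields {0, 0, 0, d3}
   and the final unsigned MAX reduction therefore extracts d3.  */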
4840
4841 /* Get various versions of the type of the vector of indexes. */
4842 tree index_vec_type = TREE_TYPE (induction_index);
4843 gcc_checking_assert (TYPE_UNSIGNED (index_vec_type));
4844 tree index_scalar_type = TREE_TYPE (index_vec_type);
4845 tree index_vec_cmp_type = build_same_sized_truth_vector_type
4846 (index_vec_type);
4847
4848 /* Get an unsigned integer version of the type of the data vector. */
4849 int scalar_precision
4850 = GET_MODE_PRECISION (SCALAR_TYPE_MODE (scalar_type));
4851 tree scalar_type_unsigned = make_unsigned_type (scalar_precision);
4852 tree vectype_unsigned = build_vector_type
4853 (scalar_type_unsigned, TYPE_VECTOR_SUBPARTS (vectype));
4854
4855 /* First we need to create a vector (ZERO_VEC) of zeros and another
4856 vector (MAX_INDEX_VEC) filled with the last matching index, which we
4857 can create using a MAX reduction and then expanding.
4858 In the case where the loop never made any matches, the max index will
4859 be zero. */
4860
4861 /* Vector of {0, 0, 0,...}. */
4862 tree zero_vec = make_ssa_name (vectype);
4863 tree zero_vec_rhs = build_zero_cst (vectype);
4864 gimple *zero_vec_stmt = gimple_build_assign (zero_vec, zero_vec_rhs);
4865 gsi_insert_before (&exit_gsi, zero_vec_stmt, GSI_SAME_STMT);
4866
4867 /* Find maximum value from the vector of found indexes. */
4868 tree max_index = make_ssa_name (index_scalar_type);
4869 gcall *max_index_stmt = gimple_build_call_internal (IFN_REDUC_MAX,
4870 1, induction_index);
4871 gimple_call_set_lhs (max_index_stmt, max_index);
4872 gsi_insert_before (&exit_gsi, max_index_stmt, GSI_SAME_STMT);
4873
4874 /* Vector of {max_index, max_index, max_index,...}. */
4875 tree max_index_vec = make_ssa_name (index_vec_type);
4876 tree max_index_vec_rhs = build_vector_from_val (index_vec_type,
4877 max_index);
4878 gimple *max_index_vec_stmt = gimple_build_assign (max_index_vec,
4879 max_index_vec_rhs);
4880 gsi_insert_before (&exit_gsi, max_index_vec_stmt, GSI_SAME_STMT);
4881
4882 /* Next we compare the new vector (MAX_INDEX_VEC) full of max indexes
4883 with the vector (INDUCTION_INDEX) of found indexes, choosing values
4884 from the data vector (NEW_PHI_RESULT) for matches, 0 (ZERO_VEC)
4885 otherwise. Only one value should match, resulting in a vector
4886 (VEC_COND) with one data value and the rest zeros.
4887 In the case where the loop never made any matches, every index will
4888 match, resulting in a vector with all data values (which will all be
4889 the default value). */
4890
4891 /* Compare the max index vector to the vector of found indexes to find
4892 the position of the max value. */
4893 tree vec_compare = make_ssa_name (index_vec_cmp_type);
4894 gimple *vec_compare_stmt = gimple_build_assign (vec_compare, EQ_EXPR,
4895 induction_index,
4896 max_index_vec);
4897 gsi_insert_before (&exit_gsi, vec_compare_stmt, GSI_SAME_STMT);
4898
4899 /* Use the compare to choose either values from the data vector or
4900 zero. */
4901 tree vec_cond = make_ssa_name (vectype);
4902 gimple *vec_cond_stmt = gimple_build_assign (vec_cond, VEC_COND_EXPR,
4903 vec_compare, new_phi_result,
4904 zero_vec);
4905 gsi_insert_before (&exit_gsi, vec_cond_stmt, GSI_SAME_STMT);
4906
4907 /* Finally we need to extract the data value from the vector (VEC_COND)
4908 into a scalar (MATCHED_DATA_REDUC). Logically we want to do an OR
4909 reduction, but because this doesn't exist, we can use a MAX reduction
4910 instead. The data value might be signed or a float so we need to cast
4911 it first.
4912 In the case where the loop never made any matches, the data values are
4913 all identical, and so will reduce down correctly. */
4914
4915 /* Make the matched data values unsigned. */
4916 tree vec_cond_cast = make_ssa_name (vectype_unsigned);
4917 tree vec_cond_cast_rhs = build1 (VIEW_CONVERT_EXPR, vectype_unsigned,
4918 vec_cond);
4919 gimple *vec_cond_cast_stmt = gimple_build_assign (vec_cond_cast,
4920 VIEW_CONVERT_EXPR,
4921 vec_cond_cast_rhs);
4922 gsi_insert_before (&exit_gsi, vec_cond_cast_stmt, GSI_SAME_STMT);
4923
4924 /* Reduce down to a scalar value. */
4925 tree data_reduc = make_ssa_name (scalar_type_unsigned);
4926 gcall *data_reduc_stmt = gimple_build_call_internal (IFN_REDUC_MAX,
4927 1, vec_cond_cast);
4928 gimple_call_set_lhs (data_reduc_stmt, data_reduc);
4929 gsi_insert_before (&exit_gsi, data_reduc_stmt, GSI_SAME_STMT);
4930
4931 /* Convert the reduced value back to the result type and set as the
4932 result. */
4933 gimple_seq stmts = NULL;
4934 new_temp = gimple_build (&stmts, VIEW_CONVERT_EXPR, scalar_type,
4935 data_reduc);
4936 gsi_insert_seq_before (&exit_gsi, stmts, GSI_SAME_STMT);
4937 scalar_results.safe_push (new_temp);
4938 }
4939 else if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == COND_REDUCTION
4940 && reduc_fn == IFN_LAST)
4941 {
4942 /* Condition reduction without supported IFN_REDUC_MAX. Generate
4943 idx = 0;
4944 idx_val = induction_index[0];
4945 val = data_reduc[0];
4946 for (idx = 0, val = init, i = 0; i < nelts; ++i)
4947 if (induction_index[i] > idx_val)
4948 val = data_reduc[i], idx_val = induction_index[i];
4949 return val; */
4950
4951 tree data_eltype = TREE_TYPE (TREE_TYPE (new_phi_result));
4952 tree idx_eltype = TREE_TYPE (TREE_TYPE (induction_index));
4953 unsigned HOST_WIDE_INT el_size = tree_to_uhwi (TYPE_SIZE (idx_eltype));
4954 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (TREE_TYPE (induction_index));
4955 /* Enforced by vectorizable_reduction, which ensures we have target
4956 support before allowing a conditional reduction on variable-length
4957 vectors. */
4958 unsigned HOST_WIDE_INT v_size = el_size * nunits.to_constant ();
4959 tree idx_val = NULL_TREE, val = NULL_TREE;
4960 for (unsigned HOST_WIDE_INT off = 0; off < v_size; off += el_size)
4961 {
4962 tree old_idx_val = idx_val;
4963 tree old_val = val;
4964 idx_val = make_ssa_name (idx_eltype);
4965 epilog_stmt = gimple_build_assign (idx_val, BIT_FIELD_REF,
4966 build3 (BIT_FIELD_REF, idx_eltype,
4967 induction_index,
4968 bitsize_int (el_size),
4969 bitsize_int (off)));
4970 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
4971 val = make_ssa_name (data_eltype);
4972 epilog_stmt = gimple_build_assign (val, BIT_FIELD_REF,
4973 build3 (BIT_FIELD_REF,
4974 data_eltype,
4975 new_phi_result,
4976 bitsize_int (el_size),
4977 bitsize_int (off)));
4978 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
4979 if (off != 0)
4980 {
4981 tree new_idx_val = idx_val;
4982 tree new_val = val;
4983 if (off != v_size - el_size)
4984 {
4985 new_idx_val = make_ssa_name (idx_eltype);
4986 epilog_stmt = gimple_build_assign (new_idx_val,
4987 MAX_EXPR, idx_val,
4988 old_idx_val);
4989 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
4990 }
4991 new_val = make_ssa_name (data_eltype);
4992 epilog_stmt = gimple_build_assign (new_val,
4993 COND_EXPR,
4994 build2 (GT_EXPR,
4995 boolean_type_node,
4996 idx_val,
4997 old_idx_val),
4998 val, old_val);
4999 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5000 idx_val = new_idx_val;
5001 val = new_val;
5002 }
5003 }
5004 /* Convert the reduced value back to the result type and set as the
5005 result. */
5006 gimple_seq stmts = NULL;
5007 val = gimple_convert (&stmts, scalar_type, val);
5008 gsi_insert_seq_before (&exit_gsi, stmts, GSI_SAME_STMT);
5009 scalar_results.safe_push (val);
5010 }
5011
5012 /* 2.3 Create the reduction code, using one of the three schemes described
5013 above. In SLP we simply need to extract all the elements from the
5014 vector (without reducing them), so we use scalar shifts. */
5015 else if (reduc_fn != IFN_LAST && !slp_reduc)
5016 {
5017 tree tmp;
5018 tree vec_elem_type;
5019
5020 /* Case 1: Create:
5021 v_out2 = reduc_expr <v_out1> */
5022
5023 if (dump_enabled_p ())
5024 dump_printf_loc (MSG_NOTE, vect_location,
5025 "Reduce using direct vector reduction.\n");
5026
5027 vec_elem_type = TREE_TYPE (TREE_TYPE (new_phi_result));
5028 if (!useless_type_conversion_p (scalar_type, vec_elem_type))
5029 {
5030 tree tmp_dest
5031 = vect_create_destination_var (scalar_dest, vec_elem_type);
5032 epilog_stmt = gimple_build_call_internal (reduc_fn, 1,
5033 new_phi_result);
5034 gimple_set_lhs (epilog_stmt, tmp_dest);
5035 new_temp = make_ssa_name (tmp_dest, epilog_stmt);
5036 gimple_set_lhs (epilog_stmt, new_temp);
5037 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5038
5039 epilog_stmt = gimple_build_assign (new_scalar_dest, NOP_EXPR,
5040 new_temp);
5041 }
5042 else
5043 {
5044 epilog_stmt = gimple_build_call_internal (reduc_fn, 1,
5045 new_phi_result);
5046 gimple_set_lhs (epilog_stmt, new_scalar_dest);
5047 }
5048
5049 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
5050 gimple_set_lhs (epilog_stmt, new_temp);
5051 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5052
5053 if ((STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
5054 == INTEGER_INDUC_COND_REDUCTION)
5055 && !operand_equal_p (initial_def, induc_val, 0))
5056 {
5057 /* Earlier we set the initial value to be a vector of induc_val
5058 values. Check the result and if it is induc_val then replace
5059 with the original initial value, unless induc_val is
5060 the same as initial_def already. */
5061 tree zcompare = build2 (EQ_EXPR, boolean_type_node, new_temp,
5062 induc_val);
5063
5064 tmp = make_ssa_name (new_scalar_dest);
5065 epilog_stmt = gimple_build_assign (tmp, COND_EXPR, zcompare,
5066 initial_def, new_temp);
5067 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5068 new_temp = tmp;
5069 }
5070
5071 scalar_results.safe_push (new_temp);
5072 }
5073 else if (direct_slp_reduc)
5074 {
5075 /* Here we create one vector for each of the REDUC_GROUP_SIZE results,
5076 with the elements for other SLP statements replaced with the
5077 neutral value. We can then do a normal reduction on each vector. */
5078
5079 /* Enforced by vectorizable_reduction. */
5080 gcc_assert (new_phis.length () == 1);
5081 gcc_assert (pow2p_hwi (group_size));
5082
5083 slp_tree orig_phis_slp_node = slp_node_instance->reduc_phis;
5084 vec<stmt_vec_info> orig_phis
5085 = SLP_TREE_SCALAR_STMTS (orig_phis_slp_node);
5086 gimple_seq seq = NULL;
5087
5088 /* Build a vector {0, 1, 2, ...}, with the same number of elements
5089 and the same element size as VECTYPE. */
5090 tree index = build_index_vector (vectype, 0, 1);
5091 tree index_type = TREE_TYPE (index);
5092 tree index_elt_type = TREE_TYPE (index_type);
5093 tree mask_type = build_same_sized_truth_vector_type (index_type);
5094
5095 /* Create a vector that, for each element, identifies which of
5096 the REDUC_GROUP_SIZE results should use it. */
5097 tree index_mask = build_int_cst (index_elt_type, group_size - 1);
5098 index = gimple_build (&seq, BIT_AND_EXPR, index_type, index,
5099 build_vector_from_val (index_type, index_mask));
5100
5101 /* Get a neutral vector value. This is simply a splat of the neutral
5102 scalar value if we have one, otherwise the initial scalar value
5103 is itself a neutral value. */
5104 tree vector_identity = NULL_TREE;
5105 if (neutral_op)
5106 vector_identity = gimple_build_vector_from_val (&seq, vectype,
5107 neutral_op);
5108 for (unsigned int i = 0; i < group_size; ++i)
5109 {
5110 /* If there's no universal neutral value, we can use the
5111 initial scalar value from the original PHI. This is used
5112 for MIN and MAX reductions, for example. */
5113 if (!neutral_op)
5114 {
5115 tree scalar_value
5116 = PHI_ARG_DEF_FROM_EDGE (orig_phis[i]->stmt,
5117 loop_preheader_edge (loop));
5118 vector_identity = gimple_build_vector_from_val (&seq, vectype,
5119 scalar_value);
5120 }
5121
5122 /* Calculate the equivalent of:
5123
5124 sel[j] = (index[j] == i);
5125
5126 which selects the elements of NEW_PHI_RESULT that should
5127 be included in the result. */
5128 tree compare_val = build_int_cst (index_elt_type, i);
5129 compare_val = build_vector_from_val (index_type, compare_val);
5130 tree sel = gimple_build (&seq, EQ_EXPR, mask_type,
5131 index, compare_val);
5132
5133 /* Calculate the equivalent of:
5134
5135 vec = sel ? new_phi_result : vector_identity;
5136
5137 VEC is now suitable for a full vector reduction. */
5138 tree vec = gimple_build (&seq, VEC_COND_EXPR, vectype,
5139 sel, new_phi_result, vector_identity);
5140
5141 /* Do the reduction and convert it to the appropriate type. */
5142 tree scalar = gimple_build (&seq, as_combined_fn (reduc_fn),
5143 TREE_TYPE (vectype), vec);
5144 scalar = gimple_convert (&seq, scalar_type, scalar);
5145 scalar_results.safe_push (scalar);
5146 }
5147 gsi_insert_seq_before (&exit_gsi, seq, GSI_SAME_STMT);
5148 }
5149 else
5150 {
5151 bool reduce_with_shift;
5152 tree vec_temp;
5153
5154 /* COND reductions all do the final reduction with MAX_EXPR
5155 or MIN_EXPR. */
5156 if (code == COND_EXPR)
5157 {
5158 if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
5159 == INTEGER_INDUC_COND_REDUCTION)
5160 code = induc_code;
5161 else
5162 code = MAX_EXPR;
5163 }
5164
5165 /* See if the target wants to do the final (shift) reduction
5166 in a vector mode of smaller size and first reduce upper/lower
5167 halves against each other. */
5168 enum machine_mode mode1 = mode;
5169 tree vectype1 = vectype;
5170 unsigned sz = tree_to_uhwi (TYPE_SIZE_UNIT (vectype));
5171 unsigned sz1 = sz;
5172 if (!slp_reduc
5173 && (mode1 = targetm.vectorize.split_reduction (mode)) != mode)
5174 sz1 = GET_MODE_SIZE (mode1).to_constant ();
5175
5176 vectype1 = get_vectype_for_scalar_type_and_size (scalar_type, sz1);
5177 reduce_with_shift = have_whole_vector_shift (mode1);
5178 if (!VECTOR_MODE_P (mode1))
5179 reduce_with_shift = false;
5180 else
5181 {
5182 optab optab = optab_for_tree_code (code, vectype1, optab_default);
5183 if (optab_handler (optab, mode1) == CODE_FOR_nothing)
5184 reduce_with_shift = false;
5185 }
5186
5187 /* First reduce the vector to the vector size we should do the
5188 shift reduction on, by repeatedly combining upper and lower halves. */
5189 new_temp = new_phi_result;
5190 while (sz > sz1)
5191 {
5192 gcc_assert (!slp_reduc);
5193 sz /= 2;
5194 vectype1 = get_vectype_for_scalar_type_and_size (scalar_type, sz);
5195
5196 /* The target has to make sure we support lowpart/highpart
5197 extraction, either via a direct vector extract or through
5198 integer mode punning. */
5199 tree dst1, dst2;
5200 if (convert_optab_handler (vec_extract_optab,
5201 TYPE_MODE (TREE_TYPE (new_temp)),
5202 TYPE_MODE (vectype1))
5203 != CODE_FOR_nothing)
5204 {
5205 /* Extract sub-vectors directly once vec_extract becomes
5206 a conversion optab. */
5207 dst1 = make_ssa_name (vectype1);
5208 epilog_stmt
5209 = gimple_build_assign (dst1, BIT_FIELD_REF,
5210 build3 (BIT_FIELD_REF, vectype1,
5211 new_temp, TYPE_SIZE (vectype1),
5212 bitsize_int (0)));
5213 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5214 dst2 = make_ssa_name (vectype1);
5215 epilog_stmt
5216 = gimple_build_assign (dst2, BIT_FIELD_REF,
5217 build3 (BIT_FIELD_REF, vectype1,
5218 new_temp, TYPE_SIZE (vectype1),
5219 bitsize_int (sz * BITS_PER_UNIT)));
5220 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5221 }
5222 else
5223 {
5224 /* Extract via punning to an appropriately sized integer mode
5225 vector. */
5226 tree eltype = build_nonstandard_integer_type (sz * BITS_PER_UNIT,
5227 1);
5228 tree etype = build_vector_type (eltype, 2);
5229 gcc_assert (convert_optab_handler (vec_extract_optab,
5230 TYPE_MODE (etype),
5231 TYPE_MODE (eltype))
5232 != CODE_FOR_nothing);
5233 tree tem = make_ssa_name (etype);
5234 epilog_stmt = gimple_build_assign (tem, VIEW_CONVERT_EXPR,
5235 build1 (VIEW_CONVERT_EXPR,
5236 etype, new_temp));
5237 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5238 new_temp = tem;
5239 tem = make_ssa_name (eltype);
5240 epilog_stmt
5241 = gimple_build_assign (tem, BIT_FIELD_REF,
5242 build3 (BIT_FIELD_REF, eltype,
5243 new_temp, TYPE_SIZE (eltype),
5244 bitsize_int (0)));
5245 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5246 dst1 = make_ssa_name (vectype1);
5247 epilog_stmt = gimple_build_assign (dst1, VIEW_CONVERT_EXPR,
5248 build1 (VIEW_CONVERT_EXPR,
5249 vectype1, tem));
5250 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5251 tem = make_ssa_name (eltype);
5252 epilog_stmt
5253 = gimple_build_assign (tem, BIT_FIELD_REF,
5254 build3 (BIT_FIELD_REF, eltype,
5255 new_temp, TYPE_SIZE (eltype),
5256 bitsize_int (sz * BITS_PER_UNIT)));
5257 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5258 dst2 = make_ssa_name (vectype1);
5259 epilog_stmt = gimple_build_assign (dst2, VIEW_CONVERT_EXPR,
5260 build1 (VIEW_CONVERT_EXPR,
5261 vectype1, tem));
5262 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5263 }
5264
5265 new_temp = make_ssa_name (vectype1);
5266 epilog_stmt = gimple_build_assign (new_temp, code, dst1, dst2);
5267 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5268 }
5269
5270 if (reduce_with_shift && !slp_reduc)
5271 {
5272 int element_bitsize = tree_to_uhwi (bitsize);
5273 /* Enforced by vectorizable_reduction, which disallows SLP reductions
5274 for variable-length vectors and also requires direct target support
5275 for loop reductions. */
5276 int vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype1));
5277 int nelements = vec_size_in_bits / element_bitsize;
5278 vec_perm_builder sel;
5279 vec_perm_indices indices;
5280
5281 int elt_offset;
5282
5283 tree zero_vec = build_zero_cst (vectype1);
5284 /* Case 2: Create:
5285 for (offset = nelements/2; offset >= 1; offset/=2)
5286 {
5287 Create: va' = vec_shift <va, offset>
5288 Create: va = vop <va, va'>
5289 } */
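
/* A concrete illustration (the values are illustrative, not taken from
   the code below): for NELEMENTS == 4 and a PLUS reduction the loop
   produces

   va' = VEC_PERM_EXPR <va, {0,0,0,0}, {2,3,4,5}>   // shift by 2 elements
   va  = va + va'
   va' = VEC_PERM_EXPR <va, {0,0,0,0}, {1,2,3,4}>   // shift by 1 element
   va  = va + va'

   after which element 0 of VA holds the reduction of all four lanes. */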
5290
5291 tree rhs;
5292
5293 if (dump_enabled_p ())
5294 dump_printf_loc (MSG_NOTE, vect_location,
5295 "Reduce using vector shifts\n");
5296
5297 mode1 = TYPE_MODE (vectype1);
5298 vec_dest = vect_create_destination_var (scalar_dest, vectype1);
5299 for (elt_offset = nelements / 2;
5300 elt_offset >= 1;
5301 elt_offset /= 2)
5302 {
5303 calc_vec_perm_mask_for_shift (elt_offset, nelements, &sel);
5304 indices.new_vector (sel, 2, nelements);
5305 tree mask = vect_gen_perm_mask_any (vectype1, indices);
5306 epilog_stmt = gimple_build_assign (vec_dest, VEC_PERM_EXPR,
5307 new_temp, zero_vec, mask);
5308 new_name = make_ssa_name (vec_dest, epilog_stmt);
5309 gimple_assign_set_lhs (epilog_stmt, new_name);
5310 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5311
5312 epilog_stmt = gimple_build_assign (vec_dest, code, new_name,
5313 new_temp);
5314 new_temp = make_ssa_name (vec_dest, epilog_stmt);
5315 gimple_assign_set_lhs (epilog_stmt, new_temp);
5316 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5317 }
5318
5319 /* 2.4 Extract the final scalar result. Create:
5320 s_out3 = extract_field <v_out2, bitpos> */
5321
5322 if (dump_enabled_p ())
5323 dump_printf_loc (MSG_NOTE, vect_location,
5324 "extract scalar result\n");
5325
5326 rhs = build3 (BIT_FIELD_REF, scalar_type, new_temp,
5327 bitsize, bitsize_zero_node);
5328 epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
5329 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
5330 gimple_assign_set_lhs (epilog_stmt, new_temp);
5331 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5332 scalar_results.safe_push (new_temp);
5333 }
5334 else
5335 {
5336 /* Case 3: Create:
5337 s = extract_field <v_out2, 0>
5338 for (offset = element_size;
5339 offset < vector_size;
5340 offset += element_size)
5341 {
5342 Create: s' = extract_field <v_out2, offset>
5343 Create: s = op <s, s'> // For non SLP cases
5344 } */
5345
5346 if (dump_enabled_p ())
5347 dump_printf_loc (MSG_NOTE, vect_location,
5348 "Reduce using scalar code.\n");
5349
5350 int vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype1));
5351 int element_bitsize = tree_to_uhwi (bitsize);
5352 FOR_EACH_VEC_ELT (new_phis, i, new_phi)
5353 {
5354 int bit_offset;
5355 if (gimple_code (new_phi) == GIMPLE_PHI)
5356 vec_temp = PHI_RESULT (new_phi);
5357 else
5358 vec_temp = gimple_assign_lhs (new_phi);
5359 tree rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp, bitsize,
5360 bitsize_zero_node);
5361 epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
5362 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
5363 gimple_assign_set_lhs (epilog_stmt, new_temp);
5364 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5365
5366 /* In SLP we don't need to apply the reduction operation, so we
5367 just collect the s' values in SCALAR_RESULTS. */
5368 if (slp_reduc)
5369 scalar_results.safe_push (new_temp);
5370
5371 for (bit_offset = element_bitsize;
5372 bit_offset < vec_size_in_bits;
5373 bit_offset += element_bitsize)
5374 {
5375 tree bitpos = bitsize_int (bit_offset);
5376 tree rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp,
5377 bitsize, bitpos);
5378
5379 epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
5380 new_name = make_ssa_name (new_scalar_dest, epilog_stmt);
5381 gimple_assign_set_lhs (epilog_stmt, new_name);
5382 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5383
5384 if (slp_reduc)
5385 {
5386 /* In SLP we don't need to apply the reduction operation,
5387 so we just collect the s' values in SCALAR_RESULTS. */
5388 new_temp = new_name;
5389 scalar_results.safe_push (new_name);
5390 }
5391 else
5392 {
5393 epilog_stmt = gimple_build_assign (new_scalar_dest, code,
5394 new_name, new_temp);
5395 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
5396 gimple_assign_set_lhs (epilog_stmt, new_temp);
5397 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5398 }
5399 }
5400 }
5401
5402 /* The only case where we need to reduce scalar results in SLP is
5403 unrolling. If the size of SCALAR_RESULTS is greater than
5404 REDUC_GROUP_SIZE, we reduce them by combining elements modulo
5405 REDUC_GROUP_SIZE. */
5406 if (slp_reduc)
5407 {
5408 tree res, first_res, new_res;
5409 gimple *new_stmt;
5410
5411 /* Reduce multiple scalar results in case of SLP unrolling. */
5412 for (j = group_size; scalar_results.iterate (j, &res);
5413 j++)
5414 {
5415 first_res = scalar_results[j % group_size];
5416 new_stmt = gimple_build_assign (new_scalar_dest, code,
5417 first_res, res);
5418 new_res = make_ssa_name (new_scalar_dest, new_stmt);
5419 gimple_assign_set_lhs (new_stmt, new_res);
5420 gsi_insert_before (&exit_gsi, new_stmt, GSI_SAME_STMT);
5421 scalar_results[j % group_size] = new_res;
5422 }
5423 }
5424 else
5425 /* Not SLP - we have one scalar to keep in SCALAR_RESULTS. */
5426 scalar_results.safe_push (new_temp);
5427 }
5428
5429 if ((STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
5430 == INTEGER_INDUC_COND_REDUCTION)
5431 && !operand_equal_p (initial_def, induc_val, 0))
5432 {
5433 /* Earlier we set the initial value to be a vector of induc_val
5434 values. Check the result and if it is induc_val then replace
5435 it with the original initial value, unless induc_val is
5436 the same as initial_def already. */
5437 tree zcompare = build2 (EQ_EXPR, boolean_type_node, new_temp,
5438 induc_val);
5439
5440 tree tmp = make_ssa_name (new_scalar_dest);
5441 epilog_stmt = gimple_build_assign (tmp, COND_EXPR, zcompare,
5442 initial_def, new_temp);
5443 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5444 scalar_results[0] = tmp;
5445 }
5446 }
5447
5448 vect_finalize_reduction:
5449
5450 if (double_reduc)
5451 loop = loop->inner;
5452
5453 /* 2.5 Adjust the final result by the initial value of the reduction
5454 variable. (When such adjustment is not needed, then
5455 'adjustment_def' is zero). For example, if code is PLUS we create:
5456 new_temp = loop_exit_def + adjustment_def */
5457
5458 if (adjustment_def)
5459 {
5460 gcc_assert (!slp_reduc);
5461 if (nested_in_vect_loop)
5462 {
5463 new_phi = new_phis[0];
5464 gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) == VECTOR_TYPE);
5465 expr = build2 (code, vectype, PHI_RESULT (new_phi), adjustment_def);
5466 new_dest = vect_create_destination_var (scalar_dest, vectype);
5467 }
5468 else
5469 {
5470 new_temp = scalar_results[0];
5471 gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) != VECTOR_TYPE);
5472 expr = build2 (code, scalar_type, new_temp, adjustment_def);
5473 new_dest = vect_create_destination_var (scalar_dest, scalar_type);
5474 }
5475
5476 epilog_stmt = gimple_build_assign (new_dest, expr);
5477 new_temp = make_ssa_name (new_dest, epilog_stmt);
5478 gimple_assign_set_lhs (epilog_stmt, new_temp);
5479 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5480 if (nested_in_vect_loop)
5481 {
5482 stmt_vec_info epilog_stmt_info = loop_vinfo->add_stmt (epilog_stmt);
5483 STMT_VINFO_RELATED_STMT (epilog_stmt_info)
5484 = STMT_VINFO_RELATED_STMT (loop_vinfo->lookup_stmt (new_phi));
5485
5486 if (!double_reduc)
5487 scalar_results.quick_push (new_temp);
5488 else
5489 scalar_results[0] = new_temp;
5490 }
5491 else
5492 scalar_results[0] = new_temp;
5493
5494 new_phis[0] = epilog_stmt;
5495 }
5496
5497 /* 2.6 Handle the loop-exit phis. Replace the uses of scalar loop-exit
5498 phis with new adjusted scalar results, i.e., replace use <s_out0>
5499 with use <s_out4>.
5500
5501 Transform:
5502 loop_exit:
5503 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
5504 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
5505 v_out2 = reduce <v_out1>
5506 s_out3 = extract_field <v_out2, 0>
5507 s_out4 = adjust_result <s_out3>
5508 use <s_out0>
5509 use <s_out0>
5510
5511 into:
5512
5513 loop_exit:
5514 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
5515 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
5516 v_out2 = reduce <v_out1>
5517 s_out3 = extract_field <v_out2, 0>
5518 s_out4 = adjust_result <s_out3>
5519 use <s_out4>
5520 use <s_out4> */
5521
5522
5523 /* In an SLP reduction chain we reduce the vector results into one vector
5524 if necessary, hence we set REDUC_GROUP_SIZE to 1 here. SCALAR_DEST is
5525 the LHS of the last stmt in the reduction chain, since we are looking
5526 for the loop exit phi node. */
5527 if (REDUC_GROUP_FIRST_ELEMENT (stmt_info))
5528 {
5529 stmt_vec_info dest_stmt_info
5530 = vect_orig_stmt (SLP_TREE_SCALAR_STMTS (slp_node)[group_size - 1]);
5531 scalar_dest = gimple_assign_lhs (dest_stmt_info->stmt);
5532 group_size = 1;
5533 }
5534
5535 /* In SLP we may have several statements in NEW_PHIS and REDUCTION_PHIS (in
5536 case REDUC_GROUP_SIZE is greater than the vectorization factor).
5537 Therefore, we need to match SCALAR_RESULTS with corresponding statements.
5538 The first (REDUC_GROUP_SIZE / number of new vector stmts) scalar results
5539 correspond to the first vector stmt, etc.
5540 (RATIO is equal to (REDUC_GROUP_SIZE / number of new vector stmts)). */
5541 if (group_size > new_phis.length ())
5542 {
5543 ratio = group_size / new_phis.length ();
5544 gcc_assert (!(group_size % new_phis.length ()));
5545 }
5546 else
5547 ratio = 1;
5548
5549 stmt_vec_info epilog_stmt_info = NULL;
5550 for (k = 0; k < group_size; k++)
5551 {
5552 if (k % ratio == 0)
5553 {
5554 epilog_stmt_info = loop_vinfo->lookup_stmt (new_phis[k / ratio]);
5555 reduction_phi_info = reduction_phis[k / ratio];
5556 if (double_reduc)
5557 inner_phi = inner_phis[k / ratio];
5558 }
5559
5560 if (slp_reduc)
5561 {
5562 stmt_vec_info scalar_stmt_info = SLP_TREE_SCALAR_STMTS (slp_node)[k];
5563
5564 orig_stmt_info = STMT_VINFO_RELATED_STMT (scalar_stmt_info);
5565 /* SLP statements can't participate in patterns. */
5566 gcc_assert (!orig_stmt_info);
5567 scalar_dest = gimple_assign_lhs (scalar_stmt_info->stmt);
5568 }
5569
5570 phis.create (3);
5571 /* Find the loop-closed-use at the loop exit of the original scalar
5572 result. (The reduction result is expected to have two immediate uses -
5573 one at the latch block, and one at the loop exit). */
5574 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
5575 if (!flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (use_p)))
5576 && !is_gimple_debug (USE_STMT (use_p)))
5577 phis.safe_push (USE_STMT (use_p));
5578
5579 /* While we expect to have found an exit_phi because of loop-closed-SSA
5580 form, we can end up without one if the scalar cycle is dead. */
5581
5582 FOR_EACH_VEC_ELT (phis, i, exit_phi)
5583 {
5584 if (outer_loop)
5585 {
5586 stmt_vec_info exit_phi_vinfo
5587 = loop_vinfo->lookup_stmt (exit_phi);
5588 gphi *vect_phi;
5589
5590 /* FORNOW. Currently we do not support the case in which an inner-loop
5591 reduction is not used in the outer-loop (but only outside the
5592 outer-loop), unless it is a double reduction. */
5593 gcc_assert ((STMT_VINFO_RELEVANT_P (exit_phi_vinfo)
5594 && !STMT_VINFO_LIVE_P (exit_phi_vinfo))
5595 || double_reduc);
5596
5597 if (double_reduc)
5598 STMT_VINFO_VEC_STMT (exit_phi_vinfo) = inner_phi;
5599 else
5600 STMT_VINFO_VEC_STMT (exit_phi_vinfo) = epilog_stmt_info;
5601 if (!double_reduc
5602 || STMT_VINFO_DEF_TYPE (exit_phi_vinfo)
5603 != vect_double_reduction_def)
5604 continue;
5605
5606 /* Handle double reduction:
5607
5608 stmt1: s1 = phi <s0, s2> - double reduction phi (outer loop)
5609 stmt2: s3 = phi <s1, s4> - (regular) reduc phi (inner loop)
5610 stmt3: s4 = use (s3) - (regular) reduc stmt (inner loop)
5611 stmt4: s2 = phi <s4> - double reduction stmt (outer loop)
5612
5613 At that point the regular reduction (stmt2 and stmt3) is
5614 already vectorized, as well as the exit phi node, stmt4.
5615 Here we vectorize the phi node of double reduction, stmt1, and
5616 update all relevant statements. */
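
/* In source terms the situation corresponds to something like
   (an illustrative sketch only):

   s = init;
   for (i = 0; i < n1; i++)      // outer loop, s1/s2 above
     for (j = 0; j < n2; j++)    // inner loop, s3/s4 above
       s = s op a[i][j];

   where the inner loop has already been vectorized and the outer-loop
   phi for S still needs a vector counterpart. */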
5617
5618 /* Go through all the uses of s2 to find double reduction phi
5619 node, i.e., stmt1 above. */
5620 orig_name = PHI_RESULT (exit_phi);
5621 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name)
5622 {
5623 stmt_vec_info use_stmt_vinfo;
5624 tree vect_phi_init, preheader_arg, vect_phi_res;
5625 basic_block bb = gimple_bb (use_stmt);
5626
5627 /* Check that USE_STMT is really a double reduction phi
5628 node. */
5629 if (gimple_code (use_stmt) != GIMPLE_PHI
5630 || gimple_phi_num_args (use_stmt) != 2
5631 || bb->loop_father != outer_loop)
5632 continue;
5633 use_stmt_vinfo = loop_vinfo->lookup_stmt (use_stmt);
5634 if (!use_stmt_vinfo
5635 || STMT_VINFO_DEF_TYPE (use_stmt_vinfo)
5636 != vect_double_reduction_def)
5637 continue;
5638
5639 /* Create vector phi node for double reduction:
5640 vs1 = phi <vs0, vs2>
5641 vs1 was created previously in this function by a call to
5642 vect_get_vec_def_for_operand and is stored in
5643 vec_initial_def;
5644 vs2 is defined by INNER_PHI, the vectorized EXIT_PHI;
5645 vs0 is created here. */
5646
5647 /* Create vector phi node. */
5648 vect_phi = create_phi_node (vec_initial_def, bb);
5649 loop_vec_info_for_loop (outer_loop)->add_stmt (vect_phi);
5650
5651 /* Create vs0 - initial def of the double reduction phi. */
5652 preheader_arg = PHI_ARG_DEF_FROM_EDGE (use_stmt,
5653 loop_preheader_edge (outer_loop));
5654 vect_phi_init = get_initial_def_for_reduction
5655 (stmt_info, preheader_arg, NULL);
5656
5657 /* Update phi node arguments with vs0 and vs2. */
5658 add_phi_arg (vect_phi, vect_phi_init,
5659 loop_preheader_edge (outer_loop),
5660 UNKNOWN_LOCATION);
5661 add_phi_arg (vect_phi, PHI_RESULT (inner_phi->stmt),
5662 loop_latch_edge (outer_loop), UNKNOWN_LOCATION);
5663 if (dump_enabled_p ())
5664 {
5665 dump_printf_loc (MSG_NOTE, vect_location,
5666 "created double reduction phi node: ");
5667 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vect_phi, 0);
5668 }
5669
5670 vect_phi_res = PHI_RESULT (vect_phi);
5671
5672 /* Replace the use, i.e., set the correct vs1 in the regular
5673 reduction phi node. FORNOW, NCOPIES is always 1, so the
5674 loop is redundant. */
5675 stmt_vec_info use_info = reduction_phi_info;
5676 for (j = 0; j < ncopies; j++)
5677 {
5678 edge pr_edge = loop_preheader_edge (loop);
5679 SET_PHI_ARG_DEF (as_a <gphi *> (use_info->stmt),
5680 pr_edge->dest_idx, vect_phi_res);
5681 use_info = STMT_VINFO_RELATED_STMT (use_info);
5682 }
5683 }
5684 }
5685 }
5686
5687 phis.release ();
5688 if (nested_in_vect_loop)
5689 {
5690 if (double_reduc)
5691 loop = outer_loop;
5692 else
5693 continue;
5694 }
5695
5696 phis.create (3);
5697 /* Find the loop-closed-use at the loop exit of the original scalar
5698 result. (The reduction result is expected to have two immediate uses,
5699 one at the latch block, and one at the loop exit). For double
5700 reductions we are looking for exit phis of the outer loop. */
5701 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
5702 {
5703 if (!flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (use_p))))
5704 {
5705 if (!is_gimple_debug (USE_STMT (use_p)))
5706 phis.safe_push (USE_STMT (use_p));
5707 }
5708 else
5709 {
5710 if (double_reduc && gimple_code (USE_STMT (use_p)) == GIMPLE_PHI)
5711 {
5712 tree phi_res = PHI_RESULT (USE_STMT (use_p));
5713
5714 FOR_EACH_IMM_USE_FAST (phi_use_p, phi_imm_iter, phi_res)
5715 {
5716 if (!flow_bb_inside_loop_p (loop,
5717 gimple_bb (USE_STMT (phi_use_p)))
5718 && !is_gimple_debug (USE_STMT (phi_use_p)))
5719 phis.safe_push (USE_STMT (phi_use_p));
5720 }
5721 }
5722 }
5723 }
5724
5725 FOR_EACH_VEC_ELT (phis, i, exit_phi)
5726 {
5727 /* Replace the uses: */
5728 orig_name = PHI_RESULT (exit_phi);
5729 scalar_result = scalar_results[k];
5730 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name)
5731 FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
5732 SET_USE (use_p, scalar_result);
5733 }
5734
5735 phis.release ();
5736 }
5737 }
5738
5739 /* Return a vector of type VECTYPE that is equal to the vector select
5740 operation "MASK ? VEC : IDENTITY". Insert the select statements
5741 before GSI. */
5742
5743 static tree
5744 merge_with_identity (gimple_stmt_iterator *gsi, tree mask, tree vectype,
5745 tree vec, tree identity)
5746 {
5747 tree cond = make_temp_ssa_name (vectype, NULL, "cond");
5748 gimple *new_stmt = gimple_build_assign (cond, VEC_COND_EXPR,
5749 mask, vec, identity);
5750 gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
5751 return cond;
5752 }
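
/* For example (illustrative lane values only), with a fully-masked PLUS
   reduction whose identity is zero:

   mask     = { 1, 1, 0, 0 }
   vec      = { a, b, c, d }
   identity = { 0, 0, 0, 0 }
   cond     = { a, b, 0, 0 }

   so the lanes that are masked off contribute nothing when COND is fed
   into the in-order reduction below. */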
5753
5754 /* Successively apply CODE to each element of VECTOR_RHS, in left-to-right
5755 order, starting with LHS. Insert the extraction statements before GSI and
5756 associate the new scalar SSA names with variable SCALAR_DEST.
5757 Return the SSA name for the result. */
5758
5759 static tree
5760 vect_expand_fold_left (gimple_stmt_iterator *gsi, tree scalar_dest,
5761 tree_code code, tree lhs, tree vector_rhs)
5762 {
5763 tree vectype = TREE_TYPE (vector_rhs);
5764 tree scalar_type = TREE_TYPE (vectype);
5765 tree bitsize = TYPE_SIZE (scalar_type);
5766 unsigned HOST_WIDE_INT vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype));
5767 unsigned HOST_WIDE_INT element_bitsize = tree_to_uhwi (bitsize);
5768
5769 for (unsigned HOST_WIDE_INT bit_offset = 0;
5770 bit_offset < vec_size_in_bits;
5771 bit_offset += element_bitsize)
5772 {
5773 tree bitpos = bitsize_int (bit_offset);
5774 tree rhs = build3 (BIT_FIELD_REF, scalar_type, vector_rhs,
5775 bitsize, bitpos);
5776
5777 gassign *stmt = gimple_build_assign (scalar_dest, rhs);
5778 rhs = make_ssa_name (scalar_dest, stmt);
5779 gimple_assign_set_lhs (stmt, rhs);
5780 gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
5781
5782 stmt = gimple_build_assign (scalar_dest, code, lhs, rhs);
5783 tree new_name = make_ssa_name (scalar_dest, stmt);
5784 gimple_assign_set_lhs (stmt, new_name);
5785 gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
5786 lhs = new_name;
5787 }
5788 return lhs;
5789 }
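
/* As a sketch, for a 4-element vector and PLUS_EXPR the function above
   emits the equivalent of (the SSA names are illustrative):

   s0 = BIT_FIELD_REF <vector_rhs, esize, 0>;
   l1 = lhs + s0;
   s1 = BIT_FIELD_REF <vector_rhs, esize, esize>;
   l2 = l1 + s1;
   s2 = BIT_FIELD_REF <vector_rhs, esize, 2*esize>;
   l3 = l2 + s2;
   s3 = BIT_FIELD_REF <vector_rhs, esize, 3*esize>;
   l4 = l3 + s3;

   and returns L4, i.e. a strictly left-to-right chain as required for
   an in-order (FOLD_LEFT) reduction. */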
5790
5791 /* Perform an in-order reduction (FOLD_LEFT_REDUCTION). STMT_INFO is the
5792 statement that sets the live-out value. REDUC_DEF_STMT is the phi
5793 statement. CODE is the operation performed by STMT_INFO and OPS are
5794 its scalar operands. REDUC_INDEX is the index of the operand in
5795 OPS that is set by REDUC_DEF_STMT. REDUC_FN is the function that
5796 implements in-order reduction, or IFN_LAST if we should open-code it.
5797 VECTYPE_IN is the type of the vector input. MASKS specifies the masks
5798 that should be used to control the operation in a fully-masked loop. */
5799
5800 static bool
5801 vectorize_fold_left_reduction (stmt_vec_info stmt_info,
5802 gimple_stmt_iterator *gsi,
5803 stmt_vec_info *vec_stmt, slp_tree slp_node,
5804 gimple *reduc_def_stmt,
5805 tree_code code, internal_fn reduc_fn,
5806 tree ops[3], tree vectype_in,
5807 int reduc_index, vec_loop_masks *masks)
5808 {
5809 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5810 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
5811 tree vectype_out = STMT_VINFO_VECTYPE (stmt_info);
5812 stmt_vec_info new_stmt_info = NULL;
5813
5814 int ncopies;
5815 if (slp_node)
5816 ncopies = 1;
5817 else
5818 ncopies = vect_get_num_copies (loop_vinfo, vectype_in);
5819
5820 gcc_assert (!nested_in_vect_loop_p (loop, stmt_info));
5821 gcc_assert (ncopies == 1);
5822 gcc_assert (TREE_CODE_LENGTH (code) == binary_op);
5823 gcc_assert (reduc_index == (code == MINUS_EXPR ? 0 : 1));
5824 gcc_assert (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
5825 == FOLD_LEFT_REDUCTION);
5826
5827 if (slp_node)
5828 gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (vectype_out),
5829 TYPE_VECTOR_SUBPARTS (vectype_in)));
5830
5831 tree op0 = ops[1 - reduc_index];
5832
5833 int group_size = 1;
5834 stmt_vec_info scalar_dest_def_info;
5835 auto_vec<tree> vec_oprnds0;
5836 if (slp_node)
5837 {
5838 vect_get_vec_defs (op0, NULL_TREE, stmt_info, &vec_oprnds0, NULL,
5839 slp_node);
5840 group_size = SLP_TREE_SCALAR_STMTS (slp_node).length ();
5841 scalar_dest_def_info = SLP_TREE_SCALAR_STMTS (slp_node)[group_size - 1];
5842 }
5843 else
5844 {
5845 tree loop_vec_def0 = vect_get_vec_def_for_operand (op0, stmt_info);
5846 vec_oprnds0.create (1);
5847 vec_oprnds0.quick_push (loop_vec_def0);
5848 scalar_dest_def_info = stmt_info;
5849 }
5850
5851 tree scalar_dest = gimple_assign_lhs (scalar_dest_def_info->stmt);
5852 tree scalar_type = TREE_TYPE (scalar_dest);
5853 tree reduc_var = gimple_phi_result (reduc_def_stmt);
5854
5855 int vec_num = vec_oprnds0.length ();
5856 gcc_assert (vec_num == 1 || slp_node);
5857 tree vec_elem_type = TREE_TYPE (vectype_out);
5858 gcc_checking_assert (useless_type_conversion_p (scalar_type, vec_elem_type));
5859
5860 tree vector_identity = NULL_TREE;
5861 if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
5862 vector_identity = build_zero_cst (vectype_out);
5863
5864 tree scalar_dest_var = vect_create_destination_var (scalar_dest, NULL);
5865 int i;
5866 tree def0;
5867 FOR_EACH_VEC_ELT (vec_oprnds0, i, def0)
5868 {
5869 gimple *new_stmt;
5870 tree mask = NULL_TREE;
5871 if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
5872 mask = vect_get_loop_mask (gsi, masks, vec_num, vectype_in, i);
5873
5874 /* Handle MINUS by adding the negative. */
5875 if (reduc_fn != IFN_LAST && code == MINUS_EXPR)
5876 {
5877 tree negated = make_ssa_name (vectype_out);
5878 new_stmt = gimple_build_assign (negated, NEGATE_EXPR, def0);
5879 gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
5880 def0 = negated;
5881 }
5882
5883 if (mask)
5884 def0 = merge_with_identity (gsi, mask, vectype_out, def0,
5885 vector_identity);
5886
5887 /* On the first iteration the input is simply the scalar phi
5888 result, and for subsequent iterations it is the output of
5889 the preceding operation. */
5890 if (reduc_fn != IFN_LAST)
5891 {
5892 new_stmt = gimple_build_call_internal (reduc_fn, 2, reduc_var, def0);
5893 /* For chained SLP reductions the output of the previous reduction
5894 operation serves as the input of the next. For the final statement
5895 the output cannot be a temporary - we reuse the original
5896 scalar destination of the last statement. */
5897 if (i != vec_num - 1)
5898 {
5899 gimple_set_lhs (new_stmt, scalar_dest_var);
5900 reduc_var = make_ssa_name (scalar_dest_var, new_stmt);
5901 gimple_set_lhs (new_stmt, reduc_var);
5902 }
5903 }
5904 else
5905 {
5906 reduc_var = vect_expand_fold_left (gsi, scalar_dest_var, code,
5907 reduc_var, def0);
5908 new_stmt = SSA_NAME_DEF_STMT (reduc_var);
5909 /* Remove the statement, so that we can use the same code paths
5910 as for statements that we've just created. */
5911 gimple_stmt_iterator tmp_gsi = gsi_for_stmt (new_stmt);
5912 gsi_remove (&tmp_gsi, false);
5913 }
5914
5915 if (i == vec_num - 1)
5916 {
5917 gimple_set_lhs (new_stmt, scalar_dest);
5918 new_stmt_info = vect_finish_replace_stmt (scalar_dest_def_info,
5919 new_stmt);
5920 }
5921 else
5922 new_stmt_info = vect_finish_stmt_generation (scalar_dest_def_info,
5923 new_stmt, gsi);
5924
5925 if (slp_node)
5926 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
5927 }
5928
5929 if (!slp_node)
5930 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
5931
5932 return true;
5933 }
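
/* For reference, the kind of loop this can handle without -ffast-math
   is, for example:

   double s = init;
   for (int i = 0; i < n; i++)
     s += a[i];

   where each vector iteration feeds a whole vector of A into a single
   in-order reduction (e.g. IFN_FOLD_LEFT_PLUS when the target provides
   it), preserving the original left-to-right association. */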
5934
5935 /* Function is_nonwrapping_integer_induction.
5936
5937 Check if STMT_VINFO (which is part of loop LOOP) describes an induction
5938 that both increments and does not cause overflow. */
5939
5940 static bool
5941 is_nonwrapping_integer_induction (stmt_vec_info stmt_vinfo, struct loop *loop)
5942 {
5943 gphi *phi = as_a <gphi *> (stmt_vinfo->stmt);
5944 tree base = STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (stmt_vinfo);
5945 tree step = STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo);
5946 tree lhs_type = TREE_TYPE (gimple_phi_result (phi));
5947 widest_int ni, max_loop_value, lhs_max;
5948 wi::overflow_type overflow = wi::OVF_NONE;
5949
5950 /* Make sure the loop is integer based. */
5951 if (TREE_CODE (base) != INTEGER_CST
5952 || TREE_CODE (step) != INTEGER_CST)
5953 return false;
5954
5955 /* Check that the max size of the loop will not wrap. */
5956
5957 if (TYPE_OVERFLOW_UNDEFINED (lhs_type))
5958 return true;
5959
5960 if (! max_stmt_executions (loop, &ni))
5961 return false;
5962
5963 max_loop_value = wi::mul (wi::to_widest (step), ni, TYPE_SIGN (lhs_type),
5964 &overflow);
5965 if (overflow)
5966 return false;
5967
5968 max_loop_value = wi::add (wi::to_widest (base), max_loop_value,
5969 TYPE_SIGN (lhs_type), &overflow);
5970 if (overflow)
5971 return false;
5972
5973 return (wi::min_precision (max_loop_value, TYPE_SIGN (lhs_type))
5974 <= TYPE_PRECISION (lhs_type));
5975 }
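
/* For example (illustrative numbers), an induction { 0, +, 4 } over a
   16-bit unsigned type in a loop that executes at most 1000 times gives

   max_loop_value = 0 + 4 * 1000 = 4000

   which needs only 12 bits, so the function returns true; with a step of
   100 the maximum would be 100000, which needs 17 bits and no longer
   fits in the 16-bit type, so the function would return false. */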
5976
5977 /* Function vectorizable_reduction.
5978
5979 Check if STMT_INFO performs a reduction operation that can be vectorized.
5980 If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized
5981 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
5982 Return true if STMT_INFO is vectorizable in this way.
5983
5984 This function also handles reduction idioms (patterns) that have been
5985 recognized in advance during vect_pattern_recog. In this case, STMT_INFO
5986 may be of this form:
5987 X = pattern_expr (arg0, arg1, ..., X)
5988 and its STMT_VINFO_RELATED_STMT points to the last stmt in the original
5989 sequence that had been detected and replaced by the pattern-stmt
5990 (STMT_INFO).
5991
5992 This function also handles reduction of condition expressions, for example:
5993 for (int i = 0; i < N; i++)
5994 if (a[i] < value)
5995 last = a[i];
5996 This is handled by vectorising the loop and creating an additional vector
5997 containing the loop indexes for which "a[i] < value" was true. In the
5998 function epilogue this is reduced to a single max value and then used to
5999 index into the vector of results.
6000
6001 In some cases of reduction patterns, the type of the reduction variable X is
6002 different than the type of the other arguments of STMT_INFO.
6003 In such cases, the vectype that is used when transforming STMT_INFO into
6004 a vector stmt is different than the vectype that is used to determine the
6005 vectorization factor, because it consists of a different number of elements
6006 than the actual number of elements that are being operated upon in parallel.
6007
6008 For example, consider an accumulation of shorts into an int accumulator.
6009 On some targets it's possible to vectorize this pattern operating on 8
6010 shorts at a time (hence, the vectype for purposes of determining the
6011 vectorization factor should be V8HI); on the other hand, the vectype that
6012 is used to create the vector form is actually V4SI (the type of the result).
6013
6014 Upon entry to this function, STMT_VINFO_VECTYPE records the vectype that
6015 indicates what is the actual level of parallelism (V8HI in the example), so
6016 that the right vectorization factor would be derived. This vectype
6017 corresponds to the type of arguments to the reduction stmt, and should *NOT*
6018 be used to create the vectorized stmt. The right vectype for the vectorized
6019 stmt is obtained from the type of the result X:
6020 get_vectype_for_scalar_type (TREE_TYPE (X))
6021
6022 This means that, contrary to "regular" reductions (or "regular" stmts in
6023 general), the following equation:
6024 STMT_VINFO_VECTYPE == get_vectype_for_scalar_type (TREE_TYPE (X))
6025 does *NOT* necessarily hold for reduction patterns. */
6026
6027 bool
6028 vectorizable_reduction (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
6029 stmt_vec_info *vec_stmt, slp_tree slp_node,
6030 slp_instance slp_node_instance,
6031 stmt_vector_for_cost *cost_vec)
6032 {
6033 tree vec_dest;
6034 tree scalar_dest;
6035 tree vectype_out = STMT_VINFO_VECTYPE (stmt_info);
6036 tree vectype_in = NULL_TREE;
6037 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
6038 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
6039 enum tree_code code, orig_code;
6040 internal_fn reduc_fn;
6041 machine_mode vec_mode;
6042 int op_type;
6043 optab optab;
6044 tree new_temp = NULL_TREE;
6045 enum vect_def_type dt, cond_reduc_dt = vect_unknown_def_type;
6046 stmt_vec_info cond_stmt_vinfo = NULL;
6047 enum tree_code cond_reduc_op_code = ERROR_MARK;
6048 tree scalar_type;
6049 bool is_simple_use;
6050 int i;
6051 int ncopies;
6052 int epilog_copies;
6053 stmt_vec_info prev_stmt_info, prev_phi_info;
6054 bool single_defuse_cycle = false;
6055 stmt_vec_info new_stmt_info = NULL;
6056 int j;
6057 tree ops[3];
6058 enum vect_def_type dts[3];
6059 bool nested_cycle = false, found_nested_cycle_def = false;
6060 bool double_reduc = false;
6061 basic_block def_bb;
6062 struct loop * def_stmt_loop;
6063 tree def_arg;
6064 auto_vec<tree> vec_oprnds0;
6065 auto_vec<tree> vec_oprnds1;
6066 auto_vec<tree> vec_oprnds2;
6067 auto_vec<tree> vect_defs;
6068 auto_vec<stmt_vec_info> phis;
6069 int vec_num;
6070 tree def0, tem;
6071 tree cr_index_scalar_type = NULL_TREE, cr_index_vector_type = NULL_TREE;
6072 tree cond_reduc_val = NULL_TREE;
6073
6074 /* Make sure it was already recognized as a reduction computation. */
6075 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_reduction_def
6076 && STMT_VINFO_DEF_TYPE (stmt_info) != vect_nested_cycle)
6077 return false;
6078
6079 if (nested_in_vect_loop_p (loop, stmt_info))
6080 {
6081 loop = loop->inner;
6082 nested_cycle = true;
6083 }
6084
6085 if (REDUC_GROUP_FIRST_ELEMENT (stmt_info))
6086 gcc_assert (slp_node
6087 && REDUC_GROUP_FIRST_ELEMENT (stmt_info) == stmt_info);
6088
6089 if (gphi *phi = dyn_cast <gphi *> (stmt_info->stmt))
6090 {
6091 tree phi_result = gimple_phi_result (phi);
6092 /* Analysis is fully done on the reduction stmt invocation. */
6093 if (! vec_stmt)
6094 {
6095 if (slp_node)
6096 slp_node_instance->reduc_phis = slp_node;
6097
6098 STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type;
6099 return true;
6100 }
6101
6102 if (STMT_VINFO_REDUC_TYPE (stmt_info) == FOLD_LEFT_REDUCTION)
6103 /* Leave the scalar phi in place. Note that checking
6104 STMT_VINFO_VEC_REDUCTION_TYPE (as below) only works
6105 for reductions involving a single statement. */
6106 return true;
6107
6108 stmt_vec_info reduc_stmt_info = STMT_VINFO_REDUC_DEF (stmt_info);
6109 reduc_stmt_info = vect_stmt_to_vectorize (reduc_stmt_info);
6110
6111 if (STMT_VINFO_VEC_REDUCTION_TYPE (reduc_stmt_info)
6112 == EXTRACT_LAST_REDUCTION)
6113 /* Leave the scalar phi in place. */
6114 return true;
6115
6116 gassign *reduc_stmt = as_a <gassign *> (reduc_stmt_info->stmt);
6117 for (unsigned k = 1; k < gimple_num_ops (reduc_stmt); ++k)
6118 {
6119 tree op = gimple_op (reduc_stmt, k);
6120 if (op == phi_result)
6121 continue;
6122 if (k == 1
6123 && gimple_assign_rhs_code (reduc_stmt) == COND_EXPR)
6124 continue;
6125 if (!vectype_in
6126 || (GET_MODE_SIZE (SCALAR_TYPE_MODE (TREE_TYPE (vectype_in)))
6127 < GET_MODE_SIZE (SCALAR_TYPE_MODE (TREE_TYPE (op)))))
6128 vectype_in = get_vectype_for_scalar_type (TREE_TYPE (op));
6129 break;
6130 }
6131 gcc_assert (vectype_in);
6132
6133 if (slp_node)
6134 ncopies = 1;
6135 else
6136 ncopies = vect_get_num_copies (loop_vinfo, vectype_in);
6137
6138 stmt_vec_info use_stmt_info;
6139 if (ncopies > 1
6140 && STMT_VINFO_RELEVANT (reduc_stmt_info) <= vect_used_only_live
6141 && (use_stmt_info = loop_vinfo->lookup_single_use (phi_result))
6142 && vect_stmt_to_vectorize (use_stmt_info) == reduc_stmt_info)
6143 single_defuse_cycle = true;
6144
6145 /* Create the destination vector. */
6146 scalar_dest = gimple_assign_lhs (reduc_stmt);
6147 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
6148
6149 if (slp_node)
6150 /* The size vect_schedule_slp_instance computes is off for us. */
6151 vec_num = vect_get_num_vectors
6152 (LOOP_VINFO_VECT_FACTOR (loop_vinfo)
6153 * SLP_TREE_SCALAR_STMTS (slp_node).length (),
6154 vectype_in);
6155 else
6156 vec_num = 1;
6157
6158 /* Generate the reduction PHIs upfront. */
6159 prev_phi_info = NULL;
6160 for (j = 0; j < ncopies; j++)
6161 {
6162 if (j == 0 || !single_defuse_cycle)
6163 {
6164 for (i = 0; i < vec_num; i++)
6165 {
6166 /* Create the reduction-phi that defines the reduction
6167 operand. */
6168 gimple *new_phi = create_phi_node (vec_dest, loop->header);
6169 stmt_vec_info new_phi_info = loop_vinfo->add_stmt (new_phi);
6170
6171 if (slp_node)
6172 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_phi_info);
6173 else
6174 {
6175 if (j == 0)
6176 STMT_VINFO_VEC_STMT (stmt_info)
6177 = *vec_stmt = new_phi_info;
6178 else
6179 STMT_VINFO_RELATED_STMT (prev_phi_info) = new_phi_info;
6180 prev_phi_info = new_phi_info;
6181 }
6182 }
6183 }
6184 }
6185
6186 return true;
6187 }
6188
6189 /* 1. Is vectorizable reduction? */
6190 /* Not supportable if the reduction variable is used in the loop, unless
6191 it's a reduction chain. */
6192 if (STMT_VINFO_RELEVANT (stmt_info) > vect_used_in_outer
6193 && !REDUC_GROUP_FIRST_ELEMENT (stmt_info))
6194 return false;
6195
6196 /* Reductions that are not used even in an enclosing outer-loop
6197 are expected to be "live" (used out of the loop). */
6198 if (STMT_VINFO_RELEVANT (stmt_info) == vect_unused_in_scope
6199 && !STMT_VINFO_LIVE_P (stmt_info))
6200 return false;
6201
6202 /* 2. Has this been recognized as a reduction pattern?
6203
6204 Check if STMT represents a pattern that has been recognized
6205 in earlier analysis stages. For stmts that represent a pattern,
6206 the STMT_VINFO_RELATED_STMT field records the last stmt in
6207 the original sequence that constitutes the pattern. */
6208
6209 stmt_vec_info orig_stmt_info = STMT_VINFO_RELATED_STMT (stmt_info);
6210 if (orig_stmt_info)
6211 {
6212 gcc_assert (STMT_VINFO_IN_PATTERN_P (orig_stmt_info));
6213 gcc_assert (!STMT_VINFO_IN_PATTERN_P (stmt_info));
6214 }
6215
6216 /* 3. Check the operands of the operation. The first operands are defined
6217 inside the loop body. The last operand is the reduction variable,
6218 which is defined by the loop-header-phi. */
6219
6220 gassign *stmt = as_a <gassign *> (stmt_info->stmt);
6221
6222 /* Flatten RHS. */
6223 switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
6224 {
6225 case GIMPLE_BINARY_RHS:
6226 code = gimple_assign_rhs_code (stmt);
6227 op_type = TREE_CODE_LENGTH (code);
6228 gcc_assert (op_type == binary_op);
6229 ops[0] = gimple_assign_rhs1 (stmt);
6230 ops[1] = gimple_assign_rhs2 (stmt);
6231 break;
6232
6233 case GIMPLE_TERNARY_RHS:
6234 code = gimple_assign_rhs_code (stmt);
6235 op_type = TREE_CODE_LENGTH (code);
6236 gcc_assert (op_type == ternary_op);
6237 ops[0] = gimple_assign_rhs1 (stmt);
6238 ops[1] = gimple_assign_rhs2 (stmt);
6239 ops[2] = gimple_assign_rhs3 (stmt);
6240 break;
6241
6242 case GIMPLE_UNARY_RHS:
6243 return false;
6244
6245 default:
6246 gcc_unreachable ();
6247 }
6248
6249 if (code == COND_EXPR && slp_node)
6250 return false;
6251
6252 scalar_dest = gimple_assign_lhs (stmt);
6253 scalar_type = TREE_TYPE (scalar_dest);
6254 if (!POINTER_TYPE_P (scalar_type) && !INTEGRAL_TYPE_P (scalar_type)
6255 && !SCALAR_FLOAT_TYPE_P (scalar_type))
6256 return false;
6257
6258 /* Do not try to vectorize bit-precision reductions. */
6259 if (!type_has_mode_precision_p (scalar_type))
6260 return false;
6261
6262 /* All uses but the last are expected to be defined in the loop.
6263 The last use is the reduction variable. In case of a nested cycle this
6264 assumption is not true: we use reduc_index to record the index of the
6265 reduction variable. */
6266 stmt_vec_info reduc_def_info = NULL;
6267 int reduc_index = -1;
6268 for (i = 0; i < op_type; i++)
6269 {
6270 /* The condition of COND_EXPR is checked in vectorizable_condition(). */
6271 if (i == 0 && code == COND_EXPR)
6272 continue;
6273
6274 stmt_vec_info def_stmt_info;
6275 is_simple_use = vect_is_simple_use (ops[i], loop_vinfo, &dts[i], &tem,
6276 &def_stmt_info);
6277 dt = dts[i];
6278 gcc_assert (is_simple_use);
6279 if (dt == vect_reduction_def)
6280 {
6281 reduc_def_info = def_stmt_info;
6282 reduc_index = i;
6283 continue;
6284 }
6285 else if (tem)
6286 {
6287 /* To properly compute ncopies we are interested in the widest
6288 input type in case we're looking at a widening accumulation. */
6289 if (!vectype_in
6290 || (GET_MODE_SIZE (SCALAR_TYPE_MODE (TREE_TYPE (vectype_in)))
6291 < GET_MODE_SIZE (SCALAR_TYPE_MODE (TREE_TYPE (tem)))))
6292 vectype_in = tem;
6293 }
6294
6295 if (dt != vect_internal_def
6296 && dt != vect_external_def
6297 && dt != vect_constant_def
6298 && dt != vect_induction_def
6299 && !(dt == vect_nested_cycle && nested_cycle))
6300 return false;
6301
6302 if (dt == vect_nested_cycle)
6303 {
6304 found_nested_cycle_def = true;
6305 reduc_def_info = def_stmt_info;
6306 reduc_index = i;
6307 }
6308
6309 if (i == 1 && code == COND_EXPR)
6310 {
6311 /* Record how value of COND_EXPR is defined. */
6312 if (dt == vect_constant_def)
6313 {
6314 cond_reduc_dt = dt;
6315 cond_reduc_val = ops[i];
6316 }
6317 if (dt == vect_induction_def
6318 && def_stmt_info
6319 && is_nonwrapping_integer_induction (def_stmt_info, loop))
6320 {
6321 cond_reduc_dt = dt;
6322 cond_stmt_vinfo = def_stmt_info;
6323 }
6324 }
6325 }
6326
6327 if (!vectype_in)
6328 vectype_in = vectype_out;
6329
6330 /* When vectorizing a reduction chain w/o SLP the reduction PHI is not
6331 directly used in the stmt. */
6332 if (reduc_index == -1)
6333 {
6334 if (STMT_VINFO_REDUC_TYPE (stmt_info) == FOLD_LEFT_REDUCTION)
6335 {
6336 if (dump_enabled_p ())
6337 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6338 "in-order reduction chain without SLP.\n");
6339 return false;
6340 }
6341
6342 if (orig_stmt_info)
6343 reduc_def_info = STMT_VINFO_REDUC_DEF (orig_stmt_info);
6344 else
6345 reduc_def_info = STMT_VINFO_REDUC_DEF (stmt_info);
6346 }
6347
6348 if (! reduc_def_info)
6349 return false;
6350
6351 gphi *reduc_def_phi = dyn_cast <gphi *> (reduc_def_info->stmt);
6352 if (!reduc_def_phi)
6353 return false;
6354
6355 if (!(reduc_index == -1
6356 || dts[reduc_index] == vect_reduction_def
6357 || dts[reduc_index] == vect_nested_cycle
6358 || ((dts[reduc_index] == vect_internal_def
6359 || dts[reduc_index] == vect_external_def
6360 || dts[reduc_index] == vect_constant_def
6361 || dts[reduc_index] == vect_induction_def)
6362 && nested_cycle && found_nested_cycle_def)))
6363 {
6364 /* For pattern recognized stmts, orig_stmt might be a reduction,
6365 but some helper statements for the pattern might not, or
6366 might be COND_EXPRs with reduction uses in the condition. */
6367 gcc_assert (orig_stmt_info);
6368 return false;
6369 }
6370
6371 /* PHIs should not participate in patterns. */
6372 gcc_assert (!STMT_VINFO_RELATED_STMT (reduc_def_info));
6373 enum vect_reduction_type v_reduc_type
6374 = STMT_VINFO_REDUC_TYPE (reduc_def_info);
6375 stmt_vec_info tmp = STMT_VINFO_REDUC_DEF (reduc_def_info);
6376
6377 STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) = v_reduc_type;
6378 /* If we have a condition reduction, see if we can simplify it further. */
6379 if (v_reduc_type == COND_REDUCTION)
6380 {
6381 /* TODO: We can't yet handle reduction chains, since we need to treat
6382 each COND_EXPR in the chain specially, not just the last one.
6383 E.g. for:
6384
6385 x_1 = PHI <x_3, ...>
6386 x_2 = a_2 ? ... : x_1;
6387 x_3 = a_3 ? ... : x_2;
6388
6389 we're interested in the last element in x_3 for which a_2 || a_3
6390 is true, whereas the current reduction chain handling would
6391 vectorize x_2 as a normal VEC_COND_EXPR and only treat x_3
6392 as a reduction operation. */
6393 if (reduc_index == -1)
6394 {
6395 if (dump_enabled_p ())
6396 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6397 "conditional reduction chains not supported\n");
6398 return false;
6399 }
6400
6401 /* vect_is_simple_reduction ensured that operand 2 is the
6402 loop-carried operand. */
6403 gcc_assert (reduc_index == 2);
6404
6405 /* Loop peeling modifies the initial value of the reduction PHI, which
6406 makes the reduction stmt that is to be transformed differ from the
6407 original stmt analyzed. We need to record the reduction code for a
6408 CONST_COND_REDUCTION type reduction at the analysis stage, so that
6409 it can be used directly at the transform stage. */
6410 if (STMT_VINFO_VEC_CONST_COND_REDUC_CODE (stmt_info) == MAX_EXPR
6411 || STMT_VINFO_VEC_CONST_COND_REDUC_CODE (stmt_info) == MIN_EXPR)
6412 {
6413 /* Also set the reduction type to CONST_COND_REDUCTION. */
6414 gcc_assert (cond_reduc_dt == vect_constant_def);
6415 STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) = CONST_COND_REDUCTION;
6416 }
6417 else if (direct_internal_fn_supported_p (IFN_FOLD_EXTRACT_LAST,
6418 vectype_in, OPTIMIZE_FOR_SPEED))
6419 {
6420 if (dump_enabled_p ())
6421 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6422 "optimizing condition reduction with"
6423 " FOLD_EXTRACT_LAST.\n");
6424 STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) = EXTRACT_LAST_REDUCTION;
6425 }
6426 else if (cond_reduc_dt == vect_induction_def)
6427 {
6428 tree base
6429 = STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (cond_stmt_vinfo);
6430 tree step = STMT_VINFO_LOOP_PHI_EVOLUTION_PART (cond_stmt_vinfo);
6431
6432 gcc_assert (TREE_CODE (base) == INTEGER_CST
6433 && TREE_CODE (step) == INTEGER_CST);
6434 cond_reduc_val = NULL_TREE;
6435 /* Find a suitable value: below BASE for MAX_EXPR and above BASE for
6436 MIN_EXPR; for now punt if BASE is the minimum value of the type
6437 for MAX_EXPR or the maximum value of the type for MIN_EXPR. */
6438 if (tree_int_cst_sgn (step) == -1)
6439 {
6440 cond_reduc_op_code = MIN_EXPR;
6441 if (tree_int_cst_sgn (base) == -1)
6442 cond_reduc_val = build_int_cst (TREE_TYPE (base), 0);
6443 else if (tree_int_cst_lt (base,
6444 TYPE_MAX_VALUE (TREE_TYPE (base))))
6445 cond_reduc_val
6446 = int_const_binop (PLUS_EXPR, base, integer_one_node);
6447 }
6448 else
6449 {
6450 cond_reduc_op_code = MAX_EXPR;
6451 if (tree_int_cst_sgn (base) == 1)
6452 cond_reduc_val = build_int_cst (TREE_TYPE (base), 0);
6453 else if (tree_int_cst_lt (TYPE_MIN_VALUE (TREE_TYPE (base)),
6454 base))
6455 cond_reduc_val
6456 = int_const_binop (MINUS_EXPR, base, integer_one_node);
6457 }
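
/* For instance (illustrative values), a decreasing induction with base 10
   makes the code above pick MIN_EXPR and cond_reduc_val 11, a value the
   induction can never take; an increasing induction with a positive base
   picks MAX_EXPR and cond_reduc_val 0, which lies below every value of
   the induction. */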
6458 if (cond_reduc_val)
6459 {
6460 if (dump_enabled_p ())
6461 dump_printf_loc (MSG_NOTE, vect_location,
6462 "condition expression based on "
6463 "integer induction.\n");
6464 STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
6465 = INTEGER_INDUC_COND_REDUCTION;
6466 }
6467 }
6468 else if (cond_reduc_dt == vect_constant_def)
6469 {
6470 enum vect_def_type cond_initial_dt;
6471 gimple *def_stmt = SSA_NAME_DEF_STMT (ops[reduc_index]);
6472 tree cond_initial_val
6473 = PHI_ARG_DEF_FROM_EDGE (def_stmt, loop_preheader_edge (loop));
6474
6475 gcc_assert (cond_reduc_val != NULL_TREE);
6476 vect_is_simple_use (cond_initial_val, loop_vinfo, &cond_initial_dt);
6477 if (cond_initial_dt == vect_constant_def
6478 && types_compatible_p (TREE_TYPE (cond_initial_val),
6479 TREE_TYPE (cond_reduc_val)))
6480 {
6481 tree e = fold_binary (LE_EXPR, boolean_type_node,
6482 cond_initial_val, cond_reduc_val);
6483 if (e && (integer_onep (e) || integer_zerop (e)))
6484 {
6485 if (dump_enabled_p ())
6486 dump_printf_loc (MSG_NOTE, vect_location,
6487 "condition expression based on "
6488 "compile time constant.\n");
6489 /* Record reduction code at analysis stage. */
6490 STMT_VINFO_VEC_CONST_COND_REDUC_CODE (stmt_info)
6491 = integer_onep (e) ? MAX_EXPR : MIN_EXPR;
6492 STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
6493 = CONST_COND_REDUCTION;
6494 }
6495 }
6496 }
6497 }
6498
6499 if (orig_stmt_info)
6500 gcc_assert (tmp == orig_stmt_info
6501 || REDUC_GROUP_FIRST_ELEMENT (tmp) == orig_stmt_info);
6502 else
6503 /* We changed STMT to be the first stmt in the reduction chain, hence
6504 we check that in this case the first element in the chain is STMT. */
6505 gcc_assert (tmp == stmt_info
6506 || REDUC_GROUP_FIRST_ELEMENT (tmp) == stmt_info);
6507
6508 if (STMT_VINFO_LIVE_P (reduc_def_info))
6509 return false;
6510
6511 if (slp_node)
6512 ncopies = 1;
6513 else
6514 ncopies = vect_get_num_copies (loop_vinfo, vectype_in);
6515
6516 gcc_assert (ncopies >= 1);
6517
6518 vec_mode = TYPE_MODE (vectype_in);
6519 poly_uint64 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
6520
6521 if (code == COND_EXPR)
6522 {
6523 /* Only call during the analysis stage, otherwise we'll lose
6524 STMT_VINFO_TYPE. */
6525 if (!vec_stmt && !vectorizable_condition (stmt_info, gsi, NULL,
6526 ops[reduc_index], 0, NULL,
6527 cost_vec))
6528 {
6529 if (dump_enabled_p ())
6530 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6531 "unsupported condition in reduction\n");
6532 return false;
6533 }
6534 }
6535 else
6536 {
6537 /* 4. Supportable by target? */
6538
6539 if (code == LSHIFT_EXPR || code == RSHIFT_EXPR
6540 || code == LROTATE_EXPR || code == RROTATE_EXPR)
6541 {
6542 /* Shifts and rotates are only supported by vectorizable_shift,
6543 not vectorizable_reduction. */
6544 if (dump_enabled_p ())
6545 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6546 "unsupported shift or rotation.\n");
6547 return false;
6548 }
6549
6550 /* 4.1. Check support for the operation in the loop. */
6551 optab = optab_for_tree_code (code, vectype_in, optab_default);
6552 if (!optab)
6553 {
6554 if (dump_enabled_p ())
6555 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6556 "no optab.\n");
6557
6558 return false;
6559 }
6560
6561 if (optab_handler (optab, vec_mode) == CODE_FOR_nothing)
6562 {
6563 if (dump_enabled_p ())
6564 dump_printf (MSG_NOTE, "op not supported by target.\n");
6565
6566 if (maybe_ne (GET_MODE_SIZE (vec_mode), UNITS_PER_WORD)
6567 || !vect_worthwhile_without_simd_p (loop_vinfo, code))
6568 return false;
6569
6570 if (dump_enabled_p ())
6571 dump_printf (MSG_NOTE, "proceeding using word mode.\n");
6572 }
6573
6574 /* Worthwhile without SIMD support? */
6575 if (!VECTOR_MODE_P (TYPE_MODE (vectype_in))
6576 && !vect_worthwhile_without_simd_p (loop_vinfo, code))
6577 {
6578 if (dump_enabled_p ())
6579 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6580 "not worthwhile without SIMD support.\n");
6581
6582 return false;
6583 }
6584 }
6585
6586 /* 4.2. Check support for the epilog operation.
6587
6588 If STMT represents a reduction pattern, then the type of the
6589 reduction variable may be different than the type of the rest
6590 of the arguments. For example, consider the case of accumulation
6591 of shorts into an int accumulator; The original code:
6592 S1: int_a = (int) short_a;
6593 orig_stmt-> S2: int_acc = plus <int_a ,int_acc>;
6594
6595 was replaced with:
6596 STMT: int_acc = widen_sum <short_a, int_acc>
6597
6598 This means that:
6599 1. The tree-code that is used to create the vector operation in the
6600 epilog code (that reduces the partial results) is not the
6601 tree-code of STMT, but is rather the tree-code of the original
6602 stmt from the pattern that STMT is replacing. I.e, in the example
6603 above we want to use 'widen_sum' in the loop, but 'plus' in the
6604 epilog.
6605 2. The type (mode) we use to check available target support
6606 for the vector operation to be created in the *epilog*, is
6607 determined by the type of the reduction variable (in the example
6608 above we'd check this: optab_handler (plus_optab, vect_int_mode])).
6609 However the type (mode) we use to check available target support
6610 for the vector operation to be created *inside the loop*, is
6611 determined by the type of the other arguments to STMT (in the
6612 example we'd check this: optab_handler (widen_sum_optab,
6613 vect_short_mode)).
6614
6615 This is contrary to "regular" reductions, in which the types of all
6616 the arguments are the same as the type of the reduction variable.
6617 For "regular" reductions we can therefore use the same vector type
6618 (and also the same tree-code) when generating the epilog code and
6619 when generating the code inside the loop. */
6620
6621 vect_reduction_type reduction_type
6622 = STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info);
6623 if (orig_stmt_info
6624 && (reduction_type == TREE_CODE_REDUCTION
6625 || reduction_type == FOLD_LEFT_REDUCTION))
6626 {
6627 /* This is a reduction pattern: get the vectype from the type of the
6628 reduction variable, and get the tree-code from orig_stmt. */
6629 orig_code = gimple_assign_rhs_code (orig_stmt_info->stmt);
6630 gcc_assert (vectype_out);
6631 vec_mode = TYPE_MODE (vectype_out);
6632 }
6633 else
6634 {
6635 /* Regular reduction: the same vectype and tree-code as used for
6636 the vector code inside the loop can be used for the epilog code. */
6637 orig_code = code;
6638
6639 if (code == MINUS_EXPR)
6640 orig_code = PLUS_EXPR;
6641
6642 /* For simple condition reductions, replace with the actual expression
6643 we want to base our reduction around. */
6644 if (reduction_type == CONST_COND_REDUCTION)
6645 {
6646 orig_code = STMT_VINFO_VEC_CONST_COND_REDUC_CODE (stmt_info);
6647 gcc_assert (orig_code == MAX_EXPR || orig_code == MIN_EXPR);
6648 }
6649 else if (reduction_type == INTEGER_INDUC_COND_REDUCTION)
6650 orig_code = cond_reduc_op_code;
6651 }
6652
6653 if (nested_cycle)
6654 {
6655 def_bb = gimple_bb (reduc_def_phi);
6656 def_stmt_loop = def_bb->loop_father;
6657 def_arg = PHI_ARG_DEF_FROM_EDGE (reduc_def_phi,
6658 loop_preheader_edge (def_stmt_loop));
6659 stmt_vec_info def_arg_stmt_info = loop_vinfo->lookup_def (def_arg);
6660 if (def_arg_stmt_info
6661 && (STMT_VINFO_DEF_TYPE (def_arg_stmt_info)
6662 == vect_double_reduction_def))
6663 double_reduc = true;
6664 }
6665
6666 reduc_fn = IFN_LAST;
6667
6668 if (reduction_type == TREE_CODE_REDUCTION
6669 || reduction_type == FOLD_LEFT_REDUCTION
6670 || reduction_type == INTEGER_INDUC_COND_REDUCTION
6671 || reduction_type == CONST_COND_REDUCTION)
6672 {
6673 if (reduction_type == FOLD_LEFT_REDUCTION
6674 ? fold_left_reduction_fn (orig_code, &reduc_fn)
6675 : reduction_fn_for_scalar_code (orig_code, &reduc_fn))
6676 {
6677 if (reduc_fn != IFN_LAST
6678 && !direct_internal_fn_supported_p (reduc_fn, vectype_out,
6679 OPTIMIZE_FOR_SPEED))
6680 {
6681 if (dump_enabled_p ())
6682 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6683 "reduc op not supported by target.\n");
6684
6685 reduc_fn = IFN_LAST;
6686 }
6687 }
6688 else
6689 {
6690 if (!nested_cycle || double_reduc)
6691 {
6692 if (dump_enabled_p ())
6693 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6694 "no reduc code for scalar code.\n");
6695
6696 return false;
6697 }
6698 }
6699 }
6700 else if (reduction_type == COND_REDUCTION)
6701 {
6702 int scalar_precision
6703 = GET_MODE_PRECISION (SCALAR_TYPE_MODE (scalar_type));
6704 cr_index_scalar_type = make_unsigned_type (scalar_precision);
6705 cr_index_vector_type = build_vector_type (cr_index_scalar_type,
6706 nunits_out);
6707
6708 if (direct_internal_fn_supported_p (IFN_REDUC_MAX, cr_index_vector_type,
6709 OPTIMIZE_FOR_SPEED))
6710 reduc_fn = IFN_REDUC_MAX;
6711 }
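   /* An illustrative sketch (hypothetical scalar code, details simplified):
      for a condition reduction such as

	for (i = 0; i < n; i++)
	  if (a[i] < val)
	    last = b[i];

      the vectorized loop keeps a vector of iteration indices (offset by one,
      since index 0 is reserved for "no match") for the lanes whose condition
      held, and the epilog uses IFN_REDUC_MAX over that index vector to find
      the latest matching iteration; that is why an unsigned index type with
      the precision of the condition's scalar type is built here.  */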
6712
6713 if (reduction_type != EXTRACT_LAST_REDUCTION
6714 && reduc_fn == IFN_LAST
6715 && !nunits_out.is_constant ())
6716 {
6717 if (dump_enabled_p ())
6718 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6719 "missing target support for reduction on"
6720 " variable-length vectors.\n");
6721 return false;
6722 }
6723
6724 if ((double_reduc || reduction_type != TREE_CODE_REDUCTION)
6725 && ncopies > 1)
6726 {
6727 if (dump_enabled_p ())
6728 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6729 "multiple types in double reduction or condition "
6730 "reduction.\n");
6731 return false;
6732 }
6733
6734 /* For SLP reductions, see if there is a neutral value we can use. */
6735 tree neutral_op = NULL_TREE;
6736 if (slp_node)
6737 neutral_op = neutral_op_for_slp_reduction
6738 (slp_node_instance->reduc_phis, code,
6739 REDUC_GROUP_FIRST_ELEMENT (stmt_info) != NULL);
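  /* Illustrative note (a non-exhaustive sketch): the neutral value is the
     identity of the reduction operation, e.g. 0 for PLUS_EXPR, 1 for
     MULT_EXPR, all-ones for BIT_AND_EXPR and 0 for BIT_IOR_EXPR; lanes
     initialized to it leave the final reduced result unchanged.  */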
6740
6741 if (double_reduc && reduction_type == FOLD_LEFT_REDUCTION)
6742 {
6743 /* We can't support in-order reductions of code such as this:
6744
6745 for (int i = 0; i < n1; ++i)
6746 for (int j = 0; j < n2; ++j)
6747 l += a[j];
6748
6749 since GCC effectively transforms the loop when vectorizing:
6750
6751 for (int i = 0; i < n1 / VF; ++i)
6752 for (int j = 0; j < n2; ++j)
6753 for (int k = 0; k < VF; ++k)
6754 l += a[j];
6755
6756 which is a reassociation of the original operation. */
6757 if (dump_enabled_p ())
6758 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6759 "in-order double reduction not supported.\n");
6760
6761 return false;
6762 }
6763
6764 if (reduction_type == FOLD_LEFT_REDUCTION
6765 && slp_node
6766 && !REDUC_GROUP_FIRST_ELEMENT (stmt_info))
6767 {
6768 /* We cannot use in-order reductions in this case because there is
6769 an implicit reassociation of the operations involved. */
6770 if (dump_enabled_p ())
6771 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6772 "in-order unchained SLP reductions not supported.\n");
6773 return false;
6774 }
6775
6776 /* For double reductions, and for SLP reductions with a neutral value,
6777 we construct a variable-length initial vector by loading a vector
6778 full of the neutral value and then shift-and-inserting the start
6779 values into the low-numbered elements. */
6780 if ((double_reduc || neutral_op)
6781 && !nunits_out.is_constant ()
6782 && !direct_internal_fn_supported_p (IFN_VEC_SHL_INSERT,
6783 vectype_out, OPTIMIZE_FOR_SPEED))
6784 {
6785 if (dump_enabled_p ())
6786 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6787 "reduction on variable-length vectors requires"
6788 " target support for a vector-shift-and-insert"
6789 " operation.\n");
6790 return false;
6791 }
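   /* An illustrative sketch (assumed element layout): for a sum reduction
      with start value S and neutral value 0 on a variable-length vector,
      the initial accumulator would be built roughly as

	acc = { 0, 0, ..., 0 };		   splat of the neutral value
	acc = VEC_SHL_INSERT (acc, S);	   -> { S, 0, ..., 0 }

      so the start value lands in a low-numbered element and all remaining
      lanes hold the neutral value.  */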
6792
6793 /* Check extra constraints for variable-length unchained SLP reductions. */
6794 if (STMT_SLP_TYPE (stmt_info)
6795 && !REDUC_GROUP_FIRST_ELEMENT (stmt_info)
6796 && !nunits_out.is_constant ())
6797 {
6798 /* We checked above that we could build the initial vector when
6799 there's a neutral element value. Check here for the case in
6800 which each SLP statement has its own initial value and in which
6801 that value needs to be repeated for every instance of the
6802 statement within the initial vector. */
6803 unsigned int group_size = SLP_TREE_SCALAR_STMTS (slp_node).length ();
6804 scalar_mode elt_mode = SCALAR_TYPE_MODE (TREE_TYPE (vectype_out));
6805 if (!neutral_op
6806 && !can_duplicate_and_interleave_p (group_size, elt_mode))
6807 {
6808 if (dump_enabled_p ())
6809 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6810 "unsupported form of SLP reduction for"
6811 " variable-length vectors: cannot build"
6812 " initial vector.\n");
6813 return false;
6814 }
6815 /* The epilogue code relies on the number of elements being a multiple
6816 of the group size. The duplicate-and-interleave approach to setting
6817 up the initial vector does too. */
6818 if (!multiple_p (nunits_out, group_size))
6819 {
6820 if (dump_enabled_p ())
6821 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6822 "unsupported form of SLP reduction for"
6823 " variable-length vectors: the vector size"
6824 " is not a multiple of the number of results.\n");
6825 return false;
6826 }
6827 }
6828
6829 /* In case of widening multiplication by a constant, we update the type
6830 of the constant to be the type of the other operand. We check that the
6831 constant fits the type in the pattern recognition pass. */
6832 if (code == DOT_PROD_EXPR
6833 && !types_compatible_p (TREE_TYPE (ops[0]), TREE_TYPE (ops[1])))
6834 {
6835 if (TREE_CODE (ops[0]) == INTEGER_CST)
6836 ops[0] = fold_convert (TREE_TYPE (ops[1]), ops[0]);
6837 else if (TREE_CODE (ops[1]) == INTEGER_CST)
6838 ops[1] = fold_convert (TREE_TYPE (ops[0]), ops[1]);
6839 else
6840 {
6841 if (dump_enabled_p ())
6842 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6843 "invalid types in dot-prod\n");
6844
6845 return false;
6846 }
6847 }
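   /* An illustrative sketch (hypothetical source): for a dot-product
      pattern such as

	int_acc += (int) b[i] * 2;	   b[] is an array of shorts

      the constant 2 is folded here to the narrow type of the other
      multiplicand, so that both DOT_PROD_EXPR multiplication operands have
      compatible types; that the constant actually fits that type was
      already verified during pattern recognition.  */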
6848
6849 if (reduction_type == COND_REDUCTION)
6850 {
6851 widest_int ni;
6852
6853 if (! max_loop_iterations (loop, &ni))
6854 {
6855 if (dump_enabled_p ())
6856 dump_printf_loc (MSG_NOTE, vect_location,
6857 "loop count not known, cannot create cond "
6858 "reduction.\n");
6859 return false;
6860 }
6861 /* Convert backedges to iterations. */
6862 ni += 1;
6863
6864 /* The additional index will be the same type as the condition. Check
6865 that the loop count fits into this type less one (the zero slot is
6866 reserved for when there are no matches). */
6867 tree max_index = TYPE_MAX_VALUE (cr_index_scalar_type);
6868 if (wi::geu_p (ni, wi::to_widest (max_index)))
6869 {
6870 if (dump_enabled_p ())
6871 dump_printf_loc (MSG_NOTE, vect_location,
6872 "loop size is greater than data size.\n");
6873 return false;
6874 }
6875 }
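   /* A worked example (illustrative numbers): if the condition's scalar
      type is a 16-bit integer, cr_index_scalar_type is a 16-bit unsigned
      type whose TYPE_MAX_VALUE is 65535.  Because index 0 is reserved for
      "no match", only loops with fewer than 65535 iterations can use this
      form of condition reduction; larger or unknown counts are rejected
      above.  */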
6876
6877 /* In case the vectorization factor (VF) is bigger than the number
6878 of elements that we can fit in a vectype (nunits), we have to generate
6879 more than one vector stmt - i.e. we need to "unroll" the
6880 vector stmt by a factor VF/nunits. For more details see documentation
6881 in vectorizable_operation. */
6882
6883 /* If the reduction is used in an outer loop we need to generate
6884 VF intermediate results, like so (e.g. for ncopies=2):
6885 r0 = phi (init, r0)
6886 r1 = phi (init, r1)
6887 r0 = x0 + r0;
6888 r1 = x1 + r1;
6889 (i.e. we generate VF results in 2 registers).
6890 In this case we have a separate def-use cycle for each copy, and therefore
6891 for each copy we get the vector def for the reduction variable from the
6892 respective phi node created for this copy.
6893
6894 Otherwise (the reduction is unused in the loop nest), we can combine
6895 together intermediate results, like so (e.g. for ncopies=2):
6896 r = phi (init, r)
6897 r = x0 + r;
6898 r = x1 + r;
6899 (i.e. we generate VF/2 results in a single register).
6900 In this case for each copy we get the vector def for the reduction variable
6901 from the vectorized reduction operation generated in the previous iteration.
6902
6903 This only works when we see both the reduction PHI and its only consumer
6904 in vectorizable_reduction and there are no intermediate stmts
6905 participating. */
6906 stmt_vec_info use_stmt_info;
6907 tree reduc_phi_result = gimple_phi_result (reduc_def_phi);
6908 if (ncopies > 1
6909 && (STMT_VINFO_RELEVANT (stmt_info) <= vect_used_only_live)
6910 && (use_stmt_info = loop_vinfo->lookup_single_use (reduc_phi_result))
6911 && vect_stmt_to_vectorize (use_stmt_info) == stmt_info)
6912 {
6913 single_defuse_cycle = true;
6914 epilog_copies = 1;
6915 }
6916 else
6917 epilog_copies = ncopies;
6918
6919 /* If the reduction stmt is one of the patterns that have lane
6920 reduction embedded, we cannot handle the case of !single_defuse_cycle. */
6921 if ((ncopies > 1
6922 && ! single_defuse_cycle)
6923 && (code == DOT_PROD_EXPR
6924 || code == WIDEN_SUM_EXPR
6925 || code == SAD_EXPR))
6926 {
6927 if (dump_enabled_p ())
6928 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6929 "multi def-use cycle not possible for lane-reducing "
6930 "reduction operation\n");
6931 return false;
6932 }
6933
6934 if (slp_node)
6935 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
6936 else
6937 vec_num = 1;
6938
6939 internal_fn cond_fn = get_conditional_internal_fn (code);
6940 vec_loop_masks *masks = &LOOP_VINFO_MASKS (loop_vinfo);
6941
6942 if (!vec_stmt) /* transformation not required. */
6943 {
6944 vect_model_reduction_cost (stmt_info, reduc_fn, ncopies, cost_vec);
6945 if (loop_vinfo && LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo))
6946 {
6947 if (reduction_type != FOLD_LEFT_REDUCTION
6948 && (cond_fn == IFN_LAST
6949 || !direct_internal_fn_supported_p (cond_fn, vectype_in,
6950 OPTIMIZE_FOR_SPEED)))
6951 {
6952 if (dump_enabled_p ())
6953 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6954 "can't use a fully-masked loop because no"
6955 " conditional operation is available.\n");
6956 LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
6957 }
6958 else if (reduc_index == -1)
6959 {
6960 if (dump_enabled_p ())
6961 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6962 "can't use a fully-masked loop for chained"
6963 " reductions.\n");
6964 LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
6965 }
6966 else
6967 vect_record_loop_mask (loop_vinfo, masks, ncopies * vec_num,
6968 vectype_in);
6969 }
6970 if (dump_enabled_p ()
6971 && reduction_type == FOLD_LEFT_REDUCTION)
6972 dump_printf_loc (MSG_NOTE, vect_location,
6973 "using an in-order (fold-left) reduction.\n");
6974 STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type;
6975 return true;
6976 }
6977
6978 /* Transform. */
6979
6980 if (dump_enabled_p ())
6981 dump_printf_loc (MSG_NOTE, vect_location, "transform reduction.\n");
6982
6983 /* FORNOW: Multiple types are not supported for condition. */
6984 if (code == COND_EXPR)
6985 gcc_assert (ncopies == 1);
6986
6987 bool masked_loop_p = LOOP_VINFO_FULLY_MASKED_P (loop_vinfo);
6988
6989 if (reduction_type == FOLD_LEFT_REDUCTION)
6990 return vectorize_fold_left_reduction
6991 (stmt_info, gsi, vec_stmt, slp_node, reduc_def_phi, code,
6992 reduc_fn, ops, vectype_in, reduc_index, masks);
6993
6994 if (reduction_type == EXTRACT_LAST_REDUCTION)
6995 {
6996 gcc_assert (!slp_node);
6997 return vectorizable_condition (stmt_info, gsi, vec_stmt,
6998 NULL, reduc_index, NULL, NULL);
6999 }
7000
7001 /* Create the destination vector */
7002 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
7003
7004 prev_stmt_info = NULL;
7005 prev_phi_info = NULL;
7006 if (!slp_node)
7007 {
7008 vec_oprnds0.create (1);
7009 vec_oprnds1.create (1);
7010 if (op_type == ternary_op)
7011 vec_oprnds2.create (1);
7012 }
7013
7014 phis.create (vec_num);
7015 vect_defs.create (vec_num);
7016 if (!slp_node)
7017 vect_defs.quick_push (NULL_TREE);
7018
7019 if (slp_node)
7020 phis.splice (SLP_TREE_VEC_STMTS (slp_node_instance->reduc_phis));
7021 else
7022 phis.quick_push (STMT_VINFO_VEC_STMT (reduc_def_info));
7023
7024 for (j = 0; j < ncopies; j++)
7025 {
7026 if (code == COND_EXPR)
7027 {
7028 gcc_assert (!slp_node);
7029 vectorizable_condition (stmt_info, gsi, vec_stmt,
7030 PHI_RESULT (phis[0]->stmt),
7031 reduc_index, NULL, NULL);
7032 /* Multiple types are not supported for condition. */
7033 break;
7034 }
7035
7036 /* Handle uses. */
7037 if (j == 0)
7038 {
7039 if (slp_node)
7040 {
7041 /* Get vec defs for all the operands except the reduction index,
7042 ensuring the ordering of the ops in the vector is kept. */
7043 auto_vec<tree, 3> slp_ops;
7044 auto_vec<vec<tree>, 3> vec_defs;
7045
7046 slp_ops.quick_push (ops[0]);
7047 slp_ops.quick_push (ops[1]);
7048 if (op_type == ternary_op)
7049 slp_ops.quick_push (ops[2]);
7050
7051 vect_get_slp_defs (slp_ops, slp_node, &vec_defs);
7052
7053 vec_oprnds0.safe_splice (vec_defs[0]);
7054 vec_defs[0].release ();
7055 vec_oprnds1.safe_splice (vec_defs[1]);
7056 vec_defs[1].release ();
7057 if (op_type == ternary_op)
7058 {
7059 vec_oprnds2.safe_splice (vec_defs[2]);
7060 vec_defs[2].release ();
7061 }
7062 }
7063 else
7064 {
7065 vec_oprnds0.quick_push
7066 (vect_get_vec_def_for_operand (ops[0], stmt_info));
7067 vec_oprnds1.quick_push
7068 (vect_get_vec_def_for_operand (ops[1], stmt_info));
7069 if (op_type == ternary_op)
7070 vec_oprnds2.quick_push
7071 (vect_get_vec_def_for_operand (ops[2], stmt_info));
7072 }
7073 }
7074 else
7075 {
7076 if (!slp_node)
7077 {
7078 gcc_assert (reduc_index != -1 || ! single_defuse_cycle);
7079
7080 if (single_defuse_cycle && reduc_index == 0)
7081 vec_oprnds0[0] = gimple_get_lhs (new_stmt_info->stmt);
7082 else
7083 vec_oprnds0[0]
7084 = vect_get_vec_def_for_stmt_copy (loop_vinfo,
7085 vec_oprnds0[0]);
7086 if (single_defuse_cycle && reduc_index == 1)
7087 vec_oprnds1[0] = gimple_get_lhs (new_stmt_info->stmt);
7088 else
7089 vec_oprnds1[0]
7090 = vect_get_vec_def_for_stmt_copy (loop_vinfo,
7091 vec_oprnds1[0]);
7092 if (op_type == ternary_op)
7093 {
7094 if (single_defuse_cycle && reduc_index == 2)
7095 vec_oprnds2[0] = gimple_get_lhs (new_stmt_info->stmt);
7096 else
7097 vec_oprnds2[0]
7098 = vect_get_vec_def_for_stmt_copy (loop_vinfo,
7099 vec_oprnds2[0]);
7100 }
7101 }
7102 }
7103
7104 FOR_EACH_VEC_ELT (vec_oprnds0, i, def0)
7105 {
7106 tree vop[3] = { def0, vec_oprnds1[i], NULL_TREE };
7107 if (masked_loop_p)
7108 {
7109 /* Make sure that the reduction accumulator is vop[0]. */
7110 if (reduc_index == 1)
7111 {
7112 gcc_assert (commutative_tree_code (code));
7113 std::swap (vop[0], vop[1]);
7114 }
7115 tree mask = vect_get_loop_mask (gsi, masks, vec_num * ncopies,
7116 vectype_in, i * ncopies + j);
7117 gcall *call = gimple_build_call_internal (cond_fn, 4, mask,
7118 vop[0], vop[1],
7119 vop[0]);
7120 new_temp = make_ssa_name (vec_dest, call);
7121 gimple_call_set_lhs (call, new_temp);
7122 gimple_call_set_nothrow (call, true);
7123 new_stmt_info
7124 = vect_finish_stmt_generation (stmt_info, call, gsi);
7125 }
7126 else
7127 {
7128 if (op_type == ternary_op)
7129 vop[2] = vec_oprnds2[i];
7130
7131 gassign *new_stmt = gimple_build_assign (vec_dest, code,
7132 vop[0], vop[1], vop[2]);
7133 new_temp = make_ssa_name (vec_dest, new_stmt);
7134 gimple_assign_set_lhs (new_stmt, new_temp);
7135 new_stmt_info
7136 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
7137 }
7138
7139 if (slp_node)
7140 {
7141 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
7142 vect_defs.quick_push (new_temp);
7143 }
7144 else
7145 vect_defs[0] = new_temp;
7146 }
7147
7148 if (slp_node)
7149 continue;
7150
7151 if (j == 0)
7152 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
7153 else
7154 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
7155
7156 prev_stmt_info = new_stmt_info;
7157 }
7158
7159 /* Finalize the reduction-phi (set its arguments) and create the
7160 epilog reduction code. */
7161 if ((!single_defuse_cycle || code == COND_EXPR) && !slp_node)
7162 vect_defs[0] = gimple_get_lhs ((*vec_stmt)->stmt);
7163
7164 vect_create_epilog_for_reduction (vect_defs, stmt_info, reduc_def_phi,
7165 epilog_copies, reduc_fn, phis,
7166 double_reduc, slp_node, slp_node_instance,
7167 cond_reduc_val, cond_reduc_op_code,
7168 neutral_op);
7169
7170 return true;
7171 }
7172
7173 /* Function vect_min_worthwhile_factor.
7174
7175 For a loop where we could vectorize the operation indicated by CODE,
7176 return the minimum vectorization factor that makes it worthwhile
7177 to use generic vectors. */
7178 static unsigned int
7179 vect_min_worthwhile_factor (enum tree_code code)
7180 {
7181 switch (code)
7182 {
7183 case PLUS_EXPR:
7184 case MINUS_EXPR:
7185 case NEGATE_EXPR:
7186 return 4;
7187
7188 case BIT_AND_EXPR:
7189 case BIT_IOR_EXPR:
7190 case BIT_XOR_EXPR:
7191 case BIT_NOT_EXPR:
7192 return 2;
7193
7194 default:
7195 return INT_MAX;
7196 }
7197 }
7198
7199 /* Return true if VINFO indicates we are doing loop vectorization and if
7200 it is worth decomposing CODE operations into scalar operations for
7201 that loop's vectorization factor. */
7202
7203 bool
7204 vect_worthwhile_without_simd_p (vec_info *vinfo, tree_code code)
7205 {
7206 loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
7207 unsigned HOST_WIDE_INT value;
7208 return (loop_vinfo
7209 && LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant (&value)
7210 && value >= vect_min_worthwhile_factor (code));
7211 }
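/* A worked example (illustrative): with a vectorization factor of 2, a
   PLUS_EXPR operation is not considered worthwhile without SIMD support
   (its minimum factor above is 4), whereas a BIT_AND_EXPR operation is,
   since its minimum factor is 2.  */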
7212
7213 /* Function vectorizable_induction
7214
7215 Check if STMT_INFO performs an induction computation that can be vectorized.
7216 If VEC_STMT is also passed, vectorize the induction PHI: create a vectorized
7217 phi to replace it, put it in VEC_STMT, and add it to the same basic block.
7218 Return true if STMT_INFO is vectorizable in this way. */
7219
7220 bool
7221 vectorizable_induction (stmt_vec_info stmt_info,
7222 gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
7223 stmt_vec_info *vec_stmt, slp_tree slp_node,
7224 stmt_vector_for_cost *cost_vec)
7225 {
7226 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
7227 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
7228 unsigned ncopies;
7229 bool nested_in_vect_loop = false;
7230 struct loop *iv_loop;
7231 tree vec_def;
7232 edge pe = loop_preheader_edge (loop);
7233 basic_block new_bb;
7234 tree new_vec, vec_init, vec_step, t;
7235 tree new_name;
7236 gimple *new_stmt;
7237 gphi *induction_phi;
7238 tree induc_def, vec_dest;
7239 tree init_expr, step_expr;
7240 poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
7241 unsigned i;
7242 tree expr;
7243 gimple_seq stmts;
7244 imm_use_iterator imm_iter;
7245 use_operand_p use_p;
7246 gimple *exit_phi;
7247 edge latch_e;
7248 tree loop_arg;
7249 gimple_stmt_iterator si;
7250
7251 gphi *phi = dyn_cast <gphi *> (stmt_info->stmt);
7252 if (!phi)
7253 return false;
7254
7255 if (!STMT_VINFO_RELEVANT_P (stmt_info))
7256 return false;
7257
7258 /* Make sure it was recognized as induction computation. */
7259 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_induction_def)
7260 return false;
7261
7262 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
7263 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
7264
7265 if (slp_node)
7266 ncopies = 1;
7267 else
7268 ncopies = vect_get_num_copies (loop_vinfo, vectype);
7269 gcc_assert (ncopies >= 1);
7270
7271 /* FORNOW. These restrictions should be relaxed. */
7272 if (nested_in_vect_loop_p (loop, stmt_info))
7273 {
7274 imm_use_iterator imm_iter;
7275 use_operand_p use_p;
7276 gimple *exit_phi;
7277 edge latch_e;
7278 tree loop_arg;
7279
7280 if (ncopies > 1)
7281 {
7282 if (dump_enabled_p ())
7283 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7284 "multiple types in nested loop.\n");
7285 return false;
7286 }
7287
7288 /* FORNOW: outer loop induction with SLP not supported. */
7289 if (STMT_SLP_TYPE (stmt_info))
7290 return false;
7291
7292 exit_phi = NULL;
7293 latch_e = loop_latch_edge (loop->inner);
7294 loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);
7295 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, loop_arg)
7296 {
7297 gimple *use_stmt = USE_STMT (use_p);
7298 if (is_gimple_debug (use_stmt))
7299 continue;
7300
7301 if (!flow_bb_inside_loop_p (loop->inner, gimple_bb (use_stmt)))
7302 {
7303 exit_phi = use_stmt;
7304 break;
7305 }
7306 }
7307 if (exit_phi)
7308 {
7309 stmt_vec_info exit_phi_vinfo = loop_vinfo->lookup_stmt (exit_phi);
7310 if (!(STMT_VINFO_RELEVANT_P (exit_phi_vinfo)
7311 && !STMT_VINFO_LIVE_P (exit_phi_vinfo)))
7312 {
7313 if (dump_enabled_p ())
7314 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7315 "inner-loop induction only used outside "
7316 "of the outer vectorized loop.\n");
7317 return false;
7318 }
7319 }
7320
7321 nested_in_vect_loop = true;
7322 iv_loop = loop->inner;
7323 }
7324 else
7325 iv_loop = loop;
7326 gcc_assert (iv_loop == (gimple_bb (phi))->loop_father);
7327
7328 if (slp_node && !nunits.is_constant ())
7329 {
7330 /* The current SLP code creates the initial value element-by-element. */
7331 if (dump_enabled_p ())
7332 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7333 "SLP induction not supported for variable-length"
7334 " vectors.\n");
7335 return false;
7336 }
7337
7338 if (!vec_stmt) /* transformation not required. */
7339 {
7340 STMT_VINFO_TYPE (stmt_info) = induc_vec_info_type;
7341 DUMP_VECT_SCOPE ("vectorizable_induction");
7342 vect_model_induction_cost (stmt_info, ncopies, cost_vec);
7343 return true;
7344 }
7345
7346 /* Transform. */
7347
7348 /* Compute a vector variable, initialized with the first VF values of
7349 the induction variable. E.g., for an iv with IV_PHI='X' and
7350 evolution S, for a vector of 4 units, we want to compute:
7351 [X, X + S, X + 2*S, X + 3*S]. */
7352
7353 if (dump_enabled_p ())
7354 dump_printf_loc (MSG_NOTE, vect_location, "transform induction phi.\n");
7355
7356 latch_e = loop_latch_edge (iv_loop);
7357 loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);
7358
7359 step_expr = STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_info);
7360 gcc_assert (step_expr != NULL_TREE);
7361
7362 pe = loop_preheader_edge (iv_loop);
7363 init_expr = PHI_ARG_DEF_FROM_EDGE (phi,
7364 loop_preheader_edge (iv_loop));
7365
7366 stmts = NULL;
7367 if (!nested_in_vect_loop)
7368 {
7369 /* Convert the initial value to the desired type. */
7370 tree new_type = TREE_TYPE (vectype);
7371 init_expr = gimple_convert (&stmts, new_type, init_expr);
7372
7373 /* If we are using the loop mask to "peel" for alignment then we need
7374 to adjust the start value here. */
7375 tree skip_niters = LOOP_VINFO_MASK_SKIP_NITERS (loop_vinfo);
7376 if (skip_niters != NULL_TREE)
7377 {
7378 if (FLOAT_TYPE_P (vectype))
7379 skip_niters = gimple_build (&stmts, FLOAT_EXPR, new_type,
7380 skip_niters);
7381 else
7382 skip_niters = gimple_convert (&stmts, new_type, skip_niters);
7383 tree skip_step = gimple_build (&stmts, MULT_EXPR, new_type,
7384 skip_niters, step_expr);
7385 init_expr = gimple_build (&stmts, MINUS_EXPR, new_type,
7386 init_expr, skip_step);
7387 }
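	  /* An illustrative sketch (hypothetical numbers): if masking skips
	     the first 3 iterations for alignment, the start value becomes
	     X - 3*S, so that in the vector IV [X-3*S, X-2*S, X-S, X, ...]
	     built below the first unmasked lane sees the original start
	     value X.  */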
7388 }
7389
7390 /* Convert the step to the desired type. */
7391 step_expr = gimple_convert (&stmts, TREE_TYPE (vectype), step_expr);
7392
7393 if (stmts)
7394 {
7395 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
7396 gcc_assert (!new_bb);
7397 }
7398
7399 /* Find the first insertion point in the BB. */
7400 basic_block bb = gimple_bb (phi);
7401 si = gsi_after_labels (bb);
7402
7403 /* For SLP induction we have to generate several IVs. For example,
7404 with group size 3 we need [i, i, i, i + S], [i + S, i + S, i + 2*S, i + 2*S],
7405 [i + 2*S, i + 3*S, i + 3*S, i + 3*S]. The step is the same uniform
7406 [VF*S, VF*S, VF*S, VF*S] for all of them. */
7407 if (slp_node)
7408 {
7409 /* Enforced above. */
7410 unsigned int const_nunits = nunits.to_constant ();
7411
7412 /* Generate [VF*S, VF*S, ... ]. */
7413 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)))
7414 {
7415 expr = build_int_cst (integer_type_node, vf);
7416 expr = fold_convert (TREE_TYPE (step_expr), expr);
7417 }
7418 else
7419 expr = build_int_cst (TREE_TYPE (step_expr), vf);
7420 new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
7421 expr, step_expr);
7422 if (! CONSTANT_CLASS_P (new_name))
7423 new_name = vect_init_vector (stmt_info, new_name,
7424 TREE_TYPE (step_expr), NULL);
7425 new_vec = build_vector_from_val (vectype, new_name);
7426 vec_step = vect_init_vector (stmt_info, new_vec, vectype, NULL);
7427
7428 /* Now generate the IVs. */
7429 unsigned group_size = SLP_TREE_SCALAR_STMTS (slp_node).length ();
7430 unsigned nvects = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
7431 unsigned elts = const_nunits * nvects;
7432 unsigned nivs = least_common_multiple (group_size,
7433 const_nunits) / const_nunits;
7434 gcc_assert (elts % group_size == 0);
7435 tree elt = init_expr;
7436 unsigned ivn;
7437 for (ivn = 0; ivn < nivs; ++ivn)
7438 {
7439 tree_vector_builder elts (vectype, const_nunits, 1);
7440 stmts = NULL;
7441 for (unsigned eltn = 0; eltn < const_nunits; ++eltn)
7442 {
7443 if (ivn*const_nunits + eltn >= group_size
7444 && (ivn * const_nunits + eltn) % group_size == 0)
7445 elt = gimple_build (&stmts, PLUS_EXPR, TREE_TYPE (elt),
7446 elt, step_expr);
7447 elts.quick_push (elt);
7448 }
7449 vec_init = gimple_build_vector (&stmts, &elts);
7450 if (stmts)
7451 {
7452 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
7453 gcc_assert (!new_bb);
7454 }
7455
7456 /* Create the induction-phi that defines the induction-operand. */
7457 vec_dest = vect_get_new_vect_var (vectype, vect_simple_var, "vec_iv_");
7458 induction_phi = create_phi_node (vec_dest, iv_loop->header);
7459 stmt_vec_info induction_phi_info
7460 = loop_vinfo->add_stmt (induction_phi);
7461 induc_def = PHI_RESULT (induction_phi);
7462
7463 /* Create the iv update inside the loop */
7464 vec_def = make_ssa_name (vec_dest);
7465 new_stmt = gimple_build_assign (vec_def, PLUS_EXPR, induc_def, vec_step);
7466 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
7467 loop_vinfo->add_stmt (new_stmt);
7468
7469 /* Set the arguments of the phi node: */
7470 add_phi_arg (induction_phi, vec_init, pe, UNKNOWN_LOCATION);
7471 add_phi_arg (induction_phi, vec_def, loop_latch_edge (iv_loop),
7472 UNKNOWN_LOCATION);
7473
7474 SLP_TREE_VEC_STMTS (slp_node).quick_push (induction_phi_info);
7475 }
7476
7477 /* Re-use IVs when we can. */
7478 if (ivn < nvects)
7479 {
7480 unsigned vfp
7481 = least_common_multiple (group_size, const_nunits) / group_size;
7482 /* Generate [VF'*S, VF'*S, ... ]. */
7483 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)))
7484 {
7485 expr = build_int_cst (integer_type_node, vfp);
7486 expr = fold_convert (TREE_TYPE (step_expr), expr);
7487 }
7488 else
7489 expr = build_int_cst (TREE_TYPE (step_expr), vfp);
7490 new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
7491 expr, step_expr);
7492 if (! CONSTANT_CLASS_P (new_name))
7493 new_name = vect_init_vector (stmt_info, new_name,
7494 TREE_TYPE (step_expr), NULL);
7495 new_vec = build_vector_from_val (vectype, new_name);
7496 vec_step = vect_init_vector (stmt_info, new_vec, vectype, NULL);
7497 for (; ivn < nvects; ++ivn)
7498 {
7499 gimple *iv = SLP_TREE_VEC_STMTS (slp_node)[ivn - nivs]->stmt;
7500 tree def;
7501 if (gimple_code (iv) == GIMPLE_PHI)
7502 def = gimple_phi_result (iv);
7503 else
7504 def = gimple_assign_lhs (iv);
7505 new_stmt = gimple_build_assign (make_ssa_name (vectype),
7506 PLUS_EXPR,
7507 def, vec_step);
7508 if (gimple_code (iv) == GIMPLE_PHI)
7509 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
7510 else
7511 {
7512 gimple_stmt_iterator tgsi = gsi_for_stmt (iv);
7513 gsi_insert_after (&tgsi, new_stmt, GSI_CONTINUE_LINKING);
7514 }
7515 SLP_TREE_VEC_STMTS (slp_node).quick_push
7516 (loop_vinfo->add_stmt (new_stmt));
7517 }
7518 }
7519
7520 return true;
7521 }
7522
7523 /* Create the vector that holds the initial_value of the induction. */
7524 if (nested_in_vect_loop)
7525 {
7526 /* iv_loop is nested in the loop to be vectorized. init_expr has already
7527 been created during vectorization of previous stmts. We obtain it
7528 from the STMT_VINFO_VEC_STMT of the defining stmt. */
7529 vec_init = vect_get_vec_def_for_operand (init_expr, stmt_info);
7530 /* If the initial value is not of proper type, convert it. */
7531 if (!useless_type_conversion_p (vectype, TREE_TYPE (vec_init)))
7532 {
7533 new_stmt
7534 = gimple_build_assign (vect_get_new_ssa_name (vectype,
7535 vect_simple_var,
7536 "vec_iv_"),
7537 VIEW_CONVERT_EXPR,
7538 build1 (VIEW_CONVERT_EXPR, vectype,
7539 vec_init));
7540 vec_init = gimple_assign_lhs (new_stmt);
7541 new_bb = gsi_insert_on_edge_immediate (loop_preheader_edge (iv_loop),
7542 new_stmt);
7543 gcc_assert (!new_bb);
7544 loop_vinfo->add_stmt (new_stmt);
7545 }
7546 }
7547 else
7548 {
7549 /* iv_loop is the loop to be vectorized. Create:
7550 vec_init = [X, X+S, X+2*S, X+3*S] (S = step_expr, X = init_expr) */
7551 stmts = NULL;
7552 new_name = gimple_convert (&stmts, TREE_TYPE (vectype), init_expr);
7553
7554 unsigned HOST_WIDE_INT const_nunits;
7555 if (nunits.is_constant (&const_nunits))
7556 {
7557 tree_vector_builder elts (vectype, const_nunits, 1);
7558 elts.quick_push (new_name);
7559 for (i = 1; i < const_nunits; i++)
7560 {
7561 /* Create: new_name_i = new_name + step_expr */
7562 new_name = gimple_build (&stmts, PLUS_EXPR, TREE_TYPE (new_name),
7563 new_name, step_expr);
7564 elts.quick_push (new_name);
7565 }
7566 /* Create a vector from [new_name_0, new_name_1, ...,
7567 new_name_nunits-1] */
7568 vec_init = gimple_build_vector (&stmts, &elts);
7569 }
7570 else if (INTEGRAL_TYPE_P (TREE_TYPE (step_expr)))
7571 /* Build the initial value directly from a VEC_SERIES_EXPR. */
7572 vec_init = gimple_build (&stmts, VEC_SERIES_EXPR, vectype,
7573 new_name, step_expr);
7574 else
7575 {
7576 /* Build:
7577 [base, base, base, ...]
7578 + (vectype) [0, 1, 2, ...] * [step, step, step, ...]. */
7579 gcc_assert (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)));
7580 gcc_assert (flag_associative_math);
7581 tree index = build_index_vector (vectype, 0, 1);
7582 tree base_vec = gimple_build_vector_from_val (&stmts, vectype,
7583 new_name);
7584 tree step_vec = gimple_build_vector_from_val (&stmts, vectype,
7585 step_expr);
7586 vec_init = gimple_build (&stmts, FLOAT_EXPR, vectype, index);
7587 vec_init = gimple_build (&stmts, MULT_EXPR, vectype,
7588 vec_init, step_vec);
7589 vec_init = gimple_build (&stmts, PLUS_EXPR, vectype,
7590 vec_init, base_vec);
7591 }
7592
7593 if (stmts)
7594 {
7595 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
7596 gcc_assert (!new_bb);
7597 }
7598 }
7599
7600
7601 /* Create the vector that holds the step of the induction. */
7602 if (nested_in_vect_loop)
7603 /* iv_loop is nested in the loop to be vectorized. Generate:
7604 vec_step = [S, S, S, S] */
7605 new_name = step_expr;
7606 else
7607 {
7608 /* iv_loop is the loop to be vectorized. Generate:
7609 vec_step = [VF*S, VF*S, VF*S, VF*S] */
7610 gimple_seq seq = NULL;
7611 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)))
7612 {
7613 expr = build_int_cst (integer_type_node, vf);
7614 expr = gimple_build (&seq, FLOAT_EXPR, TREE_TYPE (step_expr), expr);
7615 }
7616 else
7617 expr = build_int_cst (TREE_TYPE (step_expr), vf);
7618 new_name = gimple_build (&seq, MULT_EXPR, TREE_TYPE (step_expr),
7619 expr, step_expr);
7620 if (seq)
7621 {
7622 new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
7623 gcc_assert (!new_bb);
7624 }
7625 }
7626
7627 t = unshare_expr (new_name);
7628 gcc_assert (CONSTANT_CLASS_P (new_name)
7629 || TREE_CODE (new_name) == SSA_NAME);
7630 new_vec = build_vector_from_val (vectype, t);
7631 vec_step = vect_init_vector (stmt_info, new_vec, vectype, NULL);
7632
7633
7634 /* Create the following def-use cycle:
7635 loop prolog:
7636 vec_init = ...
7637 vec_step = ...
7638 loop:
7639 vec_iv = PHI <vec_init, vec_loop>
7640 ...
7641 STMT
7642 ...
7643 vec_loop = vec_iv + vec_step; */
7644
7645 /* Create the induction-phi that defines the induction-operand. */
7646 vec_dest = vect_get_new_vect_var (vectype, vect_simple_var, "vec_iv_");
7647 induction_phi = create_phi_node (vec_dest, iv_loop->header);
7648 stmt_vec_info induction_phi_info = loop_vinfo->add_stmt (induction_phi);
7649 induc_def = PHI_RESULT (induction_phi);
7650
7651 /* Create the iv update inside the loop */
7652 vec_def = make_ssa_name (vec_dest);
7653 new_stmt = gimple_build_assign (vec_def, PLUS_EXPR, induc_def, vec_step);
7654 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
7655 stmt_vec_info new_stmt_info = loop_vinfo->add_stmt (new_stmt);
7656
7657 /* Set the arguments of the phi node: */
7658 add_phi_arg (induction_phi, vec_init, pe, UNKNOWN_LOCATION);
7659 add_phi_arg (induction_phi, vec_def, loop_latch_edge (iv_loop),
7660 UNKNOWN_LOCATION);
7661
7662 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = induction_phi_info;
7663
7664 /* In case the vectorization factor (VF) is bigger than the number
7665 of elements that we can fit in a vectype (nunits), we have to generate
7666 more than one vector stmt - i.e. we need to "unroll" the
7667 vector stmt by a factor VF/nunits. For more details see documentation
7668 in vectorizable_operation. */
7669
7670 if (ncopies > 1)
7671 {
7672 gimple_seq seq = NULL;
7673 stmt_vec_info prev_stmt_vinfo;
7674 /* FORNOW. This restriction should be relaxed. */
7675 gcc_assert (!nested_in_vect_loop);
7676
7677 /* Create the vector that holds the step of the induction. */
7678 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)))
7679 {
7680 expr = build_int_cst (integer_type_node, nunits);
7681 expr = gimple_build (&seq, FLOAT_EXPR, TREE_TYPE (step_expr), expr);
7682 }
7683 else
7684 expr = build_int_cst (TREE_TYPE (step_expr), nunits);
7685 new_name = gimple_build (&seq, MULT_EXPR, TREE_TYPE (step_expr),
7686 expr, step_expr);
7687 if (seq)
7688 {
7689 new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
7690 gcc_assert (!new_bb);
7691 }
7692
7693 t = unshare_expr (new_name);
7694 gcc_assert (CONSTANT_CLASS_P (new_name)
7695 || TREE_CODE (new_name) == SSA_NAME);
7696 new_vec = build_vector_from_val (vectype, t);
7697 vec_step = vect_init_vector (stmt_info, new_vec, vectype, NULL);
7698
7699 vec_def = induc_def;
7700 prev_stmt_vinfo = induction_phi_info;
7701 for (i = 1; i < ncopies; i++)
7702 {
7703 /* vec_i = vec_prev + vec_step */
7704 new_stmt = gimple_build_assign (vec_dest, PLUS_EXPR,
7705 vec_def, vec_step);
7706 vec_def = make_ssa_name (vec_dest, new_stmt);
7707 gimple_assign_set_lhs (new_stmt, vec_def);
7708
7709 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
7710 new_stmt_info = loop_vinfo->add_stmt (new_stmt);
7711 STMT_VINFO_RELATED_STMT (prev_stmt_vinfo) = new_stmt_info;
7712 prev_stmt_vinfo = new_stmt_info;
7713 }
7714 }
7715
7716 if (nested_in_vect_loop)
7717 {
7718 /* Find the loop-closed exit-phi of the induction, and record
7719 the final vector of induction results: */
7720 exit_phi = NULL;
7721 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, loop_arg)
7722 {
7723 gimple *use_stmt = USE_STMT (use_p);
7724 if (is_gimple_debug (use_stmt))
7725 continue;
7726
7727 if (!flow_bb_inside_loop_p (iv_loop, gimple_bb (use_stmt)))
7728 {
7729 exit_phi = use_stmt;
7730 break;
7731 }
7732 }
7733 if (exit_phi)
7734 {
7735 stmt_vec_info stmt_vinfo = loop_vinfo->lookup_stmt (exit_phi);
7736 /* FORNOW. We do not currently support the case in which an inner-loop
7737 induction is not used in the outer-loop (i.e. is only used outside it). */
7738 gcc_assert (STMT_VINFO_RELEVANT_P (stmt_vinfo)
7739 && !STMT_VINFO_LIVE_P (stmt_vinfo));
7740
7741 STMT_VINFO_VEC_STMT (stmt_vinfo) = new_stmt_info;
7742 if (dump_enabled_p ())
7743 {
7744 dump_printf_loc (MSG_NOTE, vect_location,
7745 "vector of inductions after inner-loop:");
7746 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, new_stmt, 0);
7747 }
7748 }
7749 }
7750
7751
7752 if (dump_enabled_p ())
7753 {
7754 dump_printf_loc (MSG_NOTE, vect_location,
7755 "transform induction: created def-use cycle: ");
7756 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, induction_phi, 0);
7757 dump_gimple_stmt (MSG_NOTE, TDF_SLIM,
7758 SSA_NAME_DEF_STMT (vec_def), 0);
7759 }
7760
7761 return true;
7762 }
7763
7764 /* Function vectorizable_live_operation.
7765
7766 STMT_INFO computes a value that is used outside the loop. Check if
7767 it can be supported. */
7768
7769 bool
7770 vectorizable_live_operation (stmt_vec_info stmt_info,
7771 gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
7772 slp_tree slp_node, int slp_index,
7773 stmt_vec_info *vec_stmt,
7774 stmt_vector_for_cost *)
7775 {
7776 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
7777 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
7778 imm_use_iterator imm_iter;
7779 tree lhs, lhs_type, bitsize, vec_bitsize;
7780 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
7781 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
7782 int ncopies;
7783 gimple *use_stmt;
7784 auto_vec<tree> vec_oprnds;
7785 int vec_entry = 0;
7786 poly_uint64 vec_index = 0;
7787
7788 gcc_assert (STMT_VINFO_LIVE_P (stmt_info));
7789
7790 if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def)
7791 return false;
7792
7793 /* FORNOW. CHECKME. */
7794 if (nested_in_vect_loop_p (loop, stmt_info))
7795 return false;
7796
7797 /* If STMT is not relevant and it is a simple assignment and its inputs are
7798 invariant then it can remain in place, unvectorized. The original last
7799 scalar value that it computes will be used. */
7800 if (!STMT_VINFO_RELEVANT_P (stmt_info))
7801 {
7802 gcc_assert (is_simple_and_all_uses_invariant (stmt_info, loop_vinfo));
7803 if (dump_enabled_p ())
7804 dump_printf_loc (MSG_NOTE, vect_location,
7805 "statement is simple and uses invariant. Leaving in "
7806 "place.\n");
7807 return true;
7808 }
7809
7810 if (slp_node)
7811 ncopies = 1;
7812 else
7813 ncopies = vect_get_num_copies (loop_vinfo, vectype);
7814
7815 if (slp_node)
7816 {
7817 gcc_assert (slp_index >= 0);
7818
7819 int num_scalar = SLP_TREE_SCALAR_STMTS (slp_node).length ();
7820 int num_vec = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
7821
7822 /* Get the last occurrence of the scalar index from the concatenation of
7823 all the slp vectors. Calculate which slp vector it is and the index
7824 within. */
7825 poly_uint64 pos = (num_vec * nunits) - num_scalar + slp_index;
7826
7827 /* Calculate which vector contains the result, and which lane of
7828 that vector we need. */
7829 if (!can_div_trunc_p (pos, nunits, &vec_entry, &vec_index))
7830 {
7831 if (dump_enabled_p ())
7832 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7833 "Cannot determine which vector holds the"
7834 " final result.\n");
7835 return false;
7836 }
7837 }
7838
7839 if (!vec_stmt)
7840 {
7841 /* No transformation required. */
7842 if (LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo))
7843 {
7844 if (!direct_internal_fn_supported_p (IFN_EXTRACT_LAST, vectype,
7845 OPTIMIZE_FOR_SPEED))
7846 {
7847 if (dump_enabled_p ())
7848 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7849 "can't use a fully-masked loop because "
7850 "the target doesn't support extract last "
7851 "reduction.\n");
7852 LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
7853 }
7854 else if (slp_node)
7855 {
7856 if (dump_enabled_p ())
7857 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7858 "can't use a fully-masked loop because an "
7859 "SLP statement is live after the loop.\n");
7860 LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
7861 }
7862 else if (ncopies > 1)
7863 {
7864 if (dump_enabled_p ())
7865 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7866 "can't use a fully-masked loop because"
7867 " ncopies is greater than 1.\n");
7868 LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
7869 }
7870 else
7871 {
7872 gcc_assert (ncopies == 1 && !slp_node);
7873 vect_record_loop_mask (loop_vinfo,
7874 &LOOP_VINFO_MASKS (loop_vinfo),
7875 1, vectype);
7876 }
7877 }
7878 return true;
7879 }
7880
7881 /* Use the lhs of the original scalar statement. */
7882 gimple *stmt = vect_orig_stmt (stmt_info)->stmt;
7883
7884 lhs = (is_a <gphi *> (stmt)) ? gimple_phi_result (stmt)
7885 : gimple_get_lhs (stmt);
7886 lhs_type = TREE_TYPE (lhs);
7887
7888 bitsize = (VECTOR_BOOLEAN_TYPE_P (vectype)
7889 ? bitsize_int (TYPE_PRECISION (TREE_TYPE (vectype)))
7890 : TYPE_SIZE (TREE_TYPE (vectype)));
7891 vec_bitsize = TYPE_SIZE (vectype);
7892
7893 /* Get the vectorized lhs of STMT and the lane to use (counted in bits). */
7894 tree vec_lhs, bitstart;
7895 if (slp_node)
7896 {
7897 gcc_assert (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo));
7898
7899 /* Get the correct slp vectorized stmt. */
7900 gimple *vec_stmt = SLP_TREE_VEC_STMTS (slp_node)[vec_entry]->stmt;
7901 if (gphi *phi = dyn_cast <gphi *> (vec_stmt))
7902 vec_lhs = gimple_phi_result (phi);
7903 else
7904 vec_lhs = gimple_get_lhs (vec_stmt);
7905
7906 /* Get entry to use. */
7907 bitstart = bitsize_int (vec_index);
7908 bitstart = int_const_binop (MULT_EXPR, bitsize, bitstart);
7909 }
7910 else
7911 {
7912 enum vect_def_type dt = STMT_VINFO_DEF_TYPE (stmt_info);
7913 vec_lhs = vect_get_vec_def_for_operand_1 (stmt_info, dt);
7914 gcc_checking_assert (ncopies == 1
7915 || !LOOP_VINFO_FULLY_MASKED_P (loop_vinfo));
7916
7917 /* For multiple copies, get the last copy. */
7918 for (int i = 1; i < ncopies; ++i)
7919 vec_lhs = vect_get_vec_def_for_stmt_copy (loop_vinfo, vec_lhs);
7920
7921 /* Get the last lane in the vector. */
7922 bitstart = int_const_binop (MINUS_EXPR, vec_bitsize, bitsize);
7923 }
7924
7925 gimple_seq stmts = NULL;
7926 tree new_tree;
7927 if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
7928 {
7929 /* Emit:
7930
7931 SCALAR_RES = EXTRACT_LAST <VEC_LHS, MASK>
7932
7933 where VEC_LHS is the vectorized live-out result and MASK is
7934 the loop mask for the final iteration. */
7935 gcc_assert (ncopies == 1 && !slp_node);
7936 tree scalar_type = TREE_TYPE (STMT_VINFO_VECTYPE (stmt_info));
7937 tree mask = vect_get_loop_mask (gsi, &LOOP_VINFO_MASKS (loop_vinfo),
7938 1, vectype, 0);
7939 tree scalar_res = gimple_build (&stmts, CFN_EXTRACT_LAST,
7940 scalar_type, mask, vec_lhs);
7941
7942 /* Convert the extracted vector element to the required scalar type. */
7943 new_tree = gimple_convert (&stmts, lhs_type, scalar_res);
7944 }
7945 else
7946 {
7947 tree bftype = TREE_TYPE (vectype);
7948 if (VECTOR_BOOLEAN_TYPE_P (vectype))
7949 bftype = build_nonstandard_integer_type (tree_to_uhwi (bitsize), 1);
7950 new_tree = build3 (BIT_FIELD_REF, bftype, vec_lhs, bitsize, bitstart);
7951 new_tree = force_gimple_operand (fold_convert (lhs_type, new_tree),
7952 &stmts, true, NULL_TREE);
7953 }
7954
7955 if (stmts)
7956 gsi_insert_seq_on_edge_immediate (single_exit (loop), stmts);
7957
7958 /* Replace uses of lhs with the newly computed result. If the use stmt is
7959 a single-argument PHI, just replace all uses of the PHI result. This is
7960 necessary because the lcssa PHI defining lhs may come before the newly inserted stmt. */
7961 use_operand_p use_p;
7962 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, lhs)
7963 if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt))
7964 && !is_gimple_debug (use_stmt))
7965 {
7966 if (gimple_code (use_stmt) == GIMPLE_PHI
7967 && gimple_phi_num_args (use_stmt) == 1)
7968 {
7969 replace_uses_by (gimple_phi_result (use_stmt), new_tree);
7970 }
7971 else
7972 {
7973 FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
7974 SET_USE (use_p, new_tree);
7975 }
7976 update_stmt (use_stmt);
7977 }
7978
7979 return true;
7980 }
7981
7982 /* Kill any debug uses outside LOOP of SSA names defined in STMT_INFO. */
7983
7984 static void
7985 vect_loop_kill_debug_uses (struct loop *loop, stmt_vec_info stmt_info)
7986 {
7987 ssa_op_iter op_iter;
7988 imm_use_iterator imm_iter;
7989 def_operand_p def_p;
7990 gimple *ustmt;
7991
7992 FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt_info->stmt, op_iter, SSA_OP_DEF)
7993 {
7994 FOR_EACH_IMM_USE_STMT (ustmt, imm_iter, DEF_FROM_PTR (def_p))
7995 {
7996 basic_block bb;
7997
7998 if (!is_gimple_debug (ustmt))
7999 continue;
8000
8001 bb = gimple_bb (ustmt);
8002
8003 if (!flow_bb_inside_loop_p (loop, bb))
8004 {
8005 if (gimple_debug_bind_p (ustmt))
8006 {
8007 if (dump_enabled_p ())
8008 dump_printf_loc (MSG_NOTE, vect_location,
8009 "killing debug use\n");
8010
8011 gimple_debug_bind_reset_value (ustmt);
8012 update_stmt (ustmt);
8013 }
8014 else
8015 gcc_unreachable ();
8016 }
8017 }
8018 }
8019 }
8020
8021 /* Given loop represented by LOOP_VINFO, return true if computation of
8022 LOOP_VINFO_NITERS (= LOOP_VINFO_NITERSM1 + 1) doesn't overflow, false
8023 otherwise. */
8024
8025 static bool
8026 loop_niters_no_overflow (loop_vec_info loop_vinfo)
8027 {
8028 /* Constant case. */
8029 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
8030 {
8031 tree cst_niters = LOOP_VINFO_NITERS (loop_vinfo);
8032 tree cst_nitersm1 = LOOP_VINFO_NITERSM1 (loop_vinfo);
8033
8034 gcc_assert (TREE_CODE (cst_niters) == INTEGER_CST);
8035 gcc_assert (TREE_CODE (cst_nitersm1) == INTEGER_CST);
8036 if (wi::to_widest (cst_nitersm1) < wi::to_widest (cst_niters))
8037 return true;
8038 }
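  /* A worked example (illustrative): with a 32-bit unsigned niters type
     and a loop that executes exactly 2^32 times, NITERSM1 is 0xffffffff
     while NITERS (= NITERSM1 + 1) wraps around to 0; the comparison above
     then does not hold, so the constant case cannot prove the computation
     safe and we fall through to the range check below.  */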
8039
8040 widest_int max;
8041 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
8042 /* Check the upper bound of loop niters. */
8043 if (get_max_loop_iterations (loop, &max))
8044 {
8045 tree type = TREE_TYPE (LOOP_VINFO_NITERS (loop_vinfo));
8046 signop sgn = TYPE_SIGN (type);
8047 widest_int type_max = widest_int::from (wi::max_value (type), sgn);
8048 if (max < type_max)
8049 return true;
8050 }
8051 return false;
8052 }
8053
8054 /* Return a mask type with half the number of elements as TYPE. */
8055
8056 tree
8057 vect_halve_mask_nunits (tree type)
8058 {
8059 poly_uint64 nunits = exact_div (TYPE_VECTOR_SUBPARTS (type), 2);
8060 return build_truth_vector_type (nunits, current_vector_size);
8061 }
8062
8063 /* Return a mask type with twice as many elements as TYPE. */
8064
8065 tree
8066 vect_double_mask_nunits (tree type)
8067 {
8068 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (type) * 2;
8069 return build_truth_vector_type (nunits, current_vector_size);
8070 }
8071
8072 /* Record that a fully-masked version of LOOP_VINFO would need MASKS to
8073 contain a sequence of NVECTORS masks that each control a vector of type
8074 VECTYPE. */
8075
8076 void
8077 vect_record_loop_mask (loop_vec_info loop_vinfo, vec_loop_masks *masks,
8078 unsigned int nvectors, tree vectype)
8079 {
8080 gcc_assert (nvectors != 0);
8081 if (masks->length () < nvectors)
8082 masks->safe_grow_cleared (nvectors);
8083 rgroup_masks *rgm = &(*masks)[nvectors - 1];
8084 /* The number of scalars per iteration and the number of vectors are
8085 both compile-time constants. */
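  /* A worked example (illustrative numbers): with NVECTORS = 4, an
     8-element VECTYPE and a vectorization factor of 16, each scalar
     iteration contributes 4 * 8 / 16 = 2 scalars to this rgroup.  */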
8086 unsigned int nscalars_per_iter
8087 = exact_div (nvectors * TYPE_VECTOR_SUBPARTS (vectype),
8088 LOOP_VINFO_VECT_FACTOR (loop_vinfo)).to_constant ();
8089 if (rgm->max_nscalars_per_iter < nscalars_per_iter)
8090 {
8091 rgm->max_nscalars_per_iter = nscalars_per_iter;
8092 rgm->mask_type = build_same_sized_truth_vector_type (vectype);
8093 }
8094 }
8095
8096 /* Given a complete set of masks MASKS, extract mask number INDEX
8097 for an rgroup that operates on NVECTORS vectors of type VECTYPE,
8098 where 0 <= INDEX < NVECTORS. Insert any set-up statements before GSI.
8099
8100 See the comment above vec_loop_masks for more details about the mask
8101 arrangement. */
8102
8103 tree
8104 vect_get_loop_mask (gimple_stmt_iterator *gsi, vec_loop_masks *masks,
8105 unsigned int nvectors, tree vectype, unsigned int index)
8106 {
8107 rgroup_masks *rgm = &(*masks)[nvectors - 1];
8108 tree mask_type = rgm->mask_type;
8109
8110 /* Populate the rgroup's mask array, if this is the first time we've
8111 used it. */
8112 if (rgm->masks.is_empty ())
8113 {
8114 rgm->masks.safe_grow_cleared (nvectors);
8115 for (unsigned int i = 0; i < nvectors; ++i)
8116 {
8117 tree mask = make_temp_ssa_name (mask_type, NULL, "loop_mask");
8118 /* Provide a dummy definition until the real one is available. */
8119 SSA_NAME_DEF_STMT (mask) = gimple_build_nop ();
8120 rgm->masks[i] = mask;
8121 }
8122 }
8123
8124 tree mask = rgm->masks[index];
8125 if (maybe_ne (TYPE_VECTOR_SUBPARTS (mask_type),
8126 TYPE_VECTOR_SUBPARTS (vectype)))
8127 {
8128 /* A loop mask for data type X can be reused for data type Y
8129 if X has N times more elements than Y and if Y's elements
8130 are N times bigger than X's. In this case each sequence
8131 of N elements in the loop mask will be all-zero or all-one.
8132 We can then view-convert the mask so that each sequence of
8133 N elements is replaced by a single element. */
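      /* An illustrative sketch (assumed element counts): a mask computed
	 for 8 halfword elements can be reused for 4 word elements, because
	 each pair of halfword lanes covers exactly one word lane and is
	 therefore all-zero or all-one; the VIEW_CONVERT_EXPR below folds
	 every such pair into a single mask element.  */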
8134 gcc_assert (multiple_p (TYPE_VECTOR_SUBPARTS (mask_type),
8135 TYPE_VECTOR_SUBPARTS (vectype)));
8136 gimple_seq seq = NULL;
8137 mask_type = build_same_sized_truth_vector_type (vectype);
8138 mask = gimple_build (&seq, VIEW_CONVERT_EXPR, mask_type, mask);
8139 if (seq)
8140 gsi_insert_seq_before (gsi, seq, GSI_SAME_STMT);
8141 }
8142 return mask;
8143 }
8144
8145 /* Scale the profiling counters of LOOP, which is vectorized by factor VF,
8146 according to the estimated number of iterations. */
8147
8148 static void
8149 scale_profile_for_vect_loop (struct loop *loop, unsigned vf)
8150 {
8151 edge preheader = loop_preheader_edge (loop);
8152 /* Reduce loop iterations by the vectorization factor. */
8153 gcov_type new_est_niter = niter_for_unrolled_loop (loop, vf);
8154 profile_count freq_h = loop->header->count, freq_e = preheader->count ();
8155
8156 if (freq_h.nonzero_p ())
8157 {
8158 profile_probability p;
8159
8160 /* Avoid dropping loop body profile counter to 0 because of zero count
8161 in loop's preheader. */
8162 if (!(freq_e == profile_count::zero ()))
8163 freq_e = freq_e.force_nonzero ();
8164 p = freq_e.apply_scale (new_est_niter + 1, 1).probability_in (freq_h);
8165 scale_loop_frequencies (loop, p);
8166 }
8167
8168 edge exit_e = single_exit (loop);
8169 exit_e->probability = profile_probability::always ()
8170 .apply_scale (1, new_est_niter + 1);
8171
8172 edge exit_l = single_pred_edge (loop->latch);
8173 profile_probability prob = exit_l->probability;
8174 exit_l->probability = exit_e->probability.invert ();
8175 if (prob.initialized_p () && exit_l->probability.initialized_p ())
8176 scale_bbs_frequencies (&loop->latch, 1, exit_l->probability / prob);
8177 }
8178
8179 /* Vectorize STMT_INFO if relevant, inserting any new instructions before GSI.
8180 When vectorizing STMT_INFO as a store, set *SEEN_STORE to its
8181 stmt_vec_info. */
8182
8183 static void
8184 vect_transform_loop_stmt (loop_vec_info loop_vinfo, stmt_vec_info stmt_info,
8185 gimple_stmt_iterator *gsi, stmt_vec_info *seen_store)
8186 {
8187 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
8188 poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
8189
8190 if (dump_enabled_p ())
8191 {
8192 dump_printf_loc (MSG_NOTE, vect_location,
8193 "------>vectorizing statement: ");
8194 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_info->stmt, 0);
8195 }
8196
8197 if (MAY_HAVE_DEBUG_BIND_STMTS && !STMT_VINFO_LIVE_P (stmt_info))
8198 vect_loop_kill_debug_uses (loop, stmt_info);
8199
8200 if (!STMT_VINFO_RELEVANT_P (stmt_info)
8201 && !STMT_VINFO_LIVE_P (stmt_info))
8202 return;
8203
8204 if (STMT_VINFO_VECTYPE (stmt_info))
8205 {
8206 poly_uint64 nunits
8207 = TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
8208 if (!STMT_SLP_TYPE (stmt_info)
8209 && maybe_ne (nunits, vf)
8210 && dump_enabled_p ())
8211 /* For SLP, VF is set according to the unrolling factor, not
8212 the vector size, hence this diagnostic is not valid for SLP. */
8213 dump_printf_loc (MSG_NOTE, vect_location, "multiple-types.\n");
8214 }
8215
8216 /* Pure SLP statements have already been vectorized. We still need
8217 to apply loop vectorization to hybrid SLP statements. */
8218 if (PURE_SLP_STMT (stmt_info))
8219 return;
8220
8221 if (dump_enabled_p ())
8222 dump_printf_loc (MSG_NOTE, vect_location, "transform statement.\n");
8223
8224 if (vect_transform_stmt (stmt_info, gsi, NULL, NULL))
8225 *seen_store = stmt_info;
8226 }
8227
8228 /* Function vect_transform_loop.
8229
8230 The analysis phase has determined that the loop is vectorizable.
8231 Vectorize the loop - create vectorized stmts to replace the scalar
8232 stmts in the loop, and update the loop exit condition.
8233 Returns the scalar epilogue loop, if any. */
8234
8235 struct loop *
8236 vect_transform_loop (loop_vec_info loop_vinfo)
8237 {
8238 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
8239 struct loop *epilogue = NULL;
8240 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
8241 int nbbs = loop->num_nodes;
8242 int i;
8243 tree niters_vector = NULL_TREE;
8244 tree step_vector = NULL_TREE;
8245 tree niters_vector_mult_vf = NULL_TREE;
8246 poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
8247 unsigned int lowest_vf = constant_lower_bound (vf);
8248 gimple *stmt;
8249 bool check_profitability = false;
8250 unsigned int th;
8251
8252 DUMP_VECT_SCOPE ("vec_transform_loop");
8253
8254 loop_vinfo->shared->check_datarefs ();
8255
8256 /* Use the more conservative vectorization threshold. If the number
8257 of iterations is constant, assume the cost check has been performed
8258 by our caller. If the threshold makes all loops profitable that
8259 run at least the (estimated) vectorization factor number of times,
8260 checking is pointless, too. */
8261 th = LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo);
8262 if (th >= vect_vf_for_cost (loop_vinfo)
8263 && !LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
8264 {
8265 if (dump_enabled_p ())
8266 dump_printf_loc (MSG_NOTE, vect_location,
8267 "Profitability threshold is %d loop iterations.\n",
8268 th);
8269 check_profitability = true;
8270 }
8271
8272 /* Make sure there exists a single-predecessor exit bb. Do this before
8273 versioning. */
8274 edge e = single_exit (loop);
8275 if (! single_pred_p (e->dest))
8276 {
8277 split_loop_exit_edge (e);
8278 if (dump_enabled_p ())
8279 dump_printf (MSG_NOTE, "split exit edge\n");
8280 }
8281
8282 /* Version the loop first, if required, so the profitability check
8283 comes first. */
8284
8285 if (LOOP_REQUIRES_VERSIONING (loop_vinfo))
8286 {
8287 poly_uint64 versioning_threshold
8288 = LOOP_VINFO_VERSIONING_THRESHOLD (loop_vinfo);
8289 if (check_profitability
8290 && ordered_p (poly_uint64 (th), versioning_threshold))
8291 {
8292 versioning_threshold = ordered_max (poly_uint64 (th),
8293 versioning_threshold);
8294 check_profitability = false;
8295 }
8296 vect_loop_versioning (loop_vinfo, th, check_profitability,
8297 versioning_threshold);
8298 check_profitability = false;
8299 }
8300
8301 /* Make sure there exists a single-predecessor exit bb also on the
8302 scalar loop copy. Do this after versioning but before peeling,
8303 so the CFG structure is fine for both the scalar and the if-converted
8304 loop, and so slpeel_duplicate_current_defs_from_edges faces matched
8305 loop-closed PHI nodes on the exit. */
8306 if (LOOP_VINFO_SCALAR_LOOP (loop_vinfo))
8307 {
8308 e = single_exit (LOOP_VINFO_SCALAR_LOOP (loop_vinfo));
8309 if (! single_pred_p (e->dest))
8310 {
8311 split_loop_exit_edge (e);
8312 if (dump_enabled_p ())
8313 dump_printf (MSG_NOTE, "split exit edge of scalar loop\n");
8314 }
8315 }
8316
8317 tree niters = vect_build_loop_niters (loop_vinfo);
8318 LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo) = niters;
8319 tree nitersm1 = unshare_expr (LOOP_VINFO_NITERSM1 (loop_vinfo));
8320 bool niters_no_overflow = loop_niters_no_overflow (loop_vinfo);
8321 epilogue = vect_do_peeling (loop_vinfo, niters, nitersm1, &niters_vector,
8322 &step_vector, &niters_vector_mult_vf, th,
8323 check_profitability, niters_no_overflow);
8324
8325 if (niters_vector == NULL_TREE)
8326 {
8327 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
8328 && !LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
8329 && known_eq (lowest_vf, vf))
8330 {
8331 niters_vector
8332 = build_int_cst (TREE_TYPE (LOOP_VINFO_NITERS (loop_vinfo)),
8333 LOOP_VINFO_INT_NITERS (loop_vinfo) / lowest_vf);
8334 step_vector = build_one_cst (TREE_TYPE (niters));
8335 }
8336 else
8337 vect_gen_vector_loop_niters (loop_vinfo, niters, &niters_vector,
8338 &step_vector, niters_no_overflow);
8339 }
8340
8341 /* 1) Make sure the loop header has exactly two entries
8342 2) Make sure we have a preheader basic block. */
8343
8344 gcc_assert (EDGE_COUNT (loop->header->preds) == 2);
8345
8346 split_edge (loop_preheader_edge (loop));
8347
8348 if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
8349 && vect_use_loop_mask_for_alignment_p (loop_vinfo))
8350 /* This will deal with any possible peeling. */
8351 vect_prepare_for_masked_peels (loop_vinfo);
8352
8353 /* Schedule the SLP instances first, then handle loop vectorization
8354 below. */
8355 if (!loop_vinfo->slp_instances.is_empty ())
8356 {
8357 DUMP_VECT_SCOPE ("scheduling SLP instances");
8358 vect_schedule_slp (loop_vinfo);
8359 }
8360
8361 /* FORNOW: the vectorizer supports only loops whose body consists
8362 of one basic block (header + empty latch).  When the vectorizer
8363 supports more involved loop forms, the order in which the BBs are
8364 traversed needs to be reconsidered.  */
8365
8366 for (i = 0; i < nbbs; i++)
8367 {
8368 basic_block bb = bbs[i];
8369 stmt_vec_info stmt_info;
8370
8371 for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
8372 gsi_next (&si))
8373 {
8374 gphi *phi = si.phi ();
8375 if (dump_enabled_p ())
8376 {
8377 dump_printf_loc (MSG_NOTE, vect_location,
8378 "------>vectorizing phi: ");
8379 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
8380 }
8381 stmt_info = loop_vinfo->lookup_stmt (phi);
8382 if (!stmt_info)
8383 continue;
8384
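/* A scalar result that is not live after the loop will not be kept;
   reset debug uses of it so debug statements do not end up referring
   to a definition that does not survive vectorization.  */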
8385 if (MAY_HAVE_DEBUG_BIND_STMTS && !STMT_VINFO_LIVE_P (stmt_info))
8386 vect_loop_kill_debug_uses (loop, stmt_info);
8387
8388 if (!STMT_VINFO_RELEVANT_P (stmt_info)
8389 && !STMT_VINFO_LIVE_P (stmt_info))
8390 continue;
8391
8392 if (STMT_VINFO_VECTYPE (stmt_info)
8393 && (maybe_ne
8394 (TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info)), vf))
8395 && dump_enabled_p ())
8396 dump_printf_loc (MSG_NOTE, vect_location, "multiple-types.\n");
8397
8398 if ((STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def
8399 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def
8400 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle)
8401 && ! PURE_SLP_STMT (stmt_info))
8402 {
8403 if (dump_enabled_p ())
8404 dump_printf_loc (MSG_NOTE, vect_location, "transform phi.\n");
8405 vect_transform_stmt (stmt_info, NULL, NULL, NULL);
8406 }
8407 }
8408
8409 for (gimple_stmt_iterator si = gsi_start_bb (bb);
8410 !gsi_end_p (si);)
8411 {
8412 stmt = gsi_stmt (si);
8413 /* During vectorization remove existing clobber stmts. */
8414 if (gimple_clobber_p (stmt))
8415 {
8416 unlink_stmt_vdef (stmt);
8417 gsi_remove (&si, true);
8418 release_defs (stmt);
8419 }
8420 else
8421 {
8422 stmt_info = loop_vinfo->lookup_stmt (stmt);
8423
8424 /* vector stmts created in the outer-loop during vectorization of
8425 stmts in an inner-loop may not have a stmt_info, and do not
8426 need to be vectorized. */
8427 stmt_vec_info seen_store = NULL;
8428 if (stmt_info)
8429 {
8430 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
8431 {
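/* For a statement replaced by a pattern, the pattern definition
   sequence and the main pattern statement carry the operations to
   vectorize; transform those here (the original statement itself is
   normally not marked relevant).  */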
8432 gimple *def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info);
8433 for (gimple_stmt_iterator subsi = gsi_start (def_seq);
8434 !gsi_end_p (subsi); gsi_next (&subsi))
8435 {
8436 stmt_vec_info pat_stmt_info
8437 = loop_vinfo->lookup_stmt (gsi_stmt (subsi));
8438 vect_transform_loop_stmt (loop_vinfo, pat_stmt_info,
8439 &si, &seen_store);
8440 }
8441 stmt_vec_info pat_stmt_info
8442 = STMT_VINFO_RELATED_STMT (stmt_info);
8443 vect_transform_loop_stmt (loop_vinfo, pat_stmt_info, &si,
8444 &seen_store);
8445 }
8446 vect_transform_loop_stmt (loop_vinfo, stmt_info, &si,
8447 &seen_store);
8448 }
8449 gsi_next (&si);
8450 if (seen_store)
8451 {
8452 if (STMT_VINFO_GROUPED_ACCESS (seen_store))
8453 /* Interleaving.  The vectorization of the
8454 interleaving chain was completed; free all
8455 the stores in the chain.  */
8456 vect_remove_stores (DR_GROUP_FIRST_ELEMENT (seen_store));
8457 else
8458 /* Free the attached stmt_vec_info and remove the stmt. */
8459 loop_vinfo->remove_stmt (stmt_info);
8460 }
8461 }
8462 }
8463
8464 /* Stub out scalar statements that must not survive vectorization.
8465 Doing this here helps with grouped statements, or statements that
8466 are involved in patterns. */
8467 for (gimple_stmt_iterator gsi = gsi_start_bb (bb);
8468 !gsi_end_p (gsi); gsi_next (&gsi))
8469 {
8470 gcall *call = dyn_cast <gcall *> (gsi_stmt (gsi));
8471 if (call && gimple_call_internal_p (call, IFN_MASK_LOAD))
8472 {
8473 tree lhs = gimple_get_lhs (call);
8474 if (!VECTOR_TYPE_P (TREE_TYPE (lhs)))
8475 {
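/* A scalar IFN_MASK_LOAD cannot be expanded and its result is no
   longer needed after vectorization, so replace the call with a
   trivial LHS = 0 assignment to keep the IL valid; remaining dead
   code is expected to be cleaned up later.  */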
8476 tree zero = build_zero_cst (TREE_TYPE (lhs));
8477 gimple *new_stmt = gimple_build_assign (lhs, zero);
8478 gsi_replace (&gsi, new_stmt, true);
8479 }
8480 }
8481 }
8482 } /* BBs in loop */
8483
8484 /* The vectorization factor is always > 1, so if we use an IV increment of 1,
8485 a zero NITERS becomes a nonzero NITERS_VECTOR.  */
8486 if (integer_onep (step_vector))
8487 niters_no_overflow = true;
8488 vect_set_loop_condition (loop, loop_vinfo, niters_vector, step_vector,
8489 niters_vector_mult_vf, !niters_no_overflow);
8490
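/* Each vector iteration now covers (an assumed) ASSUMED_VF scalar
   iterations, so scale the profile of the loop body down
   accordingly.  */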
8491 unsigned int assumed_vf = vect_vf_for_cost (loop_vinfo);
8492 scale_profile_for_vect_loop (loop, assumed_vf);
8493
8494 /* True if the final iteration might not handle a full vector's
8495 worth of scalar iterations. */
8496 bool final_iter_may_be_partial = LOOP_VINFO_FULLY_MASKED_P (loop_vinfo);
8497 /* The minimum number of iterations performed by the epilogue. This
8498 is 1 when peeling for gaps because we always need a final scalar
8499 iteration. */
8500 int min_epilogue_iters = LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) ? 1 : 0;
8501 /* +1 to convert latch counts to loop iteration counts,
8502 -min_epilogue_iters to remove iterations that cannot be performed
8503 by the vector code. */
8504 int bias_for_lowest = 1 - min_epilogue_iters;
8505 int bias_for_assumed = bias_for_lowest;
8506 int alignment_npeels = LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo);
8507 if (alignment_npeels && LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
8508 {
8509 /* When the amount of peeling is known at compile time, the first
8510 iteration will have exactly alignment_npeels active elements.
8511 In the worst case it will have at least one. */
8512 int min_first_active = (alignment_npeels > 0 ? alignment_npeels : 1);
8513 bias_for_lowest += lowest_vf - min_first_active;
8514 bias_for_assumed += assumed_vf - min_first_active;
8515 }
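/* For example (assuming no peeling for gaps): with LOWEST_VF == 4 and
   ALIGNMENT_NPEELS == 1, BIAS_FOR_LOWEST becomes 1 + 4 - 1 == 4, so a
   scalar latch bound of 4 (five iterations) is turned into
   ceil ((4 + 4) / 4) - 1 == 1, i.e. two vector iterations: one partial
   iteration for the single peeled element and one full iteration.  */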
8516 /* In these calculations the "- 1" converts loop iteration counts
8517 back to latch counts. */
8518 if (loop->any_upper_bound)
8519 loop->nb_iterations_upper_bound
8520 = (final_iter_may_be_partial
8521 ? wi::udiv_ceil (loop->nb_iterations_upper_bound + bias_for_lowest,
8522 lowest_vf) - 1
8523 : wi::udiv_floor (loop->nb_iterations_upper_bound + bias_for_lowest,
8524 lowest_vf) - 1);
8525 if (loop->any_likely_upper_bound)
8526 loop->nb_iterations_likely_upper_bound
8527 = (final_iter_may_be_partial
8528 ? wi::udiv_ceil (loop->nb_iterations_likely_upper_bound
8529 + bias_for_lowest, lowest_vf) - 1
8530 : wi::udiv_floor (loop->nb_iterations_likely_upper_bound
8531 + bias_for_lowest, lowest_vf) - 1);
8532 if (loop->any_estimate)
8533 loop->nb_iterations_estimate
8534 = (final_iter_may_be_partial
8535 ? wi::udiv_ceil (loop->nb_iterations_estimate + bias_for_assumed,
8536 assumed_vf) - 1
8537 : wi::udiv_floor (loop->nb_iterations_estimate + bias_for_assumed,
8538 assumed_vf) - 1);
8539
8540 if (dump_enabled_p ())
8541 {
8542 if (!LOOP_VINFO_EPILOGUE_P (loop_vinfo))
8543 {
8544 dump_printf_loc (MSG_NOTE, vect_location,
8545 "LOOP VECTORIZED\n");
8546 if (loop->inner)
8547 dump_printf_loc (MSG_NOTE, vect_location,
8548 "OUTER LOOP VECTORIZED\n");
8549 dump_printf (MSG_NOTE, "\n");
8550 }
8551 else
8552 {
8553 dump_printf_loc (MSG_NOTE, vect_location,
8554 "LOOP EPILOGUE VECTORIZED (VS=");
8555 dump_dec (MSG_NOTE, current_vector_size);
8556 dump_printf (MSG_NOTE, ")\n");
8557 }
8558 }
8559
8560 /* Free SLP instances here because otherwise stmt reference counting
8561 won't work. */
8562 slp_instance instance;
8563 FOR_EACH_VEC_ELT (LOOP_VINFO_SLP_INSTANCES (loop_vinfo), i, instance)
8564 vect_free_slp_instance (instance, true);
8565 LOOP_VINFO_SLP_INSTANCES (loop_vinfo).release ();
8566 /* Clear the safelen field: its value is invalid after vectorization,
8567 since the vectorized loop can have loop-carried dependencies.  */
8568 loop->safelen = 0;
8569
8570 /* Don't vectorize epilogue for epilogue. */
8571 if (LOOP_VINFO_EPILOGUE_P (loop_vinfo))
8572 epilogue = NULL;
8573
8574 if (!PARAM_VALUE (PARAM_VECT_EPILOGUES_NOMASK))
8575 epilogue = NULL;
8576
8577 if (epilogue)
8578 {
8579 auto_vector_sizes vector_sizes;
8580 targetm.vectorize.autovectorize_vector_sizes (&vector_sizes);
8581 unsigned int next_size = 0;
8582
8583 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
8584 && LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) >= 0
8585 && known_eq (vf, lowest_vf))
8586 {
8587 unsigned int eiters
8588 = (LOOP_VINFO_INT_NITERS (loop_vinfo)
8589 - LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo));
8590 eiters = eiters % lowest_vf;
8591 epilogue->nb_iterations_upper_bound = eiters - 1;
8592
8593 unsigned int ratio;
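/* Skip vector sizes the epilogue could not use: EITERS must cover at
   least one vector iteration, i.e. EITERS >= LOWEST_VF / RATIO, where
   RATIO is how many times the candidate size divides the current
   vector size.  */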
8594 while (next_size < vector_sizes.length ()
8595 && !(constant_multiple_p (current_vector_size,
8596 vector_sizes[next_size], &ratio)
8597 && eiters >= lowest_vf / ratio))
8598 next_size += 1;
8599 }
8600 else
8601 while (next_size < vector_sizes.length ()
8602 && maybe_lt (current_vector_size, vector_sizes[next_size]))
8603 next_size += 1;
8604
8605 if (next_size == vector_sizes.length ())
8606 epilogue = NULL;
8607 }
8608
8609 if (epilogue)
8610 {
8611 epilogue->force_vectorize = loop->force_vectorize;
8612 epilogue->safelen = loop->safelen;
8613 epilogue->dont_vectorize = false;
8614
8615 /* We may need to if-convert epilogue to vectorize it. */
8616 if (LOOP_VINFO_SCALAR_LOOP (loop_vinfo))
8617 tree_if_conversion (epilogue);
8618 }
8619
8620 return epilogue;
8621 }
8622
8623 /* The code below performs a simple optimization: revert if-conversion
8624 for masked stores, i.e. if the mask of a store is zero, do not perform
8625 the store and, where possible, also skip the producers of the stored values.
8626 For example,
8627 for (i=0; i<n; i++)
8628 if (c[i])
8629 {
8630 p1[i] += 1;
8631 p2[i] = p3[i] + 2;
8632 }
8633 this transformation will produce the following semi-hammock:
8634
8635 if (!mask__ifc__42.18_165 == { 0, 0, 0, 0, 0, 0, 0, 0 })
8636 {
8637 vect__11.19_170 = MASK_LOAD (vectp_p1.20_168, 0B, mask__ifc__42.18_165);
8638 vect__12.22_172 = vect__11.19_170 + vect_cst__171;
8639 MASK_STORE (vectp_p1.23_175, 0B, mask__ifc__42.18_165, vect__12.22_172);
8640 vect__18.25_182 = MASK_LOAD (vectp_p3.26_180, 0B, mask__ifc__42.18_165);
8641 vect__19.28_184 = vect__18.25_182 + vect_cst__183;
8642 MASK_STORE (vectp_p2.29_187, 0B, mask__ifc__42.18_165, vect__19.28_184);
8643 }
8644 */
8645
8646 void
8647 optimize_mask_stores (struct loop *loop)
8648 {
8649 basic_block *bbs = get_loop_body (loop);
8650 unsigned nbbs = loop->num_nodes;
8651 unsigned i;
8652 basic_block bb;
8653 struct loop *bb_loop;
8654 gimple_stmt_iterator gsi;
8655 gimple *stmt;
8656 auto_vec<gimple *> worklist;
8657
8658 vect_location = find_loop_location (loop);
8659 /* Pick up all masked stores in loop if any. */
8660 for (i = 0; i < nbbs; i++)
8661 {
8662 bb = bbs[i];
8663 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi);
8664 gsi_next (&gsi))
8665 {
8666 stmt = gsi_stmt (gsi);
8667 if (gimple_call_internal_p (stmt, IFN_MASK_STORE))
8668 worklist.safe_push (stmt);
8669 }
8670 }
8671
8672 free (bbs);
8673 if (worklist.is_empty ())
8674 return;
8675
8676 /* Loop has masked stores. */
8677 while (!worklist.is_empty ())
8678 {
8679 gimple *last, *last_store;
8680 edge e, efalse;
8681 tree mask;
8682 basic_block store_bb, join_bb;
8683 gimple_stmt_iterator gsi_to;
8684 tree vdef, new_vdef;
8685 gphi *phi;
8686 tree vectype;
8687 tree zero;
8688
8689 last = worklist.pop ();
8690 mask = gimple_call_arg (last, 2);
8691 bb = gimple_bb (last);
8692 /* Create STORE_BB and the if-then structure in the CFG; STORE_BB belongs
8693 to the same loop as BB.  That loop can differ from LOOP when a two-level
8694 loop nest is vectorized and the masked store belongs to the inner
8695 one.  */
8696 e = split_block (bb, last);
8697 bb_loop = bb->loop_father;
8698 gcc_assert (loop == bb_loop || flow_loop_nested_p (loop, bb_loop));
8699 join_bb = e->dest;
8700 store_bb = create_empty_bb (bb);
8701 add_bb_to_loop (store_bb, bb_loop);
8702 e->flags = EDGE_TRUE_VALUE;
8703 efalse = make_edge (bb, store_bb, EDGE_FALSE_VALUE);
8704 /* Put STORE_BB on the unlikely path.  */
8705 efalse->probability = profile_probability::unlikely ();
8706 store_bb->count = efalse->count ();
8707 make_single_succ_edge (store_bb, join_bb, EDGE_FALLTHRU);
8708 if (dom_info_available_p (CDI_DOMINATORS))
8709 set_immediate_dominator (CDI_DOMINATORS, store_bb, bb);
8710 if (dump_enabled_p ())
8711 dump_printf_loc (MSG_NOTE, vect_location,
8712 "Create new block %d to sink mask stores.",
8713 store_bb->index);
8714 /* Create vector comparison with boolean result. */
8715 vectype = TREE_TYPE (mask);
8716 zero = build_zero_cst (vectype);
8717 stmt = gimple_build_cond (EQ_EXPR, mask, zero, NULL_TREE, NULL_TREE);
8718 gsi = gsi_last_bb (bb);
8719 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
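/* If the whole mask is zero the true edge to JOIN_BB is taken and the
   stores sunk into STORE_BB are skipped entirely.  */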
8720 /* Create a new PHI node for the vdef of the last masked store:
8721 .MEM_2 = VDEF <.MEM_1>
8722 will be converted to
8723 .MEM_3 = VDEF <.MEM_1>
8724 and a new PHI node will be created in the join bb:
8725 .MEM_2 = PHI <.MEM_1, .MEM_3>
8726 */
8727 vdef = gimple_vdef (last);
8728 new_vdef = make_ssa_name (gimple_vop (cfun), last);
8729 gimple_set_vdef (last, new_vdef);
8730 phi = create_phi_node (vdef, join_bb);
8731 add_phi_arg (phi, new_vdef, EDGE_SUCC (store_bb, 0), UNKNOWN_LOCATION);
8732
8733 /* Put all masked stores with the same mask to STORE_BB if possible. */
8734 while (true)
8735 {
8736 gimple_stmt_iterator gsi_from;
8737 gimple *stmt1 = NULL;
8738
8739 /* Move masked store to STORE_BB. */
8740 last_store = last;
8741 gsi = gsi_for_stmt (last);
8742 gsi_from = gsi;
8743 /* Shift GSI to the previous stmt for further traversal. */
8744 gsi_prev (&gsi);
8745 gsi_to = gsi_start_bb (store_bb);
8746 gsi_move_before (&gsi_from, &gsi_to);
8747 /* Re-fetch GSI_TO: STORE_BB is no longer empty, so start at its first statement for further insertions.  */
8748 gsi_to = gsi_start_bb (store_bb);
8749 if (dump_enabled_p ())
8750 {
8751 dump_printf_loc (MSG_NOTE, vect_location,
8752 "Move stmt to created bb\n");
8753 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, last, 0);
8754 }
8755 /* Move all stored value producers if possible. */
8756 while (!gsi_end_p (gsi))
8757 {
8758 tree lhs;
8759 imm_use_iterator imm_iter;
8760 use_operand_p use_p;
8761 bool res;
8762
8763 /* Skip debug statements. */
8764 if (is_gimple_debug (gsi_stmt (gsi)))
8765 {
8766 gsi_prev (&gsi);
8767 continue;
8768 }
8769 stmt1 = gsi_stmt (gsi);
8770 /* Do not consider statements writing to memory or having a
8771 volatile operand.  */
8772 if (gimple_vdef (stmt1)
8773 || gimple_has_volatile_ops (stmt1))
8774 break;
8775 gsi_from = gsi;
8776 gsi_prev (&gsi);
8777 lhs = gimple_get_lhs (stmt1);
8778 if (!lhs)
8779 break;
8780
8781 /* LHS of vectorized stmt must be SSA_NAME. */
8782 if (TREE_CODE (lhs) != SSA_NAME)
8783 break;
8784
8785 if (!VECTOR_TYPE_P (TREE_TYPE (lhs)))
8786 {
8787 /* Remove dead scalar statement. */
8788 if (has_zero_uses (lhs))
8789 {
8790 gsi_remove (&gsi_from, true);
8791 continue;
8792 }
8793 }
8794
8795 /* Check that LHS does not have uses outside of STORE_BB. */
8796 res = true;
8797 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
8798 {
8799 gimple *use_stmt;
8800 use_stmt = USE_STMT (use_p);
8801 if (is_gimple_debug (use_stmt))
8802 continue;
8803 if (gimple_bb (use_stmt) != store_bb)
8804 {
8805 res = false;
8806 break;
8807 }
8808 }
8809 if (!res)
8810 break;
8811
8812 if (gimple_vuse (stmt1)
8813 && gimple_vuse (stmt1) != gimple_vuse (last_store))
8814 break;
8815
8816 /* Can move STMT1 to STORE_BB. */
8817 if (dump_enabled_p ())
8818 {
8819 dump_printf_loc (MSG_NOTE, vect_location,
8820 "Move stmt to created bb\n");
8821 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt1, 0);
8822 }
8823 gsi_move_before (&gsi_from, &gsi_to);
8824 /* Shift GSI_TO for further insertion. */
8825 gsi_prev (&gsi_to);
8826 }
8827 /* Put other masked stores with the same mask to STORE_BB. */
8828 if (worklist.is_empty ()
8829 || gimple_call_arg (worklist.last (), 2) != mask
8830 || worklist.last () != stmt1)
8831 break;
8832 last = worklist.pop ();
8833 }
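/* The fall-through (true) edge E contributes the memory state that is
   live when the sunk stores are skipped: the VUSE of the earliest
   store moved into STORE_BB.  */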
8834 add_phi_arg (phi, gimple_vuse (last_store), e, UNKNOWN_LOCATION);
8835 }
8836 }