/* Loop Vectorization
   Copyright (C) 2003-2014 Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com> and
   Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "dumpfile.h"
#include "tm.h"
#include "tree.h"
#include "stor-layout.h"
#include "predict.h"
#include "vec.h"
#include "hashtab.h"
#include "hash-set.h"
#include "machmode.h"
#include "hard-reg-set.h"
#include "input.h"
#include "function.h"
#include "dominance.h"
#include "cfg.h"
#include "cfganal.h"
#include "basic-block.h"
#include "gimple-pretty-print.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "gimple-ssa.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-ssa-loop-ivopts.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop-niter.h"
#include "tree-pass.h"
#include "cfgloop.h"
#include "expr.h"
#include "recog.h"
#include "insn-codes.h"
#include "optabs.h"
#include "params.h"
#include "diagnostic-core.h"
#include "tree-chrec.h"
#include "tree-scalar-evolution.h"
#include "tree-vectorizer.h"
#include "target.h"

/* Loop Vectorization Pass.

   This pass tries to vectorize loops.

   For example, the vectorizer transforms the following simple loop:

        short a[N]; short b[N]; short c[N]; int i;

        for (i=0; i<N; i++){
          a[i] = b[i] + c[i];
        }

   as if it was manually vectorized by rewriting the source code into:

        typedef int __attribute__((mode(V8HI))) v8hi;
        short a[N]; short b[N]; short c[N]; int i;
        v8hi *pa = (v8hi*)a, *pb = (v8hi*)b, *pc = (v8hi*)c;
        v8hi va, vb, vc;

        for (i=0; i<N/8; i++){
          vb = pb[i];
          vc = pc[i];
          va = vb + vc;
          pa[i] = va;
        }

   The main entry to this pass is vectorize_loops(), in which
   the vectorizer applies a set of analyses on a given set of loops,
   followed by the actual vectorization transformation for the loops that
   had successfully passed the analysis phase.
   Throughout this pass we make a distinction between two types of
   data: scalars (which are represented by SSA_NAMES), and memory references
   ("data-refs").  These two types of data require different handling both
   during analysis and transformation.  The types of data-refs that the
   vectorizer currently supports are ARRAY_REFS whose base is an array DECL
   (not a pointer), and INDIRECT_REFS through pointers; both array and pointer
   accesses are required to have a simple (consecutive) access pattern.

   Analysis phase:
   ===============
   The driver for the analysis phase is vect_analyze_loop().
   It applies a set of analyses, some of which rely on the scalar evolution
   analyzer (scev) developed by Sebastian Pop.

   During the analysis phase the vectorizer records some information
   per stmt in a "stmt_vec_info" struct which is attached to each stmt in the
   loop, as well as general information about the loop as a whole, which is
   recorded in a "loop_vec_info" struct attached to each loop.

   Transformation phase:
   =====================
   The loop transformation phase scans all the stmts in the loop, and
   creates a vector stmt (or a sequence of stmts) for each scalar stmt S in
   the loop that needs to be vectorized.  It inserts the vector code sequence
   just before the scalar stmt S, and records a pointer to the vector code
   in STMT_VINFO_VEC_STMT (stmt_info) (stmt_info is the stmt_vec_info struct
   attached to S).  This pointer will be used for the vectorization of following
   stmts which use the def of stmt S.  Stmt S is removed if it writes to memory;
   otherwise, we rely on dead code elimination for removing it.

   For example, say stmt S1 was vectorized into stmt VS1:

   VS1: vb = px[i];
   S1:  b = x[i];       STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
   S2:  a = b;

   To vectorize stmt S2, the vectorizer first finds the stmt that defines
   the operand 'b' (S1), and gets the relevant vector def 'vb' from the
   vector stmt VS1 pointed to by STMT_VINFO_VEC_STMT (stmt_info (S1)).  The
   resulting sequence would be:

   VS1: vb = px[i];
   S1:  b = x[i];       STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
   VS2: va = vb;
   S2:  a = b;          STMT_VINFO_VEC_STMT (stmt_info (S2)) = VS2

   Operands that are not SSA_NAMEs are data-refs that appear in
   load/store operations (like 'x[i]' in S1), and are handled differently.

   Target modeling:
   =================
   Currently the only target specific information that is used is the
   size of the vector (in bytes) - "TARGET_VECTORIZE_UNITS_PER_SIMD_WORD".
   Targets that can support different sizes of vectors, for now will need
   to specify one value for "TARGET_VECTORIZE_UNITS_PER_SIMD_WORD".  More
   flexibility will be added in the future.

   Since we only vectorize operations whose vector form can be
   expressed using existing tree codes, to verify that an operation is
   supported, the vectorizer checks the relevant optab at the relevant
   machine_mode (e.g., optab_handler (add_optab, V8HImode)).  If
   the value found is CODE_FOR_nothing, then there's no target support, and
   we can't vectorize the stmt.

   For additional information on this project see:
   http://gcc.gnu.org/projects/tree-ssa/vectorization.html
*/
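
/* Editor's note (illustrative sketch, not part of the original sources):
   the optab check described under "Target modeling" above amounts to a
   query of the following shape, assuming the optab and mode names from
   optabs.h and machmode.h:

     if (optab_handler (add_optab, V8HImode) == CODE_FOR_nothing)
       ... the target has no V8HImode addition, so such a stmt cannot
           be vectorized ...

   The per-statement support checks of this kind are performed by the
   vectorizable_* routines (mostly in tree-vect-stmts.c).  */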

static void vect_estimate_min_profitable_iters (loop_vec_info, int *, int *);

/* Function vect_determine_vectorization_factor

   Determine the vectorization factor (VF).  VF is the number of data elements
   that are operated upon in parallel in a single iteration of the vectorized
   loop.  For example, when vectorizing a loop that operates on 4-byte elements,
   on a target with a vector size (VS) of 16 bytes, the VF is set to 4, since 4
   elements can fit in a single vector register.

   We currently support vectorization of loops in which all types operated upon
   are of the same size.  Therefore this function currently sets VF according to
   the size of the types operated upon, and fails if there are multiple sizes
   in the loop.

   VF is also the factor by which the loop iterations are strip-mined, e.g.:
   original loop:
        for (i=0; i<N; i++){
          a[i] = b[i] + c[i];
        }

   vectorized loop:
        for (i=0; i<N; i+=VF){
          a[i:VF] = b[i:VF] + c[i:VF];
        }
*/
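
/* Editor's sketch (assumed example, not from the original sources): on a
   target with 16-byte vectors, a loop over "short" (2-byte) elements gets
   VF = 8 (V8HImode has 8 subparts) while a loop over "int" gets VF = 4.
   A loop that mixes the two widths, e.g.

        short b[N]; int a[N];
        for (i = 0; i < N; i++)
          a[i] = b[i];

   is driven by the smallest scalar type (see the use of
   vect_get_smallest_scalar_type below), so VF ends up 8 and the wider
   "int" statements are later emitted as multiple vector copies.  */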

static bool
vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  int nbbs = loop->num_nodes;
  gimple_stmt_iterator si;
  unsigned int vectorization_factor = 0;
  tree scalar_type;
  gimple phi;
  tree vectype;
  unsigned int nunits;
  stmt_vec_info stmt_info;
  int i;
  HOST_WIDE_INT dummy;
  gimple stmt, pattern_stmt = NULL;
  gimple_seq pattern_def_seq = NULL;
  gimple_stmt_iterator pattern_def_si = gsi_none ();
  bool analyze_pattern_stmt = false;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "=== vect_determine_vectorization_factor ===\n");

  for (i = 0; i < nbbs; i++)
    {
      basic_block bb = bbs[i];

      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
        {
          phi = gsi_stmt (si);
          stmt_info = vinfo_for_stmt (phi);
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_NOTE, vect_location, "==> examining phi: ");
              dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
              dump_printf (MSG_NOTE, "\n");
            }

          gcc_assert (stmt_info);

          if (STMT_VINFO_RELEVANT_P (stmt_info))
            {
              gcc_assert (!STMT_VINFO_VECTYPE (stmt_info));
              scalar_type = TREE_TYPE (PHI_RESULT (phi));

              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_NOTE, vect_location,
                                   "get vectype for scalar type: ");
                  dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
                  dump_printf (MSG_NOTE, "\n");
                }

              vectype = get_vectype_for_scalar_type (scalar_type);
              if (!vectype)
                {
                  if (dump_enabled_p ())
                    {
                      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                       "not vectorized: unsupported "
                                       "data-type ");
                      dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                         scalar_type);
                      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                    }
                  return false;
                }
              STMT_VINFO_VECTYPE (stmt_info) = vectype;

              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
                  dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype);
                  dump_printf (MSG_NOTE, "\n");
                }

              nunits = TYPE_VECTOR_SUBPARTS (vectype);
              if (dump_enabled_p ())
                dump_printf_loc (MSG_NOTE, vect_location, "nunits = %d\n",
                                 nunits);

              if (!vectorization_factor
                  || (nunits > vectorization_factor))
                vectorization_factor = nunits;
            }
        }

      for (si = gsi_start_bb (bb); !gsi_end_p (si) || analyze_pattern_stmt;)
        {
          tree vf_vectype;

          if (analyze_pattern_stmt)
            stmt = pattern_stmt;
          else
            stmt = gsi_stmt (si);

          stmt_info = vinfo_for_stmt (stmt);

          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_NOTE, vect_location,
                               "==> examining statement: ");
              dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
              dump_printf (MSG_NOTE, "\n");
            }

          gcc_assert (stmt_info);

          /* Skip stmts which do not need to be vectorized.  */
          if ((!STMT_VINFO_RELEVANT_P (stmt_info)
               && !STMT_VINFO_LIVE_P (stmt_info))
              || gimple_clobber_p (stmt))
            {
              if (STMT_VINFO_IN_PATTERN_P (stmt_info)
                  && (pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info))
                  && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
                      || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
                {
                  stmt = pattern_stmt;
                  stmt_info = vinfo_for_stmt (pattern_stmt);
                  if (dump_enabled_p ())
                    {
                      dump_printf_loc (MSG_NOTE, vect_location,
                                       "==> examining pattern statement: ");
                      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
                      dump_printf (MSG_NOTE, "\n");
                    }
                }
              else
                {
                  if (dump_enabled_p ())
                    dump_printf_loc (MSG_NOTE, vect_location, "skip.\n");
                  gsi_next (&si);
                  continue;
                }
            }
          else if (STMT_VINFO_IN_PATTERN_P (stmt_info)
                   && (pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info))
                   && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
                       || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
            analyze_pattern_stmt = true;

          /* If a pattern statement has def stmts, analyze them too.  */
          if (is_pattern_stmt_p (stmt_info))
            {
              if (pattern_def_seq == NULL)
                {
                  pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info);
                  pattern_def_si = gsi_start (pattern_def_seq);
                }
              else if (!gsi_end_p (pattern_def_si))
                gsi_next (&pattern_def_si);
              if (pattern_def_seq != NULL)
                {
                  gimple pattern_def_stmt = NULL;
                  stmt_vec_info pattern_def_stmt_info = NULL;

                  while (!gsi_end_p (pattern_def_si))
                    {
                      pattern_def_stmt = gsi_stmt (pattern_def_si);
                      pattern_def_stmt_info
                        = vinfo_for_stmt (pattern_def_stmt);
                      if (STMT_VINFO_RELEVANT_P (pattern_def_stmt_info)
                          || STMT_VINFO_LIVE_P (pattern_def_stmt_info))
                        break;
                      gsi_next (&pattern_def_si);
                    }

                  if (!gsi_end_p (pattern_def_si))
                    {
                      if (dump_enabled_p ())
                        {
                          dump_printf_loc (MSG_NOTE, vect_location,
                                           "==> examining pattern def stmt: ");
                          dump_gimple_stmt (MSG_NOTE, TDF_SLIM,
                                            pattern_def_stmt, 0);
                          dump_printf (MSG_NOTE, "\n");
                        }

                      stmt = pattern_def_stmt;
                      stmt_info = pattern_def_stmt_info;
                    }
                  else
                    {
                      pattern_def_si = gsi_none ();
                      analyze_pattern_stmt = false;
                    }
                }
              else
                analyze_pattern_stmt = false;
            }

          if (gimple_get_lhs (stmt) == NULL_TREE
              /* MASK_STORE has no lhs, but is ok.  */
              && (!is_gimple_call (stmt)
                  || !gimple_call_internal_p (stmt)
                  || gimple_call_internal_fn (stmt) != IFN_MASK_STORE))
            {
              if (is_gimple_call (stmt))
                {
                  /* Ignore calls with no lhs.  These must be calls to
                     #pragma omp simd functions, and what vectorization factor
                     it really needs can't be determined until
                     vectorizable_simd_clone_call.  */
                  if (!analyze_pattern_stmt && gsi_end_p (pattern_def_si))
                    {
                      pattern_def_seq = NULL;
                      gsi_next (&si);
                    }
                  continue;
                }
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "not vectorized: irregular stmt.");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt,
                                    0);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }
              return false;
            }

          if (VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))))
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "not vectorized: vector stmt in loop:");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }
              return false;
            }

          if (STMT_VINFO_VECTYPE (stmt_info))
            {
              /* The only case when a vectype had been already set is for stmts
                 that contain a dataref, or for "pattern-stmts" (stmts
                 generated by the vectorizer to represent/replace a certain
                 idiom).  */
              gcc_assert (STMT_VINFO_DATA_REF (stmt_info)
                          || is_pattern_stmt_p (stmt_info)
                          || !gsi_end_p (pattern_def_si));
              vectype = STMT_VINFO_VECTYPE (stmt_info);
            }
          else
            {
              gcc_assert (!STMT_VINFO_DATA_REF (stmt_info));
              if (is_gimple_call (stmt)
                  && gimple_call_internal_p (stmt)
                  && gimple_call_internal_fn (stmt) == IFN_MASK_STORE)
                scalar_type = TREE_TYPE (gimple_call_arg (stmt, 3));
              else
                scalar_type = TREE_TYPE (gimple_get_lhs (stmt));
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_NOTE, vect_location,
                                   "get vectype for scalar type: ");
                  dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
                  dump_printf (MSG_NOTE, "\n");
                }
              vectype = get_vectype_for_scalar_type (scalar_type);
              if (!vectype)
                {
                  if (dump_enabled_p ())
                    {
                      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                       "not vectorized: unsupported "
                                       "data-type ");
                      dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                         scalar_type);
                      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                    }
                  return false;
                }

              STMT_VINFO_VECTYPE (stmt_info) = vectype;

              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
                  dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype);
                  dump_printf (MSG_NOTE, "\n");
                }
            }

          /* The vectorization factor is according to the smallest
             scalar type (or the largest vector size, but we only
             support one vector size per loop).  */
          scalar_type = vect_get_smallest_scalar_type (stmt, &dummy,
                                                       &dummy);
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_NOTE, vect_location,
                               "get vectype for scalar type: ");
              dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
              dump_printf (MSG_NOTE, "\n");
            }
          vf_vectype = get_vectype_for_scalar_type (scalar_type);
          if (!vf_vectype)
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "not vectorized: unsupported data-type ");
                  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                     scalar_type);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }
              return false;
            }

          if ((GET_MODE_SIZE (TYPE_MODE (vectype))
               != GET_MODE_SIZE (TYPE_MODE (vf_vectype))))
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "not vectorized: different sized vector "
                                   "types in statement, ");
                  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                     vectype);
                  dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
                  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                     vf_vectype);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }
              return false;
            }

          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
              dump_generic_expr (MSG_NOTE, TDF_SLIM, vf_vectype);
              dump_printf (MSG_NOTE, "\n");
            }

          nunits = TYPE_VECTOR_SUBPARTS (vf_vectype);
          if (dump_enabled_p ())
            dump_printf_loc (MSG_NOTE, vect_location, "nunits = %d\n", nunits);
          if (!vectorization_factor
              || (nunits > vectorization_factor))
            vectorization_factor = nunits;

          if (!analyze_pattern_stmt && gsi_end_p (pattern_def_si))
            {
              pattern_def_seq = NULL;
              gsi_next (&si);
            }
        }
    }

  /* TODO: Analyze cost.  Decide if worth while to vectorize.  */
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "vectorization factor = %d\n",
                     vectorization_factor);
  if (vectorization_factor <= 1)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: unsupported data-type\n");
      return false;
    }
  LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;

  return true;
}
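
/* Editor's note (illustrative, not from the original sources): for the
   short a[i] = b[i] + c[i] loop used in the file header comment, this
   function returns VF = 8 on a target with 16-byte vectors (V8HImode).
   The cost decision deferred by the TODO above is made later by
   vect_estimate_min_profitable_iters, forward-declared near the top of
   this file.  */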


/* Function vect_is_simple_iv_evolution.

   FORNOW: A simple evolution of an induction variable in the loop is
   considered a polynomial evolution.  */
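
/* Editor's note (illustrative, not from the original sources): the ACCESS_FN
   handed to this function is a scalar-evolution "chrec" of the form
   {init, +, step}_n, where n is the loop number.  For instance, in

        for (i = 0; i < N; i++)
          a[i] = b[i];

   the evolution of i is {0, +, 1}_1 (assuming the loop has number 1), so
   *INIT becomes 0 and *STEP becomes 1.  A statement like j = j + i, where i
   is itself an induction variable, yields an evolution part that is again a
   chrec, which the tree_is_chrec check below rejects as non-simple.  */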

static bool
vect_is_simple_iv_evolution (unsigned loop_nb, tree access_fn, tree * init,
                             tree * step)
{
  tree init_expr;
  tree step_expr;
  tree evolution_part = evolution_part_in_loop_num (access_fn, loop_nb);
  basic_block bb;

  /* When there is no evolution in this loop, the evolution function
     is not "simple".  */
  if (evolution_part == NULL_TREE)
    return false;

  /* When the evolution is a polynomial of degree >= 2
     the evolution function is not "simple".  */
  if (tree_is_chrec (evolution_part))
    return false;

  step_expr = evolution_part;
  init_expr = unshare_expr (initial_condition_in_loop_num (access_fn, loop_nb));

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "step: ");
      dump_generic_expr (MSG_NOTE, TDF_SLIM, step_expr);
      dump_printf (MSG_NOTE, ", init: ");
      dump_generic_expr (MSG_NOTE, TDF_SLIM, init_expr);
      dump_printf (MSG_NOTE, "\n");
    }

  *init = init_expr;
  *step = step_expr;

  if (TREE_CODE (step_expr) != INTEGER_CST
      && (TREE_CODE (step_expr) != SSA_NAME
          || ((bb = gimple_bb (SSA_NAME_DEF_STMT (step_expr)))
              && flow_bb_inside_loop_p (get_loop (cfun, loop_nb), bb))
          || (!INTEGRAL_TYPE_P (TREE_TYPE (step_expr))
              && (!SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr))
                  || !flag_associative_math)))
      && (TREE_CODE (step_expr) != REAL_CST
          || !flag_associative_math))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "step unknown.\n");
      return false;
    }

  return true;
}

/* Function vect_analyze_scalar_cycles_1.

   Examine the cross iteration def-use cycles of scalar variables
   in LOOP.  LOOP_VINFO represents the loop that is now being
   considered for vectorization (can be LOOP, or an outer-loop
   enclosing LOOP).  */

static void
vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, struct loop *loop)
{
  basic_block bb = loop->header;
  tree init, step;
  auto_vec<gimple, 64> worklist;
  gimple_stmt_iterator gsi;
  bool double_reduc;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "=== vect_analyze_scalar_cycles ===\n");

  /* First - identify all inductions.  Reduction detection assumes that all the
     inductions have been identified, therefore, this order must not be
     changed.  */
  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);
      tree access_fn = NULL;
      tree def = PHI_RESULT (phi);
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (phi);

      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: ");
          dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
          dump_printf (MSG_NOTE, "\n");
        }

      /* Skip virtual phi's.  The data dependences that are associated with
         virtual defs/uses (i.e., memory accesses) are analyzed elsewhere.  */
      if (virtual_operand_p (def))
        continue;

      STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_unknown_def_type;

      /* Analyze the evolution function.  */
      access_fn = analyze_scalar_evolution (loop, def);
      if (access_fn)
        {
          STRIP_NOPS (access_fn);
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_NOTE, vect_location,
                               "Access function of PHI: ");
              dump_generic_expr (MSG_NOTE, TDF_SLIM, access_fn);
              dump_printf (MSG_NOTE, "\n");
            }
          STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo)
            = evolution_part_in_loop_num (access_fn, loop->num);
        }

      if (!access_fn
          || !vect_is_simple_iv_evolution (loop->num, access_fn, &init, &step)
          || (LOOP_VINFO_LOOP (loop_vinfo) != loop
              && TREE_CODE (step) != INTEGER_CST))
        {
          worklist.safe_push (phi);
          continue;
        }

      gcc_assert (STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo) != NULL_TREE);

      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location, "Detected induction.\n");
      STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_induction_def;
    }


  /* Second - identify all reductions and nested cycles.  */
  while (worklist.length () > 0)
    {
      gimple phi = worklist.pop ();
      tree def = PHI_RESULT (phi);
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (phi);
      gimple reduc_stmt;
      bool nested_cycle;

      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: ");
          dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
          dump_printf (MSG_NOTE, "\n");
        }

      gcc_assert (!virtual_operand_p (def)
                  && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_unknown_def_type);

      nested_cycle = (loop != LOOP_VINFO_LOOP (loop_vinfo));
      reduc_stmt = vect_force_simple_reduction (loop_vinfo, phi, !nested_cycle,
                                                &double_reduc);
      if (reduc_stmt)
        {
          if (double_reduc)
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_NOTE, vect_location,
                                 "Detected double reduction.\n");

              STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_double_reduction_def;
              STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
                vect_double_reduction_def;
            }
          else
            {
              if (nested_cycle)
                {
                  if (dump_enabled_p ())
                    dump_printf_loc (MSG_NOTE, vect_location,
                                     "Detected vectorizable nested cycle.\n");

                  STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_nested_cycle;
                  STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
                    vect_nested_cycle;
                }
              else
                {
                  if (dump_enabled_p ())
                    dump_printf_loc (MSG_NOTE, vect_location,
                                     "Detected reduction.\n");

                  STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_reduction_def;
                  STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
                    vect_reduction_def;
                  /* Store the reduction cycles for possible vectorization in
                     loop-aware SLP.  */
                  LOOP_VINFO_REDUCTIONS (loop_vinfo).safe_push (reduc_stmt);
                }
            }
        }
      else
        if (dump_enabled_p ())
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "Unknown def-use cycle pattern.\n");
    }
}


/* Function vect_analyze_scalar_cycles.

   Examine the cross iteration def-use cycles of scalar variables, by
   analyzing the loop-header PHIs of scalar variables.  Classify each
   cycle as one of the following: invariant, induction, reduction, unknown.
   We do that for the loop represented by LOOP_VINFO, and also for its
   inner-loop, if it exists.
   Examples for scalar cycles:

   Example1: reduction:

              loop1:
              for (i=0; i<N; i++)
                 sum += a[i];

   Example2: induction:

              loop2:
              for (i=0; i<N; i++)
                 a[i] = i;  */
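
/* Editor's note (illustrative, not from the original sources): when the loop
   under analysis is an outer loop, two further classifications come into
   play.  A cycle such as

              for (i=0; i<N; i++)
                for (j=0; j<M; j++)
                  sum += a[i][j];

   where the inner-loop reduction result is itself accumulated across the
   outer loop, is classified as a double reduction, while an inner-loop
   def-use cycle that is not a reduction with respect to the outer loop is
   marked as a (vectorizable) nested cycle.  */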

static void
vect_analyze_scalar_cycles (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);

  vect_analyze_scalar_cycles_1 (loop_vinfo, loop);

  /* When vectorizing an outer-loop, the inner-loop is executed sequentially.
     Reductions in such an inner-loop therefore have different properties than
     the reductions in the nest that gets vectorized:
     1. When vectorized, they are executed in the same order as in the original
        scalar loop, so we can't change the order of computation when
        vectorizing them.
     2. FIXME: Inner-loop reductions can be used in the inner-loop, so the
        current checks are too strict.  */

  if (loop->inner)
    vect_analyze_scalar_cycles_1 (loop_vinfo, loop->inner);
}


/* Function vect_get_loop_niters.

   Determine how many times the loop is executed and place that count
   in NUMBER_OF_ITERATIONS.  Place the number of latch iterations
   in NUMBER_OF_ITERATIONSM1.

   Return the loop exit condition.  */
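
/* Editor's note (illustrative, not from the original sources): for a simple
   counted loop that is known to run at least once, e.g.

        for (i = 0; i < n; i++)
          ...

   the latch edge is taken n - 1 times, so NUMBER_OF_ITERATIONSM1 is n - 1
   and NUMBER_OF_ITERATIONS (the number of header executions) is n.  */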

static gimple
vect_get_loop_niters (struct loop *loop, tree *number_of_iterations,
                      tree *number_of_iterationsm1)
{
  tree niters;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "=== get_loop_niters ===\n");

  niters = number_of_latch_executions (loop);
  *number_of_iterationsm1 = niters;

  /* We want the number of loop header executions which is the number
     of latch executions plus one.
     ??? For UINT_MAX latch executions this number overflows to zero
     for loops like do { n++; } while (n != 0);  */
  if (niters && !chrec_contains_undetermined (niters))
    niters = fold_build2 (PLUS_EXPR, TREE_TYPE (niters), unshare_expr (niters),
                          build_int_cst (TREE_TYPE (niters), 1));
  *number_of_iterations = niters;

  return get_loop_exit_condition (loop);
}


/* Function bb_in_loop_p

   Used as predicate for dfs order traversal of the loop bbs.  */

static bool
bb_in_loop_p (const_basic_block bb, const void *data)
{
  const struct loop *const loop = (const struct loop *)data;
  if (flow_bb_inside_loop_p (loop, bb))
    return true;
  return false;
}


/* Function new_loop_vec_info.

   Create and initialize a new loop_vec_info struct for LOOP, as well as
   stmt_vec_info structs for all the stmts in LOOP.  */

static loop_vec_info
new_loop_vec_info (struct loop *loop)
{
  loop_vec_info res;
  basic_block *bbs;
  gimple_stmt_iterator si;
  unsigned int i, nbbs;

  res = (loop_vec_info) xcalloc (1, sizeof (struct _loop_vec_info));
  LOOP_VINFO_LOOP (res) = loop;

  bbs = get_loop_body (loop);

  /* Create/Update stmt_info for all stmts in the loop.  */
  for (i = 0; i < loop->num_nodes; i++)
    {
      basic_block bb = bbs[i];

      /* BBs in a nested inner-loop will have been already processed (because
         we will have called vect_analyze_loop_form for any nested inner-loop).
         Therefore, for stmts in an inner-loop we just want to update the
         STMT_VINFO_LOOP_VINFO field of their stmt_info to point to the new
         loop_info of the outer-loop we are currently considering to vectorize
         (instead of the loop_info of the inner-loop).
         For stmts in other BBs we need to create a stmt_info from scratch.  */
      if (bb->loop_father != loop)
        {
          /* Inner-loop bb.  */
          gcc_assert (loop->inner && bb->loop_father == loop->inner);
          for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
            {
              gimple phi = gsi_stmt (si);
              stmt_vec_info stmt_info = vinfo_for_stmt (phi);
              loop_vec_info inner_loop_vinfo =
                STMT_VINFO_LOOP_VINFO (stmt_info);
              gcc_assert (loop->inner == LOOP_VINFO_LOOP (inner_loop_vinfo));
              STMT_VINFO_LOOP_VINFO (stmt_info) = res;
            }
          for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
            {
              gimple stmt = gsi_stmt (si);
              stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
              loop_vec_info inner_loop_vinfo =
                STMT_VINFO_LOOP_VINFO (stmt_info);
              gcc_assert (loop->inner == LOOP_VINFO_LOOP (inner_loop_vinfo));
              STMT_VINFO_LOOP_VINFO (stmt_info) = res;
            }
        }
      else
        {
          /* bb in current nest.  */
          for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
            {
              gimple phi = gsi_stmt (si);
              gimple_set_uid (phi, 0);
              set_vinfo_for_stmt (phi, new_stmt_vec_info (phi, res, NULL));
            }

          for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
            {
              gimple stmt = gsi_stmt (si);
              gimple_set_uid (stmt, 0);
              set_vinfo_for_stmt (stmt, new_stmt_vec_info (stmt, res, NULL));
            }
        }
    }

  /* CHECKME: We want to visit all BBs before their successors (except for
     latch blocks, for which this assertion wouldn't hold).  In the simple
     case of the loop forms we allow, a dfs order of the BBs would be the same
     as reversed postorder traversal, so we are safe.  */

  free (bbs);
  bbs = XCNEWVEC (basic_block, loop->num_nodes);
  nbbs = dfs_enumerate_from (loop->header, 0, bb_in_loop_p,
                             bbs, loop->num_nodes, loop);
  gcc_assert (nbbs == loop->num_nodes);

  LOOP_VINFO_BBS (res) = bbs;
  LOOP_VINFO_NITERSM1 (res) = NULL;
  LOOP_VINFO_NITERS (res) = NULL;
  LOOP_VINFO_NITERS_UNCHANGED (res) = NULL;
  LOOP_VINFO_COST_MODEL_MIN_ITERS (res) = 0;
  LOOP_VINFO_COST_MODEL_THRESHOLD (res) = 0;
  LOOP_VINFO_VECTORIZABLE_P (res) = 0;
  LOOP_VINFO_PEELING_FOR_ALIGNMENT (res) = 0;
  LOOP_VINFO_VECT_FACTOR (res) = 0;
  LOOP_VINFO_LOOP_NEST (res).create (3);
  LOOP_VINFO_DATAREFS (res).create (10);
  LOOP_VINFO_DDRS (res).create (10 * 10);
  LOOP_VINFO_UNALIGNED_DR (res) = NULL;
  LOOP_VINFO_MAY_MISALIGN_STMTS (res).create (
             PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIGNMENT_CHECKS));
  LOOP_VINFO_MAY_ALIAS_DDRS (res).create (
             PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS));
  LOOP_VINFO_GROUPED_STORES (res).create (10);
  LOOP_VINFO_REDUCTIONS (res).create (10);
  LOOP_VINFO_REDUCTION_CHAINS (res).create (10);
  LOOP_VINFO_SLP_INSTANCES (res).create (10);
  LOOP_VINFO_SLP_UNROLLING_FACTOR (res) = 1;
  LOOP_VINFO_TARGET_COST_DATA (res) = init_cost (loop);
  LOOP_VINFO_PEELING_FOR_GAPS (res) = false;
  LOOP_VINFO_PEELING_FOR_NITER (res) = false;
  LOOP_VINFO_OPERANDS_SWAPPED (res) = false;

  return res;
}


975 | ||
976 | Free LOOP_VINFO struct, as well as all the stmt_vec_info structs of all the | |
977 | stmts in the loop. */ | |
978 | ||
979 | void | |
980 | destroy_loop_vec_info (loop_vec_info loop_vinfo, bool clean_stmts) | |
981 | { | |
982 | struct loop *loop; | |
983 | basic_block *bbs; | |
984 | int nbbs; | |
985 | gimple_stmt_iterator si; | |
986 | int j; | |
f1f41a6c | 987 | vec<slp_instance> slp_instances; |
fb85abff | 988 | slp_instance instance; |
ba69439f | 989 | bool swapped; |
fb85abff | 990 | |
991 | if (!loop_vinfo) | |
992 | return; | |
993 | ||
994 | loop = LOOP_VINFO_LOOP (loop_vinfo); | |
995 | ||
996 | bbs = LOOP_VINFO_BBS (loop_vinfo); | |
033ee56d | 997 | nbbs = clean_stmts ? loop->num_nodes : 0; |
ba69439f | 998 | swapped = LOOP_VINFO_OPERANDS_SWAPPED (loop_vinfo); |
fb85abff | 999 | |
fb85abff | 1000 | for (j = 0; j < nbbs; j++) |
1001 | { | |
1002 | basic_block bb = bbs[j]; | |
1003 | for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si)) | |
1004 | free_stmt_vec_info (gsi_stmt (si)); | |
1005 | ||
1006 | for (si = gsi_start_bb (bb); !gsi_end_p (si); ) | |
1007 | { | |
1008 | gimple stmt = gsi_stmt (si); | |
ba69439f | 1009 | |
1010 | /* We may have broken canonical form by moving a constant | |
1011 | into RHS1 of a commutative op. Fix such occurrences. */ | |
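          /* (Editor's illustration, an assumed example: this turns a stmt
             like  _1 = 3 + x_2  back into the canonical  _1 = x_2 + 3
             before its stmt_vec_info is freed.)  */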
          if (swapped && is_gimple_assign (stmt))
            {
              enum tree_code code = gimple_assign_rhs_code (stmt);

              if ((code == PLUS_EXPR
                   || code == POINTER_PLUS_EXPR
                   || code == MULT_EXPR)
                  && CONSTANT_CLASS_P (gimple_assign_rhs1 (stmt)))
                swap_ssa_operands (stmt,
                                   gimple_assign_rhs1_ptr (stmt),
                                   gimple_assign_rhs2_ptr (stmt));
            }

          /* Free stmt_vec_info.  */
          free_stmt_vec_info (stmt);
          gsi_next (&si);
        }
    }

  free (LOOP_VINFO_BBS (loop_vinfo));
  vect_destroy_datarefs (loop_vinfo, NULL);
  free_dependence_relations (LOOP_VINFO_DDRS (loop_vinfo));
  LOOP_VINFO_LOOP_NEST (loop_vinfo).release ();
  LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).release ();
  LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo).release ();
  slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
  FOR_EACH_VEC_ELT (slp_instances, j, instance)
    vect_free_slp_instance (instance);

  LOOP_VINFO_SLP_INSTANCES (loop_vinfo).release ();
  LOOP_VINFO_GROUPED_STORES (loop_vinfo).release ();
  LOOP_VINFO_REDUCTIONS (loop_vinfo).release ();
  LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo).release ();

  delete LOOP_VINFO_PEELING_HTAB (loop_vinfo);
  LOOP_VINFO_PEELING_HTAB (loop_vinfo) = NULL;

  destroy_cost_data (LOOP_VINFO_TARGET_COST_DATA (loop_vinfo));

  free (loop_vinfo);
  loop->aux = NULL;
}


/* Function vect_analyze_loop_1.

   Apply a set of analyses on LOOP, and create a loop_vec_info struct
   for it.  The different analyses will record information in the
   loop_vec_info struct.  This is a subset of the analyses applied in
   vect_analyze_loop, to be applied on an inner-loop nested in the loop
   that is now considered for (outer-loop) vectorization.  */

static loop_vec_info
vect_analyze_loop_1 (struct loop *loop)
{
  loop_vec_info loop_vinfo;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "===== analyze_loop_nest_1 =====\n");

  /* Check the CFG characteristics of the loop (nesting, entry/exit, etc.).  */

  loop_vinfo = vect_analyze_loop_form (loop);
  if (!loop_vinfo)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "bad inner-loop form.\n");
      return NULL;
    }

  return loop_vinfo;
}


/* Function vect_analyze_loop_form.

   Verify that certain CFG restrictions hold, including:
   - the loop has a pre-header
   - the loop has a single entry and exit
   - the loop exit condition is simple enough, and the number of iterations
     can be analyzed (a countable loop).  */
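
/* Editor's note (illustrative, not from the original sources): a loop such as

        while (p->next)
          p = p->next;

   fails the "countable" requirement because its iteration count cannot be
   expressed as a scalar-evolution expression, whereas

        for (i = 0; i < n; i++)
          a[i] = 0;

   is countable even when n is only known at run time - the count is simply
   the symbolic value n.  */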

loop_vec_info
vect_analyze_loop_form (struct loop *loop)
{
  loop_vec_info loop_vinfo;
  gimple loop_cond;
  tree number_of_iterations = NULL, number_of_iterationsm1 = NULL;
  loop_vec_info inner_loop_vinfo = NULL;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "=== vect_analyze_loop_form ===\n");

  /* Different restrictions apply when we are considering an inner-most loop,
     vs. an outer (nested) loop.
     (FORNOW. May want to relax some of these restrictions in the future).  */

  if (!loop->inner)
    {
      /* Inner-most loop.  We currently require that the number of BBs is
         exactly 2 (the header and latch).  Vectorizable inner-most loops
         look like this:

                        (pre-header)
                           |
                          header <--------+
                           | |            |
                           | +--> latch --+
                           |
                        (exit-bb)  */

      if (loop->num_nodes != 2)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "not vectorized: control flow in loop.\n");
          return NULL;
        }

      if (empty_block_p (loop->header))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "not vectorized: empty loop.\n");
          return NULL;
        }
    }
  else
    {
      struct loop *innerloop = loop->inner;
      edge entryedge;

      /* Nested loop.  We currently require that the loop is doubly-nested,
         contains a single inner loop, and the number of BBs is exactly 5.
         Vectorizable outer-loops look like this:

                        (pre-header)
                           |
                          header <---+
                           |         |
                          inner-loop |
                           |         |
                          tail ------+
                           |
                        (exit-bb)

         The inner-loop has the properties expected of inner-most loops
         as described above.  */

      if ((loop->inner)->inner || (loop->inner)->next)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "not vectorized: multiple nested loops.\n");
          return NULL;
        }

      /* Analyze the inner-loop.  */
      inner_loop_vinfo = vect_analyze_loop_1 (loop->inner);
      if (!inner_loop_vinfo)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "not vectorized: Bad inner loop.\n");
          return NULL;
        }

      if (!expr_invariant_in_loop_p (loop,
                                     LOOP_VINFO_NITERS (inner_loop_vinfo)))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "not vectorized: inner-loop count not"
                             " invariant.\n");
          destroy_loop_vec_info (inner_loop_vinfo, true);
          return NULL;
        }

      if (loop->num_nodes != 5)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "not vectorized: control flow in loop.\n");
          destroy_loop_vec_info (inner_loop_vinfo, true);
          return NULL;
        }

      gcc_assert (EDGE_COUNT (innerloop->header->preds) == 2);
      entryedge = EDGE_PRED (innerloop->header, 0);
      if (EDGE_PRED (innerloop->header, 0)->src == innerloop->latch)
        entryedge = EDGE_PRED (innerloop->header, 1);

      if (entryedge->src != loop->header
          || !single_exit (innerloop)
          || single_exit (innerloop)->dest != EDGE_PRED (loop->latch, 0)->src)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "not vectorized: unsupported outerloop form.\n");
          destroy_loop_vec_info (inner_loop_vinfo, true);
          return NULL;
        }

      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "Considering outer-loop vectorization.\n");
    }

  if (!single_exit (loop)
      || EDGE_COUNT (loop->header->preds) != 2)
    {
      if (dump_enabled_p ())
        {
          if (!single_exit (loop))
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "not vectorized: multiple exits.\n");
          else if (EDGE_COUNT (loop->header->preds) != 2)
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "not vectorized: too many incoming edges.\n");
        }
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, true);
      return NULL;
    }

  /* We assume that the loop exit condition is at the end of the loop, i.e.,
     that the loop is represented as a do-while (with a proper if-guard
     before the loop if needed), where the loop header contains all the
     executable statements, and the latch is empty.  */
  if (!empty_block_p (loop->latch)
      || !gimple_seq_empty_p (phi_nodes (loop->latch)))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: latch block not empty.\n");
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, true);
      return NULL;
    }

  /* Make sure there exists a single-predecessor exit bb:  */
  if (!single_pred_p (single_exit (loop)->dest))
    {
      edge e = single_exit (loop);
      if (!(e->flags & EDGE_ABNORMAL))
        {
          split_loop_exit_edge (e);
          if (dump_enabled_p ())
            dump_printf (MSG_NOTE, "split exit edge.\n");
        }
      else
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "not vectorized: abnormal loop exit edge.\n");
          if (inner_loop_vinfo)
            destroy_loop_vec_info (inner_loop_vinfo, true);
          return NULL;
        }
    }

  loop_cond = vect_get_loop_niters (loop, &number_of_iterations,
                                    &number_of_iterationsm1);
  if (!loop_cond)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: complicated exit condition.\n");
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, true);
      return NULL;
    }

  if (!number_of_iterations
      || chrec_contains_undetermined (number_of_iterations))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: number of iterations cannot be "
                         "computed.\n");
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, true);
      return NULL;
    }

  if (integer_zerop (number_of_iterations))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: number of iterations = 0.\n");
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, true);
      return NULL;
    }

  loop_vinfo = new_loop_vec_info (loop);
  LOOP_VINFO_NITERSM1 (loop_vinfo) = number_of_iterationsm1;
  LOOP_VINFO_NITERS (loop_vinfo) = number_of_iterations;
  LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo) = number_of_iterations;

  if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
    {
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_NOTE, vect_location,
                           "Symbolic number of iterations is ");
          dump_generic_expr (MSG_NOTE, TDF_DETAILS, number_of_iterations);
          dump_printf (MSG_NOTE, "\n");
        }
    }

  STMT_VINFO_TYPE (vinfo_for_stmt (loop_cond)) = loop_exit_ctrl_vec_info_type;

  /* CHECKME: May want to keep it around in the future.  */
  if (inner_loop_vinfo)
    destroy_loop_vec_info (inner_loop_vinfo, false);

  gcc_assert (!loop->aux);
  loop->aux = loop_vinfo;
  return loop_vinfo;
}


/* Function vect_analyze_loop_operations.

   Scan the loop stmts and make sure they are all vectorizable.  */
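
/* Editor's note (illustrative, not from the original sources): "vectorizable"
   here means that each relevant statement passes the checks of its
   corresponding vectorizable_* routine - e.g. vectorizable_reduction and
   vectorizable_induction for loop-header PHIs, and the per-statement checks
   reached through vect_analyze_stmt for everything else.  */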
1341 | ||
1342 | static bool | |
bc937a44 | 1343 | vect_analyze_loop_operations (loop_vec_info loop_vinfo, bool slp) |
f083cd24 | 1344 | { |
1345 | struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); | |
1346 | basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo); | |
1347 | int nbbs = loop->num_nodes; | |
1348 | gimple_stmt_iterator si; | |
1349 | unsigned int vectorization_factor = 0; | |
1350 | int i; | |
1351 | gimple phi; | |
1352 | stmt_vec_info stmt_info; | |
1353 | bool need_to_vectorize = false; | |
1354 | int min_profitable_iters; | |
1355 | int min_scalar_loop_bound; | |
1356 | unsigned int th; | |
1357 | bool only_slp_in_loop = true, ok; | |
5115d20b | 1358 | HOST_WIDE_INT max_niter; |
5938768b | 1359 | HOST_WIDE_INT estimated_niter; |
1360 | int min_profitable_estimate; | |
f083cd24 | 1361 | |
6d8fb6cf | 1362 | if (dump_enabled_p ()) |
7bd765d4 | 1363 | dump_printf_loc (MSG_NOTE, vect_location, |
78bb46f5 | 1364 | "=== vect_analyze_loop_operations ===\n"); |
f083cd24 | 1365 | |
1366 | gcc_assert (LOOP_VINFO_VECT_FACTOR (loop_vinfo)); | |
1367 | vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo); | |
bc937a44 | 1368 | if (slp) |
1369 | { | |
1370 | /* If all the stmts in the loop can be SLPed, we perform only SLP, and | |
1371 | vectorization factor of the loop is the unrolling factor required by | |
1372 | the SLP instances. If that unrolling factor is 1, we say, that we | |
1373 | perform pure SLP on loop - cross iteration parallelism is not | |
1374 | exploited. */ | |
1375 | for (i = 0; i < nbbs; i++) | |
1376 | { | |
1377 | basic_block bb = bbs[i]; | |
1378 | for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si)) | |
1379 | { | |
1380 | gimple stmt = gsi_stmt (si); | |
1381 | stmt_vec_info stmt_info = vinfo_for_stmt (stmt); | |
1382 | gcc_assert (stmt_info); | |
1383 | if ((STMT_VINFO_RELEVANT_P (stmt_info) | |
1384 | || VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_info))) | |
1385 | && !PURE_SLP_STMT (stmt_info)) | |
1386 | /* STMT needs both SLP and loop-based vectorization. */ | |
1387 | only_slp_in_loop = false; | |
1388 | } | |
1389 | } | |
1390 | ||
1391 | if (only_slp_in_loop) | |
1392 | vectorization_factor = LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo); | |
1393 | else | |
1394 | vectorization_factor = least_common_multiple (vectorization_factor, | |
1395 | LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo)); | |
1396 | ||
1397 | LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor; | |
6d8fb6cf | 1398 | if (dump_enabled_p ()) |
7bd765d4 | 1399 | dump_printf_loc (MSG_NOTE, vect_location, |
78bb46f5 | 1400 | "Updating vectorization factor to %d\n", |
7bd765d4 | 1401 | vectorization_factor); |
bc937a44 | 1402 | } |
f083cd24 | 1403 | |
1404 | for (i = 0; i < nbbs; i++) | |
1405 | { | |
1406 | basic_block bb = bbs[i]; | |
1407 | ||
1408 | for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si)) | |
1409 | { | |
1410 | phi = gsi_stmt (si); | |
1411 | ok = true; | |
1412 | ||
1413 | stmt_info = vinfo_for_stmt (phi); | |
6d8fb6cf | 1414 | if (dump_enabled_p ()) |
f083cd24 | 1415 | { |
7bd765d4 | 1416 | dump_printf_loc (MSG_NOTE, vect_location, "examining phi: "); |
1417 | dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0); | |
78bb46f5 | 1418 | dump_printf (MSG_NOTE, "\n"); |
f083cd24 | 1419 | } |
1420 | ||
8bdf488e | 1421 | /* Inner-loop loop-closed exit phi in outer-loop vectorization |
1422 | (i.e., a phi in the tail of the outer-loop). */ | |
f083cd24 | 1423 | if (! is_loop_header_bb_p (bb)) |
1424 | { | |
8bdf488e | 1425 | /* FORNOW: we currently don't support the case that these phis |
7aa0d350 | 1426 | are not used in the outer loop (unless it is a double reduction, |
48e1416a | 1427 | i.e., this phi is vect_reduction_def), because this case |
7aa0d350 | 1428 | would require us to actually do something here. */ |
1429 | if ((!STMT_VINFO_RELEVANT_P (stmt_info) | |
1430 | || STMT_VINFO_LIVE_P (stmt_info)) | |
48e1416a | 1431 | && STMT_VINFO_DEF_TYPE (stmt_info) |
7aa0d350 | 1432 | != vect_double_reduction_def) |
f083cd24 | 1433 | { |
6d8fb6cf | 1434 | if (dump_enabled_p ()) |
78bb46f5 | 1435 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
7bd765d4 | 1436 | "Unsupported loop-closed phi in " |
78bb46f5 | 1437 | "outer-loop.\n"); |
f083cd24 | 1438 | return false; |
1439 | } | |
8bdf488e | 1440 | |
1441 | /* If PHI is used in the outer loop, we check that its operand | |
1442 | is defined in the inner loop. */ | |
1443 | if (STMT_VINFO_RELEVANT_P (stmt_info)) | |
1444 | { | |
1445 | tree phi_op; | |
1446 | gimple op_def_stmt; | |
1447 | ||
1448 | if (gimple_phi_num_args (phi) != 1) | |
1449 | return false; | |
1450 | ||
1451 | phi_op = PHI_ARG_DEF (phi, 0); | |
1452 | if (TREE_CODE (phi_op) != SSA_NAME) | |
1453 | return false; | |
1454 | ||
1455 | op_def_stmt = SSA_NAME_DEF_STMT (phi_op); | |
ea902f25 | 1456 | if (gimple_nop_p (op_def_stmt) |
791e6391 | 1457 | || !flow_bb_inside_loop_p (loop, gimple_bb (op_def_stmt)) |
1458 | || !vinfo_for_stmt (op_def_stmt)) | |
8bdf488e | 1459 | return false; |
1460 | ||
1461 | if (STMT_VINFO_RELEVANT (vinfo_for_stmt (op_def_stmt)) | |
1462 | != vect_used_in_outer | |
1463 | && STMT_VINFO_RELEVANT (vinfo_for_stmt (op_def_stmt)) | |
1464 | != vect_used_in_outer_by_reduction) | |
1465 | return false; | |
1466 | } | |
1467 | ||
f083cd24 | 1468 | continue; |
1469 | } | |
1470 | ||
1471 | gcc_assert (stmt_info); | |
1472 | ||
1473 | if (STMT_VINFO_LIVE_P (stmt_info)) | |
1474 | { | |
1475 | /* FORNOW: not yet supported. */ | |
6d8fb6cf | 1476 | if (dump_enabled_p ()) |
7bd765d4 | 1477 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
78bb46f5 | 1478 | "not vectorized: value used after loop.\n"); |
f083cd24 | 1479 | return false; |
1480 | } | |
1481 | ||
1482 | if (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_scope | |
1483 | && STMT_VINFO_DEF_TYPE (stmt_info) != vect_induction_def) | |
1484 | { | |
1485 | /* A scalar-dependence cycle that we don't support. */ | |
6d8fb6cf | 1486 | if (dump_enabled_p ()) |
78bb46f5 | 1487 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
1488 | "not vectorized: scalar dependence cycle.\n"); | |
f083cd24 | 1489 | return false; |
1490 | } | |
1491 | ||
1492 | if (STMT_VINFO_RELEVANT_P (stmt_info)) | |
1493 | { | |
1494 | need_to_vectorize = true; | |
1495 | if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def) | |
1496 | ok = vectorizable_induction (phi, NULL, NULL); | |
1497 | } | |
1498 | ||
1499 | if (!ok) | |
1500 | { | |
6d8fb6cf | 1501 | if (dump_enabled_p ()) |
f083cd24 | 1502 | { |
78bb46f5 | 1503 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
7bd765d4 | 1504 | "not vectorized: relevant phi not " |
1505 | "supported: "); | |
1506 | dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, phi, 0); | |
78bb46f5 | 1507 | dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); |
f083cd24 | 1508 | } |
4db2b577 | 1509 | return false; |
f083cd24 | 1510 | } |
1511 | } | |
1512 | ||
1513 | for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si)) | |
1514 | { | |
1515 | gimple stmt = gsi_stmt (si); | |
8911f4de | 1516 | if (!gimple_clobber_p (stmt) |
1517 | && !vect_analyze_stmt (stmt, &need_to_vectorize, NULL)) | |
f083cd24 | 1518 | return false; |
48e1416a | 1519 | } |
f083cd24 | 1520 | } /* bbs */ |
1521 | ||
1522 | /* All operations in the loop are either irrelevant (they deal with loop |
1523 | control, or are dead), or only used outside the loop and can be moved |
1524 | out of the loop (e.g. invariants, inductions). The loop can be | |
1525 | optimized away by scalar optimizations. We're better off not | |
1526 | touching this loop. */ | |
1527 | if (!need_to_vectorize) | |
1528 | { | |
6d8fb6cf | 1529 | if (dump_enabled_p ()) |
7bd765d4 | 1530 | dump_printf_loc (MSG_NOTE, vect_location, |
78bb46f5 | 1531 | "All the computation can be taken out of the loop.\n"); |
6d8fb6cf | 1532 | if (dump_enabled_p ()) |
78bb46f5 | 1533 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
7bd765d4 | 1534 | "not vectorized: redundant loop. no profit to " |
78bb46f5 | 1535 | "vectorize.\n"); |
f083cd24 | 1536 | return false; |
1537 | } | |
1538 | ||
6d8fb6cf | 1539 | if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo) && dump_enabled_p ()) |
7bd765d4 | 1540 | dump_printf_loc (MSG_NOTE, vect_location, |
1541 | "vectorization_factor = %d, niters = " | |
78bb46f5 | 1542 | HOST_WIDE_INT_PRINT_DEC "\n", vectorization_factor, |
7bd765d4 | 1543 | LOOP_VINFO_INT_NITERS (loop_vinfo)); |
f083cd24 | 1544 | |
5115d20b | 1545 | if ((LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo) |
1546 | && (LOOP_VINFO_INT_NITERS (loop_vinfo) < vectorization_factor)) | |
1547 | || ((max_niter = max_stmt_executions_int (loop)) != -1 | |
9a5ede52 | 1548 | && (unsigned HOST_WIDE_INT) max_niter < vectorization_factor)) |
f083cd24 | 1549 | { |
6d8fb6cf | 1550 | if (dump_enabled_p ()) |
7bd765d4 | 1551 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
78bb46f5 | 1552 | "not vectorized: iteration count too small.\n"); |
6d8fb6cf | 1553 | if (dump_enabled_p ()) |
7bd765d4 | 1554 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
1555 | "not vectorized: iteration count smaller than " | |
78bb46f5 | 1556 | "vectorization factor.\n"); |
f083cd24 | 1557 | return false; |
1558 | } | |
1559 | ||
282bf14c | 1560 | /* Analyze cost. Decide if worth while to vectorize. */ |
f083cd24 | 1561 | |
1562 | /* Once VF is set, SLP costs should be updated since the number of created | |
1563 | vector stmts depends on VF. */ | |
1564 | vect_update_slp_costs_according_to_vf (loop_vinfo); | |
1565 | ||
5938768b | 1566 | vect_estimate_min_profitable_iters (loop_vinfo, &min_profitable_iters, |
1567 | &min_profitable_estimate); | |
f083cd24 | 1568 | LOOP_VINFO_COST_MODEL_MIN_ITERS (loop_vinfo) = min_profitable_iters; |
1569 | ||
1570 | if (min_profitable_iters < 0) | |
1571 | { | |
6d8fb6cf | 1572 | if (dump_enabled_p ()) |
7bd765d4 | 1573 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
78bb46f5 | 1574 | "not vectorized: vectorization not profitable.\n"); |
6d8fb6cf | 1575 | if (dump_enabled_p ()) |
78bb46f5 | 1576 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
7bd765d4 | 1577 | "not vectorized: vector version will never be " |
78bb46f5 | 1578 | "profitable.\n"); |
f083cd24 | 1579 | return false; |
1580 | } | |
1581 | ||
1582 | min_scalar_loop_bound = ((PARAM_VALUE (PARAM_MIN_VECT_LOOP_BOUND) | |
1583 | * vectorization_factor) - 1); | |
1584 | ||
5938768b | 1585 | |
f083cd24 | 1586 | /* Use the cost model only if it is more conservative than user specified |
1587 | threshold. */ | |
1588 | ||
1589 | th = (unsigned) min_scalar_loop_bound; | |
1590 | if (min_profitable_iters | |
1591 | && (!min_scalar_loop_bound | |
1592 | || min_profitable_iters > min_scalar_loop_bound)) | |
1593 | th = (unsigned) min_profitable_iters; | |
1594 | ||
004a94a5 | 1595 | LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo) = th; |
1596 | ||
f083cd24 | 1597 | if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo) |
1598 | && LOOP_VINFO_INT_NITERS (loop_vinfo) <= th) | |
1599 | { | |
6d8fb6cf | 1600 | if (dump_enabled_p ()) |
7bd765d4 | 1601 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
78bb46f5 | 1602 | "not vectorized: vectorization not profitable.\n"); |
6d8fb6cf | 1603 | if (dump_enabled_p ()) |
7bd765d4 | 1604 | dump_printf_loc (MSG_NOTE, vect_location, |
1605 | "not vectorized: iteration count smaller than user " | |
1606 | "specified loop bound parameter or minimum profitable " | |
78bb46f5 | 1607 | "iterations (whichever is more conservative).\n"); |
f083cd24 | 1608 | return false; |
1609 | } | |
1610 | ||
5938768b | 1611 | if ((estimated_niter = estimated_stmt_executions_int (loop)) != -1 |
1612 | && ((unsigned HOST_WIDE_INT) estimated_niter | |
1613 | <= MAX (th, (unsigned)min_profitable_estimate))) | |
1614 | { | |
6d8fb6cf | 1615 | if (dump_enabled_p ()) |
5938768b | 1616 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
1617 | "not vectorized: estimated iteration count too " | |
78bb46f5 | 1618 | "small.\n"); |
6d8fb6cf | 1619 | if (dump_enabled_p ()) |
5938768b | 1620 | dump_printf_loc (MSG_NOTE, vect_location, |
1621 | "not vectorized: estimated iteration count smaller " | |
1622 | "than specified loop bound parameter or minimum " | |
1623 | "profitable iterations (whichever is more " | |
78bb46f5 | 1624 | "conservative).\n"); |
5938768b | 1625 | return false; |
1626 | } | |
1627 | ||
f083cd24 | 1628 | return true; |
1629 | } | |
1630 | ||
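The profitability threshold computed above keeps whichever of the user-tunable --param min-vect-loop-bound limit and the cost model's minimum profitable iteration count is more conservative. A standalone sketch of that selection; the parameter value and the cost-model result are illustrative assumptions, not real compiler output:

  #include <stdio.h>

  int
  main (void)
  {
    unsigned vectorization_factor = 4;   /* assumed VF                */
    int param_min_vect_loop_bound = 2;   /* assumed --param value     */
    int min_profitable_iters = 11;       /* assumed cost-model result */

    int min_scalar_loop_bound
      = param_min_vect_loop_bound * vectorization_factor - 1;   /* 7 */

    /* Use the cost model only if it is more conservative than the
       user-specified threshold, mirroring the selection above.  */
    unsigned th = (unsigned) min_scalar_loop_bound;
    if (min_profitable_iters
        && (!min_scalar_loop_bound
            || min_profitable_iters > min_scalar_loop_bound))
      th = (unsigned) min_profitable_iters;

    printf ("threshold = %u iterations\n", th);   /* prints 11 */
    return 0;
  }

With a min_profitable_iters of 5 instead, the user bound of 7 would win, and only loops with 7 or fewer known iterations would be rejected.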
1631 | ||
c4740c5d | 1632 | /* Function vect_analyze_loop_2. |
fb85abff | 1633 | |
1634 | Apply a set of analyses on LOOP, and create a loop_vec_info struct | |
282bf14c | 1635 | for it. The different analyses will record information in the |
fb85abff | 1636 | loop_vec_info struct. */ |
c4740c5d | 1637 | static bool |
1638 | vect_analyze_loop_2 (loop_vec_info loop_vinfo) | |
fb85abff | 1639 | { |
80508571 | 1640 | bool ok, slp = false; |
91a74fc6 | 1641 | int max_vf = MAX_VECTORIZATION_FACTOR; |
1642 | int min_vf = 2; | |
004a94a5 | 1643 | unsigned int th; |
ca91d3f8 | 1644 | unsigned int n_stmts = 0; |
fb85abff | 1645 | |
fb85abff | 1646 | /* Find all data references in the loop (which correspond to vdefs/vuses) |
91a74fc6 | 1647 | and analyze their evolution in the loop. Also adjust the minimal |
1648 | vectorization factor according to the loads and stores. | |
fb85abff | 1649 | |
1650 | FORNOW: Handle only simple array references whose |
1651 | alignment can be forced, and aligned pointer-references. */ |
1652 | ||
ca91d3f8 | 1653 | ok = vect_analyze_data_refs (loop_vinfo, NULL, &min_vf, &n_stmts); |
fb85abff | 1654 | if (!ok) |
1655 | { | |
6d8fb6cf | 1656 | if (dump_enabled_p ()) |
7bd765d4 | 1657 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
78bb46f5 | 1658 | "bad data references.\n"); |
c4740c5d | 1659 | return false; |
fb85abff | 1660 | } |
1661 | ||
68f15e9d | 1662 | /* Analyze the access patterns of the data-refs in the loop (consecutive, |
1663 | complex, etc.). FORNOW: Only handle consecutive access pattern. */ | |
1664 | ||
1665 | ok = vect_analyze_data_ref_accesses (loop_vinfo, NULL); | |
1666 | if (!ok) | |
1667 | { | |
1668 | if (dump_enabled_p ()) | |
1669 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
78bb46f5 | 1670 | "bad data access.\n"); |
68f15e9d | 1671 | return false; |
1672 | } | |
1673 | ||
fb85abff | 1674 | /* Classify all cross-iteration scalar data-flow cycles. |
1675 | Cross-iteration cycles caused by virtual phis are analyzed separately. */ | |
1676 | ||
1677 | vect_analyze_scalar_cycles (loop_vinfo); | |
1678 | ||
4c0c783a | 1679 | vect_pattern_recog (loop_vinfo, NULL); |
fb85abff | 1680 | |
1681 | /* Data-flow analysis to detect stmts that do not need to be vectorized. */ | |
1682 | ||
1683 | ok = vect_mark_stmts_to_be_vectorized (loop_vinfo); | |
1684 | if (!ok) | |
1685 | { | |
6d8fb6cf | 1686 | if (dump_enabled_p ()) |
7bd765d4 | 1687 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
78bb46f5 | 1688 | "unexpected pattern.\n"); |
c4740c5d | 1689 | return false; |
fb85abff | 1690 | } |
1691 | ||
91a74fc6 | 1692 | /* Analyze data dependences between the data-refs in the loop |
1693 | and adjust the maximum vectorization factor according to | |
1694 | the dependences. | |
1695 | FORNOW: fail at the first data dependence that we encounter. */ | |
fb85abff | 1696 | |
68f15e9d | 1697 | ok = vect_analyze_data_ref_dependences (loop_vinfo, &max_vf); |
91a74fc6 | 1698 | if (!ok |
1699 | || max_vf < min_vf) | |
fb85abff | 1700 | { |
6d8fb6cf | 1701 | if (dump_enabled_p ()) |
7bd765d4 | 1702 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
78bb46f5 | 1703 | "bad data dependence.\n"); |
c4740c5d | 1704 | return false; |
fb85abff | 1705 | } |
1706 | ||
1707 | ok = vect_determine_vectorization_factor (loop_vinfo); | |
1708 | if (!ok) | |
1709 | { | |
6d8fb6cf | 1710 | if (dump_enabled_p ()) |
7bd765d4 | 1711 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
78bb46f5 | 1712 | "can't determine vectorization factor.\n"); |
c4740c5d | 1713 | return false; |
fb85abff | 1714 | } |
91a74fc6 | 1715 | if (max_vf < LOOP_VINFO_VECT_FACTOR (loop_vinfo)) |
1716 | { | |
6d8fb6cf | 1717 | if (dump_enabled_p ()) |
7bd765d4 | 1718 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
78bb46f5 | 1719 | "bad data dependence.\n"); |
c4740c5d | 1720 | return false; |
91a74fc6 | 1721 | } |
fb85abff | 1722 | |
91a74fc6 | 1723 | /* Analyze the alignment of the data-refs in the loop. |
1724 | Fail if a data reference is found that cannot be vectorized. */ | |
fb85abff | 1725 | |
91a74fc6 | 1726 | ok = vect_analyze_data_refs_alignment (loop_vinfo, NULL); |
fb85abff | 1727 | if (!ok) |
1728 | { | |
6d8fb6cf | 1729 | if (dump_enabled_p ()) |
7bd765d4 | 1730 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
78bb46f5 | 1731 | "bad data alignment.\n"); |
c4740c5d | 1732 | return false; |
fb85abff | 1733 | } |
1734 | ||
fb85abff | 1735 | /* Prune the list of ddrs to be tested at run-time by versioning for alias. |
1736 | It is important to call pruning after vect_analyze_data_ref_accesses, | |
1737 | since we use grouping information gathered by interleaving analysis. */ | |
1738 | ok = vect_prune_runtime_alias_test_list (loop_vinfo); | |
1739 | if (!ok) | |
1740 | { | |
6d8fb6cf | 1741 | if (dump_enabled_p ()) |
7bd765d4 | 1742 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
1c7f4889 | 1743 | "number of versioning for alias " |
1744 | "run-time tests exceeds %d " | |
1745 | "(--param vect-max-version-for-alias-checks)\n", | |
1746 | PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS)); | |
c4740c5d | 1747 | return false; |
fb85abff | 1748 | } |
1749 | ||
fb85abff | 1750 | /* This pass will decide on using loop versioning and/or loop peeling in |
1751 | order to enhance the alignment of data references in the loop. */ | |
1752 | ||
1753 | ok = vect_enhance_data_refs_alignment (loop_vinfo); | |
1754 | if (!ok) | |
1755 | { | |
6d8fb6cf | 1756 | if (dump_enabled_p ()) |
7bd765d4 | 1757 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
78bb46f5 | 1758 | "bad data alignment.\n"); |
c4740c5d | 1759 | return false; |
fb85abff | 1760 | } |
1761 | ||
0822b158 | 1762 | /* Check the SLP opportunities in the loop, analyze and build SLP trees. */ |
ca91d3f8 | 1763 | ok = vect_analyze_slp (loop_vinfo, NULL, n_stmts); |
0822b158 | 1764 | if (ok) |
1765 | { | |
1766 | /* Decide which possible SLP instances to SLP. */ | |
bc937a44 | 1767 | slp = vect_make_slp_decision (loop_vinfo); |
0822b158 | 1768 | |
1769 | /* Find stmts that need to be both vectorized and SLPed. */ | |
1770 | vect_detect_hybrid_slp (loop_vinfo); | |
1771 | } | |
39a5d6b1 | 1772 | else |
1773 | return false; | |
0822b158 | 1774 | |
fb85abff | 1775 | /* Scan all the operations in the loop and make sure they are |
1776 | vectorizable. */ | |
1777 | ||
bc937a44 | 1778 | ok = vect_analyze_loop_operations (loop_vinfo, slp); |
fb85abff | 1779 | if (!ok) |
1780 | { | |
6d8fb6cf | 1781 | if (dump_enabled_p ()) |
7bd765d4 | 1782 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
78bb46f5 | 1783 | "bad operation or unsupported loop bound.\n"); |
c4740c5d | 1784 | return false; |
1785 | } | |
1786 | ||
313a5120 | 1787 | /* Decide whether we need to create an epilogue loop to handle |
1788 | remaining scalar iterations. */ | |
004a94a5 | 1789 | th = ((LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo) + 1) |
1790 | / LOOP_VINFO_VECT_FACTOR (loop_vinfo)) | |
1791 | * LOOP_VINFO_VECT_FACTOR (loop_vinfo); | |
1792 | ||
313a5120 | 1793 | if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo) |
1794 | && LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) > 0) | |
1795 | { | |
1796 | if (ctz_hwi (LOOP_VINFO_INT_NITERS (loop_vinfo) | |
1797 | - LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo)) | |
1798 | < exact_log2 (LOOP_VINFO_VECT_FACTOR (loop_vinfo))) | |
1799 | LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = true; | |
1800 | } | |
1801 | else if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) | |
1802 | || (tree_ctz (LOOP_VINFO_NITERS (loop_vinfo)) | |
004a94a5 | 1803 | < (unsigned)exact_log2 (LOOP_VINFO_VECT_FACTOR (loop_vinfo)) |
1804 | /* In case of versioning, check if the maximum number of | |
1805 | iterations is greater than th. If they are identical, | |
1806 | the epilogue is unnecessary. */ | |
1807 | && ((!LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo) | |
1808 | && !LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)) | |
1809 | || (unsigned HOST_WIDE_INT)max_stmt_executions_int | |
1810 | (LOOP_VINFO_LOOP (loop_vinfo)) > th))) | |
313a5120 | 1811 | LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = true; |
1812 | ||
1813 | /* If an epilogue loop is required make sure we can create one. */ | |
1814 | if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) | |
1815 | || LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo)) | |
1816 | { | |
1817 | if (dump_enabled_p ()) | |
1818 | dump_printf_loc (MSG_NOTE, vect_location, "epilog loop required\n"); | |
1819 | if (!vect_can_advance_ivs_p (loop_vinfo) | |
1820 | || !slpeel_can_duplicate_loop_p (LOOP_VINFO_LOOP (loop_vinfo), | |
1821 | single_exit (LOOP_VINFO_LOOP | |
1822 | (loop_vinfo)))) | |
1823 | { | |
1824 | if (dump_enabled_p ()) | |
1825 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
1826 | "not vectorized: can't create required " | |
1827 | "epilog loop\n"); | |
1828 | return false; | |
1829 | } | |
1830 | } | |
1831 | ||
c4740c5d | 1832 | return true; |
1833 | } | |
1834 | ||
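Whether the peeling-for-niter epilogue decided on above is needed comes down to whether the vector loop can consume every scalar iteration. A sketch with illustrative numbers:

  #include <stdio.h>

  int
  main (void)
  {
    int niters = 103;   /* assumed scalar iteration count */
    int vf = 8;         /* assumed vectorization factor   */

    int vector_iters = niters / vf;      /* full vector iterations      */
    int epilogue_iters = niters % vf;    /* left-over scalar iterations */

    printf ("vector loop covers %d iterations, epilogue handles %d\n",
            vector_iters * vf, epilogue_iters);   /* 96 and 7 */
    return 0;
  }

When niters is a multiple of VF (and no alignment peeling or versioning is involved), the remainder is zero and no epilogue loop is required.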
1835 | /* Function vect_analyze_loop. | |
1836 | ||
1837 | Apply a set of analyses on LOOP, and create a loop_vec_info struct | |
1838 | for it. The different analyses will record information in the | |
1839 | loop_vec_info struct. */ | |
1840 | loop_vec_info | |
1841 | vect_analyze_loop (struct loop *loop) | |
1842 | { | |
1843 | loop_vec_info loop_vinfo; | |
1844 | unsigned int vector_sizes; | |
1845 | ||
1846 | /* Autodetect first vector size we try. */ | |
1847 | current_vector_size = 0; | |
1848 | vector_sizes = targetm.vectorize.autovectorize_vector_sizes (); | |
1849 | ||
6d8fb6cf | 1850 | if (dump_enabled_p ()) |
7bd765d4 | 1851 | dump_printf_loc (MSG_NOTE, vect_location, |
78bb46f5 | 1852 | "===== analyze_loop_nest =====\n"); |
c4740c5d | 1853 | |
1854 | if (loop_outer (loop) | |
1855 | && loop_vec_info_for_loop (loop_outer (loop)) | |
1856 | && LOOP_VINFO_VECTORIZABLE_P (loop_vec_info_for_loop (loop_outer (loop)))) | |
1857 | { | |
6d8fb6cf | 1858 | if (dump_enabled_p ()) |
7bd765d4 | 1859 | dump_printf_loc (MSG_NOTE, vect_location, |
78bb46f5 | 1860 | "outer-loop already vectorized.\n"); |
fb85abff | 1861 | return NULL; |
1862 | } | |
1863 | ||
c4740c5d | 1864 | while (1) |
1865 | { | |
1866 | /* Check the CFG characteristics of the loop (nesting, entry/exit). */ | |
1867 | loop_vinfo = vect_analyze_loop_form (loop); | |
1868 | if (!loop_vinfo) | |
1869 | { | |
6d8fb6cf | 1870 | if (dump_enabled_p ()) |
7bd765d4 | 1871 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
78bb46f5 | 1872 | "bad loop form.\n"); |
c4740c5d | 1873 | return NULL; |
1874 | } | |
fb85abff | 1875 | |
c4740c5d | 1876 | if (vect_analyze_loop_2 (loop_vinfo)) |
1877 | { | |
1878 | LOOP_VINFO_VECTORIZABLE_P (loop_vinfo) = 1; | |
1879 | ||
1880 | return loop_vinfo; | |
1881 | } | |
1882 | ||
1883 | destroy_loop_vec_info (loop_vinfo, true); | |
1884 | ||
1885 | vector_sizes &= ~current_vector_size; | |
1886 | if (vector_sizes == 0 | |
1887 | || current_vector_size == 0) | |
1888 | return NULL; | |
1889 | ||
1890 | /* Try the next biggest vector size. */ | |
1891 | current_vector_size = 1 << floor_log2 (vector_sizes); | |
6d8fb6cf | 1892 | if (dump_enabled_p ()) |
7bd765d4 | 1893 | dump_printf_loc (MSG_NOTE, vect_location, |
1894 | "***** Re-trying analysis with " | |
1895 | "vector size %d\n", current_vector_size); | |
c4740c5d | 1896 | } |
fb85abff | 1897 | } |
1898 | ||
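The retry loop above drops the vector size that just failed from the target's size mask and picks the largest remaining one. A self-contained sketch of that selection; the mask value and the helper are stand-ins, not the GCC internals:

  #include <stdio.h>

  /* Position of the highest set bit; a stand-in for floor_log2.  */
  static int
  highest_bit (unsigned x)
  {
    int l = -1;
    while (x)
      {
        x >>= 1;
        l++;
      }
    return l;
  }

  int
  main (void)
  {
    /* Assume the target offers 32-byte and 16-byte vectors: mask 0x30.  */
    unsigned vector_sizes = 0x30;
    unsigned current_vector_size = 32;   /* first attempt, assumed failed */

    vector_sizes &= ~current_vector_size;   /* drop the failed size */
    if (vector_sizes)
      {
        current_vector_size = 1u << highest_bit (vector_sizes);
        printf ("re-trying with vector size %u\n", current_vector_size);  /* 16 */
      }
    return 0;
  }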
1899 | ||
1900 | /* Function reduction_code_for_scalar_code | |
1901 | ||
1902 | Input: | |
1903 | CODE - tree_code of a reduction operation. |
1904 | ||
1905 | Output: | |
1906 | REDUC_CODE - the corresponding tree-code to be used to reduce the | |
7ba68b18 | 1907 | vector of partial results into a single scalar result, or ERROR_MARK |
1908 | if the operation is a supported reduction operation, but does not have | |
1909 | such a tree-code. | |
fb85abff | 1910 | |
7aa0d350 | 1911 | Return FALSE if CODE currently cannot be vectorized as reduction. */ |
fb85abff | 1912 | |
1913 | static bool | |
1914 | reduction_code_for_scalar_code (enum tree_code code, | |
1915 | enum tree_code *reduc_code) | |
1916 | { | |
1917 | switch (code) | |
7aa0d350 | 1918 | { |
1919 | case MAX_EXPR: | |
1920 | *reduc_code = REDUC_MAX_EXPR; | |
1921 | return true; | |
fb85abff | 1922 | |
7aa0d350 | 1923 | case MIN_EXPR: |
1924 | *reduc_code = REDUC_MIN_EXPR; | |
1925 | return true; | |
fb85abff | 1926 | |
7aa0d350 | 1927 | case PLUS_EXPR: |
1928 | *reduc_code = REDUC_PLUS_EXPR; | |
1929 | return true; | |
fb85abff | 1930 | |
7aa0d350 | 1931 | case MULT_EXPR: |
1932 | case MINUS_EXPR: | |
1933 | case BIT_IOR_EXPR: | |
1934 | case BIT_XOR_EXPR: | |
1935 | case BIT_AND_EXPR: | |
1936 | *reduc_code = ERROR_MARK; | |
1937 | return true; | |
1938 | ||
1939 | default: | |
1940 | return false; | |
1941 | } | |
fb85abff | 1942 | } |
1943 | ||
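The REDUC_*_EXPR codes returned above name the epilogue step that folds the lanes of the vector accumulator into one scalar. Written out in scalar C for REDUC_PLUS_EXPR (lane values are illustrative):

  #include <stdio.h>

  int
  main (void)
  {
    /* Partial sums left in the four lanes of a hypothetical V4SI
       accumulator after the vector loop.  */
    int lanes[4] = { 10, 20, 30, 40 };

    int result = 0;
    for (int i = 0; i < 4; i++)
      result += lanes[i];        /* what REDUC_PLUS_EXPR computes */

    printf ("reduced result = %d\n", result);   /* 100 */
    return 0;
  }

For the codes mapped to ERROR_MARK (MULT_EXPR, MINUS_EXPR, the bitwise ops) the reduction is still vectorizable, but the epilogue has to be open-coded rather than expressed with a single reduction tree code.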
1944 | ||
282bf14c | 1945 | /* Error reporting helper for vect_is_simple_reduction below. GIMPLE statement |
fb85abff | 1946 | STMT is printed with a message MSG. */ |
1947 | ||
1948 | static void | |
7bd765d4 | 1949 | report_vect_op (int msg_type, gimple stmt, const char *msg) |
fb85abff | 1950 | { |
7bd765d4 | 1951 | dump_printf_loc (msg_type, vect_location, "%s", msg); |
1952 | dump_gimple_stmt (msg_type, TDF_SLIM, stmt, 0); | |
78bb46f5 | 1953 | dump_printf (msg_type, "\n"); |
fb85abff | 1954 | } |
1955 | ||
1956 | ||
39a5d6b1 | 1957 | /* Detect SLP reduction of the form: |
1958 | ||
1959 | #a1 = phi <a5, a0> | |
1960 | a2 = operation (a1) | |
1961 | a3 = operation (a2) | |
1962 | a4 = operation (a3) | |
1963 | a5 = operation (a4) | |
1964 | ||
1965 | #a = phi <a5> | |
1966 | ||
1967 | PHI is the reduction phi node (#a1 = phi <a5, a0> above) | |
1968 | FIRST_STMT is the first reduction stmt in the chain | |
1969 | (a2 = operation (a1)). | |
1970 | ||
1971 | Return TRUE if a reduction chain was detected. */ | |
1972 | ||
1973 | static bool | |
1974 | vect_is_slp_reduction (loop_vec_info loop_info, gimple phi, gimple first_stmt) | |
1975 | { | |
1976 | struct loop *loop = (gimple_bb (phi))->loop_father; | |
1977 | struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info); | |
1978 | enum tree_code code; | |
85078181 | 1979 | gimple current_stmt = NULL, loop_use_stmt = NULL, first, next_stmt; |
39a5d6b1 | 1980 | stmt_vec_info use_stmt_info, current_stmt_info; |
1981 | tree lhs; | |
1982 | imm_use_iterator imm_iter; | |
1983 | use_operand_p use_p; | |
6b809b99 | 1984 | int nloop_uses, size = 0, n_out_of_loop_uses; |
39a5d6b1 | 1985 | bool found = false; |
1986 | ||
1987 | if (loop != vect_loop) | |
1988 | return false; | |
1989 | ||
1990 | lhs = PHI_RESULT (phi); | |
1991 | code = gimple_assign_rhs_code (first_stmt); | |
1992 | while (1) | |
1993 | { | |
1994 | nloop_uses = 0; | |
6b809b99 | 1995 | n_out_of_loop_uses = 0; |
39a5d6b1 | 1996 | FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs) |
1997 | { | |
85078181 | 1998 | gimple use_stmt = USE_STMT (use_p); |
0b308eee | 1999 | if (is_gimple_debug (use_stmt)) |
2000 | continue; | |
85078181 | 2001 | |
39a5d6b1 | 2002 | /* Check if we got back to the reduction phi. */ |
85078181 | 2003 | if (use_stmt == phi) |
39a5d6b1 | 2004 | { |
85078181 | 2005 | loop_use_stmt = use_stmt; |
39a5d6b1 | 2006 | found = true; |
2007 | break; | |
2008 | } | |
2009 | ||
6b809b99 | 2010 | if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt))) |
2011 | { | |
2012 | if (vinfo_for_stmt (use_stmt) | |
2013 | && !STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (use_stmt))) | |
2014 | { | |
2015 | loop_use_stmt = use_stmt; | |
2016 | nloop_uses++; | |
2017 | } | |
2018 | } | |
2019 | else | |
2020 | n_out_of_loop_uses++; | |
39a5d6b1 | 2021 | |
6b809b99 | 2022 | /* There can be either a single use in the loop or two uses in |
2023 | phi nodes. */ | |
2024 | if (nloop_uses > 1 || (n_out_of_loop_uses && nloop_uses)) | |
2025 | return false; | |
39a5d6b1 | 2026 | } |
2027 | ||
2028 | if (found) | |
2029 | break; | |
2030 | ||
85078181 | 2031 | /* We reached a statement with no loop uses. */ |
2032 | if (nloop_uses == 0) | |
2033 | return false; | |
2034 | ||
39a5d6b1 | 2035 | /* This is a loop exit phi, and we haven't reached the reduction phi. */ |
85078181 | 2036 | if (gimple_code (loop_use_stmt) == GIMPLE_PHI) |
39a5d6b1 | 2037 | return false; |
2038 | ||
85078181 | 2039 | if (!is_gimple_assign (loop_use_stmt) |
2040 | || code != gimple_assign_rhs_code (loop_use_stmt) | |
2041 | || !flow_bb_inside_loop_p (loop, gimple_bb (loop_use_stmt))) | |
39a5d6b1 | 2042 | return false; |
2043 | ||
2044 | /* Insert USE_STMT into reduction chain. */ | |
85078181 | 2045 | use_stmt_info = vinfo_for_stmt (loop_use_stmt); |
39a5d6b1 | 2046 | if (current_stmt) |
2047 | { | |
2048 | current_stmt_info = vinfo_for_stmt (current_stmt); | |
85078181 | 2049 | GROUP_NEXT_ELEMENT (current_stmt_info) = loop_use_stmt; |
39a5d6b1 | 2050 | GROUP_FIRST_ELEMENT (use_stmt_info) |
2051 | = GROUP_FIRST_ELEMENT (current_stmt_info); | |
2052 | } | |
2053 | else | |
85078181 | 2054 | GROUP_FIRST_ELEMENT (use_stmt_info) = loop_use_stmt; |
39a5d6b1 | 2055 | |
85078181 | 2056 | lhs = gimple_assign_lhs (loop_use_stmt); |
2057 | current_stmt = loop_use_stmt; | |
39a5d6b1 | 2058 | size++; |
2059 | } | |
2060 | ||
85078181 | 2061 | if (!found || loop_use_stmt != phi || size < 2) |
39a5d6b1 | 2062 | return false; |
2063 | ||
39a5d6b1 | 2064 | /* Swap the operands, if needed, to make the reduction operand be the second |
2065 | operand. */ | |
2066 | lhs = PHI_RESULT (phi); | |
eb3a666e | 2067 | next_stmt = GROUP_FIRST_ELEMENT (vinfo_for_stmt (current_stmt)); |
2068 | while (next_stmt) | |
39a5d6b1 | 2069 | { |
85078181 | 2070 | if (gimple_assign_rhs2 (next_stmt) == lhs) |
eb3a666e | 2071 | { |
85078181 | 2072 | tree op = gimple_assign_rhs1 (next_stmt); |
2073 | gimple def_stmt = NULL; | |
2074 | ||
2075 | if (TREE_CODE (op) == SSA_NAME) | |
2076 | def_stmt = SSA_NAME_DEF_STMT (op); | |
2077 | ||
2078 | /* Check that the other def is either defined in the loop | |
2079 | ("vect_internal_def"), or it's an induction (defined by a | |
2080 | loop-header phi-node). */ | |
2081 | if (def_stmt | |
aada78b6 | 2082 | && gimple_bb (def_stmt) |
85078181 | 2083 | && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)) |
2084 | && (is_gimple_assign (def_stmt) | |
2085 | || is_gimple_call (def_stmt) | |
2086 | || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt)) | |
2087 | == vect_induction_def | |
2088 | || (gimple_code (def_stmt) == GIMPLE_PHI | |
2089 | && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt)) | |
2090 | == vect_internal_def | |
2091 | && !is_loop_header_bb_p (gimple_bb (def_stmt))))) | |
eb3a666e | 2092 | { |
85078181 | 2093 | lhs = gimple_assign_lhs (next_stmt); |
2094 | next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt)); | |
2095 | continue; | |
2096 | } | |
2097 | ||
2098 | return false; | |
2099 | } | |
2100 | else | |
2101 | { | |
2102 | tree op = gimple_assign_rhs2 (next_stmt); | |
2103 | gimple def_stmt = NULL; | |
2104 | ||
2105 | if (TREE_CODE (op) == SSA_NAME) | |
2106 | def_stmt = SSA_NAME_DEF_STMT (op); | |
2107 | ||
2108 | /* Check that the other def is either defined in the loop | |
2109 | ("vect_internal_def"), or it's an induction (defined by a | |
2110 | loop-header phi-node). */ | |
2111 | if (def_stmt | |
aada78b6 | 2112 | && gimple_bb (def_stmt) |
85078181 | 2113 | && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)) |
2114 | && (is_gimple_assign (def_stmt) | |
2115 | || is_gimple_call (def_stmt) | |
2116 | || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt)) | |
eb3a666e | 2117 | == vect_induction_def |
85078181 | 2118 | || (gimple_code (def_stmt) == GIMPLE_PHI |
2119 | && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt)) | |
eb3a666e | 2120 | == vect_internal_def |
85078181 | 2121 | && !is_loop_header_bb_p (gimple_bb (def_stmt))))) |
2122 | { | |
6d8fb6cf | 2123 | if (dump_enabled_p ()) |
eb3a666e | 2124 | { |
7bd765d4 | 2125 | dump_printf_loc (MSG_NOTE, vect_location, "swapping oprnds: "); |
2126 | dump_gimple_stmt (MSG_NOTE, TDF_SLIM, next_stmt, 0); | |
78bb46f5 | 2127 | dump_printf (MSG_NOTE, "\n"); |
eb3a666e | 2128 | } |
2129 | ||
8f6fa493 | 2130 | swap_ssa_operands (next_stmt, |
2131 | gimple_assign_rhs1_ptr (next_stmt), | |
2132 | gimple_assign_rhs2_ptr (next_stmt)); | |
a9696ee9 | 2133 | update_stmt (next_stmt); |
ba69439f | 2134 | |
2135 | if (CONSTANT_CLASS_P (gimple_assign_rhs1 (next_stmt))) | |
2136 | LOOP_VINFO_OPERANDS_SWAPPED (loop_info) = true; | |
eb3a666e | 2137 | } |
2138 | else | |
85078181 | 2139 | return false; |
39a5d6b1 | 2140 | } |
2141 | ||
eb3a666e | 2142 | lhs = gimple_assign_lhs (next_stmt); |
2143 | next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt)); | |
39a5d6b1 | 2144 | } |
2145 | ||
eb3a666e | 2146 | /* Save the chain for further analysis in SLP detection. */ |
2147 | first = GROUP_FIRST_ELEMENT (vinfo_for_stmt (current_stmt)); | |
f1f41a6c | 2148 | LOOP_VINFO_REDUCTION_CHAINS (loop_info).safe_push (first); |
eb3a666e | 2149 | GROUP_SIZE (vinfo_for_stmt (first)) = size; |
2150 | ||
39a5d6b1 | 2151 | return true; |
2152 | } | |
2153 | ||
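A source loop that gives rise to the reduction chain shape detected above is one where a single accumulator is updated several times per iteration; the array and unroll factor here are illustrative:

  int
  sum4 (const int *a, int n)
  {
    int s = 0;
    for (int i = 0; i < n; i++)
      {
        s += a[4 * i + 0];   /* a2 = operation (a1) */
        s += a[4 * i + 1];   /* a3 = operation (a2) */
        s += a[4 * i + 2];   /* a4 = operation (a3) */
        s += a[4 * i + 3];   /* a5 = operation (a4) */
      }
    return s;
  }

GROUP_FIRST_ELEMENT/GROUP_NEXT_ELEMENT then link the four adds, and GROUP_SIZE records the chain length of 4 for the later SLP analysis.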
2154 | ||
f4a50267 | 2155 | /* Function vect_is_simple_reduction_1 |
fb85abff | 2156 | |
7aa0d350 | 2157 | (1) Detect a cross-iteration def-use cycle that represents a simple |
282bf14c | 2158 | reduction computation. We look for the following pattern: |
fb85abff | 2159 | |
2160 | loop_header: | |
2161 | a1 = phi < a0, a2 > | |
2162 | a3 = ... | |
2163 | a2 = operation (a3, a1) | |
48e1416a | 2164 | |
63048bd8 | 2165 | or |
2166 | ||
2167 | a3 = ... | |
2168 | loop_header: | |
2169 | a1 = phi < a0, a2 > | |
2170 | a2 = operation (a3, a1) | |
2171 | ||
fb85abff | 2172 | such that: |
48e1416a | 2173 | 1. operation is commutative and associative and it is safe to |
ade2ac53 | 2174 | change the order of the computation (if CHECK_REDUCTION is true) |
fb85abff | 2175 | 2. no uses for a2 in the loop (a2 is used out of the loop) |
caf6df13 | 2176 | 3. no uses of a1 in the loop besides the reduction operation |
2177 | 4. no uses of a1 outside the loop. | |
fb85abff | 2178 | |
caf6df13 | 2179 | Conditions 1,4 are tested here. |
48e1416a | 2180 | Conditions 2,3 are tested in vect_mark_stmts_to_be_vectorized. |
ade2ac53 | 2181 | |
48e1416a | 2182 | (2) Detect a cross-iteration def-use cycle in nested loops, i.e., |
2183 | nested cycles, if CHECK_REDUCTION is false. | |
7aa0d350 | 2184 | |
2185 | (3) Detect cycles of phi nodes in outer-loop vectorization, i.e., double | |
2186 | reductions: | |
2187 | ||
2188 | a1 = phi < a0, a2 > | |
2189 | inner loop (def of a3) | |
48e1416a | 2190 | a2 = phi < a3 > |
f4a50267 | 2191 | |
2192 | If MODIFY is true it tries also to rework the code in-place to enable | |
2193 | detection of more reduction patterns. For the time being we rewrite | |
2194 | "res -= RHS" into "rhs += -RHS" when it seems worthwhile. | |
7aa0d350 | 2195 | */ |
fb85abff | 2196 | |
f4a50267 | 2197 | static gimple |
2198 | vect_is_simple_reduction_1 (loop_vec_info loop_info, gimple phi, | |
2199 | bool check_reduction, bool *double_reduc, | |
2200 | bool modify) | |
fb85abff | 2201 | { |
2202 | struct loop *loop = (gimple_bb (phi))->loop_father; | |
2203 | struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info); | |
2204 | edge latch_e = loop_latch_edge (loop); | |
2205 | tree loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e); | |
0df23b96 | 2206 | gimple def_stmt, def1 = NULL, def2 = NULL; |
f4a50267 | 2207 | enum tree_code orig_code, code; |
0df23b96 | 2208 | tree op1, op2, op3 = NULL_TREE, op4 = NULL_TREE; |
fb85abff | 2209 | tree type; |
2210 | int nloop_uses; | |
2211 | tree name; | |
2212 | imm_use_iterator imm_iter; | |
2213 | use_operand_p use_p; | |
7aa0d350 | 2214 | bool phi_def; |
2215 | ||
2216 | *double_reduc = false; | |
fb85abff | 2217 | |
ade2ac53 | 2218 | /* If CHECK_REDUCTION is true, we assume inner-most loop vectorization, |
2219 | otherwise, we assume outer loop vectorization. */ | |
48e1416a | 2220 | gcc_assert ((check_reduction && loop == vect_loop) |
ade2ac53 | 2221 | || (!check_reduction && flow_loop_nested_p (vect_loop, loop))); |
fb85abff | 2222 | |
2223 | name = PHI_RESULT (phi); | |
75f8b7c8 | 2224 | /* ??? If there are no uses of the PHI result the inner loop reduction |
2225 | won't be detected as possibly double-reduction by vectorizable_reduction | |
2226 | because that tries to walk the PHI arg from the preheader edge which | |
2227 | can be constant. See PR60382. */ | |
2228 | if (has_zero_uses (name)) | |
2229 | return NULL; | |
fb85abff | 2230 | nloop_uses = 0; |
2231 | FOR_EACH_IMM_USE_FAST (use_p, imm_iter, name) | |
2232 | { | |
2233 | gimple use_stmt = USE_STMT (use_p); | |
9845d120 | 2234 | if (is_gimple_debug (use_stmt)) |
2235 | continue; | |
caf6df13 | 2236 | |
2237 | if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt))) | |
2238 | { | |
6d8fb6cf | 2239 | if (dump_enabled_p ()) |
7bd765d4 | 2240 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
78bb46f5 | 2241 | "intermediate value used outside loop.\n"); |
caf6df13 | 2242 | |
2243 | return NULL; | |
2244 | } | |
2245 | ||
2246 | if (vinfo_for_stmt (use_stmt) | |
fb85abff | 2247 | && !is_pattern_stmt_p (vinfo_for_stmt (use_stmt))) |
2248 | nloop_uses++; | |
2249 | if (nloop_uses > 1) | |
2250 | { | |
6d8fb6cf | 2251 | if (dump_enabled_p ()) |
7bd765d4 | 2252 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
78bb46f5 | 2253 | "reduction used in loop.\n"); |
fb85abff | 2254 | return NULL; |
2255 | } | |
2256 | } | |
2257 | ||
2258 | if (TREE_CODE (loop_arg) != SSA_NAME) | |
2259 | { | |
6d8fb6cf | 2260 | if (dump_enabled_p ()) |
fb85abff | 2261 | { |
7bd765d4 | 2262 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
2263 | "reduction: not ssa_name: "); | |
2264 | dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, loop_arg); | |
78bb46f5 | 2265 | dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); |
fb85abff | 2266 | } |
2267 | return NULL; | |
2268 | } | |
2269 | ||
2270 | def_stmt = SSA_NAME_DEF_STMT (loop_arg); | |
2271 | if (!def_stmt) | |
2272 | { | |
6d8fb6cf | 2273 | if (dump_enabled_p ()) |
7bd765d4 | 2274 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
78bb46f5 | 2275 | "reduction: no def_stmt.\n"); |
fb85abff | 2276 | return NULL; |
2277 | } | |
2278 | ||
7aa0d350 | 2279 | if (!is_gimple_assign (def_stmt) && gimple_code (def_stmt) != GIMPLE_PHI) |
fb85abff | 2280 | { |
6d8fb6cf | 2281 | if (dump_enabled_p ()) |
78bb46f5 | 2282 | { |
2283 | dump_gimple_stmt (MSG_NOTE, TDF_SLIM, def_stmt, 0); | |
2284 | dump_printf (MSG_NOTE, "\n"); | |
2285 | } | |
fb85abff | 2286 | return NULL; |
2287 | } | |
2288 | ||
7aa0d350 | 2289 | if (is_gimple_assign (def_stmt)) |
2290 | { | |
2291 | name = gimple_assign_lhs (def_stmt); | |
2292 | phi_def = false; | |
2293 | } | |
2294 | else | |
2295 | { | |
2296 | name = PHI_RESULT (def_stmt); | |
2297 | phi_def = true; | |
2298 | } | |
2299 | ||
fb85abff | 2300 | nloop_uses = 0; |
2301 | FOR_EACH_IMM_USE_FAST (use_p, imm_iter, name) | |
2302 | { | |
2303 | gimple use_stmt = USE_STMT (use_p); | |
9845d120 | 2304 | if (is_gimple_debug (use_stmt)) |
2305 | continue; | |
fb85abff | 2306 | if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)) |
2307 | && vinfo_for_stmt (use_stmt) | |
2308 | && !is_pattern_stmt_p (vinfo_for_stmt (use_stmt))) | |
2309 | nloop_uses++; | |
2310 | if (nloop_uses > 1) | |
2311 | { | |
6d8fb6cf | 2312 | if (dump_enabled_p ()) |
7bd765d4 | 2313 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
78bb46f5 | 2314 | "reduction used in loop.\n"); |
fb85abff | 2315 | return NULL; |
2316 | } | |
2317 | } | |
2318 | ||
7aa0d350 | 2319 | /* If DEF_STMT is a phi node itself, we expect it to have a single argument |
2320 | defined in the inner loop. */ | |
2321 | if (phi_def) | |
2322 | { | |
2323 | op1 = PHI_ARG_DEF (def_stmt, 0); | |
2324 | ||
2325 | if (gimple_phi_num_args (def_stmt) != 1 | |
2326 | || TREE_CODE (op1) != SSA_NAME) | |
2327 | { | |
6d8fb6cf | 2328 | if (dump_enabled_p ()) |
7bd765d4 | 2329 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
78bb46f5 | 2330 | "unsupported phi node definition.\n"); |
7aa0d350 | 2331 | |
2332 | return NULL; | |
2333 | } | |
2334 | ||
48e1416a | 2335 | def1 = SSA_NAME_DEF_STMT (op1); |
149f7c8d | 2336 | if (gimple_bb (def1) |
2337 | && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)) | |
7aa0d350 | 2338 | && loop->inner |
2339 | && flow_bb_inside_loop_p (loop->inner, gimple_bb (def1)) | |
2340 | && is_gimple_assign (def1)) | |
2341 | { | |
6d8fb6cf | 2342 | if (dump_enabled_p ()) |
7bd765d4 | 2343 | report_vect_op (MSG_NOTE, def_stmt, |
2344 | "detected double reduction: "); | |
48e1416a | 2345 | |
7aa0d350 | 2346 | *double_reduc = true; |
2347 | return def_stmt; | |
2348 | } | |
2349 | ||
2350 | return NULL; | |
2351 | } | |
2352 | ||
f4a50267 | 2353 | code = orig_code = gimple_assign_rhs_code (def_stmt); |
2354 | ||
2355 | /* We can handle "res -= x[i]", which is non-associative by | |
2356 | simply rewriting this into "res += -x[i]". Avoid changing | |
2357 | gimple instruction for the first simple tests and only do this | |
2358 | if we're allowed to change code at all. */ | |
9ef16690 | 2359 | if (code == MINUS_EXPR |
2360 | && modify | |
2361 | && (op1 = gimple_assign_rhs1 (def_stmt)) | |
2362 | && TREE_CODE (op1) == SSA_NAME | |
2363 | && SSA_NAME_DEF_STMT (op1) == phi) | |
f4a50267 | 2364 | code = PLUS_EXPR; |
fb85abff | 2365 | |
48e1416a | 2366 | if (check_reduction |
ade2ac53 | 2367 | && (!commutative_tree_code (code) || !associative_tree_code (code))) |
fb85abff | 2368 | { |
6d8fb6cf | 2369 | if (dump_enabled_p ()) |
7bd765d4 | 2370 | report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt, |
2371 | "reduction: not commutative/associative: "); | |
fb85abff | 2372 | return NULL; |
2373 | } | |
2374 | ||
48e1416a | 2375 | if (get_gimple_rhs_class (code) != GIMPLE_BINARY_RHS) |
fb85abff | 2376 | { |
0df23b96 | 2377 | if (code != COND_EXPR) |
2378 | { | |
6d8fb6cf | 2379 | if (dump_enabled_p ()) |
7bd765d4 | 2380 | report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt, |
2381 | "reduction: not binary operation: "); | |
fb85abff | 2382 | |
0df23b96 | 2383 | return NULL; |
2384 | } | |
2385 | ||
8a2caf10 | 2386 | op3 = gimple_assign_rhs1 (def_stmt); |
a18d4327 | 2387 | if (COMPARISON_CLASS_P (op3)) |
2388 | { | |
2389 | op4 = TREE_OPERAND (op3, 1); | |
2390 | op3 = TREE_OPERAND (op3, 0); | |
48e1416a | 2391 | } |
2392 | ||
8a2caf10 | 2393 | op1 = gimple_assign_rhs2 (def_stmt); |
2394 | op2 = gimple_assign_rhs3 (def_stmt); | |
0df23b96 | 2395 | |
2396 | if (TREE_CODE (op1) != SSA_NAME && TREE_CODE (op2) != SSA_NAME) | |
2397 | { | |
6d8fb6cf | 2398 | if (dump_enabled_p ()) |
7bd765d4 | 2399 | report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt, |
2400 | "reduction: uses not ssa_names: "); | |
0df23b96 | 2401 | |
2402 | return NULL; | |
2403 | } | |
fb85abff | 2404 | } |
0df23b96 | 2405 | else |
2406 | { | |
2407 | op1 = gimple_assign_rhs1 (def_stmt); | |
2408 | op2 = gimple_assign_rhs2 (def_stmt); | |
2409 | ||
a29b42f8 | 2410 | if (TREE_CODE (op1) != SSA_NAME && TREE_CODE (op2) != SSA_NAME) |
0df23b96 | 2411 | { |
6d8fb6cf | 2412 | if (dump_enabled_p ()) |
7bd765d4 | 2413 | report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt, |
2414 | "reduction: uses not ssa_names: "); | |
0df23b96 | 2415 | |
2416 | return NULL; | |
2417 | } | |
2418 | } | |
fb85abff | 2419 | |
fb85abff | 2420 | type = TREE_TYPE (gimple_assign_lhs (def_stmt)); |
0df23b96 | 2421 | if ((TREE_CODE (op1) == SSA_NAME |
1ea6a73c | 2422 | && !types_compatible_p (type,TREE_TYPE (op1))) |
0df23b96 | 2423 | || (TREE_CODE (op2) == SSA_NAME |
1ea6a73c | 2424 | && !types_compatible_p (type, TREE_TYPE (op2))) |
0df23b96 | 2425 | || (op3 && TREE_CODE (op3) == SSA_NAME |
1ea6a73c | 2426 | && !types_compatible_p (type, TREE_TYPE (op3))) |
0df23b96 | 2427 | || (op4 && TREE_CODE (op4) == SSA_NAME |
1ea6a73c | 2428 | && !types_compatible_p (type, TREE_TYPE (op4)))) |
fb85abff | 2429 | { |
6d8fb6cf | 2430 | if (dump_enabled_p ()) |
fb85abff | 2431 | { |
7bd765d4 | 2432 | dump_printf_loc (MSG_NOTE, vect_location, |
2433 | "reduction: multiple types: operation type: "); | |
2434 | dump_generic_expr (MSG_NOTE, TDF_SLIM, type); | |
2435 | dump_printf (MSG_NOTE, ", operands types: "); | |
2436 | dump_generic_expr (MSG_NOTE, TDF_SLIM, | |
2437 | TREE_TYPE (op1)); | |
2438 | dump_printf (MSG_NOTE, ","); | |
2439 | dump_generic_expr (MSG_NOTE, TDF_SLIM, | |
2440 | TREE_TYPE (op2)); | |
a18d4327 | 2441 | if (op3) |
0df23b96 | 2442 | { |
7bd765d4 | 2443 | dump_printf (MSG_NOTE, ","); |
2444 | dump_generic_expr (MSG_NOTE, TDF_SLIM, | |
2445 | TREE_TYPE (op3)); | |
a18d4327 | 2446 | } |
2447 | ||
2448 | if (op4) | |
2449 | { | |
7bd765d4 | 2450 | dump_printf (MSG_NOTE, ","); |
2451 | dump_generic_expr (MSG_NOTE, TDF_SLIM, | |
2452 | TREE_TYPE (op4)); | |
0df23b96 | 2453 | } |
78bb46f5 | 2454 | dump_printf (MSG_NOTE, "\n"); |
fb85abff | 2455 | } |
0df23b96 | 2456 | |
fb85abff | 2457 | return NULL; |
2458 | } | |
2459 | ||
48e1416a | 2460 | /* Check that it's ok to change the order of the computation. |
ade2ac53 | 2461 | Generally, when vectorizing a reduction we change the order of the |
fb85abff | 2462 | computation. This may change the behavior of the program in some |
48e1416a | 2463 | cases, so we need to check that this is ok. One exception is when |
fb85abff | 2464 | vectorizing an outer-loop: the inner-loop is executed sequentially, |
2465 | and therefore vectorizing reductions in the inner-loop during | |
2466 | outer-loop vectorization is safe. */ | |
2467 | ||
2468 | /* CHECKME: check for !flag_finite_math_only too? */ | |
2469 | if (SCALAR_FLOAT_TYPE_P (type) && !flag_associative_math | |
48e1416a | 2470 | && check_reduction) |
fb85abff | 2471 | { |
2472 | /* Changing the order of operations changes the semantics. */ | |
6d8fb6cf | 2473 | if (dump_enabled_p ()) |
7bd765d4 | 2474 | report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt, |
2475 | "reduction: unsafe fp math optimization: "); | |
fb85abff | 2476 | return NULL; |
2477 | } | |
2478 | else if (INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_TRAPS (type) | |
ade2ac53 | 2479 | && check_reduction) |
fb85abff | 2480 | { |
2481 | /* Changing the order of operations changes the semantics. */ | |
6d8fb6cf | 2482 | if (dump_enabled_p ()) |
7bd765d4 | 2483 | report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt, |
2484 | "reduction: unsafe int math optimization: "); | |
fb85abff | 2485 | return NULL; |
2486 | } | |
ade2ac53 | 2487 | else if (SAT_FIXED_POINT_TYPE_P (type) && check_reduction) |
fb85abff | 2488 | { |
2489 | /* Changing the order of operations changes the semantics. */ | |
6d8fb6cf | 2490 | if (dump_enabled_p ()) |
7bd765d4 | 2491 | report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt, |
fb85abff | 2492 | "reduction: unsafe fixed-point math optimization: "); |
2493 | return NULL; | |
2494 | } | |
2495 | ||
f4a50267 | 2496 | /* If we detected "res -= x[i]" earlier, rewrite it into |
2497 | "res += -x[i]" now. If this turns out to be useless reassoc | |
2498 | will clean it up again. */ | |
2499 | if (orig_code == MINUS_EXPR) | |
2500 | { | |
2501 | tree rhs = gimple_assign_rhs2 (def_stmt); | |
bb38acc8 | 2502 | tree negrhs = make_ssa_name (TREE_TYPE (rhs), NULL); |
f4a50267 | 2503 | gimple negate_stmt = gimple_build_assign_with_ops (NEGATE_EXPR, negrhs, |
2504 | rhs, NULL); | |
2505 | gimple_stmt_iterator gsi = gsi_for_stmt (def_stmt); | |
2506 | set_vinfo_for_stmt (negate_stmt, new_stmt_vec_info (negate_stmt, | |
2507 | loop_info, NULL)); | |
2508 | gsi_insert_before (&gsi, negate_stmt, GSI_NEW_STMT); | |
2509 | gimple_assign_set_rhs2 (def_stmt, negrhs); | |
2510 | gimple_assign_set_rhs_code (def_stmt, PLUS_EXPR); | |
2511 | update_stmt (def_stmt); | |
2512 | } | |
2513 | ||
ade2ac53 | 2514 | /* Reduction is safe. We're dealing with one of the following: |
fb85abff | 2515 | 1) integer arithmetic and no trapv |
ade2ac53 | 2516 | 2) floating point arithmetic, and special flags permit this optimization |
2517 | 3) nested cycle (i.e., outer loop vectorization). */ | |
0df23b96 | 2518 | if (TREE_CODE (op1) == SSA_NAME) |
2519 | def1 = SSA_NAME_DEF_STMT (op1); | |
2520 | ||
2521 | if (TREE_CODE (op2) == SSA_NAME) | |
2522 | def2 = SSA_NAME_DEF_STMT (op2); | |
2523 | ||
48e1416a | 2524 | if (code != COND_EXPR |
a29b42f8 | 2525 | && ((!def1 || gimple_nop_p (def1)) && (!def2 || gimple_nop_p (def2)))) |
fb85abff | 2526 | { |
6d8fb6cf | 2527 | if (dump_enabled_p ()) |
7bd765d4 | 2528 | report_vect_op (MSG_NOTE, def_stmt, "reduction: no defs for operands: "); |
fb85abff | 2529 | return NULL; |
2530 | } | |
2531 | ||
fb85abff | 2532 | /* Check that one def is the reduction def, defined by PHI, |
f083cd24 | 2533 | the other def is either defined in the loop ("vect_internal_def"), |
fb85abff | 2534 | or it's an induction (defined by a loop-header phi-node). */ |
2535 | ||
0df23b96 | 2536 | if (def2 && def2 == phi |
2537 | && (code == COND_EXPR | |
a29b42f8 | 2538 | || !def1 || gimple_nop_p (def1) |
63048bd8 | 2539 | || !flow_bb_inside_loop_p (loop, gimple_bb (def1)) |
0df23b96 | 2540 | || (def1 && flow_bb_inside_loop_p (loop, gimple_bb (def1)) |
2541 | && (is_gimple_assign (def1) | |
1e845e91 | 2542 | || is_gimple_call (def1) |
48e1416a | 2543 | || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def1)) |
0df23b96 | 2544 | == vect_induction_def |
2545 | || (gimple_code (def1) == GIMPLE_PHI | |
48e1416a | 2546 | && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def1)) |
0df23b96 | 2547 | == vect_internal_def |
2548 | && !is_loop_header_bb_p (gimple_bb (def1))))))) | |
fb85abff | 2549 | { |
6d8fb6cf | 2550 | if (dump_enabled_p ()) |
7bd765d4 | 2551 | report_vect_op (MSG_NOTE, def_stmt, "detected reduction: "); |
fb85abff | 2552 | return def_stmt; |
2553 | } | |
39a5d6b1 | 2554 | |
2555 | if (def1 && def1 == phi | |
2556 | && (code == COND_EXPR | |
a29b42f8 | 2557 | || !def2 || gimple_nop_p (def2) |
63048bd8 | 2558 | || !flow_bb_inside_loop_p (loop, gimple_bb (def2)) |
39a5d6b1 | 2559 | || (def2 && flow_bb_inside_loop_p (loop, gimple_bb (def2)) |
2560 | && (is_gimple_assign (def2) | |
2561 | || is_gimple_call (def2) | |
2562 | || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def2)) | |
2563 | == vect_induction_def | |
2564 | || (gimple_code (def2) == GIMPLE_PHI | |
2565 | && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def2)) | |
2566 | == vect_internal_def | |
2567 | && !is_loop_header_bb_p (gimple_bb (def2))))))) | |
fb85abff | 2568 | { |
ade2ac53 | 2569 | if (check_reduction) |
2570 | { | |
2571 | /* Swap operands (just for simplicity - so that the rest of the code | |
2572 | can assume that the reduction variable is always the last (second) | |
2573 | argument). */ | |
6d8fb6cf | 2574 | if (dump_enabled_p ()) |
7bd765d4 | 2575 | report_vect_op (MSG_NOTE, def_stmt, |
ade2ac53 | 2576 | "detected reduction: need to swap operands: "); |
2577 | ||
8f6fa493 | 2578 | swap_ssa_operands (def_stmt, gimple_assign_rhs1_ptr (def_stmt), |
2579 | gimple_assign_rhs2_ptr (def_stmt)); | |
ba69439f | 2580 | |
2581 | if (CONSTANT_CLASS_P (gimple_assign_rhs1 (def_stmt))) | |
2582 | LOOP_VINFO_OPERANDS_SWAPPED (loop_info) = true; | |
ade2ac53 | 2583 | } |
2584 | else | |
2585 | { | |
6d8fb6cf | 2586 | if (dump_enabled_p ()) |
7bd765d4 | 2587 | report_vect_op (MSG_NOTE, def_stmt, "detected reduction: "); |
ade2ac53 | 2588 | } |
2589 | ||
fb85abff | 2590 | return def_stmt; |
2591 | } | |
39a5d6b1 | 2592 | |
2593 | /* Try to find SLP reduction chain. */ | |
85078181 | 2594 | if (check_reduction && vect_is_slp_reduction (loop_info, phi, def_stmt)) |
fb85abff | 2595 | { |
6d8fb6cf | 2596 | if (dump_enabled_p ()) |
7bd765d4 | 2597 | report_vect_op (MSG_NOTE, def_stmt, |
2598 | "reduction: detected reduction chain: "); | |
ade2ac53 | 2599 | |
39a5d6b1 | 2600 | return def_stmt; |
fb85abff | 2601 | } |
39a5d6b1 | 2602 | |
6d8fb6cf | 2603 | if (dump_enabled_p ()) |
7bd765d4 | 2604 | report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt, |
2605 | "reduction: unknown pattern: "); | |
39a5d6b1 | 2606 | |
2607 | return NULL; | |
fb85abff | 2608 | } |
2609 | ||
f4a50267 | 2610 | /* Wrapper around vect_is_simple_reduction_1, that won't modify code |
2611 | in-place. Arguments as there. */ | |
2612 | ||
2613 | static gimple | |
2614 | vect_is_simple_reduction (loop_vec_info loop_info, gimple phi, | |
2615 | bool check_reduction, bool *double_reduc) | |
2616 | { | |
2617 | return vect_is_simple_reduction_1 (loop_info, phi, check_reduction, | |
2618 | double_reduc, false); | |
2619 | } | |
2620 | ||
2621 | /* Wrapper around vect_is_simple_reduction_1, which will modify code | |
2622 | in-place if it enables detection of more reductions. Arguments | |
2623 | as there. */ | |
2624 | ||
2625 | gimple | |
2626 | vect_force_simple_reduction (loop_vec_info loop_info, gimple phi, | |
2627 | bool check_reduction, bool *double_reduc) | |
2628 | { | |
2629 | return vect_is_simple_reduction_1 (loop_info, phi, check_reduction, | |
2630 | double_reduc, true); | |
2631 | } | |
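Concretely, the cycles described in the comment for vect_is_simple_reduction_1 correspond to source loops like the following sketch (names, types and bounds are illustrative):

  /* A simple reduction: a1 = phi <a0, a2>;  a2 = a1 + b[i].
     For floating-point types this reordering needs -fassociative-math.  */
  int
  sum1d (const int *b, int n)
  {
    int res = 0;
    for (int i = 0; i < n; i++)
      res += b[i];
    return res;
  }

  /* A double reduction: the outer-loop phi is fed by the inner-loop
     reduction phi, the case flagged via *double_reduc above.  */
  int
  sum2d (int a[16][16])
  {
    int res = 0;
    for (int i = 0; i < 16; i++)
      for (int j = 0; j < 16; j++)
        res += a[i][j];
    return res;
  }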
fb85abff | 2632 | |
0822b158 | 2633 | /* Calculate the cost of one scalar iteration of the loop. */ |
2634 | int | |
f4ac3f3e | 2635 | vect_get_single_scalar_iteration_cost (loop_vec_info loop_vinfo) |
0822b158 | 2636 | { |
2637 | struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); | |
2638 | basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo); | |
2639 | int nbbs = loop->num_nodes, factor, scalar_single_iter_cost = 0; | |
2640 | int innerloop_iters, i, stmt_cost; | |
2641 | ||
282bf14c | 2642 | /* Count statements in scalar loop. Using this as scalar cost for a single |
0822b158 | 2643 | iteration for now. |
2644 | ||
2645 | TODO: Add outer loop support. | |
2646 | ||
2647 | TODO: Consider assigning different costs to different scalar | |
2648 | statements. */ | |
2649 | ||
2650 | /* FORNOW. */ | |
3aee830b | 2651 | innerloop_iters = 1; |
0822b158 | 2652 | if (loop->inner) |
2653 | innerloop_iters = 50; /* FIXME */ | |
2654 | ||
2655 | for (i = 0; i < nbbs; i++) | |
2656 | { | |
2657 | gimple_stmt_iterator si; | |
2658 | basic_block bb = bbs[i]; | |
2659 | ||
2660 | if (bb->loop_father == loop->inner) | |
2661 | factor = innerloop_iters; | |
2662 | else | |
2663 | factor = 1; | |
2664 | ||
2665 | for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si)) | |
2666 | { | |
2667 | gimple stmt = gsi_stmt (si); | |
45f1556e | 2668 | stmt_vec_info stmt_info = vinfo_for_stmt (stmt); |
0822b158 | 2669 | |
2670 | if (!is_gimple_assign (stmt) && !is_gimple_call (stmt)) | |
2671 | continue; | |
2672 | ||
45f1556e | 2673 | /* Skip stmts that are not vectorized inside the loop. */ |
2674 | if (stmt_info | |
2675 | && !STMT_VINFO_RELEVANT_P (stmt_info) | |
2676 | && (!STMT_VINFO_LIVE_P (stmt_info) | |
5df2530b | 2677 | || !VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_info))) |
2678 | && !STMT_VINFO_IN_PATTERN_P (stmt_info)) | |
45f1556e | 2679 | continue; |
2680 | ||
0822b158 | 2681 | if (STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt))) |
2682 | { | |
2683 | if (DR_IS_READ (STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt)))) | |
f4ac3f3e | 2684 | stmt_cost = vect_get_stmt_cost (scalar_load); |
0822b158 | 2685 | else |
f4ac3f3e | 2686 | stmt_cost = vect_get_stmt_cost (scalar_store); |
0822b158 | 2687 | } |
2688 | else | |
f4ac3f3e | 2689 | stmt_cost = vect_get_stmt_cost (scalar_stmt); |
0822b158 | 2690 | |
2691 | scalar_single_iter_cost += stmt_cost * factor; | |
2692 | } | |
2693 | } | |
2694 | return scalar_single_iter_cost; | |
2695 | } | |
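The count above can be made concrete with assumed unit costs. For a body like a[i] = b[i] + c[i], one scalar iteration contributes two loads, one arithmetic statement and one store, and statements sitting in a non-unrolled inner loop are additionally weighted by the FORNOW factor of 50. The per-statement costs below are illustrative; the real ones come from the target's cost hooks:

  #include <stdio.h>

  int
  main (void)
  {
    int scalar_load = 1, scalar_store = 1, scalar_stmt = 1;   /* assumed */

    int cost = 2 * scalar_load + 1 * scalar_stmt + 1 * scalar_store;   /* 4 */

    int innerloop_iters = 50;   /* FORNOW weight used above */
    printf ("body in outer loop: %d, same body in an inner loop: %d\n",
            cost, cost * innerloop_iters);   /* 4 and 200 */
    return 0;
  }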
2696 | ||
2697 | /* Calculate cost of peeling the loop PEEL_ITERS_PROLOGUE times. */ | |
2698 | int | |
2699 | vect_get_known_peeling_cost (loop_vec_info loop_vinfo, int peel_iters_prologue, | |
2700 | int *peel_iters_epilogue, | |
f97dec81 | 2701 | int scalar_single_iter_cost, |
2702 | stmt_vector_for_cost *prologue_cost_vec, | |
2703 | stmt_vector_for_cost *epilogue_cost_vec) | |
0822b158 | 2704 | { |
f97dec81 | 2705 | int retval = 0; |
0822b158 | 2706 | int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo); |
2707 | ||
2708 | if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)) | |
2709 | { | |
2710 | *peel_iters_epilogue = vf/2; | |
6d8fb6cf | 2711 | if (dump_enabled_p ()) |
7bd765d4 | 2712 | dump_printf_loc (MSG_NOTE, vect_location, |
2713 | "cost model: epilogue peel iters set to vf/2 " | |
78bb46f5 | 2714 | "because loop iterations are unknown.\n"); |
0822b158 | 2715 | |
2716 | /* If peeled iterations are known but the number of scalar loop |
2717 | iterations is unknown, count a taken branch per peeled loop. */ |
f97dec81 | 2718 | retval = record_stmt_cost (prologue_cost_vec, 2, cond_branch_taken, |
2719 | NULL, 0, vect_prologue); | |
0822b158 | 2720 | } |
2721 | else | |
2722 | { | |
2723 | int niters = LOOP_VINFO_INT_NITERS (loop_vinfo); | |
2724 | peel_iters_prologue = niters < peel_iters_prologue ? | |
2725 | niters : peel_iters_prologue; | |
2726 | *peel_iters_epilogue = (niters - peel_iters_prologue) % vf; | |
a4ee7fac | 2727 | /* If we need to peel for gaps, but no epilogue peeling is otherwise |
2728 | required, we have to peel VF iterations. */ |
2729 | if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) && !*peel_iters_epilogue) | |
2730 | *peel_iters_epilogue = vf; | |
0822b158 | 2731 | } |
2732 | ||
f97dec81 | 2733 | if (peel_iters_prologue) |
2734 | retval += record_stmt_cost (prologue_cost_vec, | |
2735 | peel_iters_prologue * scalar_single_iter_cost, | |
2736 | scalar_stmt, NULL, 0, vect_prologue); | |
2737 | if (*peel_iters_epilogue) | |
2738 | retval += record_stmt_cost (epilogue_cost_vec, | |
2739 | *peel_iters_epilogue * scalar_single_iter_cost, | |
2740 | scalar_stmt, NULL, 0, vect_epilogue); | |
2741 | return retval; | |
0822b158 | 2742 | } |
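When the iteration count is known, the prologue and epilogue peel counts follow directly from the arithmetic above. A worked example with illustrative numbers:

  #include <stdio.h>

  int
  main (void)
  {
    int niters = 100;               /* assumed known iteration count   */
    int vf = 8;                     /* assumed vectorization factor    */
    int peel_iters_prologue = 3;    /* assumed alignment prologue peel */

    if (peel_iters_prologue > niters)
      peel_iters_prologue = niters;
    int peel_iters_epilogue = (niters - peel_iters_prologue) % vf;

    printf ("prologue %d, vector body %d, epilogue %d iterations\n",
            peel_iters_prologue,
            (niters - peel_iters_prologue) - peel_iters_epilogue,
            peel_iters_epilogue);   /* 3, 96, 1 */
    return 0;
  }

Each peeled iteration is then charged the scalar single-iteration cost in the prologue or epilogue cost vector, as done above.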
2743 | ||
fb85abff | 2744 | /* Function vect_estimate_min_profitable_iters |
2745 | ||
2746 | Return the number of iterations required for the vector version of the | |
2747 | loop to be profitable relative to the cost of the scalar version of the | |
5938768b | 2748 | loop. */ |
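The break-even point this function computes can be approximated by a much simpler model: vectorization pays off once niters * SIC exceeds VOC plus the vector loop's iterations times VIC, where SIC is the scalar per-iteration cost, VIC the vector per-iteration cost, and VOC the one-off outside (prologue/epilogue/versioning) cost. The sketch below is only a simplified stand-in for the exact formula used in this function, and all numbers are assumptions:

  #include <stdio.h>

  int
  main (void)
  {
    int vf = 4;
    int scalar_iter_cost = 4;    /* SIC */
    int vec_inside_cost = 6;     /* VIC, per vector iteration       */
    int vec_outside_cost = 20;   /* VOC, prologue + epilogue + etc. */

    int niters = 1;
    while (niters * scalar_iter_cost
           <= vec_outside_cost + (niters / vf + 1) * vec_inside_cost)
      niters++;

    printf ("vectorization estimated profitable from %d iterations\n",
            niters);   /* 10 with these numbers */
    return 0;
  }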
fb85abff | 2749 | |
5938768b | 2750 | static void |
2751 | vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo, | |
2752 | int *ret_min_profitable_niters, | |
2753 | int *ret_min_profitable_estimate) | |
fb85abff | 2754 | { |
fb85abff | 2755 | int min_profitable_iters; |
5938768b | 2756 | int min_profitable_estimate; |
fb85abff | 2757 | int peel_iters_prologue; |
2758 | int peel_iters_epilogue; | |
f97dec81 | 2759 | unsigned vec_inside_cost = 0; |
fb85abff | 2760 | int vec_outside_cost = 0; |
f97dec81 | 2761 | unsigned vec_prologue_cost = 0; |
2762 | unsigned vec_epilogue_cost = 0; | |
fb85abff | 2763 | int scalar_single_iter_cost = 0; |
2764 | int scalar_outside_cost = 0; | |
2765 | int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo); | |
313a5120 | 2766 | int npeel = LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo); |
f97dec81 | 2767 | void *target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo); |
fb85abff | 2768 | |
2769 | /* Cost model disabled. */ | |
3e398f5b | 2770 | if (unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo))) |
fb85abff | 2771 | { |
78bb46f5 | 2772 | dump_printf_loc (MSG_NOTE, vect_location, "cost model disabled.\n"); |
5938768b | 2773 | *ret_min_profitable_niters = 0; |
2774 | *ret_min_profitable_estimate = 0; | |
2775 | return; | |
fb85abff | 2776 | } |
2777 | ||
2778 | /* Requires loop versioning tests to handle misalignment. */ | |
10095225 | 2779 | if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)) |
fb85abff | 2780 | { |
2781 | /* FIXME: Make cost depend on complexity of individual check. */ | |
f1f41a6c | 2782 | unsigned len = LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).length (); |
f97dec81 | 2783 | (void) add_stmt_cost (target_cost_data, len, vector_stmt, NULL, 0, |
2784 | vect_prologue); | |
7bd765d4 | 2785 | dump_printf (MSG_NOTE, |
2786 | "cost model: Adding cost of checks for loop " | |
2787 | "versioning to treat misalignment.\n"); | |
fb85abff | 2788 | } |
2789 | ||
10095225 | 2790 | /* Requires loop versioning with alias checks. */ |
2791 | if (LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo)) | |
fb85abff | 2792 | { |
2793 | /* FIXME: Make cost depend on complexity of individual check. */ | |
f1f41a6c | 2794 | unsigned len = LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo).length (); |
f97dec81 | 2795 | (void) add_stmt_cost (target_cost_data, len, vector_stmt, NULL, 0, |
2796 | vect_prologue); | |
7bd765d4 | 2797 | dump_printf (MSG_NOTE, |
2798 | "cost model: Adding cost of checks for loop " | |
2799 | "versioning aliasing.\n"); | |
fb85abff | 2800 | } |
2801 | ||
10095225 | 2802 | if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo) |
2803 | || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo)) | |
f97dec81 | 2804 | (void) add_stmt_cost (target_cost_data, 1, cond_branch_taken, NULL, 0, |
2805 | vect_prologue); | |
fb85abff | 2806 | |
2807 | /* Count statements in scalar loop. Using this as scalar cost for a single | |
2808 | iteration for now. | |
2809 | ||
2810 | TODO: Add outer loop support. | |
2811 | ||
2812 | TODO: Consider assigning different costs to different scalar | |
2813 | statements. */ | |
2814 | ||
f4ac3f3e | 2815 | scalar_single_iter_cost = vect_get_single_scalar_iteration_cost (loop_vinfo); |
0822b158 | 2816 | |
fb85abff | 2817 | /* Add additional cost for the peeled instructions in prologue and epilogue |
2818 | loop. | |
2819 | ||
2820 | FORNOW: If we don't know the value of peel_iters for prologue or epilogue | |
2821 | at compile time, we assume it's vf/2 (the worst case would be vf-1). |
2822 | ||
2823 | TODO: Build an expression that represents peel_iters for prologue and | |
2824 | epilogue to be used in a run-time test. */ | |
2825 | ||
0822b158 | 2826 | if (npeel < 0) |
fb85abff | 2827 | { |
2828 | peel_iters_prologue = vf/2; | |
7bd765d4 | 2829 | dump_printf (MSG_NOTE, "cost model: " |
78bb46f5 | 2830 | "prologue peel iters set to vf/2.\n"); |
fb85abff | 2831 | |
2832 | /* If peeling for alignment is unknown, loop bound of main loop becomes | |
2833 | unknown. */ | |
2834 | peel_iters_epilogue = vf/2; | |
7bd765d4 | 2835 | dump_printf (MSG_NOTE, "cost model: " |
2836 | "epilogue peel iters set to vf/2 because " | |
78bb46f5 | 2837 | "peeling for alignment is unknown.\n"); |
fb85abff | 2838 | |
2839 | /* If peeled iterations are unknown, count a taken branch and a not taken | |
2840 | branch per peeled loop. Even if scalar loop iterations are known, | |
2841 | vector iterations are not known since peeled prologue iterations are | |
2842 | not known. Hence guards remain the same. */ | |
f97dec81 | 2843 | (void) add_stmt_cost (target_cost_data, 2, cond_branch_taken, |
2844 | NULL, 0, vect_prologue); | |
2845 | (void) add_stmt_cost (target_cost_data, 2, cond_branch_not_taken, | |
2846 | NULL, 0, vect_prologue); | |
2847 | /* FORNOW: Don't attempt to pass individual scalar instructions to | |
2848 | the model; just assume linear cost for scalar iterations. */ | |
2849 | (void) add_stmt_cost (target_cost_data, | |
2850 | peel_iters_prologue * scalar_single_iter_cost, | |
2851 | scalar_stmt, NULL, 0, vect_prologue); | |
2852 | (void) add_stmt_cost (target_cost_data, | |
2853 | peel_iters_epilogue * scalar_single_iter_cost, | |
2854 | scalar_stmt, NULL, 0, vect_epilogue); | |
fb85abff | 2855 | } |
48e1416a | 2856 | else |
fb85abff | 2857 | { |
f97dec81 | 2858 | stmt_vector_for_cost prologue_cost_vec, epilogue_cost_vec; |
2859 | stmt_info_for_cost *si; | |
2860 | int j; | |
2861 | void *data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo); | |
2862 | ||
f1f41a6c | 2863 | prologue_cost_vec.create (2); |
2864 | epilogue_cost_vec.create (2); | |
0822b158 | 2865 | peel_iters_prologue = npeel; |
f97dec81 | 2866 | |
2867 | (void) vect_get_known_peeling_cost (loop_vinfo, peel_iters_prologue, | |
2868 | &peel_iters_epilogue, | |
2869 | scalar_single_iter_cost, | |
2870 | &prologue_cost_vec, | |
2871 | &epilogue_cost_vec); | |
2872 | ||
f1f41a6c | 2873 | FOR_EACH_VEC_ELT (prologue_cost_vec, j, si) |
f97dec81 | 2874 | { |
2875 | struct _stmt_vec_info *stmt_info | |
2876 | = si->stmt ? vinfo_for_stmt (si->stmt) : NULL; | |
2877 | (void) add_stmt_cost (data, si->count, si->kind, stmt_info, | |
2878 | si->misalign, vect_prologue); | |
2879 | } | |
2880 | ||
f1f41a6c | 2881 | FOR_EACH_VEC_ELT (epilogue_cost_vec, j, si) |
f97dec81 | 2882 | { |
2883 | struct _stmt_vec_info *stmt_info | |
2884 | = si->stmt ? vinfo_for_stmt (si->stmt) : NULL; | |
2885 | (void) add_stmt_cost (data, si->count, si->kind, stmt_info, | |
2886 | si->misalign, vect_epilogue); | |
2887 | } | |
2888 | ||
f1f41a6c | 2889 | prologue_cost_vec.release (); |
2890 | epilogue_cost_vec.release (); | |
fb85abff | 2891 | } |
2892 | ||
fb85abff | 2893 | /* FORNOW: The scalar outside cost is incremented in one of the |
2894 | following ways: | |
2895 | ||
2896 | 1. The vectorizer checks for alignment and aliasing and generates | |
2897 | a condition that allows dynamic vectorization. A cost model | |
2898 | check is ANDED with the versioning condition. Hence scalar code | |
2899 | path now has the added cost of the versioning check. | |
2900 | ||
2901 | if (cost > th & versioning_check) | |
2902 | jmp to vector code | |
2903 | ||
2904 | Hence run-time scalar is incremented by not-taken branch cost. | |
2905 | ||
2906 | 2. The vectorizer then checks if a prologue is required. If the | |
2907 | cost model check was not done before during versioning, it has to | |
2908 | be done before the prologue check. | |
2909 | ||
2910 | if (cost <= th) | |
2911 | prologue = scalar_iters | |
2912 | if (prologue == 0) | |
2913 | jmp to vector code | |
2914 | else | |
2915 | execute prologue | |
2916 | if (prologue == num_iters) | |
2917 | go to exit | |
2918 | ||
2919 | Hence the run-time scalar cost is incremented by a taken branch, | |
2920 | plus a not-taken branch, plus a taken branch cost. | |
2921 | ||
2922 | 3. The vectorizer then checks if an epilogue is required. If the | |
2923 | cost model check was not done before during prologue check, it | |
2924 | has to be done with the epilogue check. | |
2925 | ||
2926 | if (prologue == 0) | |
2927 | jmp to vector code | |
2928 | else | |
2929 | execute prologue | |
2930 | if (prologue == num_iters) | |
2931 | go to exit | |
2932 | vector code: | |
2933 | if ((cost <= th) | (scalar_iters-prologue-epilogue == 0)) | |
2934 | jmp to epilogue | |
2935 | ||
2936 | Hence the run-time scalar cost should be incremented by 2 taken | |
2937 | branches. | |
2938 | ||
2939 | TODO: The back end may reorder the BBs differently and reverse | |
2940 | conditions/branch directions. Change the estimates below to | |
2941 | something more reasonable. */ | |
2942 | ||
2943 | /* If the number of iterations is known and we do not do versioning, we can | |
282bf14c | 2944 | decide whether to vectorize at compile time. Hence the scalar version |
fb85abff | 2945 | does not carry cost model guard costs. */ |
2946 | if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo) | |
10095225 | 2947 | || LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo) |
2948 | || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo)) | |
fb85abff | 2949 | { |
2950 | /* Cost model check occurs at versioning. */ | |
10095225 | 2951 | if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo) |
2952 | || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo)) | |
f4ac3f3e | 2953 | scalar_outside_cost += vect_get_stmt_cost (cond_branch_not_taken); |
fb85abff | 2954 | else |
2955 | { | |
2956 | /* Cost model check occurs at prologue generation. */ | |
313a5120 | 2957 | if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) < 0) |
f4ac3f3e | 2958 | scalar_outside_cost += 2 * vect_get_stmt_cost (cond_branch_taken) |
2959 | + vect_get_stmt_cost (cond_branch_not_taken); | |
fb85abff | 2960 | /* Cost model check occurs at epilogue generation. */ |
2961 | else | |
f4ac3f3e | 2962 | scalar_outside_cost += 2 * vect_get_stmt_cost (cond_branch_taken); |
fb85abff | 2963 | } |
2964 | } | |
2965 | ||
f97dec81 | 2966 | /* Complete the target-specific cost calculations. */ |
2967 | finish_cost (LOOP_VINFO_TARGET_COST_DATA (loop_vinfo), &vec_prologue_cost, | |
2968 | &vec_inside_cost, &vec_epilogue_cost); | |
fb85abff | 2969 | |
f97dec81 | 2970 | vec_outside_cost = (int)(vec_prologue_cost + vec_epilogue_cost); |
4db2b577 | 2971 | |
48e1416a | 2972 | /* Calculate number of iterations required to make the vector version |
282bf14c | 2973 | profitable, relative to the loop bodies only. The following condition |
48e1416a | 2974 | must hold true: |
fb85abff | 2975 | SIC * niters + SOC > VIC * ((niters-PL_ITERS-EP_ITERS)/VF) + VOC |
2976 | where | |
2977 | SIC = scalar iteration cost, VIC = vector iteration cost, | |
2978 | VOC = vector outside cost, VF = vectorization factor, | |
2979 | PL_ITERS = prologue iterations, EP_ITERS= epilogue iterations | |
2980 | SOC = scalar outside cost for run time cost model check. */ | |
2981 | ||
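  /* Worked example (editor's illustration with assumed costs): for SIC = 4,
     VIC = 8, VF = 4, VOC = 20, SOC = 6 and no peeled iterations, the formula
     below gives ((20 - 6) * 4) / (4 * 4 - 8) = 56 / 8 = 7; because
     4 * 4 * 7 = 112 is not greater than 8 * 7 + (20 - 6) * 4 = 112, the
     result is bumped to 8 before being clamped to at least VF and then
     decremented for the "niters <= threshold" runtime guard.  */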
f97dec81 | 2982 | if ((scalar_single_iter_cost * vf) > (int) vec_inside_cost) |
fb85abff | 2983 | { |
2984 | if (vec_outside_cost <= 0) | |
2985 | min_profitable_iters = 1; | |
2986 | else | |
2987 | { | |
2988 | min_profitable_iters = ((vec_outside_cost - scalar_outside_cost) * vf | |
2989 | - vec_inside_cost * peel_iters_prologue | |
2990 | - vec_inside_cost * peel_iters_epilogue) | |
2991 | / ((scalar_single_iter_cost * vf) | |
2992 | - vec_inside_cost); | |
2993 | ||
2994 | if ((scalar_single_iter_cost * vf * min_profitable_iters) | |
f97dec81 | 2995 | <= (((int) vec_inside_cost * min_profitable_iters) |
2996 | + (((int) vec_outside_cost - scalar_outside_cost) * vf))) | |
fb85abff | 2997 | min_profitable_iters++; |
2998 | } | |
2999 | } | |
3000 | /* vector version will never be profitable. */ | |
3001 | else | |
3002 | { | |
4c73695b | 3003 | if (LOOP_VINFO_LOOP (loop_vinfo)->force_vectorize) |
3e398f5b | 3004 | warning_at (vect_location, OPT_Wopenmp_simd, "vectorization " |
3005 | "did not happen for a simd loop"); | |
3006 | ||
6d8fb6cf | 3007 | if (dump_enabled_p ()) |
7bd765d4 | 3008 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
3009 | "cost model: the vector iteration cost = %d " | |
3010 | "divided by the scalar iteration cost = %d " | |
78bb46f5 | 3011 | "is greater or equal to the vectorization factor = %d" |
3012 | ".\n", | |
7bd765d4 | 3013 | vec_inside_cost, scalar_single_iter_cost, vf); |
5938768b | 3014 | *ret_min_profitable_niters = -1; |
3015 | *ret_min_profitable_estimate = -1; | |
3016 | return; | |
fb85abff | 3017 | } |
3018 | ||
6d8fb6cf | 3019 | if (dump_enabled_p ()) |
7bd765d4 | 3020 | { |
3021 | dump_printf_loc (MSG_NOTE, vect_location, "Cost model analysis:\n"); | |
3022 | dump_printf (MSG_NOTE, " Vector inside of loop cost: %d\n", | |
3023 | vec_inside_cost); | |
3024 | dump_printf (MSG_NOTE, " Vector prologue cost: %d\n", | |
3025 | vec_prologue_cost); | |
3026 | dump_printf (MSG_NOTE, " Vector epilogue cost: %d\n", | |
3027 | vec_epilogue_cost); | |
3028 | dump_printf (MSG_NOTE, " Scalar iteration cost: %d\n", | |
3029 | scalar_single_iter_cost); | |
3030 | dump_printf (MSG_NOTE, " Scalar outside cost: %d\n", | |
3031 | scalar_outside_cost); | |
5938768b | 3032 | dump_printf (MSG_NOTE, " Vector outside cost: %d\n", |
3033 | vec_outside_cost); | |
7bd765d4 | 3034 | dump_printf (MSG_NOTE, " prologue iterations: %d\n", |
3035 | peel_iters_prologue); | |
3036 | dump_printf (MSG_NOTE, " epilogue iterations: %d\n", | |
3037 | peel_iters_epilogue); | |
78bb46f5 | 3038 | dump_printf (MSG_NOTE, |
7bd765d4 | 3039 | " Calculated minimum iters for profitability: %d\n", |
3040 | min_profitable_iters); | |
78bb46f5 | 3041 | dump_printf (MSG_NOTE, "\n"); |
fb85abff | 3042 | } |
3043 | ||
48e1416a | 3044 | min_profitable_iters = |
fb85abff | 3045 | min_profitable_iters < vf ? vf : min_profitable_iters; |
3046 | ||
3047 | /* Because the condition we create is: | |
3048 | if (niters <= min_profitable_iters) | |
3049 | then skip the vectorized loop. */ | |
3050 | min_profitable_iters--; | |
3051 | ||
6d8fb6cf | 3052 | if (dump_enabled_p ()) |
7bd765d4 | 3053 | dump_printf_loc (MSG_NOTE, vect_location, |
78bb46f5 | 3054 | " Runtime profitability threshold = %d\n", |
3055 | min_profitable_iters); | |
5938768b | 3056 | |
3057 | *ret_min_profitable_niters = min_profitable_iters; | |
3058 | ||
3059 | /* Calculate number of iterations required to make the vector version | |
3060 | profitable, relative to the loop bodies only. | |
3061 | ||
3062 | Non-vectorized variant is SIC * niters and it must win over vector | |
3063 | variant on the expected loop trip count. The following condition must hold true: | |
3064 | SIC * niters > VIC * ((niters-PL_ITERS-EP_ITERS)/VF) + VOC + SOC */ | |
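  /* Editor's illustration, continuing the assumed costs used above (SIC = 4,
     VIC = 8, VF = 4, VOC = 20, SOC = 6, no peeling): the estimate computed
     below is ((20 + 6) * 4) / (4 * 4 - 8) = 104 / 8 = 13, which is then
     decremented to 12 and kept at MAX (12, runtime threshold).  */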
3065 | ||
3066 | if (vec_outside_cost <= 0) | |
3067 | min_profitable_estimate = 1; | |
3068 | else | |
3069 | { | |
3070 | min_profitable_estimate = ((vec_outside_cost + scalar_outside_cost) * vf | |
3071 | - vec_inside_cost * peel_iters_prologue | |
3072 | - vec_inside_cost * peel_iters_epilogue) | |
3073 | / ((scalar_single_iter_cost * vf) | |
3074 | - vec_inside_cost); | |
3075 | } | |
3076 | min_profitable_estimate --; | |
3077 | min_profitable_estimate = MAX (min_profitable_estimate, min_profitable_iters); | |
6d8fb6cf | 3078 | if (dump_enabled_p ()) |
5938768b | 3079 | dump_printf_loc (MSG_NOTE, vect_location, |
3080 | " Static estimate profitability threshold = %d\n", | |
3081 | min_profitable_estimate); |
48e1416a | 3082 | |
5938768b | 3083 | *ret_min_profitable_estimate = min_profitable_estimate; |
fb85abff | 3084 | } |
3085 | ||
3086 | ||
48e1416a | 3087 | /* TODO: Close dependency between vect_model_*_cost and vectorizable_* |
fb85abff | 3088 | functions. Design better to avoid maintenance issues. */ |
fb85abff | 3089 | |
48e1416a | 3090 | /* Function vect_model_reduction_cost. |
3091 | ||
3092 | Models cost for a reduction operation, including the vector ops | |
fb85abff | 3093 | generated within the strip-mine loop, the initial definition before |
3094 | the loop, and the epilogue code that must be generated. */ | |
3095 | ||
48e1416a | 3096 | static bool |
fb85abff | 3097 | vect_model_reduction_cost (stmt_vec_info stmt_info, enum tree_code reduc_code, |
3098 | int ncopies) | |
3099 | { | |
f97dec81 | 3100 | int prologue_cost = 0, epilogue_cost = 0; |
fb85abff | 3101 | enum tree_code code; |
3102 | optab optab; | |
3103 | tree vectype; | |
3104 | gimple stmt, orig_stmt; | |
3105 | tree reduction_op; | |
3754d046 | 3106 | machine_mode mode; |
fb85abff | 3107 | loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); |
3108 | struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); | |
f97dec81 | 3109 | void *target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo); |
fb85abff | 3110 | |
fb85abff | 3111 | /* Cost of reduction op inside loop. */ |
f97dec81 | 3112 | unsigned inside_cost = add_stmt_cost (target_cost_data, ncopies, vector_stmt, |
3113 | stmt_info, 0, vect_body); | |
fb85abff | 3114 | stmt = STMT_VINFO_STMT (stmt_info); |
3115 | ||
3116 | switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt))) | |
3117 | { | |
3118 | case GIMPLE_SINGLE_RHS: | |
3119 | gcc_assert (TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt)) == ternary_op); | |
3120 | reduction_op = TREE_OPERAND (gimple_assign_rhs1 (stmt), 2); | |
3121 | break; | |
3122 | case GIMPLE_UNARY_RHS: | |
3123 | reduction_op = gimple_assign_rhs1 (stmt); | |
3124 | break; | |
3125 | case GIMPLE_BINARY_RHS: | |
3126 | reduction_op = gimple_assign_rhs2 (stmt); | |
3127 | break; | |
c86930b0 | 3128 | case GIMPLE_TERNARY_RHS: |
3129 | reduction_op = gimple_assign_rhs3 (stmt); | |
3130 | break; | |
fb85abff | 3131 | default: |
3132 | gcc_unreachable (); | |
3133 | } | |
3134 | ||
3135 | vectype = get_vectype_for_scalar_type (TREE_TYPE (reduction_op)); | |
3136 | if (!vectype) | |
3137 | { | |
6d8fb6cf | 3138 | if (dump_enabled_p ()) |
fb85abff | 3139 | { |
7bd765d4 | 3140 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
3141 | "unsupported data-type "); | |
3142 | dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, | |
3143 | TREE_TYPE (reduction_op)); | |
78bb46f5 | 3144 | dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); |
fb85abff | 3145 | } |
3146 | return false; | |
3147 | } | |
48e1416a | 3148 | |
fb85abff | 3149 | mode = TYPE_MODE (vectype); |
3150 | orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info); | |
3151 | ||
48e1416a | 3152 | if (!orig_stmt) |
fb85abff | 3153 | orig_stmt = STMT_VINFO_STMT (stmt_info); |
3154 | ||
3155 | code = gimple_assign_rhs_code (orig_stmt); | |
3156 | ||
3157 | /* Add in cost for initial definition. */ | |
f97dec81 | 3158 | prologue_cost += add_stmt_cost (target_cost_data, 1, scalar_to_vec, |
3159 | stmt_info, 0, vect_prologue); | |
fb85abff | 3160 | |
3161 | /* Determine cost of epilogue code. | |
3162 | ||
3163 | We have a reduction operator that will reduce the vector in one statement. | |
3164 | Also requires scalar extract. */ | |
3165 | ||
3166 | if (!nested_in_vect_loop_p (loop, orig_stmt)) | |
3167 | { | |
8458f4ca | 3168 | if (reduc_code != ERROR_MARK) |
f97dec81 | 3169 | { |
3170 | epilogue_cost += add_stmt_cost (target_cost_data, 1, vector_stmt, | |
3171 | stmt_info, 0, vect_epilogue); | |
3172 | epilogue_cost += add_stmt_cost (target_cost_data, 1, vec_to_scalar, | |
3173 | stmt_info, 0, vect_epilogue); | |
3174 | } | |
48e1416a | 3175 | else |
fb85abff | 3176 | { |
e913b5cd | 3177 | int vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype)); |
fb85abff | 3178 | tree bitsize = |
3179 | TYPE_SIZE (TREE_TYPE (gimple_assign_lhs (orig_stmt))); | |
e913b5cd | 3180 | int element_bitsize = tree_to_uhwi (bitsize); |
fb85abff | 3181 | int nelements = vec_size_in_bits / element_bitsize; |
3182 | ||
3183 | optab = optab_for_tree_code (code, vectype, optab_default); | |
3184 | ||
3185 | /* We have a whole vector shift available. */ | |
3186 | if (VECTOR_MODE_P (mode) | |
d6bf3b14 | 3187 | && optab_handler (optab, mode) != CODE_FOR_nothing |
3188 | && optab_handler (vec_shr_optab, mode) != CODE_FOR_nothing) | |
f97dec81 | 3189 | { |
3190 | /* Final reduction via vector shifts and the reduction operator. | |
3191 | Also requires scalar extract. */ | |
3192 | epilogue_cost += add_stmt_cost (target_cost_data, | |
3193 | exact_log2 (nelements) * 2, | |
3194 | vector_stmt, stmt_info, 0, | |
3195 | vect_epilogue); | |
3196 | epilogue_cost += add_stmt_cost (target_cost_data, 1, | |
3197 | vec_to_scalar, stmt_info, 0, | |
3198 | vect_epilogue); | |
3199 | } | |
fb85abff | 3200 | else |
f97dec81 | 3201 | /* Use extracts and reduction op for final reduction. For N |
3202 | elements, we have N extracts and N-1 reduction ops. */ | |
3203 | epilogue_cost += add_stmt_cost (target_cost_data, | |
3204 | nelements + nelements - 1, | |
3205 | vector_stmt, stmt_info, 0, | |
3206 | vect_epilogue); | |
fb85abff | 3207 | } |
3208 | } | |
3209 | ||
6d8fb6cf | 3210 | if (dump_enabled_p ()) |
7bd765d4 | 3211 | dump_printf (MSG_NOTE, |
3212 | "vect_model_reduction_cost: inside_cost = %d, " | |
78bb46f5 | 3213 | "prologue_cost = %d, epilogue_cost = %d .\n", inside_cost, |
7bd765d4 | 3214 | prologue_cost, epilogue_cost); |
fb85abff | 3215 | |
3216 | return true; | |
3217 | } | |
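/* Worked example (editor's illustration, assumed vector type): reducing a
   V8HI vector (nelements = 8) when a whole-vector shift is available costs
   exact_log2 (8) * 2 = 6 vector_stmt plus 1 vec_to_scalar in the epilogue;
   without the shift the fallback charges 8 + 7 = 15 vector_stmt for the
   extracts and reduction ops.  With a direct REDUC_CODE the epilogue is just
   1 vector_stmt plus 1 vec_to_scalar.  */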
3218 | ||
3219 | ||
3220 | /* Function vect_model_induction_cost. | |
3221 | ||
3222 | Models cost for induction operations. */ | |
3223 | ||
3224 | static void | |
3225 | vect_model_induction_cost (stmt_vec_info stmt_info, int ncopies) | |
3226 | { | |
4db2b577 | 3227 | loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); |
f97dec81 | 3228 | void *target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo); |
3229 | unsigned inside_cost, prologue_cost; | |
4db2b577 | 3230 | |
fb85abff | 3231 | /* loop cost for vec_loop. */ |
f97dec81 | 3232 | inside_cost = add_stmt_cost (target_cost_data, ncopies, vector_stmt, |
3233 | stmt_info, 0, vect_body); | |
4db2b577 | 3234 | |
fb85abff | 3235 | /* prologue cost for vec_init and vec_step. */ |
f97dec81 | 3236 | prologue_cost = add_stmt_cost (target_cost_data, 2, scalar_to_vec, |
3237 | stmt_info, 0, vect_prologue); | |
48e1416a | 3238 | |
6d8fb6cf | 3239 | if (dump_enabled_p ()) |
7bd765d4 | 3240 | dump_printf_loc (MSG_NOTE, vect_location, |
3241 | "vect_model_induction_cost: inside_cost = %d, " | |
78bb46f5 | 3242 | "prologue_cost = %d .\n", inside_cost, prologue_cost); |
fb85abff | 3243 | } |
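/* For illustration (editor's note, NCOPIES assumed): an induction vectorized
   with ncopies = 2 is charged 2 vector_stmt costs inside the loop body plus
   2 scalar_to_vec costs in the prologue, one for building the initial vector
   and one for the step vector.  */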
3244 | ||
3245 | ||
3246 | /* Function get_initial_def_for_induction | |
3247 | ||
3248 | Input: | |
3249 | STMT - a stmt that performs an induction operation in the loop. | |
3250 | IV_PHI - the initial value of the induction variable | |
3251 | ||
3252 | Output: | |
3253 | Return a vector variable, initialized with the first VF values of | |
282bf14c | 3254 | the induction variable. E.g., for an iv with IV_PHI='X' and |
48e1416a | 3255 | evolution S, for a vector of 4 units, we want to return: |
fb85abff | 3256 | [X, X + S, X + 2*S, X + 3*S]. */ |
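/* Illustrative example (editor's note, values assumed): for an induction with
   initial value X = 3, step S = 2 and VF = 4 in the non-nested case, the
   function below builds vec_init = { 3, 5, 7, 9 } and a step vector
   vec_step = { VF*S, VF*S, VF*S, VF*S } = { 8, 8, 8, 8 } that advances the
   induction PHI by a full vector's worth of iterations.  */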
3257 | ||
3258 | static tree | |
3259 | get_initial_def_for_induction (gimple iv_phi) | |
3260 | { | |
3261 | stmt_vec_info stmt_vinfo = vinfo_for_stmt (iv_phi); | |
3262 | loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo); | |
3263 | struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); | |
48e1416a | 3264 | tree vectype; |
fb85abff | 3265 | int nunits; |
3266 | edge pe = loop_preheader_edge (loop); | |
3267 | struct loop *iv_loop; | |
3268 | basic_block new_bb; | |
f1f41a6c | 3269 | tree new_vec, vec_init, vec_step, t; |
fb85abff | 3270 | tree new_var; |
3271 | tree new_name; | |
3272 | gimple init_stmt, induction_phi, new_stmt; | |
3273 | tree induc_def, vec_def, vec_dest; | |
3274 | tree init_expr, step_expr; | |
3275 | int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo); | |
3276 | int i; | |
fb85abff | 3277 | int ncopies; |
3278 | tree expr; | |
3279 | stmt_vec_info phi_info = vinfo_for_stmt (iv_phi); | |
3280 | bool nested_in_vect_loop = false; | |
3281 | gimple_seq stmts = NULL; | |
3282 | imm_use_iterator imm_iter; | |
3283 | use_operand_p use_p; | |
3284 | gimple exit_phi; | |
3285 | edge latch_e; | |
3286 | tree loop_arg; | |
3287 | gimple_stmt_iterator si; | |
3288 | basic_block bb = gimple_bb (iv_phi); | |
f1a47479 | 3289 | tree stepvectype; |
0185abae | 3290 | tree resvectype; |
fb85abff | 3291 | |
3292 | /* Is phi in an inner-loop, while vectorizing an enclosing outer-loop? */ | |
3293 | if (nested_in_vect_loop_p (loop, iv_phi)) | |
3294 | { | |
3295 | nested_in_vect_loop = true; | |
3296 | iv_loop = loop->inner; | |
3297 | } | |
3298 | else | |
3299 | iv_loop = loop; | |
3300 | gcc_assert (iv_loop == (gimple_bb (iv_phi))->loop_father); | |
3301 | ||
3302 | latch_e = loop_latch_edge (iv_loop); | |
3303 | loop_arg = PHI_ARG_DEF_FROM_EDGE (iv_phi, latch_e); | |
3304 | ||
e431757f | 3305 | step_expr = STMT_VINFO_LOOP_PHI_EVOLUTION_PART (phi_info); |
3306 | gcc_assert (step_expr != NULL_TREE); | |
3307 | ||
fb85abff | 3308 | pe = loop_preheader_edge (iv_loop); |
e431757f | 3309 | init_expr = PHI_ARG_DEF_FROM_EDGE (iv_phi, |
3310 | loop_preheader_edge (iv_loop)); | |
fb85abff | 3311 | |
99f81ffb | 3312 | vectype = get_vectype_for_scalar_type (TREE_TYPE (init_expr)); |
0185abae | 3313 | resvectype = get_vectype_for_scalar_type (TREE_TYPE (PHI_RESULT (iv_phi))); |
3314 | gcc_assert (vectype); | |
3315 | nunits = TYPE_VECTOR_SUBPARTS (vectype); | |
3316 | ncopies = vf / nunits; | |
3317 | ||
3318 | gcc_assert (phi_info); | |
3319 | gcc_assert (ncopies >= 1); | |
3320 | ||
e431757f | 3321 | /* Convert the step to the desired type. */ |
3322 | step_expr = force_gimple_operand (fold_convert (TREE_TYPE (vectype), | |
3323 | step_expr), | |
3324 | &stmts, true, NULL_TREE); | |
3325 | if (stmts) | |
3326 | { | |
3327 | new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts); | |
3328 | gcc_assert (!new_bb); | |
3329 | } | |
3330 | ||
0185abae | 3331 | /* Find the first insertion point in the BB. */ |
3332 | si = gsi_after_labels (bb); | |
3333 | ||
fb85abff | 3334 | /* Create the vector that holds the initial_value of the induction. */ |
3335 | if (nested_in_vect_loop) | |
3336 | { | |
3337 | /* iv_loop is nested in the loop to be vectorized. init_expr had already | |
282bf14c | 3338 | been created during vectorization of previous stmts. We obtain it |
3339 | from the STMT_VINFO_VEC_STMT of the defining stmt. */ | |
e431757f | 3340 | vec_init = vect_get_vec_def_for_operand (init_expr, iv_phi, NULL); |
abad9af1 | 3341 | /* If the initial value is not of proper type, convert it. */ |
3342 | if (!useless_type_conversion_p (vectype, TREE_TYPE (vec_init))) | |
3343 | { | |
3344 | new_stmt = gimple_build_assign_with_ops | |
3345 | (VIEW_CONVERT_EXPR, | |
3346 | vect_get_new_vect_var (vectype, vect_simple_var, "vec_iv_"), | |
3347 | build1 (VIEW_CONVERT_EXPR, vectype, vec_init), NULL_TREE); | |
3348 | vec_init = make_ssa_name (gimple_assign_lhs (new_stmt), new_stmt); | |
3349 | gimple_assign_set_lhs (new_stmt, vec_init); | |
3350 | new_bb = gsi_insert_on_edge_immediate (loop_preheader_edge (iv_loop), | |
3351 | new_stmt); | |
3352 | gcc_assert (!new_bb); | |
3353 | set_vinfo_for_stmt (new_stmt, | |
3354 | new_stmt_vec_info (new_stmt, loop_vinfo, NULL)); | |
3355 | } | |
fb85abff | 3356 | } |
3357 | else | |
3358 | { | |
f1f41a6c | 3359 | vec<constructor_elt, va_gc> *v; |
3e299f5d | 3360 | |
fb85abff | 3361 | /* iv_loop is the loop to be vectorized. Create: |
3362 | vec_init = [X, X+S, X+2*S, X+3*S] (S = step_expr, X = init_expr) */ | |
99f81ffb | 3363 | new_var = vect_get_new_vect_var (TREE_TYPE (vectype), |
3364 | vect_scalar_var, "var_"); | |
3365 | new_name = force_gimple_operand (fold_convert (TREE_TYPE (vectype), | |
3366 | init_expr), | |
3367 | &stmts, false, new_var); | |
fb85abff | 3368 | if (stmts) |
3369 | { | |
3370 | new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts); | |
3371 | gcc_assert (!new_bb); | |
3372 | } | |
3373 | ||
f1f41a6c | 3374 | vec_alloc (v, nunits); |
6c2c88c7 | 3375 | bool constant_p = is_gimple_min_invariant (new_name); |
3e299f5d | 3376 | CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, new_name); |
fb85abff | 3377 | for (i = 1; i < nunits; i++) |
3378 | { | |
3379 | /* Create: new_name_i = new_name + step_expr */ | |
99f81ffb | 3380 | new_name = fold_build2 (PLUS_EXPR, TREE_TYPE (new_name), |
3381 | new_name, step_expr); | |
6c2c88c7 | 3382 | if (!is_gimple_min_invariant (new_name)) |
fb85abff | 3383 | { |
6c2c88c7 | 3384 | init_stmt = gimple_build_assign (new_var, new_name); |
3385 | new_name = make_ssa_name (new_var, init_stmt); | |
3386 | gimple_assign_set_lhs (init_stmt, new_name); | |
3387 | new_bb = gsi_insert_on_edge_immediate (pe, init_stmt); | |
3388 | gcc_assert (!new_bb); | |
3389 | if (dump_enabled_p ()) | |
3390 | { | |
3391 | dump_printf_loc (MSG_NOTE, vect_location, | |
3392 | "created new init_stmt: "); | |
3393 | dump_gimple_stmt (MSG_NOTE, TDF_SLIM, init_stmt, 0); | |
78bb46f5 | 3394 | dump_printf (MSG_NOTE, "\n"); |
6c2c88c7 | 3395 | } |
3396 | constant_p = false; | |
fb85abff | 3397 | } |
3e299f5d | 3398 | CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, new_name); |
fb85abff | 3399 | } |
3400 | /* Create a vector from [new_name_0, new_name_1, ..., new_name_nunits-1] */ | |
6c2c88c7 | 3401 | if (constant_p) |
3402 | new_vec = build_vector_from_ctor (vectype, v); | |
3403 | else | |
3404 | new_vec = build_constructor (vectype, v); | |
f1f41a6c | 3405 | vec_init = vect_init_vector (iv_phi, new_vec, vectype, NULL); |
fb85abff | 3406 | } |
3407 | ||
3408 | ||
3409 | /* Create the vector that holds the step of the induction. */ | |
3410 | if (nested_in_vect_loop) | |
3411 | /* iv_loop is nested in the loop to be vectorized. Generate: | |
3412 | vec_step = [S, S, S, S] */ | |
3413 | new_name = step_expr; | |
3414 | else | |
3415 | { | |
3416 | /* iv_loop is the loop to be vectorized. Generate: | |
3417 | vec_step = [VF*S, VF*S, VF*S, VF*S] */ | |
1d62df1c | 3418 | if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr))) |
3419 | { | |
3420 | expr = build_int_cst (integer_type_node, vf); | |
3421 | expr = fold_convert (TREE_TYPE (step_expr), expr); | |
3422 | } | |
3423 | else | |
3424 | expr = build_int_cst (TREE_TYPE (step_expr), vf); | |
f1a47479 | 3425 | new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr), |
3426 | expr, step_expr); | |
bb0d2509 | 3427 | if (TREE_CODE (step_expr) == SSA_NAME) |
3428 | new_name = vect_init_vector (iv_phi, new_name, | |
3429 | TREE_TYPE (step_expr), NULL); | |
fb85abff | 3430 | } |
3431 | ||
b797154e | 3432 | t = unshare_expr (new_name); |
bb0d2509 | 3433 | gcc_assert (CONSTANT_CLASS_P (new_name) |
3434 | || TREE_CODE (new_name) == SSA_NAME); | |
f1a47479 | 3435 | stepvectype = get_vectype_for_scalar_type (TREE_TYPE (new_name)); |
3436 | gcc_assert (stepvectype); | |
f1f41a6c | 3437 | new_vec = build_vector_from_val (stepvectype, t); |
3438 | vec_step = vect_init_vector (iv_phi, new_vec, stepvectype, NULL); | |
fb85abff | 3439 | |
3440 | ||
3441 | /* Create the following def-use cycle: | |
3442 | loop prolog: | |
3443 | vec_init = ... | |
3444 | vec_step = ... | |
3445 | loop: | |
3446 | vec_iv = PHI <vec_init, vec_loop> | |
3447 | ... | |
3448 | STMT | |
3449 | ... | |
3450 | vec_loop = vec_iv + vec_step; */ | |
3451 | ||
3452 | /* Create the induction-phi that defines the induction-operand. */ | |
3453 | vec_dest = vect_get_new_vect_var (vectype, vect_simple_var, "vec_iv_"); | |
fb85abff | 3454 | induction_phi = create_phi_node (vec_dest, iv_loop->header); |
3455 | set_vinfo_for_stmt (induction_phi, | |
37545e54 | 3456 | new_stmt_vec_info (induction_phi, loop_vinfo, NULL)); |
fb85abff | 3457 | induc_def = PHI_RESULT (induction_phi); |
3458 | ||
3459 | /* Create the iv update inside the loop */ | |
3460 | new_stmt = gimple_build_assign_with_ops (PLUS_EXPR, vec_dest, | |
3461 | induc_def, vec_step); | |
3462 | vec_def = make_ssa_name (vec_dest, new_stmt); | |
3463 | gimple_assign_set_lhs (new_stmt, vec_def); | |
3464 | gsi_insert_before (&si, new_stmt, GSI_SAME_STMT); | |
48e1416a | 3465 | set_vinfo_for_stmt (new_stmt, new_stmt_vec_info (new_stmt, loop_vinfo, |
37545e54 | 3466 | NULL)); |
fb85abff | 3467 | |
3468 | /* Set the arguments of the phi node: */ | |
60d535d2 | 3469 | add_phi_arg (induction_phi, vec_init, pe, UNKNOWN_LOCATION); |
48e1416a | 3470 | add_phi_arg (induction_phi, vec_def, loop_latch_edge (iv_loop), |
60d535d2 | 3471 | UNKNOWN_LOCATION); |
fb85abff | 3472 | |
3473 | ||
3474 | /* In case that vectorization factor (VF) is bigger than the number | |
3475 | of elements that we can fit in a vectype (nunits), we have to generate | |
3476 | more than one vector stmt - i.e - we need to "unroll" the | |
3477 | vector stmt by a factor VF/nunits. For more details see documentation | |
3478 | in vectorizable_operation. */ | |
48e1416a | 3479 | |
fb85abff | 3480 | if (ncopies > 1) |
3481 | { | |
3482 | stmt_vec_info prev_stmt_vinfo; | |
3483 | /* FORNOW. This restriction should be relaxed. */ | |
3484 | gcc_assert (!nested_in_vect_loop); | |
3485 | ||
3486 | /* Create the vector that holds the step of the induction. */ | |
1d62df1c | 3487 | if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr))) |
3488 | { | |
3489 | expr = build_int_cst (integer_type_node, nunits); | |
3490 | expr = fold_convert (TREE_TYPE (step_expr), expr); | |
3491 | } | |
3492 | else | |
3493 | expr = build_int_cst (TREE_TYPE (step_expr), nunits); | |
f1a47479 | 3494 | new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr), |
3495 | expr, step_expr); | |
bb0d2509 | 3496 | if (TREE_CODE (step_expr) == SSA_NAME) |
3497 | new_name = vect_init_vector (iv_phi, new_name, | |
3498 | TREE_TYPE (step_expr), NULL); | |
b797154e | 3499 | t = unshare_expr (new_name); |
bb0d2509 | 3500 | gcc_assert (CONSTANT_CLASS_P (new_name) |
3501 | || TREE_CODE (new_name) == SSA_NAME); | |
f1f41a6c | 3502 | new_vec = build_vector_from_val (stepvectype, t); |
3503 | vec_step = vect_init_vector (iv_phi, new_vec, stepvectype, NULL); | |
fb85abff | 3504 | |
3505 | vec_def = induc_def; | |
3506 | prev_stmt_vinfo = vinfo_for_stmt (induction_phi); | |
3507 | for (i = 1; i < ncopies; i++) | |
3508 | { | |
3509 | /* vec_i = vec_prev + vec_step */ | |
3510 | new_stmt = gimple_build_assign_with_ops (PLUS_EXPR, vec_dest, | |
3511 | vec_def, vec_step); | |
3512 | vec_def = make_ssa_name (vec_dest, new_stmt); | |
3513 | gimple_assign_set_lhs (new_stmt, vec_def); | |
39a5d6b1 | 3514 | |
fb85abff | 3515 | gsi_insert_before (&si, new_stmt, GSI_SAME_STMT); |
0185abae | 3516 | if (!useless_type_conversion_p (resvectype, vectype)) |
3517 | { | |
3518 | new_stmt = gimple_build_assign_with_ops | |
3519 | (VIEW_CONVERT_EXPR, | |
3520 | vect_get_new_vect_var (resvectype, vect_simple_var, | |
3521 | "vec_iv_"), | |
3522 | build1 (VIEW_CONVERT_EXPR, resvectype, | |
3523 | gimple_assign_lhs (new_stmt)), NULL_TREE); | |
3524 | gimple_assign_set_lhs (new_stmt, | |
3525 | make_ssa_name | |
3526 | (gimple_assign_lhs (new_stmt), new_stmt)); | |
3527 | gsi_insert_before (&si, new_stmt, GSI_SAME_STMT); | |
3528 | } | |
fb85abff | 3529 | set_vinfo_for_stmt (new_stmt, |
37545e54 | 3530 | new_stmt_vec_info (new_stmt, loop_vinfo, NULL)); |
fb85abff | 3531 | STMT_VINFO_RELATED_STMT (prev_stmt_vinfo) = new_stmt; |
48e1416a | 3532 | prev_stmt_vinfo = vinfo_for_stmt (new_stmt); |
fb85abff | 3533 | } |
3534 | } | |
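  /* Editor's illustration of the unrolled case above (values assumed): with
     VF = 8 but only nunits = 4 elements per vector, ncopies = 2; the PHI
     still advances by { 8*S, ... } per loop iteration, while each extra copy
     is obtained from the previous one by adding the smaller step
     { nunits*S, ... } = { 4*S, ... }, so together the copies cover eight
     consecutive induction values.  */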
3535 | ||
3536 | if (nested_in_vect_loop) | |
3537 | { | |
3538 | /* Find the loop-closed exit-phi of the induction, and record | |
3539 | the final vector of induction results: */ | |
3540 | exit_phi = NULL; | |
3541 | FOR_EACH_IMM_USE_FAST (use_p, imm_iter, loop_arg) | |
3542 | { | |
0b308eee | 3543 | gimple use_stmt = USE_STMT (use_p); |
3544 | if (is_gimple_debug (use_stmt)) | |
3545 | continue; | |
3546 | ||
3547 | if (!flow_bb_inside_loop_p (iv_loop, gimple_bb (use_stmt))) | |
fb85abff | 3548 | { |
0b308eee | 3549 | exit_phi = use_stmt; |
fb85abff | 3550 | break; |
3551 | } | |
3552 | } | |
48e1416a | 3553 | if (exit_phi) |
fb85abff | 3554 | { |
3555 | stmt_vec_info stmt_vinfo = vinfo_for_stmt (exit_phi); | |
3556 | /* FORNOW. Currently not supporting the case that an inner-loop induction | |
3557 | is not used in the outer-loop (i.e. only outside the outer-loop). */ | |
3558 | gcc_assert (STMT_VINFO_RELEVANT_P (stmt_vinfo) | |
3559 | && !STMT_VINFO_LIVE_P (stmt_vinfo)); | |
3560 | ||
3561 | STMT_VINFO_VEC_STMT (stmt_vinfo) = new_stmt; | |
6d8fb6cf | 3562 | if (dump_enabled_p ()) |
fb85abff | 3563 | { |
7bd765d4 | 3564 | dump_printf_loc (MSG_NOTE, vect_location, |
3565 | "vector of inductions after inner-loop:"); | |
3566 | dump_gimple_stmt (MSG_NOTE, TDF_SLIM, new_stmt, 0); | |
78bb46f5 | 3567 | dump_printf (MSG_NOTE, "\n"); |
fb85abff | 3568 | } |
3569 | } | |
3570 | } | |
3571 | ||
3572 | ||
6d8fb6cf | 3573 | if (dump_enabled_p ()) |
fb85abff | 3574 | { |
7bd765d4 | 3575 | dump_printf_loc (MSG_NOTE, vect_location, |
3576 | "transform induction: created def-use cycle: "); | |
3577 | dump_gimple_stmt (MSG_NOTE, TDF_SLIM, induction_phi, 0); | |
3578 | dump_printf (MSG_NOTE, "\n"); | |
3579 | dump_gimple_stmt (MSG_NOTE, TDF_SLIM, | |
3580 | SSA_NAME_DEF_STMT (vec_def), 0); | |
78bb46f5 | 3581 | dump_printf (MSG_NOTE, "\n"); |
fb85abff | 3582 | } |
3583 | ||
3584 | STMT_VINFO_VEC_STMT (phi_info) = induction_phi; | |
0185abae | 3585 | if (!useless_type_conversion_p (resvectype, vectype)) |
3586 | { | |
3587 | new_stmt = gimple_build_assign_with_ops | |
3588 | (VIEW_CONVERT_EXPR, | |
3589 | vect_get_new_vect_var (resvectype, vect_simple_var, "vec_iv_"), | |
3590 | build1 (VIEW_CONVERT_EXPR, resvectype, induc_def), NULL_TREE); | |
3591 | induc_def = make_ssa_name (gimple_assign_lhs (new_stmt), new_stmt); | |
3592 | gimple_assign_set_lhs (new_stmt, induc_def); | |
c3c33891 | 3593 | si = gsi_after_labels (bb); |
0185abae | 3594 | gsi_insert_before (&si, new_stmt, GSI_SAME_STMT); |
ffb35eed | 3595 | set_vinfo_for_stmt (new_stmt, |
3596 | new_stmt_vec_info (new_stmt, loop_vinfo, NULL)); | |
3597 | STMT_VINFO_RELATED_STMT (vinfo_for_stmt (new_stmt)) | |
3598 | = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (induction_phi)); | |
0185abae | 3599 | } |
3600 | ||
fb85abff | 3601 | return induc_def; |
3602 | } | |
3603 | ||
3604 | ||
3605 | /* Function get_initial_def_for_reduction | |
3606 | ||
3607 | Input: | |
3608 | STMT - a stmt that performs a reduction operation in the loop. | |
3609 | INIT_VAL - the initial value of the reduction variable | |
3610 | ||
3611 | Output: | |
3612 | ADJUSTMENT_DEF - a tree that holds a value to be added to the final result | |
3613 | of the reduction (used for adjusting the epilog - see below). | |
3614 | Return a vector variable, initialized according to the operation that STMT | |
3615 | performs. This vector will be used as the initial value of the | |
3616 | vector of partial results. | |
3617 | ||
3618 | Option1 (adjust in epilog): Initialize the vector as follows: | |
0df23b96 | 3619 | add/bit or/xor: [0,0,...,0,0] |
3620 | mult/bit and: [1,1,...,1,1] | |
3621 | min/max/cond_expr: [init_val,init_val,..,init_val,init_val] | |
fb85abff | 3622 | and when necessary (e.g. add/mult case) let the caller know |
3623 | that it needs to adjust the result by init_val. | |
3624 | ||
3625 | Option2: Initialize the vector as follows: | |
0df23b96 | 3626 | add/bit or/xor: [init_val,0,0,...,0] |
3627 | mult/bit and: [init_val,1,1,...,1] | |
3628 | min/max/cond_expr: [init_val,init_val,...,init_val] | |
fb85abff | 3629 | and no adjustments are needed. |
3630 | ||
3631 | For example, for the following code: | |
3632 | ||
3633 | s = init_val; | |
3634 | for (i=0;i<n;i++) | |
3635 | s = s + a[i]; | |
3636 | ||
3637 | STMT is 's = s + a[i]', and the reduction variable is 's'. | |
3638 | For a vector of 4 units, we want to return either [0,0,0,init_val], | |
3639 | or [0,0,0,0] and let the caller know that it needs to adjust | |
3640 | the result at the end by 'init_val'. | |
3641 | ||
3642 | FORNOW, we use the 'adjust in epilog' scheme (Option1) when ADJUSTMENT_DEF |
7aa0d350 | 3643 | is not NULL, because this way the initialization vector is simpler |
3644 | (same element in all entries), and Option2 otherwise. |
48e1416a | 3645 | |
fb85abff | 3646 | A cost model should help decide between these two schemes. */ |
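/* Editor's illustration (assumed values): for a product reduction
   p = init_val * a[0] * ... * a[n-1] with init_val = 5 and four lanes,
   Option1 returns [1,1,1,1] and reports ADJUSTMENT_DEF = 5, so the epilog
   folds the 5 back into the reduced result, while Option2 returns [5,1,1,1]
   and needs no adjustment.  */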
3647 | ||
3648 | tree | |
48e1416a | 3649 | get_initial_def_for_reduction (gimple stmt, tree init_val, |
7aa0d350 | 3650 | tree *adjustment_def) |
fb85abff | 3651 | { |
3652 | stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt); | |
3653 | loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo); | |
3654 | struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); | |
1efcacec | 3655 | tree scalar_type = TREE_TYPE (init_val); |
3656 | tree vectype = get_vectype_for_scalar_type (scalar_type); | |
3657 | int nunits; | |
fb85abff | 3658 | enum tree_code code = gimple_assign_rhs_code (stmt); |
fb85abff | 3659 | tree def_for_init; |
3660 | tree init_def; | |
fadf62f4 | 3661 | tree *elts; |
fb85abff | 3662 | int i; |
48e1416a | 3663 | bool nested_in_vect_loop = false; |
7aa0d350 | 3664 | tree init_value; |
3665 | REAL_VALUE_TYPE real_init_val = dconst0; | |
3666 | int int_init_val = 0; | |
c0a0357c | 3667 | gimple def_stmt = NULL; |
fb85abff | 3668 | |
1efcacec | 3669 | gcc_assert (vectype); |
3670 | nunits = TYPE_VECTOR_SUBPARTS (vectype); | |
3671 | ||
3672 | gcc_assert (POINTER_TYPE_P (scalar_type) || INTEGRAL_TYPE_P (scalar_type) | |
3673 | || SCALAR_FLOAT_TYPE_P (scalar_type)); | |
7aa0d350 | 3674 | |
fb85abff | 3675 | if (nested_in_vect_loop_p (loop, stmt)) |
3676 | nested_in_vect_loop = true; | |
3677 | else | |
3678 | gcc_assert (loop == (gimple_bb (stmt))->loop_father); | |
3679 | ||
7aa0d350 | 3680 | /* In case of double reduction we only create a vector variable to be put |
282bf14c | 3681 | in the reduction phi node. The actual statement creation is done in |
7aa0d350 | 3682 | vect_create_epilog_for_reduction. */ |
c0a0357c | 3683 | if (adjustment_def && nested_in_vect_loop |
3684 | && TREE_CODE (init_val) == SSA_NAME | |
3685 | && (def_stmt = SSA_NAME_DEF_STMT (init_val)) | |
3686 | && gimple_code (def_stmt) == GIMPLE_PHI | |
3687 | && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)) | |
48e1416a | 3688 | && vinfo_for_stmt (def_stmt) |
3689 | && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt)) | |
7aa0d350 | 3690 | == vect_double_reduction_def) |
3691 | { | |
3692 | *adjustment_def = NULL; | |
3693 | return vect_create_destination_var (init_val, vectype); | |
3694 | } | |
fb85abff | 3695 | |
7aa0d350 | 3696 | if (TREE_CONSTANT (init_val)) |
3697 | { | |
3698 | if (SCALAR_FLOAT_TYPE_P (scalar_type)) | |
3699 | init_value = build_real (scalar_type, TREE_REAL_CST (init_val)); | |
3700 | else | |
f9ae6f95 | 3701 | init_value = build_int_cst (scalar_type, TREE_INT_CST_LOW (init_val)); |
7aa0d350 | 3702 | } |
3703 | else | |
3704 | init_value = init_val; | |
fb85abff | 3705 | |
7aa0d350 | 3706 | switch (code) |
3707 | { | |
3708 | case WIDEN_SUM_EXPR: | |
3709 | case DOT_PROD_EXPR: | |
a2287001 | 3710 | case SAD_EXPR: |
7aa0d350 | 3711 | case PLUS_EXPR: |
3712 | case MINUS_EXPR: | |
3713 | case BIT_IOR_EXPR: | |
3714 | case BIT_XOR_EXPR: | |
3715 | case MULT_EXPR: | |
3716 | case BIT_AND_EXPR: | |
48e1416a | 3717 | /* ADJUSTMENT_DEF is NULL when called from |
7aa0d350 | 3718 | vect_create_epilog_for_reduction to vectorize double reduction. */ |
3719 | if (adjustment_def) | |
3720 | { | |
3721 | if (nested_in_vect_loop) | |
48e1416a | 3722 | *adjustment_def = vect_get_vec_def_for_operand (init_val, stmt, |
7aa0d350 | 3723 | NULL); |
3724 | else | |
3725 | *adjustment_def = init_val; | |
3726 | } | |
3727 | ||
b036fcd8 | 3728 | if (code == MULT_EXPR) |
7aa0d350 | 3729 | { |
3730 | real_init_val = dconst1; | |
3731 | int_init_val = 1; | |
3732 | } | |
3733 | ||
b036fcd8 | 3734 | if (code == BIT_AND_EXPR) |
3735 | int_init_val = -1; | |
3736 | ||
7aa0d350 | 3737 | if (SCALAR_FLOAT_TYPE_P (scalar_type)) |
3738 | def_for_init = build_real (scalar_type, real_init_val); | |
3739 | else | |
3740 | def_for_init = build_int_cst (scalar_type, int_init_val); | |
3741 | ||
48e1416a | 3742 | /* Create a vector of '0' or '1' except the first element. */ |
fadf62f4 | 3743 | elts = XALLOCAVEC (tree, nunits); |
7aa0d350 | 3744 | for (i = nunits - 2; i >= 0; --i) |
fadf62f4 | 3745 | elts[i + 1] = def_for_init; |
7aa0d350 | 3746 | |
3747 | /* Option1: the first element is '0' or '1' as well. */ | |
3748 | if (adjustment_def) | |
3749 | { | |
fadf62f4 | 3750 | elts[0] = def_for_init; |
3751 | init_def = build_vector (vectype, elts); | |
7aa0d350 | 3752 | break; |
3753 | } | |
3754 | ||
3755 | /* Option2: the first element is INIT_VAL. */ | |
fadf62f4 | 3756 | elts[0] = init_val; |
7aa0d350 | 3757 | if (TREE_CONSTANT (init_val)) |
fadf62f4 | 3758 | init_def = build_vector (vectype, elts); |
7aa0d350 | 3759 | else |
fadf62f4 | 3760 | { |
f1f41a6c | 3761 | vec<constructor_elt, va_gc> *v; |
3762 | vec_alloc (v, nunits); | |
fadf62f4 | 3763 | CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, init_val); |
3764 | for (i = 1; i < nunits; ++i) | |
3765 | CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, elts[i]); | |
3766 | init_def = build_constructor (vectype, v); | |
3767 | } | |
7aa0d350 | 3768 | |
3769 | break; | |
3770 | ||
3771 | case MIN_EXPR: | |
3772 | case MAX_EXPR: | |
0df23b96 | 3773 | case COND_EXPR: |
7aa0d350 | 3774 | if (adjustment_def) |
3775 | { | |
3776 | *adjustment_def = NULL_TREE; | |
3777 | init_def = vect_get_vec_def_for_operand (init_val, stmt, NULL); | |
3778 | break; | |
3779 | } | |
3780 | ||
b797154e | 3781 | init_def = build_vector_from_val (vectype, init_value); |
7aa0d350 | 3782 | break; |
3783 | ||
3784 | default: | |
3785 | gcc_unreachable (); | |
3786 | } | |
fb85abff | 3787 | |
3788 | return init_def; | |
3789 | } | |
3790 | ||
3791 | ||
3792 | /* Function vect_create_epilog_for_reduction | |
48e1416a | 3793 | |
fb85abff | 3794 | Create code at the loop-epilog to finalize the result of a reduction |
eefa05c8 | 3795 | computation. |
3796 | ||
3797 | VECT_DEFS is list of vector of partial results, i.e., the lhs's of vector | |
3798 | reduction statements. | |
3799 | STMT is the scalar reduction stmt that is being vectorized. | |
fb85abff | 3800 | NCOPIES is > 1 in case the vectorization factor (VF) is bigger than the |
282bf14c | 3801 | number of elements that we can fit in a vectype (nunits). In this case |
fb85abff | 3802 | we have to generate more than one vector stmt - i.e - we need to "unroll" |
3803 | the vector stmt by a factor VF/nunits. For more details see documentation | |
3804 | in vectorizable_operation. | |
eefa05c8 | 3805 | REDUC_CODE is the tree-code for the epilog reduction. |
3806 | REDUCTION_PHIS is a list of the phi-nodes that carry the reduction | |
3807 | computation. | |
3808 | REDUC_INDEX is the index of the operand in the right hand side of the | |
ade2ac53 | 3809 | statement that is defined by REDUCTION_PHI. |
7aa0d350 | 3810 | DOUBLE_REDUC is TRUE if double reduction phi nodes should be handled. |
eefa05c8 | 3811 | SLP_NODE is an SLP node containing a group of reduction statements. The |
3812 | first one in this group is STMT. | |
fb85abff | 3813 | |
3814 | This function: | |
eefa05c8 | 3815 | 1. Creates the reduction def-use cycles: sets the arguments for |
3816 | REDUCTION_PHIS: | |
fb85abff | 3817 | The loop-entry argument is the vectorized initial-value of the reduction. |
eefa05c8 | 3818 | The loop-latch argument is taken from VECT_DEFS - the vector of partial |
3819 | sums. | |
3820 | 2. "Reduces" each vector of partial results VECT_DEFS into a single result, | |
3821 | by applying the operation specified by REDUC_CODE if available, or by | |
fb85abff | 3822 | other means (whole-vector shifts or a scalar loop). |
48e1416a | 3823 | The function also creates a new phi node at the loop exit to preserve |
fb85abff | 3824 | loop-closed form, as illustrated below. |
48e1416a | 3825 | |
fb85abff | 3826 | The flow at the entry to this function: |
48e1416a | 3827 | |
fb85abff | 3828 | loop: |
3829 | vec_def = phi <null, null> # REDUCTION_PHI | |
3830 | VECT_DEF = vector_stmt # vectorized form of STMT | |
3831 | s_loop = scalar_stmt # (scalar) STMT | |
3832 | loop_exit: | |
3833 | s_out0 = phi <s_loop> # (scalar) EXIT_PHI | |
3834 | use <s_out0> | |
3835 | use <s_out0> | |
3836 | ||
3837 | The above is transformed by this function into: | |
3838 | ||
3839 | loop: | |
3840 | vec_def = phi <vec_init, VECT_DEF> # REDUCTION_PHI | |
3841 | VECT_DEF = vector_stmt # vectorized form of STMT | |
48e1416a | 3842 | s_loop = scalar_stmt # (scalar) STMT |
fb85abff | 3843 | loop_exit: |
3844 | s_out0 = phi <s_loop> # (scalar) EXIT_PHI | |
3845 | v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI | |
3846 | v_out2 = reduce <v_out1> | |
3847 | s_out3 = extract_field <v_out2, 0> | |
3848 | s_out4 = adjust_result <s_out3> | |
3849 | use <s_out4> | |
3850 | use <s_out4> | |
3851 | */ | |
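/* Editor's sketch of step 1 under scheme 2 (whole-vector shifts), assuming a
   four-element sum reduction; the lane that ends up holding the full sum
   depends on the shift direction and is shown here as lane 0:

     v1 = vec_shift <v_out1, 2 elements>
     v2 = v_out1 + v1
     v3 = vec_shift <v2, 1 element>
     v_out2 = v2 + v3
     s_out3 = extract_field <v_out2, 0>

   followed, when Option1 of get_initial_def_for_reduction was used, by
   s_out4 = s_out3 + ADJUSTMENT_DEF.  */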
3852 | ||
3853 | static void | |
f1f41a6c | 3854 | vect_create_epilog_for_reduction (vec<tree> vect_defs, gimple stmt, |
eefa05c8 | 3855 | int ncopies, enum tree_code reduc_code, |
f1f41a6c | 3856 | vec<gimple> reduction_phis, |
eefa05c8 | 3857 | int reduc_index, bool double_reduc, |
3858 | slp_tree slp_node) | |
fb85abff | 3859 | { |
3860 | stmt_vec_info stmt_info = vinfo_for_stmt (stmt); | |
3861 | stmt_vec_info prev_phi_info; | |
3862 | tree vectype; | |
3754d046 | 3863 | machine_mode mode; |
fb85abff | 3864 | loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); |
7aa0d350 | 3865 | struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo), *outer_loop = NULL; |
fb85abff | 3866 | basic_block exit_bb; |
3867 | tree scalar_dest; | |
3868 | tree scalar_type; | |
3869 | gimple new_phi = NULL, phi; | |
3870 | gimple_stmt_iterator exit_gsi; | |
3871 | tree vec_dest; | |
eefa05c8 | 3872 | tree new_temp = NULL_TREE, new_dest, new_name, new_scalar_dest; |
fb85abff | 3873 | gimple epilog_stmt = NULL; |
eefa05c8 | 3874 | enum tree_code code = gimple_assign_rhs_code (stmt); |
fb85abff | 3875 | gimple exit_phi; |
f018d957 | 3876 | tree bitsize, bitpos; |
eefa05c8 | 3877 | tree adjustment_def = NULL; |
3878 | tree vec_initial_def = NULL; | |
3879 | tree reduction_op, expr, def; | |
3880 | tree orig_name, scalar_result; | |
b219ece3 | 3881 | imm_use_iterator imm_iter, phi_imm_iter; |
3882 | use_operand_p use_p, phi_use_p; | |
fb85abff | 3883 | bool extract_scalar_result = false; |
eefa05c8 | 3884 | gimple use_stmt, orig_stmt, reduction_phi = NULL; |
fb85abff | 3885 | bool nested_in_vect_loop = false; |
c2078b80 | 3886 | auto_vec<gimple> new_phis; |
3887 | auto_vec<gimple> inner_phis; | |
fb85abff | 3888 | enum vect_def_type dt = vect_unknown_def_type; |
3889 | int j, i; | |
c2078b80 | 3890 | auto_vec<tree> scalar_results; |
47deb25f | 3891 | unsigned int group_size = 1, k, ratio; |
c2078b80 | 3892 | auto_vec<tree> vec_initial_defs; |
3893 | auto_vec<gimple> phis; | |
39a5d6b1 | 3894 | bool slp_reduc = false; |
3895 | tree new_phi_result; | |
58045f90 | 3896 | gimple inner_phi = NULL; |
eefa05c8 | 3897 | |
3898 | if (slp_node) | |
f1f41a6c | 3899 | group_size = SLP_TREE_SCALAR_STMTS (slp_node).length (); |
48e1416a | 3900 | |
fb85abff | 3901 | if (nested_in_vect_loop_p (loop, stmt)) |
3902 | { | |
7aa0d350 | 3903 | outer_loop = loop; |
fb85abff | 3904 | loop = loop->inner; |
3905 | nested_in_vect_loop = true; | |
eefa05c8 | 3906 | gcc_assert (!slp_node); |
fb85abff | 3907 | } |
48e1416a | 3908 | |
fb85abff | 3909 | switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt))) |
3910 | { | |
3911 | case GIMPLE_SINGLE_RHS: | |
48e1416a | 3912 | gcc_assert (TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt)) |
c86930b0 | 3913 | == ternary_op); |
ade2ac53 | 3914 | reduction_op = TREE_OPERAND (gimple_assign_rhs1 (stmt), reduc_index); |
fb85abff | 3915 | break; |
3916 | case GIMPLE_UNARY_RHS: | |
3917 | reduction_op = gimple_assign_rhs1 (stmt); | |
3918 | break; | |
3919 | case GIMPLE_BINARY_RHS: | |
48e1416a | 3920 | reduction_op = reduc_index ? |
ade2ac53 | 3921 | gimple_assign_rhs2 (stmt) : gimple_assign_rhs1 (stmt); |
fb85abff | 3922 | break; |
c86930b0 | 3923 | case GIMPLE_TERNARY_RHS: |
3924 | reduction_op = gimple_op (stmt, reduc_index + 1); | |
3925 | break; | |
fb85abff | 3926 | default: |
3927 | gcc_unreachable (); | |
3928 | } | |
3929 | ||
3930 | vectype = get_vectype_for_scalar_type (TREE_TYPE (reduction_op)); | |
3931 | gcc_assert (vectype); | |
3932 | mode = TYPE_MODE (vectype); | |
3933 | ||
eefa05c8 | 3934 | /* 1. Create the reduction def-use cycle: |
3935 | Set the arguments of REDUCTION_PHIS, i.e., transform | |
48e1416a | 3936 | |
eefa05c8 | 3937 | loop: |
3938 | vec_def = phi <null, null> # REDUCTION_PHI | |
3939 | VECT_DEF = vector_stmt # vectorized form of STMT | |
3940 | ... | |
fb85abff | 3941 | |
eefa05c8 | 3942 | into: |
3943 | ||
3944 | loop: | |
3945 | vec_def = phi <vec_init, VECT_DEF> # REDUCTION_PHI | |
3946 | VECT_DEF = vector_stmt # vectorized form of STMT | |
3947 | ... | |
3948 | ||
3949 | (in case of SLP, do it for all the phis). */ | |
3950 | ||
3951 | /* Get the loop-entry arguments. */ | |
3952 | if (slp_node) | |
b0f64919 | 3953 | vect_get_vec_defs (reduction_op, NULL_TREE, stmt, &vec_initial_defs, |
3954 | NULL, slp_node, reduc_index); | |
eefa05c8 | 3955 | else |
3956 | { | |
f1f41a6c | 3957 | vec_initial_defs.create (1); |
eefa05c8 | 3958 | /* For the case of reduction, vect_get_vec_def_for_operand returns |
3959 | the scalar def before the loop, that defines the initial value | |
3960 | of the reduction variable. */ | |
3961 | vec_initial_def = vect_get_vec_def_for_operand (reduction_op, stmt, | |
3962 | &adjustment_def); | |
f1f41a6c | 3963 | vec_initial_defs.quick_push (vec_initial_def); |
eefa05c8 | 3964 | } |
3965 | ||
3966 | /* Set phi nodes arguments. */ | |
f1f41a6c | 3967 | FOR_EACH_VEC_ELT (reduction_phis, i, phi) |
fb85abff | 3968 | { |
aae8f543 | 3969 | tree vec_init_def, def; |
3970 | gimple_seq stmts; | |
3971 | vec_init_def = force_gimple_operand (vec_initial_defs[i], &stmts, | |
3972 | true, NULL_TREE); | |
3973 | gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts); | |
3974 | def = vect_defs[i]; | |
eefa05c8 | 3975 | for (j = 0; j < ncopies; j++) |
3976 | { | |
3977 | /* Set the loop-entry arg of the reduction-phi. */ | |
3978 | add_phi_arg (phi, vec_init_def, loop_preheader_edge (loop), | |
60d535d2 | 3979 | UNKNOWN_LOCATION); |
fb85abff | 3980 | |
eefa05c8 | 3981 | /* Set the loop-latch arg for the reduction-phi. */ |
3982 | if (j > 0) | |
3983 | def = vect_get_vec_def_for_stmt_copy (vect_unknown_def_type, def); | |
fb85abff | 3984 | |
60d535d2 | 3985 | add_phi_arg (phi, def, loop_latch_edge (loop), UNKNOWN_LOCATION); |
fb85abff | 3986 | |
6d8fb6cf | 3987 | if (dump_enabled_p ()) |
eefa05c8 | 3988 | { |
7bd765d4 | 3989 | dump_printf_loc (MSG_NOTE, vect_location, |
3990 | "transform reduction: created def-use cycle: "); | |
3991 | dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0); | |
3992 | dump_printf (MSG_NOTE, "\n"); | |
3993 | dump_gimple_stmt (MSG_NOTE, TDF_SLIM, SSA_NAME_DEF_STMT (def), 0); | |
78bb46f5 | 3994 | dump_printf (MSG_NOTE, "\n"); |
eefa05c8 | 3995 | } |
3996 | ||
3997 | phi = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (phi)); | |
3998 | } | |
fb85abff | 3999 | } |
4000 | ||
eefa05c8 | 4001 | /* 2. Create epilog code. |
4002 | The reduction epilog code operates across the elements of the vector | |
4003 | of partial results computed by the vectorized loop. | |
4004 | The reduction epilog code consists of: | |
fb85abff | 4005 | |
eefa05c8 | 4006 | step 1: compute the scalar result in a vector (v_out2) |
4007 | step 2: extract the scalar result (s_out3) from the vector (v_out2) | |
4008 | step 3: adjust the scalar result (s_out3) if needed. | |
4009 | ||
4010 | Step 1 can be accomplished using one the following three schemes: | |
fb85abff | 4011 | (scheme 1) using reduc_code, if available. |
4012 | (scheme 2) using whole-vector shifts, if available. | |
48e1416a | 4013 | (scheme 3) using a scalar loop. In this case steps 1+2 above are |
fb85abff | 4014 | combined. |
48e1416a | 4015 | |
fb85abff | 4016 | The overall epilog code looks like this: |
4017 | ||
4018 | s_out0 = phi <s_loop> # original EXIT_PHI | |
4019 | v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI | |
4020 | v_out2 = reduce <v_out1> # step 1 | |
4021 | s_out3 = extract_field <v_out2, 0> # step 2 | |
4022 | s_out4 = adjust_result <s_out3> # step 3 | |
4023 | ||
4024 | (step 3 is optional, and steps 1 and 2 may be combined). | |
eefa05c8 | 4025 | Lastly, the uses of s_out0 are replaced by s_out4. */ |
fb85abff | 4026 | |
fb85abff | 4027 | |
eefa05c8 | 4028 | /* 2.1 Create new loop-exit-phis to preserve loop-closed form: |
4029 | v_out1 = phi <VECT_DEF> | |
4030 | Store them in NEW_PHIS. */ | |
fb85abff | 4031 | |
4032 | exit_bb = single_exit (loop)->dest; | |
fb85abff | 4033 | prev_phi_info = NULL; |
f1f41a6c | 4034 | new_phis.create (vect_defs.length ()); |
4035 | FOR_EACH_VEC_ELT (vect_defs, i, def) | |
fb85abff | 4036 | { |
eefa05c8 | 4037 | for (j = 0; j < ncopies; j++) |
4038 | { | |
874117c8 | 4039 | tree new_def = copy_ssa_name (def, NULL); |
4040 | phi = create_phi_node (new_def, exit_bb); | |
eefa05c8 | 4041 | set_vinfo_for_stmt (phi, new_stmt_vec_info (phi, loop_vinfo, NULL)); |
4042 | if (j == 0) | |
f1f41a6c | 4043 | new_phis.quick_push (phi); |
eefa05c8 | 4044 | else |
4045 | { | |
4046 | def = vect_get_vec_def_for_stmt_copy (dt, def); | |
4047 | STMT_VINFO_RELATED_STMT (prev_phi_info) = phi; | |
4048 | } | |
4049 | ||
4050 | SET_PHI_ARG_DEF (phi, single_exit (loop)->dest_idx, def); | |
4051 | prev_phi_info = vinfo_for_stmt (phi); | |
4052 | } | |
fb85abff | 4053 | } |
ade2ac53 | 4054 | |
b219ece3 | 4055 | /* The epilogue is created for the outer-loop, i.e., for the loop being |
58045f90 | 4056 | vectorized. Create exit phis for the outer loop. */ |
b219ece3 | 4057 | if (double_reduc) |
4058 | { | |
4059 | loop = outer_loop; | |
4060 | exit_bb = single_exit (loop)->dest; | |
f1f41a6c | 4061 | inner_phis.create (vect_defs.length ()); |
4062 | FOR_EACH_VEC_ELT (new_phis, i, phi) | |
58045f90 | 4063 | { |
874117c8 | 4064 | tree new_result = copy_ssa_name (PHI_RESULT (phi), NULL); |
4065 | gimple outer_phi = create_phi_node (new_result, exit_bb); | |
58045f90 | 4066 | SET_PHI_ARG_DEF (outer_phi, single_exit (loop)->dest_idx, |
4067 | PHI_RESULT (phi)); | |
4068 | set_vinfo_for_stmt (outer_phi, new_stmt_vec_info (outer_phi, | |
4069 | loop_vinfo, NULL)); | |
f1f41a6c | 4070 | inner_phis.quick_push (phi); |
4071 | new_phis[i] = outer_phi; | |
58045f90 | 4072 | prev_phi_info = vinfo_for_stmt (outer_phi); |
4073 | while (STMT_VINFO_RELATED_STMT (vinfo_for_stmt (phi))) | |
4074 | { | |
4075 | phi = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (phi)); | |
874117c8 | 4076 | new_result = copy_ssa_name (PHI_RESULT (phi), NULL); |
4077 | outer_phi = create_phi_node (new_result, exit_bb); | |
58045f90 | 4078 | SET_PHI_ARG_DEF (outer_phi, single_exit (loop)->dest_idx, |
4079 | PHI_RESULT (phi)); | |
4080 | set_vinfo_for_stmt (outer_phi, new_stmt_vec_info (outer_phi, | |
4081 | loop_vinfo, NULL)); | |
4082 | STMT_VINFO_RELATED_STMT (prev_phi_info) = outer_phi; | |
4083 | prev_phi_info = vinfo_for_stmt (outer_phi); | |
4084 | } | |
4085 | } | |
b219ece3 | 4086 | } |
4087 | ||
fb85abff | 4088 | exit_gsi = gsi_after_labels (exit_bb); |
4089 | ||
48e1416a | 4090 | /* 2.2 Get the relevant tree-code to use in the epilog for schemes 2,3 |
fb85abff | 4091 | (i.e. when reduc_code is not available) and in the final adjustment |
4092 | code (if needed). Also get the original scalar reduction variable as | |
48e1416a | 4093 | defined in the loop. In case STMT is a "pattern-stmt" (i.e. - it |
4094 | represents a reduction pattern), the tree-code and scalar-def are | |
4095 | taken from the original stmt that the pattern-stmt (STMT) replaces. | |
fb85abff | 4096 | Otherwise (it is a regular reduction) - the tree-code and scalar-def |
48e1416a | 4097 | are taken from STMT. */ |
fb85abff | 4098 | |
4099 | orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info); | |
4100 | if (!orig_stmt) | |
4101 | { | |
4102 | /* Regular reduction */ | |
4103 | orig_stmt = stmt; | |
4104 | } | |
4105 | else | |
4106 | { | |
4107 | /* Reduction pattern */ | |
4108 | stmt_vec_info stmt_vinfo = vinfo_for_stmt (orig_stmt); | |
4109 | gcc_assert (STMT_VINFO_IN_PATTERN_P (stmt_vinfo)); | |
4110 | gcc_assert (STMT_VINFO_RELATED_STMT (stmt_vinfo) == stmt); | |
4111 | } | |
ade2ac53 | 4112 | |
fb85abff | 4113 | code = gimple_assign_rhs_code (orig_stmt); |
eefa05c8 | 4114 | /* For MINUS_EXPR the initial vector is [init_val,0,...,0], therefore, |
4115 | partial results are added and not subtracted. */ | |
4116 | if (code == MINUS_EXPR) | |
4117 | code = PLUS_EXPR; | |
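  /* E.g., for "s -= a[i]": starting from the initial vector
     [init_val,0,...,0], each lane of the vector accumulator ends up holding
     a negated partial sum, so adding the lanes with PLUS_EXPR yields
     init_val - (a[0] + ... + a[N-1]), the desired scalar result.  */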
4118 | ||
fb85abff | 4119 | scalar_dest = gimple_assign_lhs (orig_stmt); |
4120 | scalar_type = TREE_TYPE (scalar_dest); | |
f1f41a6c | 4121 | scalar_results.create (group_size); |
fb85abff | 4122 | new_scalar_dest = vect_create_destination_var (scalar_dest, NULL); |
4123 | bitsize = TYPE_SIZE (scalar_type); | |
fb85abff | 4124 | |
fb85abff | 4125 | /* In case this is a reduction in an inner-loop while vectorizing an outer |
4126 | loop - we don't need to extract a single scalar result at the end of the | |
7aa0d350 | 4127 | inner-loop (unless it is double reduction, i.e., the use of reduction is |
282bf14c | 4128 | outside the outer-loop). The final vector of partial results will be used |
7aa0d350 | 4129 | in the vectorized outer-loop, or reduced to a scalar result at the end of |
4130 | the outer-loop. */ | |
4131 | if (nested_in_vect_loop && !double_reduc) | |
fb85abff | 4132 | goto vect_finalize_reduction; |
4133 | ||
39a5d6b1 | 4134 | /* SLP reduction without reduction chain, e.g., |
4135 | # a1 = phi <a2, a0> | |
4136 | # b1 = phi <b2, b0> | |
4137 | a2 = operation (a1) | |
4138 | b2 = operation (b1) */ | |
4139 | slp_reduc = (slp_node && !GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt))); | |
4140 | ||
4141 | /* In case of reduction chain, e.g., | |
4142 | # a1 = phi <a3, a0> | |
4143 | a2 = operation (a1) | |
4144 | a3 = operation (a2), | |
4145 | ||
4146 | we may end up with more than one vector result. Here we reduce them to | |
4147 | one vector. */ | |
4148 | if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt))) | |
4149 | { | |
f1f41a6c | 4150 | tree first_vect = PHI_RESULT (new_phis[0]); |
39a5d6b1 | 4151 | tree tmp; |
2f4ce795 | 4152 | gimple new_vec_stmt = NULL; |
39a5d6b1 | 4153 | |
4154 | vec_dest = vect_create_destination_var (scalar_dest, vectype); | |
f1f41a6c | 4155 | for (k = 1; k < new_phis.length (); k++) |
39a5d6b1 | 4156 | { |
f1f41a6c | 4157 | gimple next_phi = new_phis[k]; |
39a5d6b1 | 4158 | tree second_vect = PHI_RESULT (next_phi); |
39a5d6b1 | 4159 | |
4160 | tmp = build2 (code, vectype, first_vect, second_vect); | |
4161 | new_vec_stmt = gimple_build_assign (vec_dest, tmp); | |
4162 | first_vect = make_ssa_name (vec_dest, new_vec_stmt); | |
4163 | gimple_assign_set_lhs (new_vec_stmt, first_vect); | |
4164 | gsi_insert_before (&exit_gsi, new_vec_stmt, GSI_SAME_STMT); | |
4165 | } | |
4166 | ||
4167 | new_phi_result = first_vect; | |
2f4ce795 | 4168 | if (new_vec_stmt) |
4169 | { | |
f1f41a6c | 4170 | new_phis.truncate (0); |
4171 | new_phis.safe_push (new_vec_stmt); | |
2f4ce795 | 4172 | } |
39a5d6b1 | 4173 | } |
4174 | else | |
f1f41a6c | 4175 | new_phi_result = PHI_RESULT (new_phis[0]); |
39a5d6b1 | 4176 | |
fb85abff | 4177 | /* 2.3 Create the reduction code, using one of the three schemes described |
eefa05c8 | 4178 | above. In SLP we simply need to extract all the elements from the |
4179 | vector (without reducing them), so we use scalar shifts. */ | |
39a5d6b1 | 4180 | if (reduc_code != ERROR_MARK && !slp_reduc) |
fb85abff | 4181 | { |
4182 | tree tmp; | |
7ba68b18 | 4183 | tree vec_elem_type; |
fb85abff | 4184 | |
4185 | /*** Case 1: Create: | |
eefa05c8 | 4186 | v_out2 = reduc_expr <v_out1> */ |
fb85abff | 4187 | |
6d8fb6cf | 4188 | if (dump_enabled_p ()) |
7bd765d4 | 4189 | dump_printf_loc (MSG_NOTE, vect_location, |
78bb46f5 | 4190 | "Reduce using direct vector reduction.\n"); |
fb85abff | 4191 | |
7ba68b18 | 4192 | vec_elem_type = TREE_TYPE (TREE_TYPE (new_phi_result)); |
4193 | if (!useless_type_conversion_p (scalar_type, vec_elem_type)) | |
4194 | { | |
4195 | tree tmp_dest = | |
4196 | vect_create_destination_var (scalar_dest, vec_elem_type); | |
4197 | tmp = build1 (reduc_code, vec_elem_type, new_phi_result); | |
4198 | epilog_stmt = gimple_build_assign (tmp_dest, tmp); | |
4199 | new_temp = make_ssa_name (tmp_dest, epilog_stmt); | |
4200 | gimple_assign_set_lhs (epilog_stmt, new_temp); | |
4201 | gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT); | |
4202 | ||
4203 | tmp = build1 (NOP_EXPR, scalar_type, new_temp); | |
4204 | } | |
4205 | else | |
4206 | tmp = build1 (reduc_code, scalar_type, new_phi_result); | |
4207 | epilog_stmt = gimple_build_assign (new_scalar_dest, tmp); | |
4208 | new_temp = make_ssa_name (new_scalar_dest, epilog_stmt); | |
fb85abff | 4209 | gimple_assign_set_lhs (epilog_stmt, new_temp); |
4210 | gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT); | |
7ba68b18 | 4211 | scalar_results.safe_push (new_temp); |
fb85abff | 4212 | } |
4213 | else | |
4214 | { | |
bc620c5c | 4215 | enum tree_code shift_code = ERROR_MARK; |
fb85abff | 4216 | bool have_whole_vector_shift = true; |
4217 | int bit_offset; | |
e913b5cd | 4218 | int element_bitsize = tree_to_uhwi (bitsize); |
4219 | int vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype)); | |
fb85abff | 4220 | tree vec_temp; |
4221 | ||
d6bf3b14 | 4222 | if (optab_handler (vec_shr_optab, mode) != CODE_FOR_nothing) |
eefa05c8 | 4223 | shift_code = VEC_RSHIFT_EXPR; |
fb85abff | 4224 | else |
eefa05c8 | 4225 | have_whole_vector_shift = false; |
fb85abff | 4226 | |
4227 | /* Regardless of whether we have a whole vector shift, if we're | |
eefa05c8 | 4228 | emulating the operation via tree-vect-generic, we don't want |
4229 | to use it. Only the first round of the reduction is likely | |
4230 | to still be profitable via emulation. */ | |
fb85abff | 4231 | /* ??? It might be better to emit a reduction tree code here, so that |
eefa05c8 | 4232 | tree-vect-generic can expand the first round via bit tricks. */ |
fb85abff | 4233 | if (!VECTOR_MODE_P (mode)) |
eefa05c8 | 4234 | have_whole_vector_shift = false; |
fb85abff | 4235 | else |
fb85abff | 4236 | { |
eefa05c8 | 4237 | optab optab = optab_for_tree_code (code, vectype, optab_default); |
d6bf3b14 | 4238 | if (optab_handler (optab, mode) == CODE_FOR_nothing) |
eefa05c8 | 4239 | have_whole_vector_shift = false; |
4240 | } | |
fb85abff | 4241 | |
39a5d6b1 | 4242 | if (have_whole_vector_shift && !slp_reduc) |
eefa05c8 | 4243 | { |
4244 | /*** Case 2: Create: | |
4245 | for (offset = VS/2; offset >= element_size; offset/=2) | |
4246 | { | |
4247 | Create: va' = vec_shift <va, offset> | |
4248 | Create: va = vop <va, va'> | |
4249 | } */ | |
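	  /* E.g., for a 128-bit V4SI vector (vec_size_in_bits == 128,
	     element_bitsize == 32) the loop below emits:
	       va' = VEC_RSHIFT_EXPR <va, 64>;   va = va + va';
	       va' = VEC_RSHIFT_EXPR <va, 32>;   va = va + va';
	     leaving the complete reduction value in a single element,
	     which is extracted in step 2.4.  */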
fb85abff | 4250 | |
6d8fb6cf | 4251 | if (dump_enabled_p ()) |
7bd765d4 | 4252 | dump_printf_loc (MSG_NOTE, vect_location, |
78bb46f5 | 4253 | "Reduce using vector shifts\n"); |
eefa05c8 | 4254 | |
4255 | vec_dest = vect_create_destination_var (scalar_dest, vectype); | |
39a5d6b1 | 4256 | new_temp = new_phi_result; |
eefa05c8 | 4257 | for (bit_offset = vec_size_in_bits/2; |
4258 | bit_offset >= element_bitsize; | |
4259 | bit_offset /= 2) | |
4260 | { | |
4261 | tree bitpos = size_int (bit_offset); | |
4262 | ||
4263 | epilog_stmt = gimple_build_assign_with_ops (shift_code, | |
4264 | vec_dest, new_temp, bitpos); | |
4265 | new_name = make_ssa_name (vec_dest, epilog_stmt); | |
4266 | gimple_assign_set_lhs (epilog_stmt, new_name); | |
4267 | gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT); | |
4268 | ||
4269 | epilog_stmt = gimple_build_assign_with_ops (code, vec_dest, | |
4270 | new_name, new_temp); | |
4271 | new_temp = make_ssa_name (vec_dest, epilog_stmt); | |
4272 | gimple_assign_set_lhs (epilog_stmt, new_temp); | |
4273 | gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT); | |
4274 | } | |
fb85abff | 4275 | |
eefa05c8 | 4276 | extract_scalar_result = true; |
4277 | } | |
fb85abff | 4278 | else |
4279 | { | |
eefa05c8 | 4280 | tree rhs; |
4281 | ||
4282 | /*** Case 3: Create: | |
4283 | s = extract_field <v_out2, 0> | |
4284 | for (offset = element_size; | |
4285 | offset < vector_size; | |
4286 | offset += element_size;) | |
4287 | { | |
4288 | Create: s' = extract_field <v_out2, offset> | |
4289 | Create: s = op <s, s'> // For non SLP cases | |
4290 | } */ | |
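	  /* E.g., for a V4SI vector and a non-SLP PLUS_EXPR reduction this
	     emits:
	       s  = BIT_FIELD_REF <v_out2, 32, 0>;
	       s' = BIT_FIELD_REF <v_out2, 32, 32>;   s = s + s';
	       s' = BIT_FIELD_REF <v_out2, 32, 64>;   s = s + s';
	       s' = BIT_FIELD_REF <v_out2, 32, 96>;   s = s + s';
	     while in SLP the extracted s' values are pushed into
	     SCALAR_RESULTS instead of being combined.  */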
fb85abff | 4291 | |
6d8fb6cf | 4292 | if (dump_enabled_p ()) |
7bd765d4 | 4293 | dump_printf_loc (MSG_NOTE, vect_location, |
78bb46f5 | 4294 | "Reduce using scalar code.\n"); |
fb85abff | 4295 | |
e913b5cd | 4296 | vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype)); |
f1f41a6c | 4297 | FOR_EACH_VEC_ELT (new_phis, i, new_phi) |
eefa05c8 | 4298 | { |
2f4ce795 | 4299 | if (gimple_code (new_phi) == GIMPLE_PHI) |
4300 | vec_temp = PHI_RESULT (new_phi); | |
4301 | else | |
4302 | vec_temp = gimple_assign_lhs (new_phi); | |
eefa05c8 | 4303 | rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp, bitsize, |
4304 | bitsize_zero_node); | |
4305 | epilog_stmt = gimple_build_assign (new_scalar_dest, rhs); | |
4306 | new_temp = make_ssa_name (new_scalar_dest, epilog_stmt); | |
4307 | gimple_assign_set_lhs (epilog_stmt, new_temp); | |
4308 | gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT); | |
4309 | ||
4310 | /* In SLP we don't need to apply the reduction operation, so we just | |
4311 | collect s' values in SCALAR_RESULTS. */ | |
39a5d6b1 | 4312 | if (slp_reduc) |
f1f41a6c | 4313 | scalar_results.safe_push (new_temp); |
eefa05c8 | 4314 | |
4315 | for (bit_offset = element_bitsize; | |
4316 | bit_offset < vec_size_in_bits; | |
4317 | bit_offset += element_bitsize) | |
4318 | { | |
4319 | tree bitpos = bitsize_int (bit_offset); | |
4320 | tree rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp, | |
4321 | bitsize, bitpos); | |
4322 | ||
4323 | epilog_stmt = gimple_build_assign (new_scalar_dest, rhs); | |
4324 | new_name = make_ssa_name (new_scalar_dest, epilog_stmt); | |
4325 | gimple_assign_set_lhs (epilog_stmt, new_name); | |
4326 | gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT); | |
4327 | ||
39a5d6b1 | 4328 | if (slp_reduc) |
eefa05c8 | 4329 | { |
4330 | /* In SLP we don't need to apply the reduction operation, so | |
4331 | we just collect s' values in SCALAR_RESULTS. */ | |
4332 | new_temp = new_name; | |
f1f41a6c | 4333 | scalar_results.safe_push (new_name); |
eefa05c8 | 4334 | } |
4335 | else | |
4336 | { | |
4337 | epilog_stmt = gimple_build_assign_with_ops (code, | |
4338 | new_scalar_dest, new_name, new_temp); | |
4339 | new_temp = make_ssa_name (new_scalar_dest, epilog_stmt); | |
4340 | gimple_assign_set_lhs (epilog_stmt, new_temp); | |
4341 | gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT); | |
4342 | } | |
4343 | } | |
4344 | } | |
4345 | ||
4346 | /* The only case where we need to reduce scalar results in SLP is | |
282bf14c | 4347 | unrolling. If the size of SCALAR_RESULTS is greater than |
eefa05c8 | 4348 | GROUP_SIZE, we reduce them combining elements modulo |
4349 | GROUP_SIZE. */ | |
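	  /* E.g., with GROUP_SIZE == 2 and four scalar results r0..r3
	     (the SLP group was unrolled twice), the loop below computes
	       scalar_results[0] = r0 op r2
	       scalar_results[1] = r1 op r3
	     leaving one result per group element.  */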
39a5d6b1 | 4350 | if (slp_reduc) |
eefa05c8 | 4351 | { |
4352 | tree res, first_res, new_res; | |
4353 | gimple new_stmt; | |
4354 | ||
4355 | /* Reduce multiple scalar results in case of SLP unrolling. */ | |
f1f41a6c | 4356 | for (j = group_size; scalar_results.iterate (j, &res); |
eefa05c8 | 4357 | j++) |
4358 | { | |
f1f41a6c | 4359 | first_res = scalar_results[j % group_size]; |
eefa05c8 | 4360 | new_stmt = gimple_build_assign_with_ops (code, |
4361 | new_scalar_dest, first_res, res); | |
4362 | new_res = make_ssa_name (new_scalar_dest, new_stmt); | |
4363 | gimple_assign_set_lhs (new_stmt, new_res); | |
4364 | gsi_insert_before (&exit_gsi, new_stmt, GSI_SAME_STMT); | |
f1f41a6c | 4365 | scalar_results[j % group_size] = new_res; |
eefa05c8 | 4366 | } |
4367 | } | |
4368 | else | |
4369 | /* Not SLP - we have one scalar to keep in SCALAR_RESULTS. */ | |
f1f41a6c | 4370 | scalar_results.safe_push (new_temp); |
eefa05c8 | 4371 | |
4372 | extract_scalar_result = false; | |
4373 | } | |
fb85abff | 4374 | } |
4375 | ||
4376 | /* 2.4 Extract the final scalar result. Create: | |
eefa05c8 | 4377 | s_out3 = extract_field <v_out2, bitpos> */ |
48e1416a | 4378 | |
fb85abff | 4379 | if (extract_scalar_result) |
4380 | { | |
4381 | tree rhs; | |
4382 | ||
6d8fb6cf | 4383 | if (dump_enabled_p ()) |
7bd765d4 | 4384 | dump_printf_loc (MSG_NOTE, vect_location, |
78bb46f5 | 4385 | "extract scalar result\n"); |
fb85abff | 4386 | |
4387 | if (BYTES_BIG_ENDIAN) | |
eefa05c8 | 4388 | bitpos = size_binop (MULT_EXPR, |
4389 | bitsize_int (TYPE_VECTOR_SUBPARTS (vectype) - 1), | |
4390 | TYPE_SIZE (scalar_type)); | |
fb85abff | 4391 | else |
eefa05c8 | 4392 | bitpos = bitsize_zero_node; |
fb85abff | 4393 | |
4394 | rhs = build3 (BIT_FIELD_REF, scalar_type, new_temp, bitsize, bitpos); | |
4395 | epilog_stmt = gimple_build_assign (new_scalar_dest, rhs); | |
4396 | new_temp = make_ssa_name (new_scalar_dest, epilog_stmt); | |
4397 | gimple_assign_set_lhs (epilog_stmt, new_temp); | |
4398 | gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT); | |
f1f41a6c | 4399 | scalar_results.safe_push (new_temp); |
fb85abff | 4400 | } |
eefa05c8 | 4401 | |
fb85abff | 4402 | vect_finalize_reduction: |
4403 | ||
b219ece3 | 4404 | if (double_reduc) |
4405 | loop = loop->inner; | |
4406 | ||
fb85abff | 4407 | /* 2.5 Adjust the final result by the initial value of the reduction |
4408 | variable. (When such adjustment is not needed, then | |
4409 | 'adjustment_def' is zero). For example, if code is PLUS we create: | |
4410 | new_temp = loop_exit_def + adjustment_def */ | |
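  /* E.g., if the scalar loop was "s = 10; for (...) s += a[i];" and the
     vectorized loop started from an all-zero initial vector, then
     ADJUSTMENT_DEF is 10 and the statement created below is
     "new_temp = loop_exit_def + 10".  */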
4411 | ||
4412 | if (adjustment_def) | |
4413 | { | |
39a5d6b1 | 4414 | gcc_assert (!slp_reduc); |
fb85abff | 4415 | if (nested_in_vect_loop) |
4416 | { | |
f1f41a6c | 4417 | new_phi = new_phis[0]; |
fb85abff | 4418 | gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) == VECTOR_TYPE); |
4419 | expr = build2 (code, vectype, PHI_RESULT (new_phi), adjustment_def); | |
4420 | new_dest = vect_create_destination_var (scalar_dest, vectype); | |
4421 | } | |
4422 | else | |
4423 | { | |
f1f41a6c | 4424 | new_temp = scalar_results[0]; |
fb85abff | 4425 | gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) != VECTOR_TYPE); |
4426 | expr = build2 (code, scalar_type, new_temp, adjustment_def); | |
4427 | new_dest = vect_create_destination_var (scalar_dest, scalar_type); | |
4428 | } | |
ade2ac53 | 4429 | |
fb85abff | 4430 | epilog_stmt = gimple_build_assign (new_dest, expr); |
4431 | new_temp = make_ssa_name (new_dest, epilog_stmt); | |
4432 | gimple_assign_set_lhs (epilog_stmt, new_temp); | |
fb85abff | 4433 | gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT); |
eefa05c8 | 4434 | if (nested_in_vect_loop) |
4435 | { | |
4436 | set_vinfo_for_stmt (epilog_stmt, | |
4437 | new_stmt_vec_info (epilog_stmt, loop_vinfo, | |
4438 | NULL)); | |
4439 | STMT_VINFO_RELATED_STMT (vinfo_for_stmt (epilog_stmt)) = | |
4440 | STMT_VINFO_RELATED_STMT (vinfo_for_stmt (new_phi)); | |
4441 | ||
4442 | if (!double_reduc) | |
f1f41a6c | 4443 | scalar_results.quick_push (new_temp); |
eefa05c8 | 4444 | else |
f1f41a6c | 4445 | scalar_results[0] = new_temp; |
eefa05c8 | 4446 | } |
4447 | else | |
f1f41a6c | 4448 | scalar_results[0] = new_temp; |
eefa05c8 | 4449 | |
f1f41a6c | 4450 | new_phis[0] = epilog_stmt; |
fb85abff | 4451 | } |
4452 | ||
282bf14c | 4453 | /* 2.6 Handle the loop-exit phis. Replace the uses of scalar loop-exit |
eefa05c8 | 4454 | phis with new adjusted scalar results, i.e., replace use <s_out0> |
4455 | with use <s_out4>. | |
fb85abff | 4456 | |
eefa05c8 | 4457 | Transform: |
4458 | loop_exit: | |
4459 | s_out0 = phi <s_loop> # (scalar) EXIT_PHI | |
4460 | v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI | |
4461 | v_out2 = reduce <v_out1> | |
4462 | s_out3 = extract_field <v_out2, 0> | |
4463 | s_out4 = adjust_result <s_out3> | |
4464 | use <s_out0> | |
4465 | use <s_out0> | |
4466 | ||
4467 | into: | |
fb85abff | 4468 | |
eefa05c8 | 4469 | loop_exit: |
4470 | s_out0 = phi <s_loop> # (scalar) EXIT_PHI | |
4471 | v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI | |
4472 | v_out2 = reduce <v_out1> | |
4473 | s_out3 = extract_field <v_out2, 0> | |
4474 | s_out4 = adjust_result <s_out3> | |
47deb25f | 4475 | use <s_out4> |
4476 | use <s_out4> */ | |
eefa05c8 | 4477 | |
39a5d6b1 | 4478 | |
4479 | /* In SLP reduction chain we reduce vector results into one vector if | |
4480 | necessary, hence we set GROUP_SIZE to 1 here. SCALAR_DEST is the LHS of | |
4481 | the last stmt in the reduction chain, since we are looking for the loop | |
4482 | exit phi node. */ | |
4483 | if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt))) | |
4484 | { | |
f1f41a6c | 4485 | scalar_dest = gimple_assign_lhs ( |
4486 | SLP_TREE_SCALAR_STMTS (slp_node)[group_size - 1]); | |
39a5d6b1 | 4487 | group_size = 1; |
4488 | } | |
4489 | ||
eefa05c8 | 4490 | /* In SLP we may have several statements in NEW_PHIS and REDUCTION_PHIS (in |
282bf14c | 4491 | case GROUP_SIZE is greater than the vectorization factor). Therefore, we |
4492 | need to match SCALAR_RESULTS with corresponding statements. The first | |
eefa05c8 | 4493 | (GROUP_SIZE / number of new vector stmts) scalar results correspond to |
4494 | the first vector stmt, etc. | |
4495 | (RATIO is equal to (GROUP_SIZE / number of new vector stmts)). */ | |
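  /* E.g., with GROUP_SIZE == 8 and two vector stmts, RATIO is 4:
     scalar_results[0..3] correspond to new_phis[0] and scalar_results[4..7]
     to new_phis[1].  */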
f1f41a6c | 4496 | if (group_size > new_phis.length ()) |
47deb25f | 4497 | { |
f1f41a6c | 4498 | ratio = group_size / new_phis.length (); |
4499 | gcc_assert (!(group_size % new_phis.length ())); | |
47deb25f | 4500 | } |
4501 | else | |
4502 | ratio = 1; | |
eefa05c8 | 4503 | |
4504 | for (k = 0; k < group_size; k++) | |
fb85abff | 4505 | { |
eefa05c8 | 4506 | if (k % ratio == 0) |
4507 | { | |
f1f41a6c | 4508 | epilog_stmt = new_phis[k / ratio]; |
4509 | reduction_phi = reduction_phis[k / ratio]; | |
58045f90 | 4510 | if (double_reduc) |
f1f41a6c | 4511 | inner_phi = inner_phis[k / ratio]; |
eefa05c8 | 4512 | } |
7aa0d350 | 4513 | |
39a5d6b1 | 4514 | if (slp_reduc) |
eefa05c8 | 4515 | { |
f1f41a6c | 4516 | gimple current_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[k]; |
fb85abff | 4517 | |
eefa05c8 | 4518 | orig_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (current_stmt)); |
4519 | /* SLP statements can't participate in patterns. */ | |
4520 | gcc_assert (!orig_stmt); | |
4521 | scalar_dest = gimple_assign_lhs (current_stmt); | |
4522 | } | |
4523 | ||
f1f41a6c | 4524 | phis.create (3); |
eefa05c8 | 4525 | /* Find the loop-closed-use at the loop exit of the original scalar |
282bf14c | 4526 | result. (The reduction result is expected to have two immediate uses - |
eefa05c8 | 4527 | one at the latch block, and one at the loop exit). */ |
4528 | FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest) | |
f898e094 | 4529 | if (!flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (use_p))) |
4530 | && !is_gimple_debug (USE_STMT (use_p))) | |
f1f41a6c | 4531 | phis.safe_push (USE_STMT (use_p)); |
eefa05c8 | 4532 | |
1d4bc0bb | 4533 | /* While we expect to have found an exit_phi because of loop-closed-ssa |
4534 | form, we can end up without one if the scalar cycle is dead. */ | |
eefa05c8 | 4535 | |
f1f41a6c | 4536 | FOR_EACH_VEC_ELT (phis, i, exit_phi) |
eefa05c8 | 4537 | { |
4538 | if (outer_loop) | |
7aa0d350 | 4539 | { |
eefa05c8 | 4540 | stmt_vec_info exit_phi_vinfo = vinfo_for_stmt (exit_phi); |
4541 | gimple vect_phi; | |
4542 | ||
4543 | /* FORNOW. Currently not supporting the case that an inner-loop | |
4544 | reduction is not used in the outer-loop (but only outside the | |
4545 | outer-loop), unless it is double reduction. */ | |
4546 | gcc_assert ((STMT_VINFO_RELEVANT_P (exit_phi_vinfo) | |
4547 | && !STMT_VINFO_LIVE_P (exit_phi_vinfo)) | |
4548 | || double_reduc); | |
4549 | ||
4550 | STMT_VINFO_VEC_STMT (exit_phi_vinfo) = epilog_stmt; | |
4551 | if (!double_reduc | |
4552 | || STMT_VINFO_DEF_TYPE (exit_phi_vinfo) | |
4553 | != vect_double_reduction_def) | |
7aa0d350 | 4554 | continue; |
4555 | ||
eefa05c8 | 4556 | /* Handle double reduction: |
7aa0d350 | 4557 | |
eefa05c8 | 4558 | stmt1: s1 = phi <s0, s2> - double reduction phi (outer loop) |
4559 | stmt2: s3 = phi <s1, s4> - (regular) reduc phi (inner loop) | |
4560 | stmt3: s4 = use (s3) - (regular) reduc stmt (inner loop) | |
4561 | stmt4: s2 = phi <s4> - double reduction stmt (outer loop) | |
7aa0d350 | 4562 | |
eefa05c8 | 4563 | At that point the regular reduction (stmt2 and stmt3) is |
4564 | already vectorized, as well as the exit phi node, stmt4. | |
4565 | Here we vectorize the phi node of double reduction, stmt1, and | |
4566 | update all relevant statements. */ | |
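	      /* A scalar source sketch of such a double reduction (assuming
		 the outer loop is the one being vectorized):

		   int s = 0;
		   for (i = 0; i < N; i++)        <- outer loop (stmt1, stmt4)
		     for (j = 0; j < M; j++)      <- inner loop (stmt2, stmt3)
		       s = s + a[i][j];
	      */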
7aa0d350 | 4567 | |
eefa05c8 | 4568 | /* Go through all the uses of s2 to find double reduction phi |
4569 | node, i.e., stmt1 above. */ | |
4570 | orig_name = PHI_RESULT (exit_phi); | |
4571 | FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name) | |
7aa0d350 | 4572 | { |
f83623cc | 4573 | stmt_vec_info use_stmt_vinfo; |
eefa05c8 | 4574 | stmt_vec_info new_phi_vinfo; |
4575 | tree vect_phi_init, preheader_arg, vect_phi_res, init_def; | |
4576 | basic_block bb = gimple_bb (use_stmt); | |
4577 | gimple use; | |
4578 | ||
4579 | /* Check that USE_STMT is really a double reduction phi | |
4580 | node. */ | |
4581 | if (gimple_code (use_stmt) != GIMPLE_PHI | |
4582 | || gimple_phi_num_args (use_stmt) != 2 | |
eefa05c8 | 4583 | || bb->loop_father != outer_loop) |
4584 | continue; | |
f83623cc | 4585 | use_stmt_vinfo = vinfo_for_stmt (use_stmt); |
4586 | if (!use_stmt_vinfo | |
4587 | || STMT_VINFO_DEF_TYPE (use_stmt_vinfo) | |
4588 | != vect_double_reduction_def) | |
4589 | continue; | |
eefa05c8 | 4590 | |
4591 | /* Create vector phi node for double reduction: | |
4592 | vs1 = phi <vs0, vs2> | |
4593 | vs1 was created previously in this function by a call to | |
4594 | vect_get_vec_def_for_operand and is stored in | |
4595 | vec_initial_def; | |
58045f90 | 4596 | vs2 is defined by INNER_PHI, the vectorized EXIT_PHI; |
eefa05c8 | 4597 | vs0 is created here. */ |
4598 | ||
4599 | /* Create vector phi node. */ | |
4600 | vect_phi = create_phi_node (vec_initial_def, bb); | |
4601 | new_phi_vinfo = new_stmt_vec_info (vect_phi, | |
4602 | loop_vec_info_for_loop (outer_loop), NULL); | |
4603 | set_vinfo_for_stmt (vect_phi, new_phi_vinfo); | |
4604 | ||
4605 | /* Create vs0 - initial def of the double reduction phi. */ | |
4606 | preheader_arg = PHI_ARG_DEF_FROM_EDGE (use_stmt, | |
4607 | loop_preheader_edge (outer_loop)); | |
4608 | init_def = get_initial_def_for_reduction (stmt, | |
4609 | preheader_arg, NULL); | |
4610 | vect_phi_init = vect_init_vector (use_stmt, init_def, | |
4611 | vectype, NULL); | |
4612 | ||
4613 | /* Update phi node arguments with vs0 and vs2. */ | |
4614 | add_phi_arg (vect_phi, vect_phi_init, | |
4615 | loop_preheader_edge (outer_loop), | |
60d535d2 | 4616 | UNKNOWN_LOCATION); |
58045f90 | 4617 | add_phi_arg (vect_phi, PHI_RESULT (inner_phi), |
60d535d2 | 4618 | loop_latch_edge (outer_loop), UNKNOWN_LOCATION); |
6d8fb6cf | 4619 | if (dump_enabled_p ()) |
eefa05c8 | 4620 | { |
7bd765d4 | 4621 | dump_printf_loc (MSG_NOTE, vect_location, |
4622 | "created double reduction phi node: "); | |
4623 | dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vect_phi, 0); | |
78bb46f5 | 4624 | dump_printf (MSG_NOTE, "\n"); |
eefa05c8 | 4625 | } |
4626 | ||
4627 | vect_phi_res = PHI_RESULT (vect_phi); | |
4628 | ||
4629 | /* Replace the use, i.e., set the correct vs1 in the regular | |
282bf14c | 4630 | reduction phi node. FORNOW, NCOPIES is always 1, so the |
eefa05c8 | 4631 | loop is redundant. */ |
4632 | use = reduction_phi; | |
4633 | for (j = 0; j < ncopies; j++) | |
4634 | { | |
4635 | edge pr_edge = loop_preheader_edge (loop); | |
4636 | SET_PHI_ARG_DEF (use, pr_edge->dest_idx, vect_phi_res); | |
4637 | use = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (use)); | |
4638 | } | |
7aa0d350 | 4639 | } |
4640 | } | |
b219ece3 | 4641 | } |
4642 | ||
f1f41a6c | 4643 | phis.release (); |
b219ece3 | 4644 | if (nested_in_vect_loop) |
4645 | { | |
4646 | if (double_reduc) | |
4647 | loop = outer_loop; | |
4648 | else | |
4649 | continue; | |
4650 | } | |
4651 | ||
f1f41a6c | 4652 | phis.create (3); |
b219ece3 | 4653 | /* Find the loop-closed-use at the loop exit of the original scalar |
282bf14c | 4654 | result. (The reduction result is expected to have two immediate uses, |
4655 | one at the latch block, and one at the loop exit). For double | |
b219ece3 | 4656 | reductions we are looking for exit phis of the outer loop. */ |
4657 | FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest) | |
4658 | { | |
4659 | if (!flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (use_p)))) | |
f898e094 | 4660 | { |
4661 | if (!is_gimple_debug (USE_STMT (use_p))) | |
4662 | phis.safe_push (USE_STMT (use_p)); | |
4663 | } | |
b219ece3 | 4664 | else |
4665 | { | |
4666 | if (double_reduc && gimple_code (USE_STMT (use_p)) == GIMPLE_PHI) | |
4667 | { | |
4668 | tree phi_res = PHI_RESULT (USE_STMT (use_p)); | |
4669 | ||
4670 | FOR_EACH_IMM_USE_FAST (phi_use_p, phi_imm_iter, phi_res) | |
4671 | { | |
4672 | if (!flow_bb_inside_loop_p (loop, | |
f898e094 | 4673 | gimple_bb (USE_STMT (phi_use_p))) |
4674 | && !is_gimple_debug (USE_STMT (phi_use_p))) | |
f1f41a6c | 4675 | phis.safe_push (USE_STMT (phi_use_p)); |
b219ece3 | 4676 | } |
4677 | } | |
4678 | } | |
4679 | } | |
fb85abff | 4680 | |
f1f41a6c | 4681 | FOR_EACH_VEC_ELT (phis, i, exit_phi) |
b219ece3 | 4682 | { |
eefa05c8 | 4683 | /* Replace the uses: */ |
4684 | orig_name = PHI_RESULT (exit_phi); | |
f1f41a6c | 4685 | scalar_result = scalar_results[k]; |
eefa05c8 | 4686 | FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name) |
4687 | FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter) | |
4688 | SET_USE (use_p, scalar_result); | |
4689 | } | |
4690 | ||
f1f41a6c | 4691 | phis.release (); |
fb85abff | 4692 | } |
6ae8a044 | 4693 | } |
fb85abff | 4694 | |
4695 | ||
4696 | /* Function vectorizable_reduction. | |
4697 | ||
4698 | Check if STMT performs a reduction operation that can be vectorized. | |
4699 | If VEC_STMT is also passed, vectorize the STMT: create a vectorized | |
ade2ac53 | 4700 | stmt to replace it, put it in VEC_STMT, and insert it at GSI. |
fb85abff | 4701 | Return FALSE if not a vectorizable STMT, TRUE otherwise. |
4702 | ||
48e1416a | 4703 | This function also handles reduction idioms (patterns) that have been |
282bf14c | 4704 | recognized in advance during vect_pattern_recog. In this case, STMT may be |
fb85abff | 4705 | of this form: |
4706 | X = pattern_expr (arg0, arg1, ..., X) | |
4707 | and its STMT_VINFO_RELATED_STMT points to the last stmt in the original | |
4708 | sequence that had been detected and replaced by the pattern-stmt (STMT). | |
48e1416a | 4709 | |
fb85abff | 4710 | In some cases of reduction patterns, the type of the reduction variable X is |
4711 | different than the type of the other arguments of STMT. | |
4712 | In such cases, the vectype that is used when transforming STMT into a vector | |
4713 | stmt is different than the vectype that is used to determine the | |
48e1416a | 4714 | vectorization factor, because it consists of a different number of elements |
fb85abff | 4715 | than the actual number of elements that are being operated upon in parallel. |
4716 | ||
4717 | For example, consider an accumulation of shorts into an int accumulator. | |
4718 | On some targets it's possible to vectorize this pattern operating on 8 | |
4719 | shorts at a time (hence, the vectype for purposes of determining the | |
4720 | vectorization factor should be V8HI); on the other hand, the vectype that | |
4721 | is used to create the vector form is actually V4SI (the type of the result). | |
4722 | ||
4723 | Upon entry to this function, STMT_VINFO_VECTYPE records the vectype that | |
4724 | indicates what is the actual level of parallelism (V8HI in the example), so | |
282bf14c | 4725 | that the right vectorization factor would be derived. This vectype |
fb85abff | 4726 | corresponds to the type of arguments to the reduction stmt, and should *NOT* |
282bf14c | 4727 | be used to create the vectorized stmt. The right vectype for the vectorized |
fb85abff | 4728 | stmt is obtained from the type of the result X: |
4729 | get_vectype_for_scalar_type (TREE_TYPE (X)) | |
4730 | ||
4731 | This means that, contrary to "regular" reductions (or "regular" stmts in | |
4732 | general), the following equation: | |
4733 | STMT_VINFO_VECTYPE == get_vectype_for_scalar_type (TREE_TYPE (X)) | |
4734 | does *NOT* necessarily hold for reduction patterns. */ | |
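/* E.g. (a sketch; the pattern itself is recognized in tree-vect-patterns.c):

     short a[N]; int acc = 0;
     for (i = 0; i < N; i++)
       acc += a[i];            original: int_acc = plus <(int) a[i], int_acc>
                               pattern STMT: int_acc = widen_sum <a[i], int_acc>

   Here the vectype recorded for determining the vectorization factor reflects
   eight shorts per vector (V8HI), while the vectype used to build the
   vectorized statement comes from the result type and is V4SI.  */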
4735 | ||
4736 | bool | |
4737 | vectorizable_reduction (gimple stmt, gimple_stmt_iterator *gsi, | |
eefa05c8 | 4738 | gimple *vec_stmt, slp_tree slp_node) |
fb85abff | 4739 | { |
4740 | tree vec_dest; | |
4741 | tree scalar_dest; | |
4742 | tree loop_vec_def0 = NULL_TREE, loop_vec_def1 = NULL_TREE; | |
4743 | stmt_vec_info stmt_info = vinfo_for_stmt (stmt); | |
b334cbba | 4744 | tree vectype_out = STMT_VINFO_VECTYPE (stmt_info); |
4745 | tree vectype_in = NULL_TREE; | |
fb85abff | 4746 | loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); |
4747 | struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); | |
8458f4ca | 4748 | enum tree_code code, orig_code, epilog_reduc_code; |
3754d046 | 4749 | machine_mode vec_mode; |
fb85abff | 4750 | int op_type; |
4751 | optab optab, reduc_optab; | |
4752 | tree new_temp = NULL_TREE; | |
4753 | tree def; | |
4754 | gimple def_stmt; | |
4755 | enum vect_def_type dt; | |
4756 | gimple new_phi = NULL; | |
4757 | tree scalar_type; | |
4758 | bool is_simple_use; | |
4759 | gimple orig_stmt; | |
4760 | stmt_vec_info orig_stmt_info; | |
4761 | tree expr = NULL_TREE; | |
4762 | int i; | |
b334cbba | 4763 | int ncopies; |
fb85abff | 4764 | int epilog_copies; |
4765 | stmt_vec_info prev_stmt_info, prev_phi_info; | |
fb85abff | 4766 | bool single_defuse_cycle = false; |
0df23b96 | 4767 | tree reduc_def = NULL_TREE; |
fb85abff | 4768 | gimple new_stmt = NULL; |
4769 | int j; | |
4770 | tree ops[3]; | |
ade2ac53 | 4771 | bool nested_cycle = false, found_nested_cycle_def = false; |
4772 | gimple reduc_def_stmt = NULL; | |
4773 | /* The default is that the reduction variable is the last in statement. */ | |
4774 | int reduc_index = 2; | |
7aa0d350 | 4775 | bool double_reduc = false, dummy; |
4776 | basic_block def_bb; | |
c0a0357c | 4777 | struct loop * def_stmt_loop, *outer_loop = NULL; |
7aa0d350 | 4778 | tree def_arg; |
c0a0357c | 4779 | gimple def_arg_stmt; |
c2078b80 | 4780 | auto_vec<tree> vec_oprnds0; |
4781 | auto_vec<tree> vec_oprnds1; | |
4782 | auto_vec<tree> vect_defs; | |
4783 | auto_vec<gimple> phis; | |
eefa05c8 | 4784 | int vec_num; |
d42d0fe0 | 4785 | tree def0, def1, tem, op0, op1 = NULL_TREE; |
fb85abff | 4786 | |
39a5d6b1 | 4787 | /* In case of reduction chain we switch to the first stmt in the chain, but |
4788 | we don't update STMT_INFO, since only the last stmt is marked as reduction | |
4789 | and has reduction properties. */ | |
4790 | if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt))) | |
4791 | stmt = GROUP_FIRST_ELEMENT (stmt_info); | |
4792 | ||
fb85abff | 4793 | if (nested_in_vect_loop_p (loop, stmt)) |
ade2ac53 | 4794 | { |
c0a0357c | 4795 | outer_loop = loop; |
ade2ac53 | 4796 | loop = loop->inner; |
4797 | nested_cycle = true; | |
4798 | } | |
fb85abff | 4799 | |
fb85abff | 4800 | /* 1. Is vectorizable reduction? */ |
39a5d6b1 | 4801 | /* Not supportable if the reduction variable is used in the loop, unless |
4802 | it's a reduction chain. */ | |
4803 | if (STMT_VINFO_RELEVANT (stmt_info) > vect_used_in_outer | |
4804 | && !GROUP_FIRST_ELEMENT (stmt_info)) | |
fb85abff | 4805 | return false; |
4806 | ||
4807 | /* Reductions that are not used even in an enclosing outer-loop | |
4808 | are expected to be "live" (used out of the loop). */ | |
f083cd24 | 4809 | if (STMT_VINFO_RELEVANT (stmt_info) == vect_unused_in_scope |
fb85abff | 4810 | && !STMT_VINFO_LIVE_P (stmt_info)) |
4811 | return false; | |
4812 | ||
4813 | /* Make sure it was already recognized as a reduction computation. */ | |
ade2ac53 | 4814 | if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_reduction_def |
4815 | && STMT_VINFO_DEF_TYPE (stmt_info) != vect_nested_cycle) | |
fb85abff | 4816 | return false; |
4817 | ||
48e1416a | 4818 | /* 2. Has this been recognized as a reduction pattern? |
fb85abff | 4819 | |
4820 | Check if STMT represents a pattern that has been recognized | |
4821 | in earlier analysis stages. For stmts that represent a pattern, | |
4822 | the STMT_VINFO_RELATED_STMT field records the last stmt in | |
4823 | the original sequence that constitutes the pattern. */ | |
4824 | ||
4825 | orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info); | |
4826 | if (orig_stmt) | |
4827 | { | |
4828 | orig_stmt_info = vinfo_for_stmt (orig_stmt); | |
fb85abff | 4829 | gcc_assert (STMT_VINFO_IN_PATTERN_P (orig_stmt_info)); |
4830 | gcc_assert (!STMT_VINFO_IN_PATTERN_P (stmt_info)); | |
4831 | } | |
48e1416a | 4832 | |
282bf14c | 4833 | /* 3. Check the operands of the operation. The first operands are defined |
fb85abff | 4834 | inside the loop body. The last operand is the reduction variable, |
4835 | which is defined by the loop-header-phi. */ | |
4836 | ||
4837 | gcc_assert (is_gimple_assign (stmt)); | |
4838 | ||
09e31a48 | 4839 | /* Flatten RHS. */ |
fb85abff | 4840 | switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt))) |
4841 | { | |
4842 | case GIMPLE_SINGLE_RHS: | |
4843 | op_type = TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt)); | |
4844 | if (op_type == ternary_op) | |
4845 | { | |
4846 | tree rhs = gimple_assign_rhs1 (stmt); | |
4847 | ops[0] = TREE_OPERAND (rhs, 0); | |
4848 | ops[1] = TREE_OPERAND (rhs, 1); | |
4849 | ops[2] = TREE_OPERAND (rhs, 2); | |
4850 | code = TREE_CODE (rhs); | |
4851 | } | |
4852 | else | |
4853 | return false; | |
4854 | break; | |
4855 | ||
4856 | case GIMPLE_BINARY_RHS: | |
4857 | code = gimple_assign_rhs_code (stmt); | |
4858 | op_type = TREE_CODE_LENGTH (code); | |
4859 | gcc_assert (op_type == binary_op); | |
4860 | ops[0] = gimple_assign_rhs1 (stmt); | |
4861 | ops[1] = gimple_assign_rhs2 (stmt); | |
4862 | break; | |
4863 | ||
c86930b0 | 4864 | case GIMPLE_TERNARY_RHS: |
4865 | code = gimple_assign_rhs_code (stmt); | |
4866 | op_type = TREE_CODE_LENGTH (code); | |
4867 | gcc_assert (op_type == ternary_op); | |
4868 | ops[0] = gimple_assign_rhs1 (stmt); | |
4869 | ops[1] = gimple_assign_rhs2 (stmt); | |
4870 | ops[2] = gimple_assign_rhs3 (stmt); | |
4871 | break; | |
4872 | ||
fb85abff | 4873 | case GIMPLE_UNARY_RHS: |
4874 | return false; | |
4875 | ||
4876 | default: | |
4877 | gcc_unreachable (); | |
4878 | } | |
4879 | ||
f2104a54 | 4880 | if (code == COND_EXPR && slp_node) |
4881 | return false; | |
4882 | ||
fb85abff | 4883 | scalar_dest = gimple_assign_lhs (stmt); |
4884 | scalar_type = TREE_TYPE (scalar_dest); | |
48e1416a | 4885 | if (!POINTER_TYPE_P (scalar_type) && !INTEGRAL_TYPE_P (scalar_type) |
fb85abff | 4886 | && !SCALAR_FLOAT_TYPE_P (scalar_type)) |
4887 | return false; | |
4888 | ||
6960a794 | 4889 | /* Do not try to vectorize bit-precision reductions. */ |
4890 | if ((TYPE_PRECISION (scalar_type) | |
4891 | != GET_MODE_PRECISION (TYPE_MODE (scalar_type)))) | |
4892 | return false; | |
4893 | ||
fb85abff | 4894 | /* All uses but the last are expected to be defined in the loop. |
282bf14c | 4895 | The last use is the reduction variable. In case of nested cycle this |
ade2ac53 | 4896 | assumption is not true: we use reduc_index to record the index of the |
4897 | reduction variable. */ | |
a82fc9c6 | 4898 | for (i = 0; i < op_type - 1; i++) |
fb85abff | 4899 | { |
0df23b96 | 4900 | /* The condition of COND_EXPR is checked in vectorizable_condition(). */ |
4901 | if (i == 0 && code == COND_EXPR) | |
4902 | continue; | |
4903 | ||
bed8b93b | 4904 | is_simple_use = vect_is_simple_use_1 (ops[i], stmt, loop_vinfo, NULL, |
b334cbba | 4905 | &def_stmt, &def, &dt, &tem); |
4906 | if (!vectype_in) | |
4907 | vectype_in = tem; | |
fb85abff | 4908 | gcc_assert (is_simple_use); |
39a5d6b1 | 4909 | |
f083cd24 | 4910 | if (dt != vect_internal_def |
4911 | && dt != vect_external_def | |
fb85abff | 4912 | && dt != vect_constant_def |
ade2ac53 | 4913 | && dt != vect_induction_def |
0df23b96 | 4914 | && !(dt == vect_nested_cycle && nested_cycle)) |
fb85abff | 4915 | return false; |
ade2ac53 | 4916 | |
4917 | if (dt == vect_nested_cycle) | |
4918 | { | |
4919 | found_nested_cycle_def = true; | |
4920 | reduc_def_stmt = def_stmt; | |
4921 | reduc_index = i; | |
4922 | } | |
fb85abff | 4923 | } |
4924 | ||
bed8b93b | 4925 | is_simple_use = vect_is_simple_use_1 (ops[i], stmt, loop_vinfo, NULL, |
4926 | &def_stmt, &def, &dt, &tem); | |
fae41702 | 4927 | if (!vectype_in) |
4928 | vectype_in = tem; | |
fb85abff | 4929 | gcc_assert (is_simple_use); |
a82fc9c6 | 4930 | if (!(dt == vect_reduction_def |
4931 | || dt == vect_nested_cycle | |
4932 | || ((dt == vect_internal_def || dt == vect_external_def | |
4933 | || dt == vect_constant_def || dt == vect_induction_def) | |
4934 | && nested_cycle && found_nested_cycle_def))) | |
4935 | { | |
4936 | /* For pattern recognized stmts, orig_stmt might be a reduction, | |
4937 | but some helper statements for the pattern might not, or | |
4938 | might be COND_EXPRs with reduction uses in the condition. */ | |
4939 | gcc_assert (orig_stmt); | |
4940 | return false; | |
4941 | } | |
ade2ac53 | 4942 | if (!found_nested_cycle_def) |
4943 | reduc_def_stmt = def_stmt; | |
4944 | ||
4945 | gcc_assert (gimple_code (reduc_def_stmt) == GIMPLE_PHI); | |
48e1416a | 4946 | if (orig_stmt) |
4947 | gcc_assert (orig_stmt == vect_is_simple_reduction (loop_vinfo, | |
4948 | reduc_def_stmt, | |
4949 | !nested_cycle, | |
7aa0d350 | 4950 | &dummy)); |
fb85abff | 4951 | else |
39a5d6b1 | 4952 | { |
4953 | gimple tmp = vect_is_simple_reduction (loop_vinfo, reduc_def_stmt, | |
4954 | !nested_cycle, &dummy); | |
4955 | /* We changed STMT to be the first stmt in reduction chain, hence we | |
4956 | check that in this case the first element in the chain is STMT. */ | |
4957 | gcc_assert (stmt == tmp | |
4958 | || GROUP_FIRST_ELEMENT (vinfo_for_stmt (tmp)) == stmt); | |
4959 | } | |
48e1416a | 4960 | |
ade2ac53 | 4961 | if (STMT_VINFO_LIVE_P (vinfo_for_stmt (reduc_def_stmt))) |
fb85abff | 4962 | return false; |
4963 | ||
bc937a44 | 4964 | if (slp_node || PURE_SLP_STMT (stmt_info)) |
eefa05c8 | 4965 | ncopies = 1; |
4966 | else | |
4967 | ncopies = (LOOP_VINFO_VECT_FACTOR (loop_vinfo) | |
4968 | / TYPE_VECTOR_SUBPARTS (vectype_in)); | |
b334cbba | 4969 | |
b334cbba | 4970 | gcc_assert (ncopies >= 1); |
4971 | ||
4972 | vec_mode = TYPE_MODE (vectype_in); | |
fb85abff | 4973 | |
0df23b96 | 4974 | if (code == COND_EXPR) |
fb85abff | 4975 | { |
f2104a54 | 4976 | if (!vectorizable_condition (stmt, gsi, NULL, ops[reduc_index], 0, NULL)) |
0df23b96 | 4977 | { |
6d8fb6cf | 4978 | if (dump_enabled_p ()) |
7bd765d4 | 4979 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
78bb46f5 | 4980 | "unsupported condition in reduction\n"); |
0df23b96 | 4981 | |
4982 | return false; | |
4983 | } | |
fb85abff | 4984 | } |
0df23b96 | 4985 | else |
fb85abff | 4986 | { |
0df23b96 | 4987 | /* 4. Supportable by target? */ |
fb85abff | 4988 | |
2d788f29 | 4989 | if (code == LSHIFT_EXPR || code == RSHIFT_EXPR |
4990 | || code == LROTATE_EXPR || code == RROTATE_EXPR) | |
4991 | { | |
4992 | /* Shifts and rotates are only supported by vectorizable_shifts, | |
4993 | not vectorizable_reduction. */ | |
4994 | if (dump_enabled_p ()) | |
4995 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
78bb46f5 | 4996 | "unsupported shift or rotation.\n"); |
2d788f29 | 4997 | return false; |
4998 | } | |
4999 | ||
0df23b96 | 5000 | /* 4.1. check support for the operation in the loop */ |
b334cbba | 5001 | optab = optab_for_tree_code (code, vectype_in, optab_default); |
0df23b96 | 5002 | if (!optab) |
5003 | { | |
6d8fb6cf | 5004 | if (dump_enabled_p ()) |
7bd765d4 | 5005 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
78bb46f5 | 5006 | "no optab.\n"); |
0df23b96 | 5007 | |
5008 | return false; | |
5009 | } | |
5010 | ||
d6bf3b14 | 5011 | if (optab_handler (optab, vec_mode) == CODE_FOR_nothing) |
0df23b96 | 5012 | { |
6d8fb6cf | 5013 | if (dump_enabled_p ()) |
78bb46f5 | 5014 | dump_printf (MSG_NOTE, "op not supported by target.\n"); |
0df23b96 | 5015 | |
5016 | if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD | |
5017 | || LOOP_VINFO_VECT_FACTOR (loop_vinfo) | |
5018 | < vect_min_worthwhile_factor (code)) | |
5019 | return false; | |
5020 | ||
6d8fb6cf | 5021 | if (dump_enabled_p ()) |
78bb46f5 | 5022 | dump_printf (MSG_NOTE, "proceeding using word mode.\n"); |
0df23b96 | 5023 | } |
5024 | ||
5025 | /* Worthwhile without SIMD support? */ | |
b334cbba | 5026 | if (!VECTOR_MODE_P (TYPE_MODE (vectype_in)) |
0df23b96 | 5027 | && LOOP_VINFO_VECT_FACTOR (loop_vinfo) |
5028 | < vect_min_worthwhile_factor (code)) | |
5029 | { | |
6d8fb6cf | 5030 | if (dump_enabled_p ()) |
7bd765d4 | 5031 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
78bb46f5 | 5032 | "not worthwhile without SIMD support.\n"); |
0df23b96 | 5033 | |
5034 | return false; | |
5035 | } | |
fb85abff | 5036 | } |
5037 | ||
5038 | /* 4.2. Check support for the epilog operation. | |
5039 | ||
5040 | If STMT represents a reduction pattern, then the type of the | |
5041 | reduction variable may be different than the type of the rest | |
5042 | of the arguments. For example, consider the case of accumulation | |
5043 | of shorts into an int accumulator; the original code: | |
5044 | S1: int_a = (int) short_a; | |
5045 | orig_stmt-> S2: int_acc = plus <int_a ,int_acc>; | |
5046 | ||
5047 | was replaced with: | |
5048 | STMT: int_acc = widen_sum <short_a, int_acc> | |
5049 | ||
5050 | This means that: | |
48e1416a | 5051 | 1. The tree-code that is used to create the vector operation in the |
5052 | epilog code (that reduces the partial results) is not the | |
5053 | tree-code of STMT, but is rather the tree-code of the original | |
282bf14c | 5054 | stmt from the pattern that STMT is replacing. I.e., in the example |
48e1416a | 5055 | above we want to use 'widen_sum' in the loop, but 'plus' in the |
fb85abff | 5056 | epilog. |
5057 | 2. The type (mode) we use to check available target support | |
48e1416a | 5058 | for the vector operation to be created in the *epilog*, is |
5059 | determined by the type of the reduction variable (in the example | |
d6bf3b14 | 5060 | above we'd check this: optab_handler (plus_optab, vect_int_mode)). |
fb85abff | 5061 | However the type (mode) we use to check available target support |
5062 | for the vector operation to be created *inside the loop*, is | |
5063 | determined by the type of the other arguments to STMT (in the | |
d6bf3b14 | 5064 | example we'd check this: optab_handler (widen_sum_optab, |
5065 | vect_short_mode)). | |
48e1416a | 5066 | |
5067 | This is contrary to "regular" reductions, in which the types of all | |
5068 | the arguments are the same as the type of the reduction variable. | |
5069 | For "regular" reductions we can therefore use the same vector type | |
fb85abff | 5070 | (and also the same tree-code) when generating the epilog code and |
5071 | when generating the code inside the loop. */ | |
5072 | ||
5073 | if (orig_stmt) | |
5074 | { | |
5075 | /* This is a reduction pattern: get the vectype from the type of the | |
5076 | reduction variable, and get the tree-code from orig_stmt. */ | |
5077 | orig_code = gimple_assign_rhs_code (orig_stmt); | |
b334cbba | 5078 | gcc_assert (vectype_out); |
5079 | vec_mode = TYPE_MODE (vectype_out); | |
fb85abff | 5080 | } |
5081 | else | |
5082 | { | |
5083 | /* Regular reduction: use the same vectype and tree-code as used for | |
5084 | the vector code inside the loop can be used for the epilog code. */ | |
5085 | orig_code = code; | |
5086 | } | |
5087 | ||
c0a0357c | 5088 | if (nested_cycle) |
5089 | { | |
5090 | def_bb = gimple_bb (reduc_def_stmt); | |
5091 | def_stmt_loop = def_bb->loop_father; | |
5092 | def_arg = PHI_ARG_DEF_FROM_EDGE (reduc_def_stmt, | |
5093 | loop_preheader_edge (def_stmt_loop)); | |
5094 | if (TREE_CODE (def_arg) == SSA_NAME | |
5095 | && (def_arg_stmt = SSA_NAME_DEF_STMT (def_arg)) | |
5096 | && gimple_code (def_arg_stmt) == GIMPLE_PHI | |
5097 | && flow_bb_inside_loop_p (outer_loop, gimple_bb (def_arg_stmt)) | |
5098 | && vinfo_for_stmt (def_arg_stmt) | |
5099 | && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_arg_stmt)) | |
5100 | == vect_double_reduction_def) | |
5101 | double_reduc = true; | |
5102 | } | |
7aa0d350 | 5103 | |
0df23b96 | 5104 | epilog_reduc_code = ERROR_MARK; |
5105 | if (reduction_code_for_scalar_code (orig_code, &epilog_reduc_code)) | |
5106 | { | |
b334cbba | 5107 | reduc_optab = optab_for_tree_code (epilog_reduc_code, vectype_out, |
0df23b96 | 5108 | optab_default); |
5109 | if (!reduc_optab) | |
5110 | { | |
6d8fb6cf | 5111 | if (dump_enabled_p ()) |
7bd765d4 | 5112 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
78bb46f5 | 5113 | "no optab for reduction.\n"); |
0df23b96 | 5114 | |
5115 | epilog_reduc_code = ERROR_MARK; | |
5116 | } | |
f3d76545 | 5117 | else if (optab_handler (reduc_optab, vec_mode) == CODE_FOR_nothing) |
0df23b96 | 5118 | { |
f3d76545 | 5119 | optab = scalar_reduc_to_vector (reduc_optab, vectype_out); |
5120 | if (optab_handler (optab, vec_mode) == CODE_FOR_nothing) | |
5121 | { | |
5122 | if (dump_enabled_p ()) | |
5123 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
5124 | "reduc op not supported by target.\n"); | |
48e1416a | 5125 | |
f3d76545 | 5126 | epilog_reduc_code = ERROR_MARK; |
5127 | } | |
0df23b96 | 5128 | } |
5129 | } | |
5130 | else | |
5131 | { | |
5132 | if (!nested_cycle || double_reduc) | |
5133 | { | |
6d8fb6cf | 5134 | if (dump_enabled_p ()) |
7bd765d4 | 5135 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
78bb46f5 | 5136 | "no reduc code for scalar code.\n"); |
0df23b96 | 5137 | |
5138 | return false; | |
5139 | } | |
5140 | } | |
5141 | ||
7aa0d350 | 5142 | if (double_reduc && ncopies > 1) |
5143 | { | |
6d8fb6cf | 5144 | if (dump_enabled_p ()) |
7bd765d4 | 5145 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
78bb46f5 | 5146 | "multiple types in double reduction\n"); |
7aa0d350 | 5147 | |
5148 | return false; | |
5149 | } | |
48e1416a | 5150 | |
f0c50415 | 5151 | /* In case of widening multiplication by a constant, we update the type |
5152 | of the constant to be the type of the other operand. We check that the | |
5153 | constant fits the type in the pattern recognition pass. */ | |
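  /* E.g. (illustrative): in "acc = DOT_PROD_EXPR <a_elt, 2, acc>" where
     A_ELT is a short, the INTEGER_CST 2 is converted with fold_convert to
     short here, so that both multiplication operands have compatible
     types.  */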
5154 | if (code == DOT_PROD_EXPR | |
5155 | && !types_compatible_p (TREE_TYPE (ops[0]), TREE_TYPE (ops[1]))) | |
5156 | { | |
5157 | if (TREE_CODE (ops[0]) == INTEGER_CST) | |
5158 | ops[0] = fold_convert (TREE_TYPE (ops[1]), ops[0]); | |
5159 | else if (TREE_CODE (ops[1]) == INTEGER_CST) | |
5160 | ops[1] = fold_convert (TREE_TYPE (ops[0]), ops[1]); | |
5161 | else | |
5162 | { | |
6d8fb6cf | 5163 | if (dump_enabled_p ()) |
7bd765d4 | 5164 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
78bb46f5 | 5165 | "invalid types in dot-prod\n"); |
f0c50415 | 5166 | |
5167 | return false; | |
5168 | } | |
5169 | } | |
5170 | ||
fb85abff | 5171 | if (!vec_stmt) /* transformation not required. */ |
5172 | { | |
fb85abff | 5173 | if (!vect_model_reduction_cost (stmt_info, epilog_reduc_code, ncopies)) |
5174 | return false; | |
2814125e | 5175 | STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type; |
fb85abff | 5176 | return true; |
5177 | } | |
5178 | ||
5179 | /** Transform. **/ | |
5180 | ||
6d8fb6cf | 5181 | if (dump_enabled_p ()) |
78bb46f5 | 5182 | dump_printf_loc (MSG_NOTE, vect_location, "transform reduction.\n"); |
fb85abff | 5183 | |
0df23b96 | 5184 | /* FORNOW: Multiple types are not supported for condition. */ |
5185 | if (code == COND_EXPR) | |
5186 | gcc_assert (ncopies == 1); | |
5187 | ||
fb85abff | 5188 | /* Create the destination vector */ |
b334cbba | 5189 | vec_dest = vect_create_destination_var (scalar_dest, vectype_out); |
fb85abff | 5190 | |
5191 | /* In case the vectorization factor (VF) is bigger than the number | |
5192 | of elements that we can fit in a vectype (nunits), we have to generate | |
5193 | more than one vector stmt - i.e. - we need to "unroll" the | |
5194 | vector stmt by a factor VF/nunits. For more details see documentation | |
5195 | in vectorizable_operation. */ | |
5196 | ||
5197 | /* If the reduction is used in an outer loop we need to generate | |
5198 | VF intermediate results, like so (e.g. for ncopies=2): | |
5199 | r0 = phi (init, r0) | |
5200 | r1 = phi (init, r1) | |
5201 | r0 = x0 + r0; | |
5202 | r1 = x1 + r1; | |
5203 | (i.e. we generate VF results in 2 registers). | |
5204 | In this case we have a separate def-use cycle for each copy, and therefore | |
5205 | for each copy we get the vector def for the reduction variable from the | |
5206 | respective phi node created for this copy. | |
5207 | ||
5208 | Otherwise (the reduction is unused in the loop nest), we can combine | |
5209 | together intermediate results, like so (e.g. for ncopies=2): | |
5210 | r = phi (init, r) | |
5211 | r = x0 + r; | |
5212 | r = x1 + r; | |
5213 | (i.e. we generate VF/2 results in a single register). | |
5214 | In this case for each copy we get the vector def for the reduction variable | |
5215 | from the vectorized reduction operation generated in the previous iteration. | |
5216 | */ | |
5217 | ||
f083cd24 | 5218 | if (STMT_VINFO_RELEVANT (stmt_info) == vect_unused_in_scope) |
fb85abff | 5219 | { |
5220 | single_defuse_cycle = true; | |
5221 | epilog_copies = 1; | |
5222 | } | |
5223 | else | |
5224 | epilog_copies = ncopies; | |
5225 | ||
5226 | prev_stmt_info = NULL; | |
5227 | prev_phi_info = NULL; | |
eefa05c8 | 5228 | if (slp_node) |
5229 | { | |
5230 | vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node); | |
5231 | gcc_assert (TYPE_VECTOR_SUBPARTS (vectype_out) | |
5232 | == TYPE_VECTOR_SUBPARTS (vectype_in)); | |
5233 | } | |
5234 | else | |
5235 | { | |
5236 | vec_num = 1; | |
f1f41a6c | 5237 | vec_oprnds0.create (1); |
eefa05c8 | 5238 | if (op_type == ternary_op) |
f1f41a6c | 5239 | vec_oprnds1.create (1); |
eefa05c8 | 5240 | } |
5241 | ||
f1f41a6c | 5242 | phis.create (vec_num); |
5243 | vect_defs.create (vec_num); | |
eefa05c8 | 5244 | if (!slp_node) |
f1f41a6c | 5245 | vect_defs.quick_push (NULL_TREE); |
eefa05c8 | 5246 | |
fb85abff | 5247 | for (j = 0; j < ncopies; j++) |
5248 | { | |
5249 | if (j == 0 || !single_defuse_cycle) | |
5250 | { | |
eefa05c8 | 5251 | for (i = 0; i < vec_num; i++) |
5252 | { | |
5253 | /* Create the reduction-phi that defines the reduction | |
5254 | operand. */ | |
5255 | new_phi = create_phi_node (vec_dest, loop->header); | |
5256 | set_vinfo_for_stmt (new_phi, | |
5257 | new_stmt_vec_info (new_phi, loop_vinfo, | |
5258 | NULL)); | |
5259 | if (j == 0 || slp_node) | |
f1f41a6c | 5260 | phis.quick_push (new_phi); |
eefa05c8 | 5261 | } |
5262 | } | |
fb85abff | 5263 | |
0df23b96 | 5264 | if (code == COND_EXPR) |
5265 | { | |
eefa05c8 | 5266 | gcc_assert (!slp_node); |
5267 | vectorizable_condition (stmt, gsi, vec_stmt, | |
f1f41a6c | 5268 | PHI_RESULT (phis[0]), |
f2104a54 | 5269 | reduc_index, NULL); |
0df23b96 | 5270 | /* Multiple types are not supported for condition. */ |
5271 | break; | |
5272 | } | |
5273 | ||
fb85abff | 5274 | /* Handle uses. */ |
5275 | if (j == 0) | |
5276 | { | |
09e31a48 | 5277 | op0 = ops[!reduc_index]; |
5278 | if (op_type == ternary_op) | |
5279 | { | |
5280 | if (reduc_index == 0) | |
5281 | op1 = ops[2]; | |
5282 | else | |
5283 | op1 = ops[1]; | |
5284 | } | |
5285 | ||
eefa05c8 | 5286 | if (slp_node) |
b0f64919 | 5287 | vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1, |
5288 | slp_node, -1); | |
eefa05c8 | 5289 | else |
fb85abff | 5290 | { |
eefa05c8 | 5291 | loop_vec_def0 = vect_get_vec_def_for_operand (ops[!reduc_index], |
5292 | stmt, NULL); | |
f1f41a6c | 5293 | vec_oprnds0.quick_push (loop_vec_def0); |
eefa05c8 | 5294 | if (op_type == ternary_op) |
5295 | { | |
09e31a48 | 5296 | loop_vec_def1 = vect_get_vec_def_for_operand (op1, stmt, |
5297 | NULL); | |
f1f41a6c | 5298 | vec_oprnds1.quick_push (loop_vec_def1); |
eefa05c8 | 5299 | } |
fb85abff | 5300 | } |
fb85abff | 5301 | } |
5302 | else | |
5303 | { | |
eefa05c8 | 5304 | if (!slp_node) |
5305 | { | |
d42d0fe0 | 5306 | enum vect_def_type dt; |
5307 | gimple dummy_stmt; | |
5308 | tree dummy; | |
5309 | ||
bed8b93b | 5310 | vect_is_simple_use (ops[!reduc_index], stmt, loop_vinfo, NULL, |
d42d0fe0 | 5311 | &dummy_stmt, &dummy, &dt); |
5312 | loop_vec_def0 = vect_get_vec_def_for_stmt_copy (dt, | |
5313 | loop_vec_def0); | |
f1f41a6c | 5314 | vec_oprnds0[0] = loop_vec_def0; |
eefa05c8 | 5315 | if (op_type == ternary_op) |
5316 | { | |
bed8b93b | 5317 | vect_is_simple_use (op1, stmt, loop_vinfo, NULL, &dummy_stmt, |
d42d0fe0 | 5318 | &dummy, &dt); |
eefa05c8 | 5319 | loop_vec_def1 = vect_get_vec_def_for_stmt_copy (dt, |
5320 | loop_vec_def1); | |
f1f41a6c | 5321 | vec_oprnds1[0] = loop_vec_def1; |
eefa05c8 | 5322 | } |
5323 | } | |
fb85abff | 5324 | |
eefa05c8 | 5325 | if (single_defuse_cycle) |
5326 | reduc_def = gimple_assign_lhs (new_stmt); | |
fb85abff | 5327 | |
eefa05c8 | 5328 | STMT_VINFO_RELATED_STMT (prev_phi_info) = new_phi; |
fb85abff | 5329 | } |
5330 | ||
f1f41a6c | 5331 | FOR_EACH_VEC_ELT (vec_oprnds0, i, def0) |
ade2ac53 | 5332 | { |
eefa05c8 | 5333 | if (slp_node) |
f1f41a6c | 5334 | reduc_def = PHI_RESULT (phis[i]); |
ade2ac53 | 5335 | else |
eefa05c8 | 5336 | { |
5337 | if (!single_defuse_cycle || j == 0) | |
5338 | reduc_def = PHI_RESULT (new_phi); | |
5339 | } | |
5340 | ||
5341 | def1 = ((op_type == ternary_op) | |
f1f41a6c | 5342 | ? vec_oprnds1[i] : NULL); |
eefa05c8 | 5343 | if (op_type == binary_op) |
5344 | { | |
5345 | if (reduc_index == 0) | |
5346 | expr = build2 (code, vectype_out, reduc_def, def0); | |
5347 | else | |
5348 | expr = build2 (code, vectype_out, def0, reduc_def); | |
5349 | } | |
48e1416a | 5350 | else |
ade2ac53 | 5351 | { |
eefa05c8 | 5352 | if (reduc_index == 0) |
5353 | expr = build3 (code, vectype_out, reduc_def, def0, def1); | |
ade2ac53 | 5354 | else |
eefa05c8 | 5355 | { |
5356 | if (reduc_index == 1) | |
5357 | expr = build3 (code, vectype_out, def0, reduc_def, def1); | |
5358 | else | |
5359 | expr = build3 (code, vectype_out, def0, def1, reduc_def); | |
5360 | } | |
5361 | } | |
5362 | ||
5363 | new_stmt = gimple_build_assign (vec_dest, expr); | |
5364 | new_temp = make_ssa_name (vec_dest, new_stmt); | |
5365 | gimple_assign_set_lhs (new_stmt, new_temp); | |
5366 | vect_finish_stmt_generation (stmt, new_stmt, gsi); | |
39a5d6b1 | 5367 | |
eefa05c8 | 5368 | if (slp_node) |
5369 | { | |
f1f41a6c | 5370 | SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt); |
5371 | vect_defs.quick_push (new_temp); | |
ade2ac53 | 5372 | } |
eefa05c8 | 5373 | else |
f1f41a6c | 5374 | vect_defs[0] = new_temp; |
ade2ac53 | 5375 | } |
5376 | ||
eefa05c8 | 5377 | if (slp_node) |
5378 | continue; | |
48e1416a | 5379 | |
fb85abff | 5380 | if (j == 0) |
5381 | STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt; | |
5382 | else | |
5383 | STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt; | |
0df23b96 | 5384 | |
fb85abff | 5385 | prev_stmt_info = vinfo_for_stmt (new_stmt); |
5386 | prev_phi_info = vinfo_for_stmt (new_phi); | |
5387 | } | |
5388 | ||
5389 | /* Finalize the reduction-phi (set its arguments) and create the | |
5390 | epilog reduction code. */ | |
eefa05c8 | 5391 | if ((!single_defuse_cycle || code == COND_EXPR) && !slp_node) |
5392 | { | |
5393 | new_temp = gimple_assign_lhs (*vec_stmt); | |
f1f41a6c | 5394 | vect_defs[0] = new_temp; |
eefa05c8 | 5395 | } |
5396 | ||
5397 | vect_create_epilog_for_reduction (vect_defs, stmt, epilog_copies, | |
5398 | epilog_reduc_code, phis, reduc_index, | |
5399 | double_reduc, slp_node); | |
5400 | ||
fb85abff | 5401 | return true; |
5402 | } | |
5403 | ||
5404 | /* Function vect_min_worthwhile_factor. | |
5405 | ||
5406 | For a loop where we could vectorize the operation indicated by CODE, | |
5407 | return the minimum vectorization factor that makes it worthwhile | |
5408 | to use generic vectors. */ | |
5409 | int | |
5410 | vect_min_worthwhile_factor (enum tree_code code) | |
5411 | { | |
5412 | switch (code) | |
5413 | { | |
5414 | case PLUS_EXPR: | |
5415 | case MINUS_EXPR: | |
5416 | case NEGATE_EXPR: | |
5417 | return 4; | |
5418 | ||
5419 | case BIT_AND_EXPR: | |
5420 | case BIT_IOR_EXPR: | |
5421 | case BIT_XOR_EXPR: | |
5422 | case BIT_NOT_EXPR: | |
5423 | return 2; | |
5424 | ||
5425 | default: | |
5426 | return INT_MAX; | |
5427 | } | |
5428 | } | |
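
/* For illustration only: with the values above, an add/subtract/negate on
   generic vectors is only considered worthwhile at a vectorization factor
   of 4 or more, while bitwise operations already pay off at a factor of 2.
   A caller might therefore reject a generic-vector fallback along the
   lines of (hypothetical usage sketch, not a quotation of a call site):

     if (LOOP_VINFO_VECT_FACTOR (loop_vinfo)
         < vect_min_worthwhile_factor (code))
       return false;  */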
5429 | ||
5430 | ||
5431 | /* Function vectorizable_induction | |
5432 | ||
5433 | Check if PHI performs an induction computation that can be vectorized. | |
5434 | If VEC_STMT is also passed, vectorize the induction PHI: create a vectorized | |
5435 | phi to replace it, put it in VEC_STMT, and add it to the same basic block. | |
5436 | Return FALSE if not a vectorizable STMT, TRUE otherwise. */ | |
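
/* For example (illustrative only): in

     for (i = 0; i < N; i++)
       a[i] = i;

   the PHI node for 'i' is an induction.  With a vectorization factor of 4
   it is vectorized starting from the initial vector {0, 1, 2, 3}, which is
   then advanced by the step vector {4, 4, 4, 4} in each vector iteration.  */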
5437 | ||
5438 | bool | |
5439 | vectorizable_induction (gimple phi, gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED, | |
5440 | gimple *vec_stmt) | |
5441 | { | |
5442 | stmt_vec_info stmt_info = vinfo_for_stmt (phi); | |
5443 | tree vectype = STMT_VINFO_VECTYPE (stmt_info); | |
5444 | loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); | |
5445 | struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); | |
5446 | int nunits = TYPE_VECTOR_SUBPARTS (vectype); | |
5447 | int ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits; | |
5448 | tree vec_def; | |
5449 | ||
5450 | gcc_assert (ncopies >= 1); | |
02a2bdca | 5451 | /* FORNOW. These restrictions should be relaxed. */ |
5452 | if (nested_in_vect_loop_p (loop, phi)) | |
fb85abff | 5453 | { |
02a2bdca | 5454 | imm_use_iterator imm_iter; |
5455 | use_operand_p use_p; | |
5456 | gimple exit_phi; | |
5457 | edge latch_e; | |
5458 | tree loop_arg; | |
5459 | ||
5460 | if (ncopies > 1) | |
5461 | { | |
6d8fb6cf | 5462 | if (dump_enabled_p ()) |
7bd765d4 | 5463 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
78bb46f5 | 5464 | "multiple types in nested loop.\n"); |
02a2bdca | 5465 | return false; |
5466 | } | |
5467 | ||
5468 | exit_phi = NULL; | |
5469 | latch_e = loop_latch_edge (loop->inner); | |
5470 | loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e); | |
5471 | FOR_EACH_IMM_USE_FAST (use_p, imm_iter, loop_arg) | |
5472 | { | |
0b308eee | 5473 | gimple use_stmt = USE_STMT (use_p); |
5474 | if (is_gimple_debug (use_stmt)) | |
5475 | continue; | |
5476 | ||
5477 | if (!flow_bb_inside_loop_p (loop->inner, gimple_bb (use_stmt))) | |
02a2bdca | 5478 | { |
0b308eee | 5479 | exit_phi = use_stmt; |
02a2bdca | 5480 | break; |
5481 | } | |
5482 | } | |
5483 | if (exit_phi) | |
5484 | { | |
5485 | stmt_vec_info exit_phi_vinfo = vinfo_for_stmt (exit_phi); | |
5486 | if (!(STMT_VINFO_RELEVANT_P (exit_phi_vinfo) | |
5487 | && !STMT_VINFO_LIVE_P (exit_phi_vinfo))) | |
5488 | { | |
6d8fb6cf | 5489 | if (dump_enabled_p ()) |
78bb46f5 | 5490 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
7bd765d4 | 5491 | "inner-loop induction only used outside " |
78bb46f5 | 5492 | "of the outer vectorized loop.\n"); |
02a2bdca | 5493 | return false; |
5494 | } | |
5495 | } | |
fb85abff | 5496 | } |
5497 | ||
5498 | if (!STMT_VINFO_RELEVANT_P (stmt_info)) | |
5499 | return false; | |
5500 | ||
5501 | /* FORNOW: SLP not supported. */ | |
5502 | if (STMT_SLP_TYPE (stmt_info)) | |
5503 | return false; | |
5504 | ||
5505 | gcc_assert (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def); | |
5506 | ||
5507 | if (gimple_code (phi) != GIMPLE_PHI) | |
5508 | return false; | |
5509 | ||
5510 | if (!vec_stmt) /* transformation not required. */ | |
5511 | { | |
5512 | STMT_VINFO_TYPE (stmt_info) = induc_vec_info_type; | |
6d8fb6cf | 5513 | if (dump_enabled_p ()) |
7bd765d4 | 5514 | dump_printf_loc (MSG_NOTE, vect_location, |
78bb46f5 | 5515 | "=== vectorizable_induction ===\n"); |
fb85abff | 5516 | vect_model_induction_cost (stmt_info, ncopies); |
5517 | return true; | |
5518 | } | |
5519 | ||
5520 | /** Transform. **/ | |
5521 | ||
6d8fb6cf | 5522 | if (dump_enabled_p ()) |
78bb46f5 | 5523 | dump_printf_loc (MSG_NOTE, vect_location, "transform induction phi.\n"); |
fb85abff | 5524 | |
5525 | vec_def = get_initial_def_for_induction (phi); | |
5526 | *vec_stmt = SSA_NAME_DEF_STMT (vec_def); | |
5527 | return true; | |
5528 | } | |
5529 | ||
5530 | /* Function vectorizable_live_operation. | |
5531 | ||
282bf14c | 5532 | STMT computes a value that is used outside the loop. Check if |
fb85abff | 5533 | it can be supported. */ |
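
/* For example (illustrative only): in

     for (i = 0; i < N; i++)
       {
         s = x + y;
         a[i] = ...;
       }
     ... = s;

   's' is live after the loop.  Because both operands of the addition are
   loop-invariant, the scalar statement can stay in place unvectorized and
   its final value can be used after the loop, which is what the FORNOW
   restriction below requires.  */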
5534 | ||
5535 | bool | |
5536 | vectorizable_live_operation (gimple stmt, | |
5537 | gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED, | |
3d483a94 | 5538 | gimple *vec_stmt) |
fb85abff | 5539 | { |
5540 | stmt_vec_info stmt_info = vinfo_for_stmt (stmt); | |
5541 | loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); | |
5542 | struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); | |
5543 | int i; | |
5544 | int op_type; | |
5545 | tree op; | |
5546 | tree def; | |
5547 | gimple def_stmt; | |
48e1416a | 5548 | enum vect_def_type dt; |
fb85abff | 5549 | enum tree_code code; |
5550 | enum gimple_rhs_class rhs_class; | |
5551 | ||
5552 | gcc_assert (STMT_VINFO_LIVE_P (stmt_info)); | |
5553 | ||
5554 | if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def) | |
5555 | return false; | |
5556 | ||
5557 | if (!is_gimple_assign (stmt)) | |
3d483a94 | 5558 | { |
5559 | if (gimple_call_internal_p (stmt) | |
5560 | && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE | |
5561 | && gimple_call_lhs (stmt) | |
5562 | && loop->simduid | |
5563 | && TREE_CODE (gimple_call_arg (stmt, 0)) == SSA_NAME | |
5564 | && loop->simduid | |
5565 | == SSA_NAME_VAR (gimple_call_arg (stmt, 0))) | |
5566 | { | |
5567 | edge e = single_exit (loop); | |
5568 | basic_block merge_bb = e->dest; | |
5569 | imm_use_iterator imm_iter; | |
5570 | use_operand_p use_p; | |
5571 | tree lhs = gimple_call_lhs (stmt); | |
5572 | ||
5573 | FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs) | |
5574 | { | |
5575 | gimple use_stmt = USE_STMT (use_p); | |
5576 | if (gimple_code (use_stmt) == GIMPLE_PHI | |
0b308eee | 5577 | && gimple_bb (use_stmt) == merge_bb) |
3d483a94 | 5578 | { |
5579 | if (vec_stmt) | |
5580 | { | |
5581 | tree vfm1 | |
5582 | = build_int_cst (unsigned_type_node, | |
5583 | loop_vinfo->vectorization_factor - 1); | |
5584 | SET_PHI_ARG_DEF (use_stmt, e->dest_idx, vfm1); | |
5585 | } | |
5586 | return true; | |
5587 | } | |
5588 | } | |
5589 | } | |
5590 | ||
5591 | return false; | |
5592 | } | |
fb85abff | 5593 | |
5594 | if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME) | |
5595 | return false; | |
5596 | ||
5597 | /* FORNOW. CHECKME. */ | |
5598 | if (nested_in_vect_loop_p (loop, stmt)) | |
5599 | return false; | |
5600 | ||
5601 | code = gimple_assign_rhs_code (stmt); | |
5602 | op_type = TREE_CODE_LENGTH (code); | |
5603 | rhs_class = get_gimple_rhs_class (code); | |
5604 | gcc_assert (rhs_class != GIMPLE_UNARY_RHS || op_type == unary_op); | |
5605 | gcc_assert (rhs_class != GIMPLE_BINARY_RHS || op_type == binary_op); | |
5606 | ||
282bf14c | 5607 | /* FORNOW: support only if all uses are invariant. This means |
fb85abff | 5608 | that the scalar operations can remain in place, unvectorized. |
5609 | The original last scalar value that they compute will be used. */ | |
5610 | ||
5611 | for (i = 0; i < op_type; i++) | |
5612 | { | |
5613 | if (rhs_class == GIMPLE_SINGLE_RHS) | |
5614 | op = TREE_OPERAND (gimple_op (stmt, 1), i); | |
5615 | else | |
5616 | op = gimple_op (stmt, i + 1); | |
37545e54 | 5617 | if (op |
bed8b93b | 5618 | && !vect_is_simple_use (op, stmt, loop_vinfo, NULL, &def_stmt, &def, |
5619 | &dt)) | |
fb85abff | 5620 | { |
6d8fb6cf | 5621 | if (dump_enabled_p ()) |
7bd765d4 | 5622 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
78bb46f5 | 5623 | "use not simple.\n"); |
fb85abff | 5624 | return false; |
5625 | } | |
5626 | ||
f083cd24 | 5627 | if (dt != vect_external_def && dt != vect_constant_def) |
fb85abff | 5628 | return false; |
5629 | } | |
5630 | ||
5631 | /* No transformation is required for the cases we currently support. */ | |
5632 | return true; | |
5633 | } | |
5634 | ||
4c48884e | 5635 | /* Kill any debug uses outside LOOP of SSA names defined in STMT. */ |
5636 | ||
5637 | static void | |
5638 | vect_loop_kill_debug_uses (struct loop *loop, gimple stmt) | |
5639 | { | |
5640 | ssa_op_iter op_iter; | |
5641 | imm_use_iterator imm_iter; | |
5642 | def_operand_p def_p; | |
5643 | gimple ustmt; | |
5644 | ||
5645 | FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF) | |
5646 | { | |
5647 | FOR_EACH_IMM_USE_STMT (ustmt, imm_iter, DEF_FROM_PTR (def_p)) | |
5648 | { | |
5649 | basic_block bb; | |
5650 | ||
5651 | if (!is_gimple_debug (ustmt)) | |
5652 | continue; | |
5653 | ||
5654 | bb = gimple_bb (ustmt); | |
5655 | ||
5656 | if (!flow_bb_inside_loop_p (loop, bb)) | |
5657 | { | |
5658 | if (gimple_debug_bind_p (ustmt)) | |
5659 | { | |
6d8fb6cf | 5660 | if (dump_enabled_p ()) |
7bd765d4 | 5661 | dump_printf_loc (MSG_NOTE, vect_location, |
78bb46f5 | 5662 | "killing debug use\n"); |
4c48884e | 5663 | |
5664 | gimple_debug_bind_reset_value (ustmt); | |
5665 | update_stmt (ustmt); | |
5666 | } | |
5667 | else | |
5668 | gcc_unreachable (); | |
5669 | } | |
5670 | } | |
5671 | } | |
5672 | } | |
5673 | ||
782fd1d1 | 5674 | |
5675 | /* This function builds ni_name = number of iterations. Statements | |
313a5120 | 5676 | are emitted on the loop preheader edge. */ |
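
/* For instance (hypothetical SSA names): if LOOP_VINFO_NITERS is the
   expression 'n_5 + 1', this emits something like 'niters_7 = n_5 + 1' on
   the preheader edge and returns niters_7, whereas a constant such as 100
   is simply returned unchanged.  */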
782fd1d1 | 5677 | |
5678 | static tree | |
313a5120 | 5679 | vect_build_loop_niters (loop_vec_info loop_vinfo) |
782fd1d1 | 5680 | { |
782fd1d1 | 5681 | tree ni = unshare_expr (LOOP_VINFO_NITERS (loop_vinfo)); |
313a5120 | 5682 | if (TREE_CODE (ni) == INTEGER_CST) |
5683 | return ni; | |
5684 | else | |
5685 | { | |
5686 | tree ni_name, var; | |
5687 | gimple_seq stmts = NULL; | |
5688 | edge pe = loop_preheader_edge (LOOP_VINFO_LOOP (loop_vinfo)); | |
782fd1d1 | 5689 | |
313a5120 | 5690 | var = create_tmp_var (TREE_TYPE (ni), "niters"); |
5691 | ni_name = force_gimple_operand (ni, &stmts, false, var); | |
5692 | if (stmts) | |
5693 | gsi_insert_seq_on_edge_immediate (pe, stmts); | |
782fd1d1 | 5694 | |
313a5120 | 5695 | return ni_name; |
5696 | } | |
782fd1d1 | 5697 | } |
5698 | ||
5699 | ||
5700 | /* This function generates the following statements: | |
5701 | ||
313a5120 | 5702 | ni_name = number of iterations the loop executes |
5703 | ratio = ni_name / vf | |
5704 | ratio_mult_vf_name = ratio * vf | |
782fd1d1 | 5705 | |
313a5120 | 5706 | and places them on the loop preheader edge. */ |
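
/* For instance (illustrative numbers): with vf == 4 and ni_name == 10 this
   computes ratio == 2 (two full vector iterations) and
   ratio_mult_vf_name == 8 (scalar iterations covered by the vector loop),
   leaving 2 iterations for the epilog loop.  */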
782fd1d1 | 5707 | |
5708 | static void | |
5709 | vect_generate_tmps_on_preheader (loop_vec_info loop_vinfo, | |
5710 | tree ni_name, | |
5711 | tree *ratio_mult_vf_name_ptr, | |
313a5120 | 5712 | tree *ratio_name_ptr) |
782fd1d1 | 5713 | { |
782fd1d1 | 5714 | tree ni_minus_gap_name; |
5715 | tree var; | |
5716 | tree ratio_name; | |
5717 | tree ratio_mult_vf_name; | |
782fd1d1 | 5718 | int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo); |
313a5120 | 5719 | edge pe = loop_preheader_edge (LOOP_VINFO_LOOP (loop_vinfo)); |
782fd1d1 | 5720 | tree log_vf; |
5721 | ||
796f6cba | 5722 | log_vf = build_int_cst (TREE_TYPE (ni_name), exact_log2 (vf)); |
782fd1d1 | 5723 | |
5724 | /* If epilogue loop is required because of data accesses with gaps, we | |
5725 | subtract one iteration from the total number of iterations here for | |
5726 | correct calculation of RATIO. */ | |
5727 | if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo)) | |
5728 | { | |
5729 | ni_minus_gap_name = fold_build2 (MINUS_EXPR, TREE_TYPE (ni_name), | |
5730 | ni_name, | |
5731 | build_one_cst (TREE_TYPE (ni_name))); | |
5732 | if (!is_gimple_val (ni_minus_gap_name)) | |
5733 | { | |
796f6cba | 5734 | var = create_tmp_var (TREE_TYPE (ni_name), "ni_gap"); |
313a5120 | 5735 | gimple stmts = NULL; |
782fd1d1 | 5736 | ni_minus_gap_name = force_gimple_operand (ni_minus_gap_name, &stmts, |
5737 | true, var); | |
313a5120 | 5738 | gsi_insert_seq_on_edge_immediate (pe, stmts); |
782fd1d1 | 5739 | } |
5740 | } | |
5741 | else | |
5742 | ni_minus_gap_name = ni_name; | |
5743 | ||
5744 | /* Create: ratio = ni >> log2(vf) */ | |
796f6cba | 5745 | /* ??? As we have ni == number of latch executions + 1, ni could |
5746 | have overflowed to zero. So avoid computing the ratio directly from |
5747 | ni; instead use the fact that the ratio is known to be at least |
5748 | one and compute it via (ni - vf) >> log2(vf) + 1. */
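/* For instance, with vf == 4 and ni == 17 (and no gaps) this yields
   ((17 - 4) >> 2) + 1 == 4 full vector iterations, and the corresponding
   ratio_mult_vf below is 4 << 2 == 16.  */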
5749 | ratio_name | |
5750 | = fold_build2 (PLUS_EXPR, TREE_TYPE (ni_name), | |
5751 | fold_build2 (RSHIFT_EXPR, TREE_TYPE (ni_name), | |
5752 | fold_build2 (MINUS_EXPR, TREE_TYPE (ni_name), | |
5753 | ni_minus_gap_name, | |
5754 | build_int_cst | |
5755 | (TREE_TYPE (ni_name), vf)), | |
5756 | log_vf), | |
5757 | build_int_cst (TREE_TYPE (ni_name), 1)); | |
782fd1d1 | 5758 | if (!is_gimple_val (ratio_name)) |
5759 | { | |
796f6cba | 5760 | var = create_tmp_var (TREE_TYPE (ni_name), "bnd"); |
313a5120 | 5761 | gimple stmts = NULL; |
782fd1d1 | 5762 | ratio_name = force_gimple_operand (ratio_name, &stmts, true, var); |
313a5120 | 5763 | gsi_insert_seq_on_edge_immediate (pe, stmts); |
782fd1d1 | 5764 | } |
5765 | *ratio_name_ptr = ratio_name; | |
5766 | ||
5767 | /* Create: ratio_mult_vf = ratio << log2 (vf). */ | |
5768 | ||
5769 | if (ratio_mult_vf_name_ptr) | |
5770 | { | |
5771 | ratio_mult_vf_name = fold_build2 (LSHIFT_EXPR, TREE_TYPE (ratio_name), | |
5772 | ratio_name, log_vf); | |
5773 | if (!is_gimple_val (ratio_mult_vf_name)) | |
5774 | { | |
796f6cba | 5775 | var = create_tmp_var (TREE_TYPE (ni_name), "ratio_mult_vf"); |
313a5120 | 5776 | gimple stmts = NULL; |
782fd1d1 | 5777 | ratio_mult_vf_name = force_gimple_operand (ratio_mult_vf_name, &stmts, |
5778 | true, var); | |
313a5120 | 5779 | gsi_insert_seq_on_edge_immediate (pe, stmts); |
782fd1d1 | 5780 | } |
5781 | *ratio_mult_vf_name_ptr = ratio_mult_vf_name; | |
5782 | } | |
5783 | ||
5784 | return; | |
5785 | } | |
5786 | ||
5787 | ||
fb85abff | 5788 | /* Function vect_transform_loop. |
5789 | ||
5790 | The analysis phase has determined that the loop is vectorizable. | |
5791 | Vectorize the loop - create vectorized stmts to replace the scalar |
5792 | stmts in the loop, and update the loop exit condition. */ | |
5793 | ||
5794 | void | |
5795 | vect_transform_loop (loop_vec_info loop_vinfo) | |
5796 | { | |
5797 | struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); | |
5798 | basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo); | |
5799 | int nbbs = loop->num_nodes; | |
5800 | gimple_stmt_iterator si; | |
5801 | int i; | |
5802 | tree ratio = NULL; | |
5803 | int vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo); | |
ee612634 | 5804 | bool grouped_store; |
fb85abff | 5805 | bool slp_scheduled = false; |
18937389 | 5806 | gimple stmt, pattern_stmt; |
5807 | gimple_seq pattern_def_seq = NULL; | |
e3a19533 | 5808 | gimple_stmt_iterator pattern_def_si = gsi_none (); |
18937389 | 5809 | bool transform_pattern_stmt = false; |
13b31e0b | 5810 | bool check_profitability = false; |
e7430948 | 5811 | int th; |
d3f1934c | 5812 | /* Record number of iterations before we started tampering with the profile. */ |
5813 | gcov_type expected_iterations = expected_loop_iterations_unbounded (loop); | |
fb85abff | 5814 | |
6d8fb6cf | 5815 | if (dump_enabled_p ()) |
78bb46f5 | 5816 | dump_printf_loc (MSG_NOTE, vect_location, "=== vec_transform_loop ===\n"); |
fb85abff | 5817 | |
d3f1934c | 5818 | /* If the profile is imprecise, we have a chance to fix it up. */
5819 | if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)) | |
5820 | expected_iterations = LOOP_VINFO_INT_NITERS (loop_vinfo); | |
5821 | ||
e7430948 | 5822 | /* Use the more conservative vectorization threshold. If the number |
5823 | of iterations is constant, assume the cost check has been performed |
5824 | by our caller. If the threshold makes all loops profitable that |
5825 | run at least the vectorization factor number of times, checking |
5826 | is pointless, too. */
004a94a5 | 5827 | th = LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo); |
e7430948 | 5828 | if (th >= LOOP_VINFO_VECT_FACTOR (loop_vinfo) - 1 |
5829 | && !LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)) | |
5830 | { | |
6d8fb6cf | 5831 | if (dump_enabled_p ()) |
7bd765d4 | 5832 | dump_printf_loc (MSG_NOTE, vect_location, |
78bb46f5 | 5833 | "Profitability threshold is %d loop iterations.\n", |
5834 | th); | |
e7430948 | 5835 | check_profitability = true; |
5836 | } | |
5837 | ||
2cd0995e | 5838 | /* Version the loop first, if required, so the profitability check |
5839 | comes first. */ | |
23a3430d | 5840 | |
2cd0995e | 5841 | if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo) |
5842 | || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo)) | |
e7430948 | 5843 | { |
2cd0995e | 5844 | vect_loop_versioning (loop_vinfo, th, check_profitability); |
e7430948 | 5845 | check_profitability = false; |
5846 | } | |
23a3430d | 5847 | |
313a5120 | 5848 | tree ni_name = vect_build_loop_niters (loop_vinfo); |
5849 | LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo) = ni_name; | |
5850 | ||
2cd0995e | 5851 | /* Peel the loop if there are data refs with unknown alignment. |
313a5120 | 5852 | Only one store data ref with unknown alignment is allowed. */
2cd0995e | 5853 | |
313a5120 | 5854 | if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo)) |
e7430948 | 5855 | { |
782fd1d1 | 5856 | vect_do_peeling_for_alignment (loop_vinfo, ni_name, |
5857 | th, check_profitability); | |
e7430948 | 5858 | check_profitability = false; |
313a5120 | 5859 | /* The above adjusts LOOP_VINFO_NITERS, so force ni_name to |
5860 | be re-computed. */
5861 | ni_name = NULL_TREE; | |
e7430948 | 5862 | } |
fb85abff | 5863 | |
fb85abff | 5864 | /* If the loop has a symbolic number of iterations 'n' (i.e. it is not a |
5865 | compile-time constant), or it is a constant that is not a multiple of the |
5866 | vectorization factor, then an epilog loop needs to be created. |
5867 | We therefore duplicate the loop: the original loop will be vectorized, | |
282bf14c | 5868 | and will compute the first (n/VF) iterations. The second copy of the loop |
fb85abff | 5869 | will remain scalar and will compute the remaining (n%VF) iterations. |
5870 | (VF is the vectorization factor). */ | |
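
/* For example (illustrative numbers): with VF == 8 and n == 1003 the
   vectorized loop executes 125 iterations (covering 1000 scalar
   iterations) and the scalar epilog loop executes the remaining 3.  */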
5871 | ||
313a5120 | 5872 | if (LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) |
c8a2b4ff | 5873 | || LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo)) |
782fd1d1 | 5874 | { |
313a5120 | 5875 | tree ratio_mult_vf; |
5876 | if (!ni_name) | |
5877 | ni_name = vect_build_loop_niters (loop_vinfo); | |
782fd1d1 | 5878 | vect_generate_tmps_on_preheader (loop_vinfo, ni_name, &ratio_mult_vf, |
313a5120 | 5879 | &ratio); |
782fd1d1 | 5880 | vect_do_peeling_for_loop_bound (loop_vinfo, ni_name, ratio_mult_vf, |
5881 | th, check_profitability); | |
5882 | } | |
c8a2b4ff | 5883 | else if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)) |
fb85abff | 5884 | ratio = build_int_cst (TREE_TYPE (LOOP_VINFO_NITERS (loop_vinfo)), |
5885 | LOOP_VINFO_INT_NITERS (loop_vinfo) / vectorization_factor); | |
c8a2b4ff | 5886 | else |
5887 | { | |
313a5120 | 5888 | if (!ni_name) |
5889 | ni_name = vect_build_loop_niters (loop_vinfo); | |
5890 | vect_generate_tmps_on_preheader (loop_vinfo, ni_name, NULL, &ratio); | |
c8a2b4ff | 5891 | } |
fb85abff | 5892 | |
5893 | /* 1) Make sure the loop header has exactly two entries | |
5894 | 2) Make sure we have a preheader basic block. */ | |
5895 | ||
5896 | gcc_assert (EDGE_COUNT (loop->header->preds) == 2); | |
5897 | ||
5898 | split_edge (loop_preheader_edge (loop)); | |
5899 | ||
5900 | /* FORNOW: the vectorizer supports only loops whose body consists |
48e1416a | 5901 | of one basic block (header + empty latch). When the vectorizer |
5902 | supports more involved loop forms, the order in which the BBs are |
fb85abff | 5903 | traversed will need to be reconsidered. */
5904 | ||
5905 | for (i = 0; i < nbbs; i++) | |
5906 | { | |
5907 | basic_block bb = bbs[i]; | |
5908 | stmt_vec_info stmt_info; | |
5909 | gimple phi; | |
5910 | ||
5911 | for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si)) | |
5912 | { | |
5913 | phi = gsi_stmt (si); | |
6d8fb6cf | 5914 | if (dump_enabled_p ()) |
fb85abff | 5915 | { |
7bd765d4 | 5916 | dump_printf_loc (MSG_NOTE, vect_location, |
5917 | "------>vectorizing phi: "); | |
5918 | dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0); | |
78bb46f5 | 5919 | dump_printf (MSG_NOTE, "\n"); |
fb85abff | 5920 | } |
5921 | stmt_info = vinfo_for_stmt (phi); | |
5922 | if (!stmt_info) | |
5923 | continue; | |
5924 | ||
12e7ff4f | 5925 | if (MAY_HAVE_DEBUG_STMTS && !STMT_VINFO_LIVE_P (stmt_info)) |
5926 | vect_loop_kill_debug_uses (loop, phi); | |
5927 | ||
fb85abff | 5928 | if (!STMT_VINFO_RELEVANT_P (stmt_info) |
5929 | && !STMT_VINFO_LIVE_P (stmt_info)) | |
12e7ff4f | 5930 | continue; |
fb85abff | 5931 | |
bb4b5e0f | 5932 | if (STMT_VINFO_VECTYPE (stmt_info) |
5933 | && (TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info)) | |
5934 | != (unsigned HOST_WIDE_INT) vectorization_factor) | |
6d8fb6cf | 5935 | && dump_enabled_p ()) |
78bb46f5 | 5936 | dump_printf_loc (MSG_NOTE, vect_location, "multiple-types.\n"); |
fb85abff | 5937 | |
5938 | if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def) | |
5939 | { | |
6d8fb6cf | 5940 | if (dump_enabled_p ()) |
78bb46f5 | 5941 | dump_printf_loc (MSG_NOTE, vect_location, "transform phi.\n"); |
fb85abff | 5942 | vect_transform_stmt (phi, NULL, NULL, NULL, NULL); |
5943 | } | |
5944 | } | |
5945 | ||
8bf58742 | 5946 | pattern_stmt = NULL; |
5947 | for (si = gsi_start_bb (bb); !gsi_end_p (si) || transform_pattern_stmt;) | |
fb85abff | 5948 | { |
fb85abff | 5949 | bool is_store; |
5950 | ||
8bf58742 | 5951 | if (transform_pattern_stmt) |
18937389 | 5952 | stmt = pattern_stmt; |
8bf58742 | 5953 | else |
8911f4de | 5954 | { |
5955 | stmt = gsi_stmt (si); | |
5956 | /* During vectorization remove existing clobber stmts. */ | |
5957 | if (gimple_clobber_p (stmt)) | |
5958 | { | |
5959 | unlink_stmt_vdef (stmt); | |
5960 | gsi_remove (&si, true); | |
5961 | release_defs (stmt); | |
5962 | continue; | |
5963 | } | |
5964 | } | |
8bf58742 | 5965 | |
6d8fb6cf | 5966 | if (dump_enabled_p ()) |
fb85abff | 5967 | { |
7bd765d4 | 5968 | dump_printf_loc (MSG_NOTE, vect_location, |
5969 | "------>vectorizing statement: "); | |
5970 | dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0); | |
78bb46f5 | 5971 | dump_printf (MSG_NOTE, "\n"); |
48e1416a | 5972 | } |
fb85abff | 5973 | |
5974 | stmt_info = vinfo_for_stmt (stmt); | |
5975 | ||
5976 | /* vector stmts created in the outer-loop during vectorization of | |
5977 | stmts in an inner-loop may not have a stmt_info, and do not | |
5978 | need to be vectorized. */ | |
5979 | if (!stmt_info) | |
5980 | { | |
5981 | gsi_next (&si); | |
5982 | continue; | |
5983 | } | |
5984 | ||
12e7ff4f | 5985 | if (MAY_HAVE_DEBUG_STMTS && !STMT_VINFO_LIVE_P (stmt_info)) |
5986 | vect_loop_kill_debug_uses (loop, stmt); | |
5987 | ||
fb85abff | 5988 | if (!STMT_VINFO_RELEVANT_P (stmt_info) |
5989 | && !STMT_VINFO_LIVE_P (stmt_info)) | |
cfdcf183 | 5990 | { |
5991 | if (STMT_VINFO_IN_PATTERN_P (stmt_info) | |
5992 | && (pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info)) | |
5993 | && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt)) | |
5994 | || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt)))) | |
5995 | { | |
5996 | stmt = pattern_stmt; | |
5997 | stmt_info = vinfo_for_stmt (stmt); | |
5998 | } | |
5999 | else | |
6000 | { | |
6001 | gsi_next (&si); | |
6002 | continue; | |
6003 | } | |
fb85abff | 6004 | } |
8bf58742 | 6005 | else if (STMT_VINFO_IN_PATTERN_P (stmt_info) |
6006 | && (pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info)) | |
6007 | && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt)) | |
6008 | || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt)))) | |
6009 | transform_pattern_stmt = true; | |
fb85abff | 6010 | |
18937389 | 6011 | /* If pattern statement has def stmts, vectorize them too. */ |
6012 | if (is_pattern_stmt_p (stmt_info)) | |
6013 | { | |
6014 | if (pattern_def_seq == NULL) | |
6015 | { | |
6016 | pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info); | |
6017 | pattern_def_si = gsi_start (pattern_def_seq); | |
6018 | } | |
6019 | else if (!gsi_end_p (pattern_def_si)) | |
6020 | gsi_next (&pattern_def_si); | |
6021 | if (pattern_def_seq != NULL) | |
6022 | { | |
6023 | gimple pattern_def_stmt = NULL; | |
6024 | stmt_vec_info pattern_def_stmt_info = NULL; | |
45eea33f | 6025 | |
18937389 | 6026 | while (!gsi_end_p (pattern_def_si)) |
6027 | { | |
6028 | pattern_def_stmt = gsi_stmt (pattern_def_si); | |
6029 | pattern_def_stmt_info | |
6030 | = vinfo_for_stmt (pattern_def_stmt); | |
6031 | if (STMT_VINFO_RELEVANT_P (pattern_def_stmt_info) | |
6032 | || STMT_VINFO_LIVE_P (pattern_def_stmt_info)) | |
6033 | break; | |
6034 | gsi_next (&pattern_def_si); | |
6035 | } | |
6036 | ||
6037 | if (!gsi_end_p (pattern_def_si)) | |
6038 | { | |
6d8fb6cf | 6039 | if (dump_enabled_p ()) |
18937389 | 6040 | { |
7bd765d4 | 6041 | dump_printf_loc (MSG_NOTE, vect_location, |
6042 | "==> vectorizing pattern def " | |
6043 | "stmt: "); | |
6044 | dump_gimple_stmt (MSG_NOTE, TDF_SLIM, | |
6045 | pattern_def_stmt, 0); | |
78bb46f5 | 6046 | dump_printf (MSG_NOTE, "\n"); |
18937389 | 6047 | } |
6048 | ||
6049 | stmt = pattern_def_stmt; | |
6050 | stmt_info = pattern_def_stmt_info; | |
6051 | } | |
6052 | else | |
6053 | { | |
e3a19533 | 6054 | pattern_def_si = gsi_none (); |
18937389 | 6055 | transform_pattern_stmt = false; |
6056 | } | |
6057 | } | |
6058 | else | |
6059 | transform_pattern_stmt = false; | |
45eea33f | 6060 | } |
6061 | ||
d09768a4 | 6062 | if (STMT_VINFO_VECTYPE (stmt_info)) |
6063 | { | |
6064 | unsigned int nunits | |
6065 | = (unsigned int) | |
6066 | TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info)); | |
6067 | if (!STMT_SLP_TYPE (stmt_info) | |
6068 | && nunits != (unsigned int) vectorization_factor | |
6069 | && dump_enabled_p ()) | |
6070 | /* For SLP, VF is set according to the unrolling factor, not |
6071 | the vector size, hence this message is not valid for SLP. */
6072 | dump_printf_loc (MSG_NOTE, vect_location, "multiple-types.\n"); | |
6073 | } | |
fb85abff | 6074 | |
6075 | /* SLP. Schedule all the SLP instances when the first SLP stmt is | |
6076 | reached. */ | |
6077 | if (STMT_SLP_TYPE (stmt_info)) | |
6078 | { | |
6079 | if (!slp_scheduled) | |
6080 | { | |
6081 | slp_scheduled = true; | |
6082 | ||
6d8fb6cf | 6083 | if (dump_enabled_p ()) |
7bd765d4 | 6084 | dump_printf_loc (MSG_NOTE, vect_location, |
78bb46f5 | 6085 | "=== scheduling SLP instances ===\n"); |
fb85abff | 6086 | |
37545e54 | 6087 | vect_schedule_slp (loop_vinfo, NULL); |
fb85abff | 6088 | } |
6089 | ||
6090 | /* Hybrid SLP stmts must be vectorized in addition to SLP. */ | |
1065dd4e | 6091 | if (!vinfo_for_stmt (stmt) || PURE_SLP_STMT (stmt_info)) |
fb85abff | 6092 | { |
18937389 | 6093 | if (!transform_pattern_stmt && gsi_end_p (pattern_def_si)) |
6094 | { | |
6095 | pattern_def_seq = NULL; | |
6096 | gsi_next (&si); | |
6097 | } | |
6098 | continue; | |
fb85abff | 6099 | } |
6100 | } | |
48e1416a | 6101 | |
fb85abff | 6102 | /* -------- vectorize statement ------------ */ |
6d8fb6cf | 6103 | if (dump_enabled_p ()) |
78bb46f5 | 6104 | dump_printf_loc (MSG_NOTE, vect_location, "transform statement.\n"); |
fb85abff | 6105 | |
ee612634 | 6106 | grouped_store = false; |
6107 | is_store = vect_transform_stmt (stmt, &si, &grouped_store, NULL, NULL); | |
fb85abff | 6108 | if (is_store) |
6109 | { | |
ee612634 | 6110 | if (STMT_VINFO_GROUPED_ACCESS (stmt_info)) |
fb85abff | 6111 | { |
6112 | /* Interleaving. If IS_STORE is TRUE, the vectorization of the | |
6113 | interleaving chain was completed - free all the stores in | |
6114 | the chain. */ | |
3b515af5 | 6115 | gsi_next (&si); |
21009880 | 6116 | vect_remove_stores (GROUP_FIRST_ELEMENT (stmt_info)); |
fb85abff | 6117 | } |
6118 | else | |
6119 | { | |
6120 | /* Free the attached stmt_vec_info and remove the stmt. */ | |
bc8a8451 | 6121 | gimple store = gsi_stmt (si); |
6122 | free_stmt_vec_info (store); | |
6123 | unlink_stmt_vdef (store); | |
fb85abff | 6124 | gsi_remove (&si, true); |
bc8a8451 | 6125 | release_defs (store); |
fb85abff | 6126 | } |
8bf58742 | 6127 | |
512cbd67 | 6128 | /* Stores can only appear at the end of pattern statements. */ |
6129 | gcc_assert (!transform_pattern_stmt); | |
6130 | pattern_def_seq = NULL; | |
6131 | } | |
6132 | else if (!transform_pattern_stmt && gsi_end_p (pattern_def_si)) | |
18937389 | 6133 | { |
6134 | pattern_def_seq = NULL; | |
6135 | gsi_next (&si); | |
6136 | } | |
fb85abff | 6137 | } /* stmts in BB */ |
6138 | } /* BBs in loop */ | |
6139 | ||
6140 | slpeel_make_loop_iterate_ntimes (loop, ratio); | |
6141 | ||
d3f1934c | 6142 | /* Reduce loop iterations by the vectorization factor. */ |
f9d4b7f4 | 6143 | scale_loop_profile (loop, GCOV_COMPUTE_SCALE (1, vectorization_factor), |
d3f1934c | 6144 | expected_iterations / vectorization_factor); |
6145 | loop->nb_iterations_upper_bound | |
796b6678 | 6146 | = wi::udiv_floor (loop->nb_iterations_upper_bound, vectorization_factor); |
d3f1934c | 6147 | if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) |
796b6678 | 6148 | && loop->nb_iterations_upper_bound != 0) |
e913b5cd | 6149 | loop->nb_iterations_upper_bound = loop->nb_iterations_upper_bound - 1; |
d3f1934c | 6150 | if (loop->any_estimate) |
6151 | { | |
6152 | loop->nb_iterations_estimate | |
796b6678 | 6153 | = wi::udiv_floor (loop->nb_iterations_estimate, vectorization_factor); |
d3f1934c | 6154 | if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) |
796b6678 | 6155 | && loop->nb_iterations_estimate != 0) |
e913b5cd | 6156 | loop->nb_iterations_estimate = loop->nb_iterations_estimate - 1; |
d3f1934c | 6157 | } |
6158 | ||
6d8fb6cf | 6159 | if (dump_enabled_p ()) |
b055bc88 | 6160 | { |
a21425b5 | 6161 | dump_printf_loc (MSG_NOTE, vect_location, |
b055bc88 | 6162 | "LOOP VECTORIZED\n"); |
6163 | if (loop->inner) | |
6164 | dump_printf_loc (MSG_NOTE, vect_location, | |
6165 | "OUTER LOOP VECTORIZED\n"); | |
78bb46f5 | 6166 | dump_printf (MSG_NOTE, "\n"); |
b055bc88 | 6167 | } |
fb85abff | 6168 | } |