/* Statement Analysis and Transformation for Vectorization
   Copyright (C) 2003-2017 Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>
   and Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

22 | #include "config.h" | |
23 | #include "system.h" | |
24 | #include "coretypes.h" | |
c7131fb2 | 25 | #include "backend.h" |
957060b5 AM |
26 | #include "target.h" |
27 | #include "rtl.h" | |
ebfd146a | 28 | #include "tree.h" |
c7131fb2 | 29 | #include "gimple.h" |
c7131fb2 | 30 | #include "ssa.h" |
957060b5 AM |
31 | #include "optabs-tree.h" |
32 | #include "insn-config.h" | |
33 | #include "recog.h" /* FIXME: for insn_data */ | |
34 | #include "cgraph.h" | |
957060b5 | 35 | #include "dumpfile.h" |
c7131fb2 | 36 | #include "alias.h" |
40e23961 | 37 | #include "fold-const.h" |
d8a2d370 | 38 | #include "stor-layout.h" |
2fb9a547 | 39 | #include "tree-eh.h" |
45b0be94 | 40 | #include "gimplify.h" |
5be5c238 | 41 | #include "gimple-iterator.h" |
18f429e2 | 42 | #include "gimplify-me.h" |
442b4905 | 43 | #include "tree-cfg.h" |
e28030cf | 44 | #include "tree-ssa-loop-manip.h" |
ebfd146a | 45 | #include "cfgloop.h" |
0136f8f0 AH |
46 | #include "tree-ssa-loop.h" |
47 | #include "tree-scalar-evolution.h" | |
ebfd146a | 48 | #include "tree-vectorizer.h" |
9b2b7279 | 49 | #include "builtins.h" |
70439f0d | 50 | #include "internal-fn.h" |
ebfd146a | 51 | |
7ee2468b SB |
52 | /* For lang_hooks.types.type_for_mode. */ |
53 | #include "langhooks.h" | |
ebfd146a | 54 | |
/* Says whether a statement is a load, a store of a vectorized statement
   result, or a store of an invariant value.  */
enum vec_load_store_type {
  VLS_LOAD,
  VLS_STORE,
  VLS_STORE_INVARIANT
};

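/* For example, "... = a[i]" in the loop body is a VLS_LOAD,
   "a[i] = b[i] + 1" is a VLS_STORE of a vectorized result, and
   "a[i] = c" with loop-invariant C is a VLS_STORE_INVARIANT.  */
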
/* Return the vectorized type for the given statement.  */

tree
stmt_vectype (struct _stmt_vec_info *stmt_info)
{
  return STMT_VINFO_VECTYPE (stmt_info);
}

/* Return TRUE iff the given statement is in an inner loop relative to
   the loop being vectorized.  */
bool
stmt_in_inner_loop_p (struct _stmt_vec_info *stmt_info)
{
  gimple *stmt = STMT_VINFO_STMT (stmt_info);
  basic_block bb = gimple_bb (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop* loop;

  if (!loop_vinfo)
    return false;

  loop = LOOP_VINFO_LOOP (loop_vinfo);

  return (bb->loop_father == loop->inner);
}

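/* To illustrate stmt_in_inner_loop_p: when vectorizing the outer loop of

     for (i ...)          <-- LOOP_VINFO_LOOP (loop_vinfo)
       for (j ...)        <-- loop->inner
         a[i][j] = ...;   <-- in the inner loop

   only statements whose basic block belongs directly to the j-loop are
   considered "in an inner loop"; statements in the outer loop body are
   not.  */
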
/* Record the cost of a statement, either by directly informing the
   target model or by saving it in a vector for later processing.
   Return a preliminary estimate of the statement's cost.  */

unsigned
record_stmt_cost (stmt_vector_for_cost *body_cost_vec, int count,
		  enum vect_cost_for_stmt kind, stmt_vec_info stmt_info,
		  int misalign, enum vect_cost_model_location where)
{
  if (body_cost_vec)
    {
      tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
      stmt_info_for_cost si = { count, kind,
				stmt_info ? STMT_VINFO_STMT (stmt_info) : NULL,
				misalign };
      body_cost_vec->safe_push (si);
      return (unsigned)
	(builtin_vectorization_cost (kind, vectype, misalign) * count);
    }
  else
    return add_stmt_cost (stmt_info->vinfo->target_cost_data,
			  count, kind, stmt_info, misalign, where);
}

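/* Note the two modes of record_stmt_cost above: when a BODY_COST_VEC is
   supplied the cost entry is merely queued (returning the generic
   builtin_vectorization_cost estimate) so that the whole body can be
   handed to the target later, whereas without one the cost is fed
   straight to the target's cost model via add_stmt_cost.  */
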
/* Return a variable of type ELEM_TYPE[NELEMS].  */

static tree
create_vector_array (tree elem_type, unsigned HOST_WIDE_INT nelems)
{
  return create_tmp_var (build_array_type_nelts (elem_type, nelems),
			 "vect_array");
}

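/* For instance, with a 4-element vector as ELEM_TYPE and NELEMS == 2,
   this yields a "vect_array" temporary whose type is a 2-element array
   of such vectors; read_vector_array and write_vector_array below
   access its individual vector elements.  */
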
/* ARRAY is an array of vectors created by create_vector_array.
   Return an SSA_NAME for the vector in index N.  The reference
   is part of the vectorization of STMT and the vector is associated
   with scalar destination SCALAR_DEST.  */

static tree
read_vector_array (gimple *stmt, gimple_stmt_iterator *gsi, tree scalar_dest,
		   tree array, unsigned HOST_WIDE_INT n)
{
  tree vect_type, vect, vect_name, array_ref;
  gimple *new_stmt;

  gcc_assert (TREE_CODE (TREE_TYPE (array)) == ARRAY_TYPE);
  vect_type = TREE_TYPE (TREE_TYPE (array));
  vect = vect_create_destination_var (scalar_dest, vect_type);
  array_ref = build4 (ARRAY_REF, vect_type, array,
		      build_int_cst (size_type_node, n),
		      NULL_TREE, NULL_TREE);

  new_stmt = gimple_build_assign (vect, array_ref);
  vect_name = make_ssa_name (vect, new_stmt);
  gimple_assign_set_lhs (new_stmt, vect_name);
  vect_finish_stmt_generation (stmt, new_stmt, gsi);

  return vect_name;
}

/* ARRAY is an array of vectors created by create_vector_array.
   Emit code to store SSA_NAME VECT in index N of the array.
   The store is part of the vectorization of STMT.  */

static void
write_vector_array (gimple *stmt, gimple_stmt_iterator *gsi, tree vect,
		    tree array, unsigned HOST_WIDE_INT n)
{
  tree array_ref;
  gimple *new_stmt;

  array_ref = build4 (ARRAY_REF, TREE_TYPE (vect), array,
		      build_int_cst (size_type_node, n),
		      NULL_TREE, NULL_TREE);

  new_stmt = gimple_build_assign (array_ref, vect);
  vect_finish_stmt_generation (stmt, new_stmt, gsi);
}

/* PTR is a pointer to an array of type TYPE.  Return a representation
   of *PTR.  The memory reference replaces those in FIRST_DR
   (and its group).  */

static tree
create_array_ref (tree type, tree ptr, tree alias_ptr_type)
{
  tree mem_ref;

  mem_ref = build2 (MEM_REF, type, ptr, build_int_cst (alias_ptr_type, 0));
  /* Arrays have the same alignment as their type.  */
  set_ptr_info_alignment (get_ptr_info (ptr), TYPE_ALIGN_UNIT (type), 0);
  return mem_ref;
}

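/* Note that the zero-offset MEM_REF built above takes its alias set
   from ALIAS_PTR_TYPE; this is what lets the single new reference stand
   in for the original references in FIRST_DR's group without losing
   alias information.  */
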
/* Utility functions used by vect_mark_stmts_to_be_vectorized.  */

/* Function vect_mark_relevant.

   Mark STMT as "relevant for vectorization" and add it to WORKLIST.  */

static void
vect_mark_relevant (vec<gimple *> *worklist, gimple *stmt,
		    enum vect_relevant relevant, bool live_p)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  enum vect_relevant save_relevant = STMT_VINFO_RELEVANT (stmt_info);
  bool save_live_p = STMT_VINFO_LIVE_P (stmt_info);
  gimple *pattern_stmt;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "mark relevant %d, live %d: ", relevant, live_p);
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
    }

  /* If this stmt is an original stmt in a pattern, we might need to mark its
     related pattern stmt instead of the original stmt.  However, such stmts
     may have their own uses that are not in any pattern, in such cases the
     stmt itself should be marked.  */
  if (STMT_VINFO_IN_PATTERN_P (stmt_info))
    {
      /* This is the last stmt in a sequence that was detected as a
	 pattern that can potentially be vectorized.  Don't mark the stmt
	 as relevant/live because it's not going to be vectorized.
	 Instead mark the pattern-stmt that replaces it.  */

      pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);

      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "last stmt in pattern. don't mark"
			 " relevant/live.\n");
      stmt_info = vinfo_for_stmt (pattern_stmt);
      gcc_assert (STMT_VINFO_RELATED_STMT (stmt_info) == stmt);
      save_relevant = STMT_VINFO_RELEVANT (stmt_info);
      save_live_p = STMT_VINFO_LIVE_P (stmt_info);
      stmt = pattern_stmt;
    }

  STMT_VINFO_LIVE_P (stmt_info) |= live_p;
  if (relevant > STMT_VINFO_RELEVANT (stmt_info))
    STMT_VINFO_RELEVANT (stmt_info) = relevant;

  if (STMT_VINFO_RELEVANT (stmt_info) == save_relevant
      && STMT_VINFO_LIVE_P (stmt_info) == save_live_p)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "already marked relevant/live.\n");
      return;
    }

  worklist->safe_push (stmt);
}

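/* Note that vect_mark_relevant relies on the values of enum
   vect_relevant being ordered from least to most relevant: re-marking a
   stmt can only raise STMT_VINFO_RELEVANT, and the stmt is re-queued on
   the worklist only when the relevant/live marking actually changed.  */
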
/* Function is_simple_and_all_uses_invariant

   Return true if STMT is simple and all uses of it are invariant.  */

bool
is_simple_and_all_uses_invariant (gimple *stmt, loop_vec_info loop_vinfo)
{
  tree op;
  gimple *def_stmt;
  ssa_op_iter iter;

  if (!is_gimple_assign (stmt))
    return false;

  FOR_EACH_SSA_TREE_OPERAND (op, stmt, iter, SSA_OP_USE)
    {
      enum vect_def_type dt = vect_uninitialized_def;

      if (!vect_is_simple_use (op, loop_vinfo, &def_stmt, &dt))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "use not simple.\n");
	  return false;
	}

      if (dt != vect_external_def && dt != vect_constant_def)
	return false;
    }
  return true;
}

/* Function vect_stmt_relevant_p.

   Return true if STMT in loop that is represented by LOOP_VINFO is
   "relevant for vectorization".

   A stmt is considered "relevant for vectorization" if:
   - it has uses outside the loop.
   - it has vdefs (it alters memory).
   - it is a control stmt in the loop (except for the exit condition).

   CHECKME: what other side effects would the vectorizer allow?  */

static bool
vect_stmt_relevant_p (gimple *stmt, loop_vec_info loop_vinfo,
		      enum vect_relevant *relevant, bool *live_p)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  ssa_op_iter op_iter;
  imm_use_iterator imm_iter;
  use_operand_p use_p;
  def_operand_p def_p;

  *relevant = vect_unused_in_scope;
  *live_p = false;

  /* cond stmt other than loop exit cond.  */
  if (is_ctrl_stmt (stmt)
      && STMT_VINFO_TYPE (vinfo_for_stmt (stmt))
	 != loop_exit_ctrl_vec_info_type)
    *relevant = vect_used_in_scope;

  /* changing memory.  */
  if (gimple_code (stmt) != GIMPLE_PHI)
    if (gimple_vdef (stmt)
	&& !gimple_clobber_p (stmt))
      {
	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vec_stmt_relevant_p: stmt has vdefs.\n");
	*relevant = vect_used_in_scope;
      }

  /* uses outside the loop.  */
  FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
    {
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p))
	{
	  basic_block bb = gimple_bb (USE_STMT (use_p));
	  if (!flow_bb_inside_loop_p (loop, bb))
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_NOTE, vect_location,
				 "vec_stmt_relevant_p: used out of loop.\n");

	      if (is_gimple_debug (USE_STMT (use_p)))
		continue;

	      /* We expect all such uses to be in the loop exit phis
		 (because of loop closed form).  */
	      gcc_assert (gimple_code (USE_STMT (use_p)) == GIMPLE_PHI);
	      gcc_assert (bb == single_exit (loop)->dest);

	      *live_p = true;
	    }
	}
    }

  if (*live_p && *relevant == vect_unused_in_scope
      && !is_simple_and_all_uses_invariant (stmt, loop_vinfo))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "vec_stmt_relevant_p: stmt live but not relevant.\n");
      *relevant = vect_used_only_live;
    }

  return (*live_p || *relevant);
}

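/* As an example of the tests in vect_stmt_relevant_p: in

     for (i = 0; i < n; i++)
       {
	 a[i] = b[i] + 1;   <-- has a vdef: vect_used_in_scope
	 s = a[i];          <-- s used after the loop: live
       }

   the store is relevant because it changes memory, while a value whose
   only interesting property is a use in a loop-exit phi is just marked
   live (and, unless it is invariant, vect_used_only_live).  */
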
/* Function exist_non_indexing_operands_for_use_p

   USE is one of the uses attached to STMT.  Check if USE is
   used in STMT for anything other than indexing an array.  */

static bool
exist_non_indexing_operands_for_use_p (tree use, gimple *stmt)
{
  tree operand;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  /* USE corresponds to some operand in STMT.  If there is no data
     reference in STMT, then any operand that corresponds to USE
     is not indexing an array.  */
  if (!STMT_VINFO_DATA_REF (stmt_info))
    return true;

  /* STMT has a data_ref.  FORNOW this means that it is of one of
     the following forms:
     -1- ARRAY_REF = var
     -2- var = ARRAY_REF
     (This should have been verified in analyze_data_refs).

     'var' in the second case corresponds to a def, not a use,
     so USE cannot correspond to any operands that are not used
     for array indexing.

     Therefore, all we need to check is if STMT falls into the
     first case, and whether var corresponds to USE.  */

  if (!gimple_assign_copy_p (stmt))
    {
      if (is_gimple_call (stmt)
	  && gimple_call_internal_p (stmt))
	switch (gimple_call_internal_fn (stmt))
	  {
	  case IFN_MASK_STORE:
	    operand = gimple_call_arg (stmt, 3);
	    if (operand == use)
	      return true;
	    /* FALLTHRU */
	  case IFN_MASK_LOAD:
	    operand = gimple_call_arg (stmt, 2);
	    if (operand == use)
	      return true;
	    break;
	  default:
	    break;
	  }
      return false;
    }

  if (TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME)
    return false;
  operand = gimple_assign_rhs1 (stmt);
  if (TREE_CODE (operand) != SSA_NAME)
    return false;

  if (operand == use)
    return true;

  return false;
}

/*
   Function process_use.

   Inputs:
   - a USE in STMT in a loop represented by LOOP_VINFO
   - RELEVANT - enum value to be set in the STMT_VINFO of the stmt
     that defined USE.  This is done by calling mark_relevant and passing it
     the WORKLIST (to add DEF_STMT to the WORKLIST in case it is relevant).
   - FORCE is true if exist_non_indexing_operands_for_use_p check shouldn't
     be performed.

   Outputs:
   Generally, LIVE_P and RELEVANT are used to define the liveness and
   relevance info of the DEF_STMT of this USE:
       STMT_VINFO_LIVE_P (DEF_STMT_info) <-- live_p
       STMT_VINFO_RELEVANT (DEF_STMT_info) <-- relevant
   Exceptions:
   - case 1: If USE is used only for address computations (e.g. array indexing),
     which does not need to be directly vectorized, then the liveness/relevance
     of the respective DEF_STMT is left unchanged.
   - case 2: If STMT is a reduction phi and DEF_STMT is a reduction stmt, we
     skip DEF_STMT because it has already been processed.
   - case 3: If DEF_STMT and STMT are in different nests, then "relevant" will
     be modified accordingly.

   Return true if everything is as expected.  Return false otherwise.  */

static bool
process_use (gimple *stmt, tree use, loop_vec_info loop_vinfo,
	     enum vect_relevant relevant, vec<gimple *> *worklist,
	     bool force)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  stmt_vec_info dstmt_vinfo;
  basic_block bb, def_bb;
  gimple *def_stmt;
  enum vect_def_type dt;

  /* case 1: we are only interested in uses that need to be vectorized.  Uses
     that are used for address computation are not considered relevant.  */
  if (!force && !exist_non_indexing_operands_for_use_p (use, stmt))
    return true;

  if (!vect_is_simple_use (use, loop_vinfo, &def_stmt, &dt))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not vectorized: unsupported use in stmt.\n");
      return false;
    }

  if (!def_stmt || gimple_nop_p (def_stmt))
    return true;

  def_bb = gimple_bb (def_stmt);
  if (!flow_bb_inside_loop_p (loop, def_bb))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location, "def_stmt is out of loop.\n");
      return true;
    }

  /* case 2: A reduction phi (STMT) defined by a reduction stmt (DEF_STMT).
     DEF_STMT must have already been processed, because this should be the
     only way that STMT, which is a reduction-phi, was put in the worklist,
     as there should be no other uses for DEF_STMT in the loop.  So we just
     check that everything is as expected, and we are done.  */
  dstmt_vinfo = vinfo_for_stmt (def_stmt);
  bb = gimple_bb (stmt);
  if (gimple_code (stmt) == GIMPLE_PHI
      && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
      && gimple_code (def_stmt) != GIMPLE_PHI
      && STMT_VINFO_DEF_TYPE (dstmt_vinfo) == vect_reduction_def
      && bb->loop_father == def_bb->loop_father)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "reduc-stmt defining reduc-phi in the same nest.\n");
      if (STMT_VINFO_IN_PATTERN_P (dstmt_vinfo))
	dstmt_vinfo = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (dstmt_vinfo));
      gcc_assert (STMT_VINFO_RELEVANT (dstmt_vinfo) < vect_used_by_reduction);
      gcc_assert (STMT_VINFO_LIVE_P (dstmt_vinfo)
		  || STMT_VINFO_RELEVANT (dstmt_vinfo) > vect_unused_in_scope);
      return true;
    }

  /* case 3a: outer-loop stmt defining an inner-loop stmt:
	outer-loop-header-bb:
		d = def_stmt
	inner-loop:
		stmt # use (d)
	outer-loop-tail-bb:
		...  */
  if (flow_loop_nested_p (def_bb->loop_father, bb->loop_father))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "outer-loop def-stmt defining inner-loop stmt.\n");

      switch (relevant)
	{
	case vect_unused_in_scope:
	  relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_nested_cycle) ?
		      vect_used_in_scope : vect_unused_in_scope;
	  break;

	case vect_used_in_outer_by_reduction:
	  gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
	  relevant = vect_used_by_reduction;
	  break;

	case vect_used_in_outer:
	  gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
	  relevant = vect_used_in_scope;
	  break;

	case vect_used_in_scope:
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  /* case 3b: inner-loop stmt defining an outer-loop stmt:
	outer-loop-header-bb:
		...
	inner-loop:
		d = def_stmt
	outer-loop-tail-bb (or outer-loop-exit-bb in double reduction):
		stmt # use (d)  */
  else if (flow_loop_nested_p (bb->loop_father, def_bb->loop_father))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "inner-loop def-stmt defining outer-loop stmt.\n");

      switch (relevant)
	{
	case vect_unused_in_scope:
	  relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
		      || STMT_VINFO_DEF_TYPE (stmt_vinfo)
			 == vect_double_reduction_def) ?
		      vect_used_in_outer_by_reduction : vect_unused_in_scope;
	  break;

	case vect_used_by_reduction:
	case vect_used_only_live:
	  relevant = vect_used_in_outer_by_reduction;
	  break;

	case vect_used_in_scope:
	  relevant = vect_used_in_outer;
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  /* We are also not interested in uses on loop PHI backedges that are
     inductions.  Otherwise we'll needlessly vectorize the IV increment
     and cause hybrid SLP for SLP inductions.  */
  else if (gimple_code (stmt) == GIMPLE_PHI
	   && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_induction_def
	   && (PHI_ARG_DEF_FROM_EDGE (stmt, loop_latch_edge (bb->loop_father))
	       == use))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "induction value on backedge.\n");
      return true;
    }

  vect_mark_relevant (worklist, def_stmt, relevant, false);
  return true;
}

/* Function vect_mark_stmts_to_be_vectorized.

   Not all stmts in the loop need to be vectorized.  For example:

     for i...
       for j...
   1.    T0 = i + j
   2.	 T1 = a[T0]

   3.    j = j + 1

   Stmts 1 and 3 do not need to be vectorized, because loop control and
   addressing of vectorized data-refs are handled differently.

   This pass detects such stmts.  */

bool
vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  unsigned int nbbs = loop->num_nodes;
  gimple_stmt_iterator si;
  gimple *stmt;
  unsigned int i;
  stmt_vec_info stmt_vinfo;
  basic_block bb;
  gimple *phi;
  bool live_p;
  enum vect_relevant relevant;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "=== vect_mark_stmts_to_be_vectorized ===\n");

  auto_vec<gimple *, 64> worklist;

  /* 1. Init worklist.  */
  for (i = 0; i < nbbs; i++)
    {
      bb = bbs[i];
      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
	{
	  phi = gsi_stmt (si);
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location, "init: phi relevant? ");
	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
	    }

	  if (vect_stmt_relevant_p (phi, loop_vinfo, &relevant, &live_p))
	    vect_mark_relevant (&worklist, phi, relevant, live_p);
	}
      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
	{
	  stmt = gsi_stmt (si);
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location, "init: stmt relevant? ");
	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
	    }

	  if (vect_stmt_relevant_p (stmt, loop_vinfo, &relevant, &live_p))
	    vect_mark_relevant (&worklist, stmt, relevant, live_p);
	}
    }

  /* 2. Process_worklist */
  while (worklist.length () > 0)
    {
      use_operand_p use_p;
      ssa_op_iter iter;

      stmt = worklist.pop ();
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_NOTE, vect_location, "worklist: examine stmt: ");
	  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
	}

      /* Examine the USEs of STMT.  For each USE, mark the stmt that defines it
	 (DEF_STMT) as relevant/irrelevant according to the relevance property
	 of STMT.  */
      stmt_vinfo = vinfo_for_stmt (stmt);
      relevant = STMT_VINFO_RELEVANT (stmt_vinfo);

      /* Generally, the relevance property of STMT (in STMT_VINFO_RELEVANT) is
	 propagated as is to the DEF_STMTs of its USEs.

	 One exception is when STMT has been identified as defining a reduction
	 variable; in this case we set the relevance to vect_used_by_reduction.
	 This is because we distinguish between two kinds of relevant stmts -
	 those that are used by a reduction computation, and those that are
	 (also) used by a regular computation.  This allows us later on to
	 identify stmts that are used solely by a reduction, and therefore the
	 order of the results that they produce does not have to be kept.  */

      switch (STMT_VINFO_DEF_TYPE (stmt_vinfo))
	{
	case vect_reduction_def:
	  gcc_assert (relevant != vect_unused_in_scope);
	  if (relevant != vect_unused_in_scope
	      && relevant != vect_used_in_scope
	      && relevant != vect_used_by_reduction
	      && relevant != vect_used_only_live)
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "unsupported use of reduction.\n");
	      return false;
	    }
	  break;

	case vect_nested_cycle:
	  if (relevant != vect_unused_in_scope
	      && relevant != vect_used_in_outer_by_reduction
	      && relevant != vect_used_in_outer)
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "unsupported use of nested cycle.\n");

	      return false;
	    }
	  break;

	case vect_double_reduction_def:
	  if (relevant != vect_unused_in_scope
	      && relevant != vect_used_by_reduction
	      && relevant != vect_used_only_live)
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "unsupported use of double reduction.\n");

	      return false;
	    }
	  break;

	default:
	  break;
	}

      if (is_pattern_stmt_p (stmt_vinfo))
	{
	  /* Pattern statements are not inserted into the code, so
	     FOR_EACH_PHI_OR_STMT_USE optimizes their operands out, and we
	     have to scan the RHS or function arguments instead.  */
	  if (is_gimple_assign (stmt))
	    {
	      enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
	      tree op = gimple_assign_rhs1 (stmt);

	      i = 1;
	      if (rhs_code == COND_EXPR && COMPARISON_CLASS_P (op))
		{
		  if (!process_use (stmt, TREE_OPERAND (op, 0), loop_vinfo,
				    relevant, &worklist, false)
		      || !process_use (stmt, TREE_OPERAND (op, 1), loop_vinfo,
				       relevant, &worklist, false))
		    return false;
		  i = 2;
		}
	      for (; i < gimple_num_ops (stmt); i++)
		{
		  op = gimple_op (stmt, i);
		  if (TREE_CODE (op) == SSA_NAME
		      && !process_use (stmt, op, loop_vinfo, relevant,
				       &worklist, false))
		    return false;
		}
	    }
	  else if (is_gimple_call (stmt))
	    {
	      for (i = 0; i < gimple_call_num_args (stmt); i++)
		{
		  tree arg = gimple_call_arg (stmt, i);
		  if (!process_use (stmt, arg, loop_vinfo, relevant,
				    &worklist, false))
		    return false;
		}
	    }
	}
      else
	FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
	  {
	    tree op = USE_FROM_PTR (use_p);
	    if (!process_use (stmt, op, loop_vinfo, relevant,
			      &worklist, false))
	      return false;
	  }

      if (STMT_VINFO_GATHER_SCATTER_P (stmt_vinfo))
	{
	  gather_scatter_info gs_info;
	  if (!vect_check_gather_scatter (stmt, loop_vinfo, &gs_info))
	    gcc_unreachable ();
	  if (!process_use (stmt, gs_info.offset, loop_vinfo, relevant,
			    &worklist, true))
	    return false;
	}
    } /* while worklist */

  return true;
}

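/* Termination of the worklist loop above is guaranteed because
   vect_mark_relevant only re-queues a stmt when its relevant/live
   marking actually changes, and each stmt's marking can only increase a
   bounded number of times.  */
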
/* Function vect_model_simple_cost.

   Models cost for simple operations, i.e. those that only emit ncopies of a
   single op.  Right now, this does not account for multiple insns that could
   be generated for the single vector op.  We will handle that shortly.  */

void
vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
			enum vect_def_type *dt,
			int ndts,
			stmt_vector_for_cost *prologue_cost_vec,
			stmt_vector_for_cost *body_cost_vec)
{
  int i;
  int inside_cost = 0, prologue_cost = 0;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  /* Cost the "broadcast" of a scalar operand into a vector operand.
     Use scalar_to_vec to cost the broadcast, as elsewhere in the vector
     cost model.  */
  for (i = 0; i < ndts; i++)
    if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
      prologue_cost += record_stmt_cost (prologue_cost_vec, 1, scalar_to_vec,
					 stmt_info, 0, vect_prologue);

  /* Pass the inside-of-loop statements to the target-specific cost model.  */
  inside_cost = record_stmt_cost (body_cost_vec, ncopies, vector_stmt,
				  stmt_info, 0, vect_body);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_model_simple_cost: inside_cost = %d, "
		     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}

/* Model cost for type demotion and promotion operations.  PWR is normally
   zero for single-step promotions and demotions.  It will be one if
   two-step promotion/demotion is required, and so on.  Each additional
   step doubles the number of instructions required.  */

static void
vect_model_promotion_demotion_cost (stmt_vec_info stmt_info,
				    enum vect_def_type *dt, int pwr)
{
  int i, tmp;
  int inside_cost = 0, prologue_cost = 0;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  void *target_cost_data;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  if (loop_vinfo)
    target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
  else
    target_cost_data = BB_VINFO_TARGET_COST_DATA (bb_vinfo);

  for (i = 0; i < pwr + 1; i++)
    {
      tmp = (STMT_VINFO_TYPE (stmt_info) == type_promotion_vec_info_type) ?
	(i + 1) : i;
      inside_cost += add_stmt_cost (target_cost_data, vect_pow2 (tmp),
				    vec_promote_demote, stmt_info, 0,
				    vect_body);
    }

  /* FORNOW: Assuming maximum 2 args per stmt.  */
  for (i = 0; i < 2; i++)
    if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
      prologue_cost += add_stmt_cost (target_cost_data, 1, vector_stmt,
				      stmt_info, 0, vect_prologue);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_model_promotion_demotion_cost: inside_cost = %d, "
		     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}

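/* A worked example of the loop above, given that vect_pow2 (N) computes
   2**N: a single-step demotion (PWR == 0) costs one vec_promote_demote
   stmt (vect_pow2 (0)), while a single-step promotion costs two
   (vect_pow2 (1)), since each promotion step doubles the number of
   result vectors.  */
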
/* Function vect_model_store_cost

   Models cost for stores.  In the case of grouped accesses, one access
   has the overhead of the grouped access attributed to it.  */

void
vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
		       vect_memory_access_type memory_access_type,
		       enum vect_def_type dt, slp_tree slp_node,
		       stmt_vector_for_cost *prologue_cost_vec,
		       stmt_vector_for_cost *body_cost_vec)
{
  unsigned int inside_cost = 0, prologue_cost = 0;
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
  gimple *first_stmt = STMT_VINFO_STMT (stmt_info);
  bool grouped_access_p = STMT_VINFO_GROUPED_ACCESS (stmt_info);

  if (dt == vect_constant_def || dt == vect_external_def)
    prologue_cost += record_stmt_cost (prologue_cost_vec, 1, scalar_to_vec,
				       stmt_info, 0, vect_prologue);

  /* Grouped stores update all elements in the group at once,
     so we want the DR for the first statement.  */
  if (!slp_node && grouped_access_p)
    {
      first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
      dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
    }

  /* True if we should include any once-per-group costs as well as
     the cost of the statement itself.  For SLP we only get called
     once per group anyhow.  */
  bool first_stmt_p = (first_stmt == STMT_VINFO_STMT (stmt_info));

  /* We assume that the cost of a single store-lanes instruction is
     equivalent to the cost of GROUP_SIZE separate stores.  If a grouped
     access is instead being provided by a permute-and-store operation,
     include the cost of the permutes.  */
  if (first_stmt_p
      && memory_access_type == VMAT_CONTIGUOUS_PERMUTE)
    {
      /* Uses high and low interleave or shuffle operations for each
	 needed permute.  */
      int group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
      int nstmts = ncopies * ceil_log2 (group_size) * group_size;
      inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm,
				      stmt_info, 0, vect_body);

      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "vect_model_store_cost: strided group_size = %d .\n",
			 group_size);
    }

  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  /* Costs of the stores.  */
  if (memory_access_type == VMAT_ELEMENTWISE
      || memory_access_type == VMAT_GATHER_SCATTER)
    /* N scalar stores plus extracting the elements.  */
    inside_cost += record_stmt_cost (body_cost_vec,
				     ncopies * TYPE_VECTOR_SUBPARTS (vectype),
				     scalar_store, stmt_info, 0, vect_body);
  else
    vect_get_store_cost (dr, ncopies, &inside_cost, body_cost_vec);

  if (memory_access_type == VMAT_ELEMENTWISE
      || memory_access_type == VMAT_STRIDED_SLP)
    inside_cost += record_stmt_cost (body_cost_vec,
				     ncopies * TYPE_VECTOR_SUBPARTS (vectype),
				     vec_to_scalar, stmt_info, 0, vect_body);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_model_store_cost: inside_cost = %d, "
		     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}

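/* A worked example of the interleaving cost in vect_model_store_cost:
   for a grouped store with GROUP_SIZE == 4 implemented by
   permute-and-store (VMAT_CONTIGUOUS_PERMUTE) and ncopies == 1, the
   formula above charges ncopies * ceil_log2 (4) * 4 == 8 vec_perm stmts
   on top of the stores themselves.  */
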
/* Calculate cost of DR's memory access.  */
void
vect_get_store_cost (struct data_reference *dr, int ncopies,
		     unsigned int *inside_cost,
		     stmt_vector_for_cost *body_cost_vec)
{
  int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
  gimple *stmt = DR_STMT (dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  switch (alignment_support_scheme)
    {
    case dr_aligned:
      {
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  vector_store, stmt_info, 0,
					  vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_store_cost: aligned.\n");
	break;
      }

    case dr_unaligned_supported:
      {
	/* Here, we assign an additional cost for the unaligned store.  */
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  unaligned_store, stmt_info,
					  DR_MISALIGNMENT (dr), vect_body);
	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_store_cost: unaligned supported by "
			   "hardware.\n");
	break;
      }

    case dr_unaligned_unsupported:
      {
	*inside_cost = VECT_MAX_COST;

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "vect_model_store_cost: unsupported access.\n");
	break;
      }

    default:
      gcc_unreachable ();
    }
}

/* Function vect_model_load_cost

   Models cost for loads.  In the case of grouped accesses, one access has
   the overhead of the grouped access attributed to it.  Since unaligned
   accesses are supported for loads, we also account for the costs of the
   access scheme chosen.  */

void
vect_model_load_cost (stmt_vec_info stmt_info, int ncopies,
		      vect_memory_access_type memory_access_type,
		      slp_tree slp_node,
		      stmt_vector_for_cost *prologue_cost_vec,
		      stmt_vector_for_cost *body_cost_vec)
{
  gimple *first_stmt = STMT_VINFO_STMT (stmt_info);
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
  unsigned int inside_cost = 0, prologue_cost = 0;
  bool grouped_access_p = STMT_VINFO_GROUPED_ACCESS (stmt_info);

  /* Grouped loads read all elements in the group at once,
     so we want the DR for the first statement.  */
  if (!slp_node && grouped_access_p)
    {
      first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
      dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
    }

  /* True if we should include any once-per-group costs as well as
     the cost of the statement itself.  For SLP we only get called
     once per group anyhow.  */
  bool first_stmt_p = (first_stmt == STMT_VINFO_STMT (stmt_info));

  /* We assume that the cost of a single load-lanes instruction is
     equivalent to the cost of GROUP_SIZE separate loads.  If a grouped
     access is instead being provided by a load-and-permute operation,
     include the cost of the permutes.  */
  if (first_stmt_p
      && memory_access_type == VMAT_CONTIGUOUS_PERMUTE)
    {
      /* Uses even and odd extract operations or shuffle operations
	 for each needed permute.  */
      int group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
      int nstmts = ncopies * ceil_log2 (group_size) * group_size;
      inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm,
				      stmt_info, 0, vect_body);

      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "vect_model_load_cost: strided group_size = %d .\n",
			 group_size);
    }

  /* The loads themselves.  */
  if (memory_access_type == VMAT_ELEMENTWISE
      || memory_access_type == VMAT_GATHER_SCATTER)
    {
      /* N scalar loads plus gathering them into a vector.  */
      tree vectype = STMT_VINFO_VECTYPE (stmt_info);
      inside_cost += record_stmt_cost (body_cost_vec,
				       ncopies * TYPE_VECTOR_SUBPARTS (vectype),
				       scalar_load, stmt_info, 0, vect_body);
    }
  else
    vect_get_load_cost (dr, ncopies, first_stmt_p,
			&inside_cost, &prologue_cost,
			prologue_cost_vec, body_cost_vec, true);
  if (memory_access_type == VMAT_ELEMENTWISE
      || memory_access_type == VMAT_STRIDED_SLP)
    inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_construct,
				     stmt_info, 0, vect_body);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_model_load_cost: inside_cost = %d, "
		     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}

/* Calculate cost of DR's memory access.  */
void
vect_get_load_cost (struct data_reference *dr, int ncopies,
		    bool add_realign_cost, unsigned int *inside_cost,
		    unsigned int *prologue_cost,
		    stmt_vector_for_cost *prologue_cost_vec,
		    stmt_vector_for_cost *body_cost_vec,
		    bool record_prologue_costs)
{
  int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
  gimple *stmt = DR_STMT (dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  switch (alignment_support_scheme)
    {
    case dr_aligned:
      {
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
					  stmt_info, 0, vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: aligned.\n");

	break;
      }
    case dr_unaligned_supported:
      {
	/* Here, we assign an additional cost for the unaligned load.  */
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  unaligned_load, stmt_info,
					  DR_MISALIGNMENT (dr), vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: unaligned supported by "
			   "hardware.\n");

	break;
      }
    case dr_explicit_realign:
      {
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies * 2,
					  vector_load, stmt_info, 0, vect_body);
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  vec_perm, stmt_info, 0, vect_body);

	/* FIXME: If the misalignment remains fixed across the iterations of
	   the containing loop, the following cost should be added to the
	   prologue costs.  */
	if (targetm.vectorize.builtin_mask_for_load)
	  *inside_cost += record_stmt_cost (body_cost_vec, 1, vector_stmt,
					    stmt_info, 0, vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: explicit realign\n");

	break;
      }
    case dr_explicit_realign_optimized:
      {
	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: unaligned software "
			   "pipelined.\n");

	/* Unaligned software pipeline has a load of an address, an initial
	   load, and possibly a mask operation to "prime" the loop.  However,
	   if this is an access in a group of loads, which provide grouped
	   access, then the above cost should only be considered for one
	   access in the group.  Inside the loop, there is a load op
	   and a realignment op.  */

	if (add_realign_cost && record_prologue_costs)
	  {
	    *prologue_cost += record_stmt_cost (prologue_cost_vec, 2,
						vector_stmt, stmt_info,
						0, vect_prologue);
	    if (targetm.vectorize.builtin_mask_for_load)
	      *prologue_cost += record_stmt_cost (prologue_cost_vec, 1,
						  vector_stmt, stmt_info,
						  0, vect_prologue);
	  }

	*inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
					  stmt_info, 0, vect_body);
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_perm,
					  stmt_info, 0, vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: explicit realign optimized"
			   "\n");

	break;
      }

    case dr_unaligned_unsupported:
      {
	*inside_cost = VECT_MAX_COST;

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "vect_model_load_cost: unsupported access.\n");
	break;
      }

    default:
      gcc_unreachable ();
    }
}

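/* Comparing the two realignment schemes costed above: dr_explicit_realign
   pays for two vector loads plus a permute on every copy inside the loop,
   while dr_explicit_realign_optimized hoists the address load and the
   optional mask computation into the prologue and keeps only one load
   and one realignment permute per copy in the body.  */
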
/* Insert the new stmt NEW_STMT at *GSI or at the appropriate place in
   the loop preheader for the vectorized stmt STMT.  */

static void
vect_init_vector_1 (gimple *stmt, gimple *new_stmt, gimple_stmt_iterator *gsi)
{
  if (gsi)
    vect_finish_stmt_generation (stmt, new_stmt, gsi);
  else
    {
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
      loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);

      if (loop_vinfo)
	{
	  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
	  basic_block new_bb;
	  edge pe;

	  if (nested_in_vect_loop_p (loop, stmt))
	    loop = loop->inner;

	  pe = loop_preheader_edge (loop);
	  new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
	  gcc_assert (!new_bb);
	}
      else
	{
	  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
	  basic_block bb;
	  gimple_stmt_iterator gsi_bb_start;

	  gcc_assert (bb_vinfo);
	  bb = BB_VINFO_BB (bb_vinfo);
	  gsi_bb_start = gsi_after_labels (bb);
	  gsi_insert_before (&gsi_bb_start, new_stmt, GSI_SAME_STMT);
	}
    }

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "created new init_stmt: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, new_stmt, 0);
    }
}

/* Function vect_init_vector.

   Insert a new stmt (INIT_STMT) that initializes a new variable of type
   TYPE with the value VAL.  If TYPE is a vector type and VAL does not have
   vector type a vector with all elements equal to VAL is created first.
   Place the initialization at BSI if it is not NULL.  Otherwise, place the
   initialization at the loop preheader.
   Return the DEF of INIT_STMT.
   It will be used in the vectorization of STMT.  */

tree
vect_init_vector (gimple *stmt, tree val, tree type, gimple_stmt_iterator *gsi)
{
  gimple *init_stmt;
  tree new_temp;

  /* We abuse this function to push something to an SSA name with
     initial value 'val'.  */
  if (! useless_type_conversion_p (type, TREE_TYPE (val)))
    {
      gcc_assert (TREE_CODE (type) == VECTOR_TYPE);
      if (! types_compatible_p (TREE_TYPE (type), TREE_TYPE (val)))
	{
	  /* Scalar boolean value should be transformed into
	     all zeros or all ones value before building a vector.  */
	  if (VECTOR_BOOLEAN_TYPE_P (type))
	    {
	      tree true_val = build_all_ones_cst (TREE_TYPE (type));
	      tree false_val = build_zero_cst (TREE_TYPE (type));

	      if (CONSTANT_CLASS_P (val))
		val = integer_zerop (val) ? false_val : true_val;
	      else
		{
		  new_temp = make_ssa_name (TREE_TYPE (type));
		  init_stmt = gimple_build_assign (new_temp, COND_EXPR,
						   val, true_val, false_val);
		  vect_init_vector_1 (stmt, init_stmt, gsi);
		  val = new_temp;
		}
	    }
	  else if (CONSTANT_CLASS_P (val))
	    val = fold_convert (TREE_TYPE (type), val);
	  else
	    {
	      new_temp = make_ssa_name (TREE_TYPE (type));
	      if (! INTEGRAL_TYPE_P (TREE_TYPE (val)))
		init_stmt = gimple_build_assign (new_temp,
						 fold_build1 (VIEW_CONVERT_EXPR,
							      TREE_TYPE (type),
							      val));
	      else
		init_stmt = gimple_build_assign (new_temp, NOP_EXPR, val);
	      vect_init_vector_1 (stmt, init_stmt, gsi);
	      val = new_temp;
	    }
	}
      val = build_vector_from_val (type, val);
    }

  new_temp = vect_get_new_ssa_name (type, vect_simple_var, "cst_");
  init_stmt = gimple_build_assign (new_temp, val);
  vect_init_vector_1 (stmt, init_stmt, gsi);
  return new_temp;
}

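/* For example, a call like (assuming a four-element integer vector type
   VECTYPE and a NULL GSI):

     tree def = vect_init_vector (stmt, build_int_cst (integer_type_node, 5),
				  vectype, NULL);

   first builds the uniform vector { 5, 5, 5, 5 } via build_vector_from_val
   and then emits "cst_N = { 5, 5, 5, 5 };" on the loop preheader edge,
   returning the new SSA name.  */
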
c83a894c | 1327 | /* Function vect_get_vec_def_for_operand_1. |
a70d6342 | 1328 | |
c83a894c AH |
1329 | For a defining stmt DEF_STMT of a scalar stmt, return a vector def with type |
1330 | DT that will be used in the vectorized stmt. */ | |
ebfd146a IR |
1331 | |
1332 | tree | |
c83a894c | 1333 | vect_get_vec_def_for_operand_1 (gimple *def_stmt, enum vect_def_type dt) |
ebfd146a IR |
1334 | { |
1335 | tree vec_oprnd; | |
355fe088 | 1336 | gimple *vec_stmt; |
ebfd146a | 1337 | stmt_vec_info def_stmt_info = NULL; |
ebfd146a IR |
1338 | |
1339 | switch (dt) | |
1340 | { | |
81c40241 | 1341 | /* Operand is a constant or a loop invariant. */
ebfd146a | 1342 | case vect_constant_def: |
81c40241 | 1343 | case vect_external_def: |
c83a894c AH |
1344 | /* Code should use vect_get_vec_def_for_operand. */ |
1345 | gcc_unreachable (); | |
ebfd146a | 1346 | |
81c40241 | 1347 | /* Operand is defined inside the loop. */
8644a673 | 1348 | case vect_internal_def: |
ebfd146a | 1349 | { |
ebfd146a IR |
1350 | /* Get the def from the vectorized stmt. */ |
1351 | def_stmt_info = vinfo_for_stmt (def_stmt); | |
83197f37 | 1352 | |
ebfd146a | 1353 | vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info); |
83197f37 IR |
1354 | /* Get vectorized pattern statement. */ |
1355 | if (!vec_stmt | |
1356 | && STMT_VINFO_IN_PATTERN_P (def_stmt_info) | |
1357 | && !STMT_VINFO_RELEVANT (def_stmt_info)) | |
1358 | vec_stmt = STMT_VINFO_VEC_STMT (vinfo_for_stmt ( | |
1359 | STMT_VINFO_RELATED_STMT (def_stmt_info))); | |
ebfd146a IR |
1360 | gcc_assert (vec_stmt); |
1361 | if (gimple_code (vec_stmt) == GIMPLE_PHI) | |
1362 | vec_oprnd = PHI_RESULT (vec_stmt); | |
1363 | else if (is_gimple_call (vec_stmt)) | |
1364 | vec_oprnd = gimple_call_lhs (vec_stmt); | |
1365 | else | |
1366 | vec_oprnd = gimple_assign_lhs (vec_stmt); | |
1367 | return vec_oprnd; | |
1368 | } | |
1369 | ||
c78e3652 | 1370 | /* Operand is defined by a loop header PHI. */
ebfd146a | 1371 | case vect_reduction_def: |
06066f92 | 1372 | case vect_double_reduction_def: |
7c5222ff | 1373 | case vect_nested_cycle: |
ebfd146a IR |
1374 | case vect_induction_def: |
1375 | { | |
1376 | gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI); | |
1377 | ||
1378 | /* Get the def from the vectorized stmt. */ | |
1379 | def_stmt_info = vinfo_for_stmt (def_stmt); | |
1380 | vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info); | |
6dbbece6 RG |
1381 | if (gimple_code (vec_stmt) == GIMPLE_PHI) |
1382 | vec_oprnd = PHI_RESULT (vec_stmt); | |
1383 | else | |
1384 | vec_oprnd = gimple_get_lhs (vec_stmt); | |
ebfd146a IR |
1385 | return vec_oprnd; |
1386 | } | |
1387 | ||
1388 | default: | |
1389 | gcc_unreachable (); | |
1390 | } | |
1391 | } | |
1392 | ||
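/* Example (illustrative, names hypothetical): if the scalar def stmt
   S1: x_3 = a_1 + b_2 was vectorized as VS1: vx_7 = va_5 + vb_6, then
   STMT_VINFO_VEC_STMT of S1's stmt_info is VS1 and this function
   returns vx_7.  For a reduction or induction the def comes from the
   vectorized loop-header PHI instead.  */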
1393 | ||
c83a894c AH |
1394 | /* Function vect_get_vec_def_for_operand. |
1395 | ||
1396 | OP is an operand in STMT. This function returns a (vector) def that will be | |
1397 | used in the vectorized stmt for STMT. | |
1398 | ||
1399 | In the case that OP is an SSA_NAME which is defined in the loop, then | |
1400 | STMT_VINFO_VEC_STMT of the defining stmt holds the relevant def. | |
1401 | ||
1402 | In case OP is an invariant or constant, a new stmt that creates a vector def | |
1403 | needs to be introduced. VECTYPE may be used to specify a required type for | |
1404 | the vector invariant. */
1405 | ||
1406 | tree | |
1407 | vect_get_vec_def_for_operand (tree op, gimple *stmt, tree vectype) | |
1408 | { | |
1409 | gimple *def_stmt; | |
1410 | enum vect_def_type dt; | |
1411 | bool is_simple_use; | |
1412 | stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt); | |
1413 | loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo); | |
1414 | ||
1415 | if (dump_enabled_p ()) | |
1416 | { | |
1417 | dump_printf_loc (MSG_NOTE, vect_location, | |
1418 | "vect_get_vec_def_for_operand: "); | |
1419 | dump_generic_expr (MSG_NOTE, TDF_SLIM, op); | |
1420 | dump_printf (MSG_NOTE, "\n"); | |
1421 | } | |
1422 | ||
1423 | is_simple_use = vect_is_simple_use (op, loop_vinfo, &def_stmt, &dt); | |
1424 | gcc_assert (is_simple_use); | |
1425 | if (def_stmt && dump_enabled_p ()) | |
1426 | { | |
1427 | dump_printf_loc (MSG_NOTE, vect_location, " def_stmt = "); | |
1428 | dump_gimple_stmt (MSG_NOTE, TDF_SLIM, def_stmt, 0); | |
1429 | } | |
1430 | ||
1431 | if (dt == vect_constant_def || dt == vect_external_def) | |
1432 | { | |
1433 | tree stmt_vectype = STMT_VINFO_VECTYPE (stmt_vinfo); | |
1434 | tree vector_type; | |
1435 | ||
1436 | if (vectype) | |
1437 | vector_type = vectype; | |
2568d8a1 | 1438 | else if (VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (op)) |
c83a894c AH |
1439 | && VECTOR_BOOLEAN_TYPE_P (stmt_vectype)) |
1440 | vector_type = build_same_sized_truth_vector_type (stmt_vectype); | |
1441 | else | |
1442 | vector_type = get_vectype_for_scalar_type (TREE_TYPE (op)); | |
1443 | ||
1444 | gcc_assert (vector_type); | |
1445 | return vect_init_vector (stmt, op, vector_type, NULL); | |
1446 | } | |
1447 | else | |
1448 | return vect_get_vec_def_for_operand_1 (def_stmt, dt); | |
1449 | } | |
1450 | ||
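/* Example (illustrative, names hypothetical): for STMT: y_4 = x_3 * 3
   with a V4SI vectype, OP = 3 is a vect_constant_def, so the call
   above reduces to vect_init_vector (stmt, 3, vector(4) int, NULL),
   yielding a preheader stmt cst__9 = {3, 3, 3, 3} whose def is
   returned.  */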
1451 | ||
ebfd146a IR |
1452 | /* Function vect_get_vec_def_for_stmt_copy |
1453 | ||
ff802fa1 | 1454 | Return a vector-def for an operand. This function is used when the |
b8698a0f L |
1455 | vectorized stmt to be created (by the caller of this function) is a "copy"
1456 | created when the vectorized result cannot fit in one vector, and several
ff802fa1 | 1457 | copies of the vector-stmt are required. In this case the vector-def is |
ebfd146a | 1458 | retrieved from the vector stmt recorded in the STMT_VINFO_RELATED_STMT field |
b8698a0f | 1459 | of the stmt that defines VEC_OPRND. |
ebfd146a IR |
1460 | DT is the type of the vector def VEC_OPRND. |
1461 | ||
1462 | Context: | |
1463 | In case the vectorization factor (VF) is bigger than the number | |
1464 | of elements that can fit in a vectype (nunits), we have to generate | |
ff802fa1 | 1465 | more than one vector stmt to vectorize the scalar stmt. This situation |
b8698a0f | 1466 | arises when there are multiple data-types operated upon in the loop; the |
ebfd146a IR |
1467 | smallest data-type determines the VF, and as a result, when vectorizing |
1468 | stmts operating on wider types we need to create 'VF/nunits' "copies" of the | |
1469 | vector stmt (each computing a vector of 'nunits' results, and together | |
b8698a0f | 1470 | computing 'VF' results in each iteration). This function is called when |
ebfd146a IR |
1471 | vectorizing such a stmt (e.g. vectorizing S2 in the illustration below, in |
1472 | which VF=16 and nunits=4, so the number of copies required is 4): | |
1473 | ||
1474 | scalar stmt: vectorized into: STMT_VINFO_RELATED_STMT | |
b8698a0f | 1475 | |
ebfd146a IR |
1476 | S1: x = load VS1.0: vx.0 = memref0 VS1.1 |
1477 | VS1.1: vx.1 = memref1 VS1.2 | |
1478 | VS1.2: vx.2 = memref2 VS1.3 | |
b8698a0f | 1479 | VS1.3: vx.3 = memref3 |
ebfd146a IR |
1480 | |
1481 | S2: z = x + ... VSnew.0: vz0 = vx.0 + ... VSnew.1 | |
1482 | VSnew.1: vz1 = vx.1 + ... VSnew.2 | |
1483 | VSnew.2: vz2 = vx.2 + ... VSnew.3 | |
1484 | VSnew.3: vz3 = vx.3 + ... | |
1485 | ||
1486 | The vectorization of S1 is explained in vectorizable_load. | |
1487 | The vectorization of S2: | |
b8698a0f L |
1488 | To create the first vector-stmt out of the 4 copies - VSnew.0 - |
1489 | the function 'vect_get_vec_def_for_operand' is called to | |
ff802fa1 | 1490 | get the relevant vector-def for each operand of S2. For operand x it |
ebfd146a IR |
1491 | returns the vector-def 'vx.0'. |
1492 | ||
b8698a0f L |
1493 | To create the remaining copies of the vector-stmt (VSnew.j), this |
1494 | function is called to get the relevant vector-def for each operand. It is | |
1495 | obtained from the respective VS1.j stmt, which is recorded in the | |
ebfd146a IR |
1496 | STMT_VINFO_RELATED_STMT field of the stmt that defines VEC_OPRND. |
1497 | ||
b8698a0f L |
1498 | For example, to obtain the vector-def 'vx.1' in order to create the |
1499 | vector stmt 'VSnew.1', this function is called with VEC_OPRND='vx.0'. | |
1500 | Given 'vx0' we obtain the stmt that defines it ('VS1.0'); from the | |
ebfd146a IR |
1501 | STMT_VINFO_RELATED_STMT field of 'VS1.0' we obtain the next copy - 'VS1.1', |
1502 | and return its def ('vx.1'). | |
1503 | Overall, to create the above sequence this function will be called 3 times: | |
1504 | vx.1 = vect_get_vec_def_for_stmt_copy (dt, vx.0); | |
1505 | vx.2 = vect_get_vec_def_for_stmt_copy (dt, vx.1); | |
1506 | vx.3 = vect_get_vec_def_for_stmt_copy (dt, vx.2); */ | |
1507 | ||
1508 | tree | |
1509 | vect_get_vec_def_for_stmt_copy (enum vect_def_type dt, tree vec_oprnd) | |
1510 | { | |
355fe088 | 1511 | gimple *vec_stmt_for_operand; |
ebfd146a IR |
1512 | stmt_vec_info def_stmt_info; |
1513 | ||
1514 | /* Do nothing; can reuse same def. */ | |
8644a673 | 1515 | if (dt == vect_external_def || dt == vect_constant_def ) |
ebfd146a IR |
1516 | return vec_oprnd; |
1517 | ||
1518 | vec_stmt_for_operand = SSA_NAME_DEF_STMT (vec_oprnd); | |
1519 | def_stmt_info = vinfo_for_stmt (vec_stmt_for_operand); | |
1520 | gcc_assert (def_stmt_info); | |
1521 | vec_stmt_for_operand = STMT_VINFO_RELATED_STMT (def_stmt_info); | |
1522 | gcc_assert (vec_stmt_for_operand); | |
ebfd146a IR |
1523 | if (gimple_code (vec_stmt_for_operand) == GIMPLE_PHI) |
1524 | vec_oprnd = PHI_RESULT (vec_stmt_for_operand); | |
1525 | else | |
1526 | vec_oprnd = gimple_get_lhs (vec_stmt_for_operand); | |
1527 | return vec_oprnd; | |
1528 | } | |
1529 | ||
1530 | ||
1531 | /* Get vectorized definitions for the operands to create a copy of an original | |
ff802fa1 | 1532 | stmt. See vect_get_vec_def_for_stmt_copy () for details. */ |
ebfd146a | 1533 | |
c78e3652 | 1534 | void |
b8698a0f | 1535 | vect_get_vec_defs_for_stmt_copy (enum vect_def_type *dt, |
9771b263 DN |
1536 | vec<tree> *vec_oprnds0, |
1537 | vec<tree> *vec_oprnds1) | |
ebfd146a | 1538 | { |
9771b263 | 1539 | tree vec_oprnd = vec_oprnds0->pop (); |
ebfd146a IR |
1540 | |
1541 | vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd); | |
9771b263 | 1542 | vec_oprnds0->quick_push (vec_oprnd); |
ebfd146a | 1543 | |
9771b263 | 1544 | if (vec_oprnds1 && vec_oprnds1->length ()) |
ebfd146a | 1545 | { |
9771b263 | 1546 | vec_oprnd = vec_oprnds1->pop (); |
ebfd146a | 1547 | vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd); |
9771b263 | 1548 | vec_oprnds1->quick_push (vec_oprnd); |
ebfd146a IR |
1549 | } |
1550 | } | |
1551 | ||
1552 | ||
c78e3652 | 1553 | /* Get vectorized definitions for OP0 and OP1. */ |
ebfd146a | 1554 | |
c78e3652 | 1555 | void |
355fe088 | 1556 | vect_get_vec_defs (tree op0, tree op1, gimple *stmt, |
9771b263 DN |
1557 | vec<tree> *vec_oprnds0, |
1558 | vec<tree> *vec_oprnds1, | |
306b0c92 | 1559 | slp_tree slp_node) |
ebfd146a IR |
1560 | { |
1561 | if (slp_node) | |
d092494c IR |
1562 | { |
1563 | int nops = (op1 == NULL_TREE) ? 1 : 2; | |
ef062b13 TS |
1564 | auto_vec<tree> ops (nops); |
1565 | auto_vec<vec<tree> > vec_defs (nops); | |
d092494c | 1566 | |
9771b263 | 1567 | ops.quick_push (op0); |
d092494c | 1568 | if (op1) |
9771b263 | 1569 | ops.quick_push (op1); |
d092494c | 1570 | |
306b0c92 | 1571 | vect_get_slp_defs (ops, slp_node, &vec_defs); |
d092494c | 1572 | |
37b5ec8f | 1573 | *vec_oprnds0 = vec_defs[0]; |
d092494c | 1574 | if (op1) |
37b5ec8f | 1575 | *vec_oprnds1 = vec_defs[1]; |
d092494c | 1576 | } |
ebfd146a IR |
1577 | else |
1578 | { | |
1579 | tree vec_oprnd; | |
1580 | ||
9771b263 | 1581 | vec_oprnds0->create (1); |
81c40241 | 1582 | vec_oprnd = vect_get_vec_def_for_operand (op0, stmt); |
9771b263 | 1583 | vec_oprnds0->quick_push (vec_oprnd); |
ebfd146a IR |
1584 | |
1585 | if (op1) | |
1586 | { | |
9771b263 | 1587 | vec_oprnds1->create (1); |
81c40241 | 1588 | vec_oprnd = vect_get_vec_def_for_operand (op1, stmt); |
9771b263 | 1589 | vec_oprnds1->quick_push (vec_oprnd); |
ebfd146a IR |
1590 | } |
1591 | } | |
1592 | } | |
1593 | ||
1594 | ||
1595 | /* Function vect_finish_stmt_generation. | |
1596 | ||
1597 | Insert a new stmt. */ | |
1598 | ||
1599 | void | |
355fe088 | 1600 | vect_finish_stmt_generation (gimple *stmt, gimple *vec_stmt, |
ebfd146a IR |
1601 | gimple_stmt_iterator *gsi) |
1602 | { | |
1603 | stmt_vec_info stmt_info = vinfo_for_stmt (stmt); | |
310213d4 | 1604 | vec_info *vinfo = stmt_info->vinfo; |
ebfd146a IR |
1605 | |
1606 | gcc_assert (gimple_code (stmt) != GIMPLE_LABEL); | |
1607 | ||
54e8e2c3 RG |
1608 | if (!gsi_end_p (*gsi) |
1609 | && gimple_has_mem_ops (vec_stmt)) | |
1610 | { | |
355fe088 | 1611 | gimple *at_stmt = gsi_stmt (*gsi); |
54e8e2c3 RG |
1612 | tree vuse = gimple_vuse (at_stmt); |
1613 | if (vuse && TREE_CODE (vuse) == SSA_NAME) | |
1614 | { | |
1615 | tree vdef = gimple_vdef (at_stmt); | |
1616 | gimple_set_vuse (vec_stmt, gimple_vuse (at_stmt)); | |
1617 | /* If we have an SSA vuse and insert a store, update virtual | |
1618 | SSA form to avoid triggering the renamer. Do so only | |
1619 | if we can easily see all uses - which is what almost always | |
1620 | happens with the way vectorized stmts are inserted. */ | |
1621 | if ((vdef && TREE_CODE (vdef) == SSA_NAME) | |
1622 | && ((is_gimple_assign (vec_stmt) | |
1623 | && !is_gimple_reg (gimple_assign_lhs (vec_stmt))) | |
1624 | || (is_gimple_call (vec_stmt) | |
1625 | && !(gimple_call_flags (vec_stmt) | |
1626 | & (ECF_CONST|ECF_PURE|ECF_NOVOPS))))) | |
1627 | { | |
1628 | tree new_vdef = copy_ssa_name (vuse, vec_stmt); | |
1629 | gimple_set_vdef (vec_stmt, new_vdef); | |
1630 | SET_USE (gimple_vuse_op (at_stmt), new_vdef); | |
1631 | } | |
1632 | } | |
1633 | } | |
ebfd146a IR |
1634 | gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT); |
1635 | ||
310213d4 | 1636 | set_vinfo_for_stmt (vec_stmt, new_stmt_vec_info (vec_stmt, vinfo)); |
ebfd146a | 1637 | |
73fbfcad | 1638 | if (dump_enabled_p ()) |
ebfd146a | 1639 | { |
78c60e3d SS |
1640 | dump_printf_loc (MSG_NOTE, vect_location, "add new stmt: "); |
1641 | dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vec_stmt, 0); | |
ebfd146a IR |
1642 | } |
1643 | ||
ad885386 | 1644 | gimple_set_location (vec_stmt, gimple_location (stmt)); |
8e91d222 JJ |
1645 | |
1646 | /* While EH edges will generally prevent vectorization, stmt might | |
1647 | e.g. be in a must-not-throw region. Ensure newly created stmts | |
1648 | that could throw are part of the same region. */ | |
1649 | int lp_nr = lookup_stmt_eh_lp (stmt); | |
1650 | if (lp_nr != 0 && stmt_could_throw_p (vec_stmt)) | |
1651 | add_stmt_to_eh_lp (vec_stmt, lp_nr); | |
ebfd146a IR |
1652 | } |
1653 | ||
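/* Illustration of the virtual SSA update above (sketch, not in the
   original source): suppose AT_STMT is an existing store
   "# .MEM_7 = VDEF <.MEM_5> ... = x;" and VEC_STMT is a vectorized
   store inserted before it.  The code above gives VEC_STMT the vuse
   .MEM_5, creates a fresh vdef .MEM_9 for it, and rewires AT_STMT to
   "# .MEM_7 = VDEF <.MEM_9>", keeping virtual SSA form valid without
   running the renamer.  */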
70439f0d RS |
1654 | /* We want to vectorize a call to combined function CFN with function |
1655 | decl FNDECL, using VECTYPE_OUT as the type of the output and VECTYPE_IN | |
1656 | as the types of all inputs. Check whether this is possible using | |
1657 | an internal function, returning its code if so or IFN_LAST if not. */ | |
ebfd146a | 1658 | |
70439f0d RS |
1659 | static internal_fn |
1660 | vectorizable_internal_function (combined_fn cfn, tree fndecl, | |
1661 | tree vectype_out, tree vectype_in) | |
ebfd146a | 1662 | { |
70439f0d RS |
1663 | internal_fn ifn; |
1664 | if (internal_fn_p (cfn)) | |
1665 | ifn = as_internal_fn (cfn); | |
1666 | else | |
1667 | ifn = associated_internal_fn (fndecl); | |
1668 | if (ifn != IFN_LAST && direct_internal_fn_p (ifn)) | |
1669 | { | |
1670 | const direct_internal_fn_info &info = direct_internal_fn (ifn); | |
1671 | if (info.vectorizable) | |
1672 | { | |
1673 | tree type0 = (info.type0 < 0 ? vectype_out : vectype_in); | |
1674 | tree type1 = (info.type1 < 0 ? vectype_out : vectype_in); | |
d95ab70a RS |
1675 | if (direct_internal_fn_supported_p (ifn, tree_pair (type0, type1), |
1676 | OPTIMIZE_FOR_SPEED)) | |
70439f0d RS |
1677 | return ifn; |
1678 | } | |
1679 | } | |
1680 | return IFN_LAST; | |
ebfd146a IR |
1681 | } |
1682 | ||
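/* Example (illustrative): for CFN_SQRT with VECTYPE_OUT == VECTYPE_IN
   == V2DF, the function checks direct_internal_fn_supported_p
   (IFN_SQRT, tree_pair (V2DF, V2DF), OPTIMIZE_FOR_SPEED) and returns
   IFN_SQRT when the target provides the corresponding optab,
   IFN_LAST otherwise.  */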
5ce9450f | 1683 | |
355fe088 | 1684 | static tree permute_vec_elements (tree, tree, tree, gimple *, |
5ce9450f JJ |
1685 | gimple_stmt_iterator *); |
1686 | ||
62da9e14 RS |
1687 | /* STMT is a non-strided load or store, meaning that it accesses |
1688 | elements with a known constant step. Return -1 if that step | |
1689 | is negative, 0 if it is zero, and 1 if it is greater than zero. */ | |
1690 | ||
1691 | static int | |
1692 | compare_step_with_zero (gimple *stmt) | |
1693 | { | |
1694 | stmt_vec_info stmt_info = vinfo_for_stmt (stmt); | |
1695 | loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); | |
1696 | tree step; | |
1697 | if (loop_vinfo && nested_in_vect_loop_p (LOOP_VINFO_LOOP (loop_vinfo), stmt)) | |
1698 | step = STMT_VINFO_DR_STEP (stmt_info); | |
1699 | else | |
1700 | step = DR_STEP (STMT_VINFO_DATA_REF (stmt_info)); | |
1701 | return tree_int_cst_compare (step, size_zero_node); | |
1702 | } | |
1703 | ||
1704 | /* If the target supports a permute mask that reverses the elements in | |
1705 | a vector of type VECTYPE, return that mask, otherwise return null. */ | |
1706 | ||
1707 | static tree | |
1708 | perm_mask_for_reverse (tree vectype) | |
1709 | { | |
1710 | int i, nunits; | |
1711 | unsigned char *sel; | |
1712 | ||
1713 | nunits = TYPE_VECTOR_SUBPARTS (vectype); | |
1714 | sel = XALLOCAVEC (unsigned char, nunits); | |
1715 | ||
1716 | for (i = 0; i < nunits; ++i) | |
1717 | sel[i] = nunits - 1 - i; | |
1718 | ||
1719 | if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel)) | |
1720 | return NULL_TREE; | |
1721 | return vect_gen_perm_mask_checked (vectype, sel); | |
1722 | } | |
5ce9450f | 1723 | |
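/* Usage sketch (illustrative, names hypothetical): for a V4SI VECTYPE
   the selector built above is {3, 2, 1, 0}, so

     tree mask = perm_mask_for_reverse (vectype);
     if (mask)
       reversed = permute_vec_elements (v, v, mask, stmt, gsi);

   emits VEC_PERM_EXPR <v, v, {3,2,1,0}>, reversing the elements.  */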
2de001ee RS |
1724 | /* A subroutine of get_load_store_type, with a subset of the same |
1725 | arguments. Handle the case where STMT is part of a grouped load | |
1726 | or store. | |
1727 | ||
1728 | For stores, the statements in the group are all consecutive | |
1729 | and there is no gap at the end. For loads, the statements in the | |
1730 | group might not be consecutive; there can be gaps between statements | |
1731 | as well as at the end. */ | |
1732 | ||
1733 | static bool | |
1734 | get_group_load_store_type (gimple *stmt, tree vectype, bool slp, | |
1735 | vec_load_store_type vls_type, | |
1736 | vect_memory_access_type *memory_access_type) | |
1737 | { | |
1738 | stmt_vec_info stmt_info = vinfo_for_stmt (stmt); | |
1739 | vec_info *vinfo = stmt_info->vinfo; | |
1740 | loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); | |
1741 | struct loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL; | |
1742 | gimple *first_stmt = GROUP_FIRST_ELEMENT (stmt_info); | |
1743 | unsigned int group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt)); | |
1744 | bool single_element_p = (stmt == first_stmt | |
1745 | && !GROUP_NEXT_ELEMENT (stmt_info)); | |
1746 | unsigned HOST_WIDE_INT gap = GROUP_GAP (vinfo_for_stmt (first_stmt)); | |
522fcdd7 | 1747 | unsigned nunits = TYPE_VECTOR_SUBPARTS (vectype); |
2de001ee RS |
1748 | |
1749 | /* True if the vectorized statements would access beyond the last | |
1750 | statement in the group. */ | |
1751 | bool overrun_p = false; | |
1752 | ||
1753 | /* True if we can cope with such overrun by peeling for gaps, so that | |
1754 | there is at least one final scalar iteration after the vector loop. */ | |
1755 | bool can_overrun_p = (vls_type == VLS_LOAD && loop_vinfo && !loop->inner); | |
1756 | ||
1757 | /* There can only be a gap at the end of the group if the stride is | |
1758 | known at compile time. */ | |
1759 | gcc_assert (!STMT_VINFO_STRIDED_P (stmt_info) || gap == 0); | |
1760 | ||
1761 | /* Stores can't yet have gaps. */ | |
1762 | gcc_assert (slp || vls_type == VLS_LOAD || gap == 0); | |
1763 | ||
1764 | if (slp) | |
1765 | { | |
1766 | if (STMT_VINFO_STRIDED_P (stmt_info)) | |
1767 | { | |
1768 | /* Try to use consecutive accesses of GROUP_SIZE elements, | |
1769 | separated by the stride, until we have a complete vector. | |
1770 | Fall back to scalar accesses if that isn't possible. */ | |
1771 | if (nunits % group_size == 0) | |
1772 | *memory_access_type = VMAT_STRIDED_SLP; | |
1773 | else | |
1774 | *memory_access_type = VMAT_ELEMENTWISE; | |
1775 | } | |
1776 | else | |
1777 | { | |
1778 | overrun_p = loop_vinfo && gap != 0; | |
1779 | if (overrun_p && vls_type != VLS_LOAD) | |
1780 | { | |
1781 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
1782 | "Grouped store with gaps requires" | |
1783 | " non-consecutive accesses\n"); | |
1784 | return false; | |
1785 | } | |
f9ef2c76 RB |
1786 | /* If the access is aligned, an overrun is fine. */
1787 | if (overrun_p | |
1788 | && aligned_access_p | |
1789 | (STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt)))) | |
1790 | overrun_p = false; | |
2de001ee RS |
1791 | if (overrun_p && !can_overrun_p) |
1792 | { | |
1793 | if (dump_enabled_p ()) | |
1794 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
1795 | "Peeling for outer loop is not supported\n"); | |
1796 | return false; | |
1797 | } | |
1798 | *memory_access_type = VMAT_CONTIGUOUS; | |
1799 | } | |
1800 | } | |
1801 | else | |
1802 | { | |
1803 | /* We can always handle this case using elementwise accesses, | |
1804 | but see if something more efficient is available. */ | |
1805 | *memory_access_type = VMAT_ELEMENTWISE; | |
1806 | ||
1807 | /* If there is a gap at the end of the group then these optimizations | |
1808 | would access excess elements in the last iteration. */ | |
1809 | bool would_overrun_p = (gap != 0); | |
522fcdd7 RB |
1810 | /* If the access is aligned, an overrun is fine, but only if the
1811 | overrun is not inside an unused vector (which happens when the gap
1812 | is as large as or larger than a vector). */
f9ef2c76 | 1813 | if (would_overrun_p |
522fcdd7 RB |
1814 | && gap < nunits |
1815 | && aligned_access_p | |
1816 | (STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt)))) | |
f9ef2c76 | 1817 | would_overrun_p = false; |
2de001ee | 1818 | if (!STMT_VINFO_STRIDED_P (stmt_info) |
62da9e14 RS |
1819 | && (can_overrun_p || !would_overrun_p) |
1820 | && compare_step_with_zero (stmt) > 0) | |
2de001ee RS |
1821 | { |
1822 | /* First try using LOAD/STORE_LANES. */ | |
1823 | if (vls_type == VLS_LOAD | |
1824 | ? vect_load_lanes_supported (vectype, group_size) | |
1825 | : vect_store_lanes_supported (vectype, group_size)) | |
1826 | { | |
1827 | *memory_access_type = VMAT_LOAD_STORE_LANES; | |
1828 | overrun_p = would_overrun_p; | |
1829 | } | |
1830 | ||
1831 | /* If that fails, try using permuting loads. */ | |
1832 | if (*memory_access_type == VMAT_ELEMENTWISE | |
1833 | && (vls_type == VLS_LOAD | |
1834 | ? vect_grouped_load_supported (vectype, single_element_p, | |
1835 | group_size) | |
1836 | : vect_grouped_store_supported (vectype, group_size))) | |
1837 | { | |
1838 | *memory_access_type = VMAT_CONTIGUOUS_PERMUTE; | |
1839 | overrun_p = would_overrun_p; | |
1840 | } | |
1841 | } | |
1842 | } | |
1843 | ||
1844 | if (vls_type != VLS_LOAD && first_stmt == stmt) | |
1845 | { | |
1846 | /* STMT is the leader of the group. Check the operands of all the | |
1847 | stmts of the group. */ | |
1848 | gimple *next_stmt = GROUP_NEXT_ELEMENT (stmt_info); | |
1849 | while (next_stmt) | |
1850 | { | |
1851 | gcc_assert (gimple_assign_single_p (next_stmt)); | |
1852 | tree op = gimple_assign_rhs1 (next_stmt); | |
1853 | gimple *def_stmt; | |
1854 | enum vect_def_type dt; | |
1855 | if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt)) | |
1856 | { | |
1857 | if (dump_enabled_p ()) | |
1858 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
1859 | "use not simple.\n"); | |
1860 | return false; | |
1861 | } | |
1862 | next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt)); | |
1863 | } | |
1864 | } | |
1865 | ||
1866 | if (overrun_p) | |
1867 | { | |
1868 | gcc_assert (can_overrun_p); | |
1869 | if (dump_enabled_p ()) | |
1870 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
1871 | "Data access with gaps requires scalar " | |
1872 | "epilogue loop\n"); | |
1873 | LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = true; | |
1874 | } | |
1875 | ||
1876 | return true; | |
1877 | } | |
1878 | ||
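/* Example of the strided-SLP decision above (illustrative): with
   V4SI (nunits == 4) and GROUP_SIZE == 2, 4 % 2 == 0, so two strided
   groups of two consecutive elements fill a complete vector and we
   choose VMAT_STRIDED_SLP; with GROUP_SIZE == 3 the remainder forces
   VMAT_ELEMENTWISE.  */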
62da9e14 RS |
1879 | /* A subroutine of get_load_store_type, with a subset of the same |
1880 | arguments. Handle the case where STMT is a load or store that | |
1881 | accesses consecutive elements with a negative step. */ | |
1882 | ||
1883 | static vect_memory_access_type | |
1884 | get_negative_load_store_type (gimple *stmt, tree vectype, | |
1885 | vec_load_store_type vls_type, | |
1886 | unsigned int ncopies) | |
1887 | { | |
1888 | stmt_vec_info stmt_info = vinfo_for_stmt (stmt); | |
1889 | struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info); | |
1890 | dr_alignment_support alignment_support_scheme; | |
1891 | ||
1892 | if (ncopies > 1) | |
1893 | { | |
1894 | if (dump_enabled_p ()) | |
1895 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
1896 | "multiple types with negative step.\n"); | |
1897 | return VMAT_ELEMENTWISE; | |
1898 | } | |
1899 | ||
1900 | alignment_support_scheme = vect_supportable_dr_alignment (dr, false); | |
1901 | if (alignment_support_scheme != dr_aligned | |
1902 | && alignment_support_scheme != dr_unaligned_supported) | |
1903 | { | |
1904 | if (dump_enabled_p ()) | |
1905 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
1906 | "negative step but alignment required.\n"); | |
1907 | return VMAT_ELEMENTWISE; | |
1908 | } | |
1909 | ||
1910 | if (vls_type == VLS_STORE_INVARIANT) | |
1911 | { | |
1912 | if (dump_enabled_p ()) | |
1913 | dump_printf_loc (MSG_NOTE, vect_location, | |
1914 | "negative step with invariant source;" | |
1915 | " no permute needed.\n"); | |
1916 | return VMAT_CONTIGUOUS_DOWN; | |
1917 | } | |
1918 | ||
1919 | if (!perm_mask_for_reverse (vectype)) | |
1920 | { | |
1921 | if (dump_enabled_p ()) | |
1922 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
1923 | "negative step and reversing not supported.\n"); | |
1924 | return VMAT_ELEMENTWISE; | |
1925 | } | |
1926 | ||
1927 | return VMAT_CONTIGUOUS_REVERSE; | |
1928 | } | |
1929 | ||
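/* Example (illustrative): for a single-copy load with step -4 on V4SI
   and supported alignment, the function returns
   VMAT_CONTIGUOUS_REVERSE provided perm_mask_for_reverse says the
   target can do the {3,2,1,0} permute; the vectorized code then loads
   the lowest-addressed vector and reverses it.  An invariant-source
   store needs no permute and gets VMAT_CONTIGUOUS_DOWN.  */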
2de001ee RS |
1930 | /* Analyze load or store statement STMT of type VLS_TYPE. Return true |
1931 | if there is a memory access type that the vectorized form can use, | |
1932 | storing it in *MEMORY_ACCESS_TYPE if so. If we decide to use gathers | |
1933 | or scatters, fill in GS_INFO accordingly. | |
1934 | ||
1935 | SLP says whether we're performing SLP rather than loop vectorization. | |
62da9e14 RS |
1936 | VECTYPE is the vector type that the vectorized statements will use. |
1937 | NCOPIES is the number of vector statements that will be needed. */ | |
2de001ee RS |
1938 | |
1939 | static bool | |
1940 | get_load_store_type (gimple *stmt, tree vectype, bool slp, | |
62da9e14 | 1941 | vec_load_store_type vls_type, unsigned int ncopies, |
2de001ee RS |
1942 | vect_memory_access_type *memory_access_type, |
1943 | gather_scatter_info *gs_info) | |
1944 | { | |
1945 | stmt_vec_info stmt_info = vinfo_for_stmt (stmt); | |
1946 | vec_info *vinfo = stmt_info->vinfo; | |
1947 | loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); | |
1948 | if (STMT_VINFO_GATHER_SCATTER_P (stmt_info)) | |
1949 | { | |
1950 | *memory_access_type = VMAT_GATHER_SCATTER; | |
1951 | gimple *def_stmt; | |
1952 | if (!vect_check_gather_scatter (stmt, loop_vinfo, gs_info)) | |
1953 | gcc_unreachable (); | |
1954 | else if (!vect_is_simple_use (gs_info->offset, vinfo, &def_stmt, | |
1955 | &gs_info->offset_dt, | |
1956 | &gs_info->offset_vectype)) | |
1957 | { | |
1958 | if (dump_enabled_p ()) | |
1959 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
1960 | "%s index use not simple.\n", | |
1961 | vls_type == VLS_LOAD ? "gather" : "scatter"); | |
1962 | return false; | |
1963 | } | |
1964 | } | |
1965 | else if (STMT_VINFO_GROUPED_ACCESS (stmt_info)) | |
1966 | { | |
1967 | if (!get_group_load_store_type (stmt, vectype, slp, vls_type, | |
1968 | memory_access_type)) | |
1969 | return false; | |
1970 | } | |
1971 | else if (STMT_VINFO_STRIDED_P (stmt_info)) | |
1972 | { | |
1973 | gcc_assert (!slp); | |
1974 | *memory_access_type = VMAT_ELEMENTWISE; | |
1975 | } | |
1976 | else | |
62da9e14 RS |
1977 | { |
1978 | int cmp = compare_step_with_zero (stmt); | |
1979 | if (cmp < 0) | |
1980 | *memory_access_type = get_negative_load_store_type | |
1981 | (stmt, vectype, vls_type, ncopies); | |
1982 | else if (cmp == 0) | |
1983 | { | |
1984 | gcc_assert (vls_type == VLS_LOAD); | |
1985 | *memory_access_type = VMAT_INVARIANT; | |
1986 | } | |
1987 | else | |
1988 | *memory_access_type = VMAT_CONTIGUOUS; | |
1989 | } | |
2de001ee RS |
1990 | |
1991 | /* FIXME: At the moment the cost model seems to underestimate the | |
1992 | cost of using elementwise accesses. This check preserves the | |
1993 | traditional behavior until that can be fixed. */ | |
1994 | if (*memory_access_type == VMAT_ELEMENTWISE | |
1995 | && !STMT_VINFO_STRIDED_P (stmt_info)) | |
1996 | { | |
1997 | if (dump_enabled_p ()) | |
1998 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
1999 | "not falling back to elementwise accesses\n"); | |
2000 | return false; | |
2001 | } | |
2002 | return true; | |
2003 | } | |
2004 | ||
5ce9450f JJ |
2005 | /* Function vectorizable_mask_load_store. |
2006 | ||
2007 | Check if STMT performs a conditional load or store that can be vectorized. | |
2008 | If VEC_STMT is also passed, vectorize the STMT: create a vectorized | |
2009 | stmt to replace it, put it in VEC_STMT, and insert it at GSI. | |
2010 | Return FALSE if not a vectorizable STMT, TRUE otherwise. */ | |
2011 | ||
2012 | static bool | |
355fe088 TS |
2013 | vectorizable_mask_load_store (gimple *stmt, gimple_stmt_iterator *gsi, |
2014 | gimple **vec_stmt, slp_tree slp_node) | |
5ce9450f JJ |
2015 | { |
2016 | tree vec_dest = NULL; | |
2017 | stmt_vec_info stmt_info = vinfo_for_stmt (stmt); | |
2018 | stmt_vec_info prev_stmt_info; | |
2019 | loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); | |
2020 | struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); | |
2021 | bool nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt); | |
2022 | struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info); | |
2023 | tree vectype = STMT_VINFO_VECTYPE (stmt_info); | |
57e2f6ad | 2024 | tree rhs_vectype = NULL_TREE; |
045c1278 | 2025 | tree mask_vectype; |
5ce9450f | 2026 | tree elem_type; |
355fe088 | 2027 | gimple *new_stmt; |
5ce9450f JJ |
2028 | tree dummy; |
2029 | tree dataref_ptr = NULL_TREE; | |
355fe088 | 2030 | gimple *ptr_incr; |
5ce9450f JJ |
2031 | int nunits = TYPE_VECTOR_SUBPARTS (vectype); |
2032 | int ncopies; | |
2033 | int i, j; | |
2034 | bool inv_p; | |
134c85ca | 2035 | gather_scatter_info gs_info; |
2de001ee | 2036 | vec_load_store_type vls_type; |
5ce9450f | 2037 | tree mask; |
355fe088 | 2038 | gimple *def_stmt; |
5ce9450f JJ |
2039 | enum vect_def_type dt; |
2040 | ||
2041 | if (slp_node != NULL) | |
2042 | return false; | |
2043 | ||
2044 | ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits; | |
2045 | gcc_assert (ncopies >= 1); | |
2046 | ||
5ce9450f | 2047 | mask = gimple_call_arg (stmt, 2); |
045c1278 | 2048 | |
2568d8a1 | 2049 | if (!VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (mask))) |
5ce9450f JJ |
2050 | return false; |
2051 | ||
2052 | /* FORNOW. This restriction should be relaxed. */ | |
2053 | if (nested_in_vect_loop && ncopies > 1) | |
2054 | { | |
2055 | if (dump_enabled_p ()) | |
2056 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
2057 | "multiple types in nested loop."); | |
2058 | return false; | |
2059 | } | |
2060 | ||
2061 | if (!STMT_VINFO_RELEVANT_P (stmt_info)) | |
2062 | return false; | |
2063 | ||
66c16fd9 RB |
2064 | if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def |
2065 | && ! vec_stmt) | |
5ce9450f JJ |
2066 | return false; |
2067 | ||
2068 | if (!STMT_VINFO_DATA_REF (stmt_info)) | |
2069 | return false; | |
2070 | ||
2071 | elem_type = TREE_TYPE (vectype); | |
2072 | ||
045c1278 IE |
2073 | if (TREE_CODE (mask) != SSA_NAME) |
2074 | return false; | |
2075 | ||
2076 | if (!vect_is_simple_use (mask, loop_vinfo, &def_stmt, &dt, &mask_vectype)) | |
2077 | return false; | |
2078 | ||
2079 | if (!mask_vectype) | |
2080 | mask_vectype = get_mask_type_for_scalar_type (TREE_TYPE (vectype)); | |
2081 | ||
dc6a3147 IE |
2082 | if (!mask_vectype || !VECTOR_BOOLEAN_TYPE_P (mask_vectype) |
2083 | || TYPE_VECTOR_SUBPARTS (mask_vectype) != TYPE_VECTOR_SUBPARTS (vectype)) | |
045c1278 IE |
2084 | return false; |
2085 | ||
2de001ee | 2086 | if (gimple_call_internal_fn (stmt) == IFN_MASK_STORE) |
57e2f6ad IE |
2087 | { |
2088 | tree rhs = gimple_call_arg (stmt, 3); | |
2089 | if (!vect_is_simple_use (rhs, loop_vinfo, &def_stmt, &dt, &rhs_vectype)) | |
2090 | return false; | |
2de001ee RS |
2091 | if (dt == vect_constant_def || dt == vect_external_def) |
2092 | vls_type = VLS_STORE_INVARIANT; | |
2093 | else | |
2094 | vls_type = VLS_STORE; | |
57e2f6ad | 2095 | } |
2de001ee RS |
2096 | else |
2097 | vls_type = VLS_LOAD; | |
57e2f6ad | 2098 | |
2de001ee | 2099 | vect_memory_access_type memory_access_type; |
62da9e14 | 2100 | if (!get_load_store_type (stmt, vectype, false, vls_type, ncopies, |
2de001ee RS |
2101 | &memory_access_type, &gs_info)) |
2102 | return false; | |
03b9e8e4 | 2103 | |
2de001ee RS |
2104 | if (memory_access_type == VMAT_GATHER_SCATTER) |
2105 | { | |
134c85ca | 2106 | tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gs_info.decl)); |
03b9e8e4 JJ |
2107 | tree masktype |
2108 | = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (TREE_CHAIN (arglist)))); | |
2109 | if (TREE_CODE (masktype) == INTEGER_TYPE) | |
2110 | { | |
2111 | if (dump_enabled_p ()) | |
2112 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
2113 | "masked gather with integer mask not supported."); | |
2114 | return false; | |
2115 | } | |
5ce9450f | 2116 | } |
2de001ee RS |
2117 | else if (memory_access_type != VMAT_CONTIGUOUS) |
2118 | { | |
2119 | if (dump_enabled_p ()) | |
2120 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
2121 | "unsupported access type for masked %s.\n", | |
2122 | vls_type == VLS_LOAD ? "load" : "store"); | |
2123 | return false; | |
2124 | } | |
5ce9450f | 2125 | else if (!VECTOR_MODE_P (TYPE_MODE (vectype)) |
045c1278 IE |
2126 | || !can_vec_mask_load_store_p (TYPE_MODE (vectype), |
2127 | TYPE_MODE (mask_vectype), | |
2de001ee | 2128 | vls_type == VLS_LOAD) |
57e2f6ad IE |
2129 | || (rhs_vectype |
2130 | && !useless_type_conversion_p (vectype, rhs_vectype))) | |
5ce9450f JJ |
2131 | return false; |
2132 | ||
5ce9450f JJ |
2133 | if (!vec_stmt) /* transformation not required. */ |
2134 | { | |
2de001ee | 2135 | STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info) = memory_access_type; |
5ce9450f | 2136 | STMT_VINFO_TYPE (stmt_info) = call_vec_info_type; |
2de001ee RS |
2137 | if (vls_type == VLS_LOAD) |
2138 | vect_model_load_cost (stmt_info, ncopies, memory_access_type, | |
2139 | NULL, NULL, NULL); | |
5ce9450f | 2140 | else |
2de001ee RS |
2141 | vect_model_store_cost (stmt_info, ncopies, memory_access_type, |
2142 | dt, NULL, NULL, NULL); | |
5ce9450f JJ |
2143 | return true; |
2144 | } | |
2de001ee | 2145 | gcc_assert (memory_access_type == STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info)); |
5ce9450f | 2146 | |
67b8dbac | 2147 | /* Transform. */ |
5ce9450f | 2148 | |
2de001ee | 2149 | if (memory_access_type == VMAT_GATHER_SCATTER) |
5ce9450f JJ |
2150 | { |
2151 | tree vec_oprnd0 = NULL_TREE, op; | |
134c85ca | 2152 | tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gs_info.decl)); |
5ce9450f | 2153 | tree rettype, srctype, ptrtype, idxtype, masktype, scaletype; |
acdcd61b | 2154 | tree ptr, vec_mask = NULL_TREE, mask_op = NULL_TREE, var, scale; |
5ce9450f | 2155 | tree perm_mask = NULL_TREE, prev_res = NULL_TREE; |
acdcd61b | 2156 | tree mask_perm_mask = NULL_TREE; |
5ce9450f JJ |
2157 | edge pe = loop_preheader_edge (loop); |
2158 | gimple_seq seq; | |
2159 | basic_block new_bb; | |
2160 | enum { NARROW, NONE, WIDEN } modifier; | |
134c85ca | 2161 | int gather_off_nunits = TYPE_VECTOR_SUBPARTS (gs_info.offset_vectype); |
5ce9450f | 2162 | |
134c85ca | 2163 | rettype = TREE_TYPE (TREE_TYPE (gs_info.decl)); |
acdcd61b JJ |
2164 | srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist); |
2165 | ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist); | |
2166 | idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist); | |
2167 | masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist); | |
2168 | scaletype = TREE_VALUE (arglist); | |
2169 | gcc_checking_assert (types_compatible_p (srctype, rettype) | |
2170 | && types_compatible_p (srctype, masktype)); | |
2171 | ||
5ce9450f JJ |
2172 | if (nunits == gather_off_nunits) |
2173 | modifier = NONE; | |
2174 | else if (nunits == gather_off_nunits / 2) | |
2175 | { | |
2176 | unsigned char *sel = XALLOCAVEC (unsigned char, gather_off_nunits); | |
2177 | modifier = WIDEN; | |
2178 | ||
2179 | for (i = 0; i < gather_off_nunits; ++i) | |
2180 | sel[i] = i | nunits; | |
2181 | ||
134c85ca | 2182 | perm_mask = vect_gen_perm_mask_checked (gs_info.offset_vectype, sel); |
5ce9450f JJ |
2183 | } |
2184 | else if (nunits == gather_off_nunits * 2) | |
2185 | { | |
2186 | unsigned char *sel = XALLOCAVEC (unsigned char, nunits); | |
2187 | modifier = NARROW; | |
2188 | ||
2189 | for (i = 0; i < nunits; ++i) | |
2190 | sel[i] = i < gather_off_nunits | |
2191 | ? i : i + nunits - gather_off_nunits; | |
2192 | ||
557be5a8 | 2193 | perm_mask = vect_gen_perm_mask_checked (vectype, sel); |
5ce9450f | 2194 | ncopies *= 2; |
acdcd61b JJ |
2195 | for (i = 0; i < nunits; ++i) |
2196 | sel[i] = i | gather_off_nunits; | |
557be5a8 | 2197 | mask_perm_mask = vect_gen_perm_mask_checked (masktype, sel); |
5ce9450f JJ |
2198 | } |
2199 | else | |
2200 | gcc_unreachable (); | |
2201 | ||
5ce9450f JJ |
2202 | vec_dest = vect_create_destination_var (gimple_call_lhs (stmt), vectype); |
2203 | ||
134c85ca | 2204 | ptr = fold_convert (ptrtype, gs_info.base); |
5ce9450f JJ |
2205 | if (!is_gimple_min_invariant (ptr)) |
2206 | { | |
2207 | ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE); | |
2208 | new_bb = gsi_insert_seq_on_edge_immediate (pe, seq); | |
2209 | gcc_assert (!new_bb); | |
2210 | } | |
2211 | ||
134c85ca | 2212 | scale = build_int_cst (scaletype, gs_info.scale); |
5ce9450f JJ |
2213 | |
2214 | prev_stmt_info = NULL; | |
2215 | for (j = 0; j < ncopies; ++j) | |
2216 | { | |
2217 | if (modifier == WIDEN && (j & 1)) | |
2218 | op = permute_vec_elements (vec_oprnd0, vec_oprnd0, | |
2219 | perm_mask, stmt, gsi); | |
2220 | else if (j == 0) | |
2221 | op = vec_oprnd0 | |
134c85ca | 2222 | = vect_get_vec_def_for_operand (gs_info.offset, stmt); |
5ce9450f JJ |
2223 | else |
2224 | op = vec_oprnd0 | |
134c85ca | 2225 | = vect_get_vec_def_for_stmt_copy (gs_info.offset_dt, vec_oprnd0); |
5ce9450f JJ |
2226 | |
2227 | if (!useless_type_conversion_p (idxtype, TREE_TYPE (op))) | |
2228 | { | |
2229 | gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op)) | |
2230 | == TYPE_VECTOR_SUBPARTS (idxtype)); | |
0e22bb5a | 2231 | var = vect_get_new_ssa_name (idxtype, vect_simple_var); |
5ce9450f JJ |
2232 | op = build1 (VIEW_CONVERT_EXPR, idxtype, op); |
2233 | new_stmt | |
0d0e4a03 | 2234 | = gimple_build_assign (var, VIEW_CONVERT_EXPR, op); |
5ce9450f JJ |
2235 | vect_finish_stmt_generation (stmt, new_stmt, gsi); |
2236 | op = var; | |
2237 | } | |
2238 | ||
acdcd61b JJ |
2239 | if (mask_perm_mask && (j & 1)) |
2240 | mask_op = permute_vec_elements (mask_op, mask_op, | |
2241 | mask_perm_mask, stmt, gsi); | |
5ce9450f JJ |
2242 | else |
2243 | { | |
acdcd61b | 2244 | if (j == 0) |
81c40241 | 2245 | vec_mask = vect_get_vec_def_for_operand (mask, stmt); |
acdcd61b JJ |
2246 | else |
2247 | { | |
81c40241 | 2248 | vect_is_simple_use (vec_mask, loop_vinfo, &def_stmt, &dt); |
acdcd61b JJ |
2249 | vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask); |
2250 | } | |
5ce9450f | 2251 | |
acdcd61b JJ |
2252 | mask_op = vec_mask; |
2253 | if (!useless_type_conversion_p (masktype, TREE_TYPE (vec_mask))) | |
2254 | { | |
2255 | gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (mask_op)) | |
2256 | == TYPE_VECTOR_SUBPARTS (masktype)); | |
0e22bb5a | 2257 | var = vect_get_new_ssa_name (masktype, vect_simple_var); |
acdcd61b JJ |
2258 | mask_op = build1 (VIEW_CONVERT_EXPR, masktype, mask_op); |
2259 | new_stmt | |
0d0e4a03 | 2260 | = gimple_build_assign (var, VIEW_CONVERT_EXPR, mask_op); |
acdcd61b JJ |
2261 | vect_finish_stmt_generation (stmt, new_stmt, gsi); |
2262 | mask_op = var; | |
2263 | } | |
5ce9450f JJ |
2264 | } |
2265 | ||
2266 | new_stmt | |
134c85ca | 2267 | = gimple_build_call (gs_info.decl, 5, mask_op, ptr, op, mask_op, |
5ce9450f JJ |
2268 | scale); |
2269 | ||
2270 | if (!useless_type_conversion_p (vectype, rettype)) | |
2271 | { | |
2272 | gcc_assert (TYPE_VECTOR_SUBPARTS (vectype) | |
2273 | == TYPE_VECTOR_SUBPARTS (rettype)); | |
0e22bb5a | 2274 | op = vect_get_new_ssa_name (rettype, vect_simple_var); |
5ce9450f JJ |
2275 | gimple_call_set_lhs (new_stmt, op); |
2276 | vect_finish_stmt_generation (stmt, new_stmt, gsi); | |
b731b390 | 2277 | var = make_ssa_name (vec_dest); |
5ce9450f | 2278 | op = build1 (VIEW_CONVERT_EXPR, vectype, op); |
0d0e4a03 | 2279 | new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op); |
5ce9450f JJ |
2280 | } |
2281 | else | |
2282 | { | |
2283 | var = make_ssa_name (vec_dest, new_stmt); | |
2284 | gimple_call_set_lhs (new_stmt, var); | |
2285 | } | |
2286 | ||
2287 | vect_finish_stmt_generation (stmt, new_stmt, gsi); | |
2288 | ||
2289 | if (modifier == NARROW) | |
2290 | { | |
2291 | if ((j & 1) == 0) | |
2292 | { | |
2293 | prev_res = var; | |
2294 | continue; | |
2295 | } | |
2296 | var = permute_vec_elements (prev_res, var, | |
2297 | perm_mask, stmt, gsi); | |
2298 | new_stmt = SSA_NAME_DEF_STMT (var); | |
2299 | } | |
2300 | ||
2301 | if (prev_stmt_info == NULL) | |
2302 | STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt; | |
2303 | else | |
2304 | STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt; | |
2305 | prev_stmt_info = vinfo_for_stmt (new_stmt); | |
2306 | } | |
3efe2e2c JJ |
2307 | |
2308 | /* Ensure that even with -fno-tree-dce the scalar MASK_LOAD is removed | |
2309 | from the IL. */ | |
e6f5c25d IE |
2310 | if (STMT_VINFO_RELATED_STMT (stmt_info)) |
2311 | { | |
2312 | stmt = STMT_VINFO_RELATED_STMT (stmt_info); | |
2313 | stmt_info = vinfo_for_stmt (stmt); | |
2314 | } | |
3efe2e2c JJ |
2315 | tree lhs = gimple_call_lhs (stmt); |
2316 | new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs))); | |
2317 | set_vinfo_for_stmt (new_stmt, stmt_info); | |
2318 | set_vinfo_for_stmt (stmt, NULL); | |
2319 | STMT_VINFO_STMT (stmt_info) = new_stmt; | |
2320 | gsi_replace (gsi, new_stmt, true); | |
5ce9450f JJ |
2321 | return true; |
2322 | } | |
2de001ee | 2323 | else if (vls_type != VLS_LOAD) |
5ce9450f JJ |
2324 | { |
2325 | tree vec_rhs = NULL_TREE, vec_mask = NULL_TREE; | |
2326 | prev_stmt_info = NULL; | |
2d4dc223 | 2327 | LOOP_VINFO_HAS_MASK_STORE (loop_vinfo) = true; |
5ce9450f JJ |
2328 | for (i = 0; i < ncopies; i++) |
2329 | { | |
2330 | unsigned align, misalign; | |
2331 | ||
2332 | if (i == 0) | |
2333 | { | |
2334 | tree rhs = gimple_call_arg (stmt, 3); | |
81c40241 RB |
2335 | vec_rhs = vect_get_vec_def_for_operand (rhs, stmt); |
2336 | vec_mask = vect_get_vec_def_for_operand (mask, stmt); | |
5ce9450f JJ |
2337 | /* We should have caught mismatched types earlier. */
2338 | gcc_assert (useless_type_conversion_p (vectype, | |
2339 | TREE_TYPE (vec_rhs))); | |
2340 | dataref_ptr = vect_create_data_ref_ptr (stmt, vectype, NULL, | |
2341 | NULL_TREE, &dummy, gsi, | |
2342 | &ptr_incr, false, &inv_p); | |
2343 | gcc_assert (!inv_p); | |
2344 | } | |
2345 | else | |
2346 | { | |
81c40241 | 2347 | vect_is_simple_use (vec_rhs, loop_vinfo, &def_stmt, &dt); |
5ce9450f | 2348 | vec_rhs = vect_get_vec_def_for_stmt_copy (dt, vec_rhs); |
81c40241 | 2349 | vect_is_simple_use (vec_mask, loop_vinfo, &def_stmt, &dt); |
5ce9450f JJ |
2350 | vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask); |
2351 | dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt, | |
2352 | TYPE_SIZE_UNIT (vectype)); | |
2353 | } | |
2354 | ||
2355 | align = TYPE_ALIGN_UNIT (vectype); | |
2356 | if (aligned_access_p (dr)) | |
2357 | misalign = 0; | |
2358 | else if (DR_MISALIGNMENT (dr) == -1) | |
2359 | { | |
2360 | align = TYPE_ALIGN_UNIT (elem_type); | |
2361 | misalign = 0; | |
2362 | } | |
2363 | else | |
2364 | misalign = DR_MISALIGNMENT (dr); | |
2365 | set_ptr_info_alignment (get_ptr_info (dataref_ptr), align, | |
2366 | misalign); | |
08554c26 | 2367 | tree ptr = build_int_cst (TREE_TYPE (gimple_call_arg (stmt, 1)), |
146ec50f | 2368 | misalign ? least_bit_hwi (misalign) : align); |
5ce9450f JJ |
2369 | new_stmt |
2370 | = gimple_build_call_internal (IFN_MASK_STORE, 4, dataref_ptr, | |
08554c26 | 2371 | ptr, vec_mask, vec_rhs); |
5ce9450f JJ |
2372 | vect_finish_stmt_generation (stmt, new_stmt, gsi); |
2373 | if (i == 0) | |
2374 | STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt; | |
2375 | else | |
2376 | STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt; | |
2377 | prev_stmt_info = vinfo_for_stmt (new_stmt); | |
2378 | } | |
2379 | } | |
2380 | else | |
2381 | { | |
2382 | tree vec_mask = NULL_TREE; | |
2383 | prev_stmt_info = NULL; | |
2384 | vec_dest = vect_create_destination_var (gimple_call_lhs (stmt), vectype); | |
2385 | for (i = 0; i < ncopies; i++) | |
2386 | { | |
2387 | unsigned align, misalign; | |
2388 | ||
2389 | if (i == 0) | |
2390 | { | |
81c40241 | 2391 | vec_mask = vect_get_vec_def_for_operand (mask, stmt); |
5ce9450f JJ |
2392 | dataref_ptr = vect_create_data_ref_ptr (stmt, vectype, NULL, |
2393 | NULL_TREE, &dummy, gsi, | |
2394 | &ptr_incr, false, &inv_p); | |
2395 | gcc_assert (!inv_p); | |
2396 | } | |
2397 | else | |
2398 | { | |
81c40241 | 2399 | vect_is_simple_use (vec_mask, loop_vinfo, &def_stmt, &dt); |
5ce9450f JJ |
2400 | vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask); |
2401 | dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt, | |
2402 | TYPE_SIZE_UNIT (vectype)); | |
2403 | } | |
2404 | ||
2405 | align = TYPE_ALIGN_UNIT (vectype); | |
2406 | if (aligned_access_p (dr)) | |
2407 | misalign = 0; | |
2408 | else if (DR_MISALIGNMENT (dr) == -1) | |
2409 | { | |
2410 | align = TYPE_ALIGN_UNIT (elem_type); | |
2411 | misalign = 0; | |
2412 | } | |
2413 | else | |
2414 | misalign = DR_MISALIGNMENT (dr); | |
2415 | set_ptr_info_alignment (get_ptr_info (dataref_ptr), align, | |
2416 | misalign); | |
08554c26 | 2417 | tree ptr = build_int_cst (TREE_TYPE (gimple_call_arg (stmt, 1)), |
146ec50f | 2418 | misalign ? least_bit_hwi (misalign) : align); |
5ce9450f JJ |
2419 | new_stmt |
2420 | = gimple_build_call_internal (IFN_MASK_LOAD, 3, dataref_ptr, | |
08554c26 | 2421 | ptr, vec_mask); |
b731b390 | 2422 | gimple_call_set_lhs (new_stmt, make_ssa_name (vec_dest)); |
5ce9450f JJ |
2423 | vect_finish_stmt_generation (stmt, new_stmt, gsi); |
2424 | if (i == 0) | |
2425 | STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt; | |
2426 | else | |
2427 | STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt; | |
2428 | prev_stmt_info = vinfo_for_stmt (new_stmt); | |
2429 | } | |
2430 | } | |
2431 | ||
2de001ee | 2432 | if (vls_type == VLS_LOAD) |
3efe2e2c JJ |
2433 | { |
2434 | /* Ensure that even with -fno-tree-dce the scalar MASK_LOAD is removed | |
2435 | from the IL. */ | |
e6f5c25d IE |
2436 | if (STMT_VINFO_RELATED_STMT (stmt_info)) |
2437 | { | |
2438 | stmt = STMT_VINFO_RELATED_STMT (stmt_info); | |
2439 | stmt_info = vinfo_for_stmt (stmt); | |
2440 | } | |
3efe2e2c JJ |
2441 | tree lhs = gimple_call_lhs (stmt); |
2442 | new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs))); | |
2443 | set_vinfo_for_stmt (new_stmt, stmt_info); | |
2444 | set_vinfo_for_stmt (stmt, NULL); | |
2445 | STMT_VINFO_STMT (stmt_info) = new_stmt; | |
2446 | gsi_replace (gsi, new_stmt, true); | |
2447 | } | |
2448 | ||
5ce9450f JJ |
2449 | return true; |
2450 | } | |
2451 | ||
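/* Shape of the generated code (illustrative sketch, names
   hypothetical): per copy the transform emits internal calls such as

     vect__12 = MASK_LOAD (dataref_ptr, align, vec_mask);
     MASK_STORE (dataref_ptr, align, vec_mask, vec_rhs);

   where ALIGN is the integer alignment constant built above; the
   pointer is bumped by TYPE_SIZE_UNIT (vectype) between copies, and
   the scalar MASK_LOAD is afterwards replaced by "lhs = 0" so it
   disappears even with -fno-tree-dce.  */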
37b14185 RB |
2452 | /* Check and perform vectorization of BUILT_IN_BSWAP{16,32,64}. */ |
2453 | ||
2454 | static bool | |
2455 | vectorizable_bswap (gimple *stmt, gimple_stmt_iterator *gsi, | |
2456 | gimple **vec_stmt, slp_tree slp_node, | |
2457 | tree vectype_in, enum vect_def_type *dt) | |
2458 | { | |
2459 | tree op, vectype; | |
2460 | stmt_vec_info stmt_info = vinfo_for_stmt (stmt); | |
2461 | loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); | |
2462 | unsigned ncopies, nunits; | |
2463 | ||
2464 | op = gimple_call_arg (stmt, 0); | |
2465 | vectype = STMT_VINFO_VECTYPE (stmt_info); | |
2466 | nunits = TYPE_VECTOR_SUBPARTS (vectype); | |
2467 | ||
2468 | /* Multiple types in SLP are handled by creating the appropriate number of | |
2469 | vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in | |
2470 | case of SLP. */ | |
2471 | if (slp_node) | |
2472 | ncopies = 1; | |
2473 | else | |
2474 | ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits; | |
2475 | ||
2476 | gcc_assert (ncopies >= 1); | |
2477 | ||
2478 | tree char_vectype = get_same_sized_vectype (char_type_node, vectype_in); | |
2479 | if (! char_vectype) | |
2480 | return false; | |
2481 | ||
2482 | unsigned char *elts | |
2483 | = XALLOCAVEC (unsigned char, TYPE_VECTOR_SUBPARTS (char_vectype)); | |
2484 | unsigned char *elt = elts; | |
2485 | unsigned word_bytes = TYPE_VECTOR_SUBPARTS (char_vectype) / nunits; | |
2486 | for (unsigned i = 0; i < nunits; ++i) | |
2487 | for (unsigned j = 0; j < word_bytes; ++j) | |
2488 | *elt++ = (i + 1) * word_bytes - j - 1; | |
2489 | ||
2490 | if (! can_vec_perm_p (TYPE_MODE (char_vectype), false, elts)) | |
2491 | return false; | |
2492 | ||
2493 | if (! vec_stmt) | |
2494 | { | |
2495 | STMT_VINFO_TYPE (stmt_info) = call_vec_info_type; | |
2496 | if (dump_enabled_p ()) | |
2497 | dump_printf_loc (MSG_NOTE, vect_location, "=== vectorizable_bswap ===" | |
2498 | "\n"); | |
2499 | if (! PURE_SLP_STMT (stmt_info)) | |
2500 | { | |
2501 | add_stmt_cost (stmt_info->vinfo->target_cost_data, | |
2502 | 1, vector_stmt, stmt_info, 0, vect_prologue); | |
2503 | add_stmt_cost (stmt_info->vinfo->target_cost_data, | |
2504 | ncopies, vec_perm, stmt_info, 0, vect_body); | |
2505 | } | |
2506 | return true; | |
2507 | } | |
2508 | ||
2509 | tree *telts = XALLOCAVEC (tree, TYPE_VECTOR_SUBPARTS (char_vectype)); | |
2510 | for (unsigned i = 0; i < TYPE_VECTOR_SUBPARTS (char_vectype); ++i) | |
2511 | telts[i] = build_int_cst (char_type_node, elts[i]); | |
2512 | tree bswap_vconst = build_vector (char_vectype, telts); | |
2513 | ||
2514 | /* Transform. */ | |
2515 | vec<tree> vec_oprnds = vNULL; | |
2516 | gimple *new_stmt = NULL; | |
2517 | stmt_vec_info prev_stmt_info = NULL; | |
2518 | for (unsigned j = 0; j < ncopies; j++) | |
2519 | { | |
2520 | /* Handle uses. */ | |
2521 | if (j == 0) | |
306b0c92 | 2522 | vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node); |
37b14185 RB |
2523 | else |
2524 | vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds, NULL); | |
2525 | ||
2526 | /* Arguments are ready.  Create the new vector stmt. */
2527 | unsigned i; | |
2528 | tree vop; | |
2529 | FOR_EACH_VEC_ELT (vec_oprnds, i, vop) | |
2530 | { | |
2531 | tree tem = make_ssa_name (char_vectype); | |
2532 | new_stmt = gimple_build_assign (tem, build1 (VIEW_CONVERT_EXPR, | |
2533 | char_vectype, vop)); | |
2534 | vect_finish_stmt_generation (stmt, new_stmt, gsi); | |
2535 | tree tem2 = make_ssa_name (char_vectype); | |
2536 | new_stmt = gimple_build_assign (tem2, VEC_PERM_EXPR, | |
2537 | tem, tem, bswap_vconst); | |
2538 | vect_finish_stmt_generation (stmt, new_stmt, gsi); | |
2539 | tem = make_ssa_name (vectype); | |
2540 | new_stmt = gimple_build_assign (tem, build1 (VIEW_CONVERT_EXPR, | |
2541 | vectype, tem2)); | |
2542 | vect_finish_stmt_generation (stmt, new_stmt, gsi); | |
2543 | if (slp_node) | |
2544 | SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt); | |
2545 | } | |
2546 | ||
2547 | if (slp_node) | |
2548 | continue; | |
2549 | ||
2550 | if (j == 0) | |
2551 | STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt; | |
2552 | else | |
2553 | STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt; | |
2554 | ||
2555 | prev_stmt_info = vinfo_for_stmt (new_stmt); | |
2556 | } | |
2557 | ||
2558 | vec_oprnds.release (); | |
2559 | return true; | |
2560 | } | |
2561 | ||
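/* Worked example (illustrative): for __builtin_bswap32 on V4SI,
   CHAR_VECTYPE is V16QI, WORD_BYTES == 4 and the selector built above
   is {3,2,1,0, 7,6,5,4, 11,10,9,8, 15,14,13,12}; each copy expands to

     tem_1 = VIEW_CONVERT_EXPR<vector(16) char>(vop);
     tem_2 = VEC_PERM_EXPR <tem_1, tem_1, bswap_vconst>;
     res_3 = VIEW_CONVERT_EXPR<vector(4) int>(tem_2);  */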
b1b6836e RS |
2562 | /* Return true if vector types VECTYPE_IN and VECTYPE_OUT have |
2563 | integer elements and if we can narrow VECTYPE_IN to VECTYPE_OUT | |
2564 | in a single step. On success, store the binary pack code in | |
2565 | *CONVERT_CODE. */ | |
2566 | ||
2567 | static bool | |
2568 | simple_integer_narrowing (tree vectype_out, tree vectype_in, | |
2569 | tree_code *convert_code) | |
2570 | { | |
2571 | if (!INTEGRAL_TYPE_P (TREE_TYPE (vectype_out)) | |
2572 | || !INTEGRAL_TYPE_P (TREE_TYPE (vectype_in))) | |
2573 | return false; | |
2574 | ||
2575 | tree_code code; | |
2576 | int multi_step_cvt = 0; | |
2577 | auto_vec <tree, 8> interm_types; | |
2578 | if (!supportable_narrowing_operation (NOP_EXPR, vectype_out, vectype_in, | |
2579 | &code, &multi_step_cvt, | |
2580 | &interm_types) | |
2581 | || multi_step_cvt) | |
2582 | return false; | |
2583 | ||
2584 | *convert_code = code; | |
2585 | return true; | |
2586 | } | |
5ce9450f | 2587 | |
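/* Example (illustrative): for VECTYPE_IN == V4SI and VECTYPE_OUT
   == V8HI, supportable_narrowing_operation can narrow in a single
   step using VEC_PACK_TRUNC_EXPR (two V4SI inputs packed into one
   V8HI), so *CONVERT_CODE is set to VEC_PACK_TRUNC_EXPR and the
   function returns true.  */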
ebfd146a IR |
2588 | /* Function vectorizable_call. |
2589 | ||
538dd0b7 | 2590 | Check if GS performs a function call that can be vectorized. |
b8698a0f | 2591 | If VEC_STMT is also passed, vectorize the STMT: create a vectorized |
ebfd146a IR |
2592 | stmt to replace it, put it in VEC_STMT, and insert it at GSI.
2593 | Return FALSE if not a vectorizable STMT, TRUE otherwise. */ | |
2594 | ||
2595 | static bool | |
355fe088 | 2596 | vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi, gimple **vec_stmt, |
190c2236 | 2597 | slp_tree slp_node) |
ebfd146a | 2598 | { |
538dd0b7 | 2599 | gcall *stmt; |
ebfd146a IR |
2600 | tree vec_dest; |
2601 | tree scalar_dest; | |
2602 | tree op, type; | |
2603 | tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE; | |
538dd0b7 | 2604 | stmt_vec_info stmt_info = vinfo_for_stmt (gs), prev_stmt_info; |
ebfd146a IR |
2605 | tree vectype_out, vectype_in; |
2606 | int nunits_in; | |
2607 | int nunits_out; | |
2608 | loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); | |
190c2236 | 2609 | bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info); |
310213d4 | 2610 | vec_info *vinfo = stmt_info->vinfo; |
81c40241 | 2611 | tree fndecl, new_temp, rhs_type; |
355fe088 | 2612 | gimple *def_stmt; |
0502fb85 UB |
2613 | enum vect_def_type dt[3] |
2614 | = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type}; | |
4fc5ebf1 | 2615 | int ndts = 3; |
355fe088 | 2616 | gimple *new_stmt = NULL; |
ebfd146a | 2617 | int ncopies, j; |
6e1aa848 | 2618 | vec<tree> vargs = vNULL; |
ebfd146a IR |
2619 | enum { NARROW, NONE, WIDEN } modifier; |
2620 | size_t i, nargs; | |
9d5e7640 | 2621 | tree lhs; |
ebfd146a | 2622 | |
190c2236 | 2623 | if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo) |
ebfd146a IR |
2624 | return false; |
2625 | ||
66c16fd9 RB |
2626 | if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def |
2627 | && ! vec_stmt) | |
ebfd146a IR |
2628 | return false; |
2629 | ||
538dd0b7 DM |
2630 | /* Is GS a vectorizable call? */ |
2631 | stmt = dyn_cast <gcall *> (gs); | |
2632 | if (!stmt) | |
ebfd146a IR |
2633 | return false; |
2634 | ||
5ce9450f JJ |
2635 | if (gimple_call_internal_p (stmt) |
2636 | && (gimple_call_internal_fn (stmt) == IFN_MASK_LOAD | |
2637 | || gimple_call_internal_fn (stmt) == IFN_MASK_STORE)) | |
2638 | return vectorizable_mask_load_store (stmt, gsi, vec_stmt, | |
2639 | slp_node); | |
2640 | ||
0136f8f0 AH |
2641 | if (gimple_call_lhs (stmt) == NULL_TREE |
2642 | || TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME) | |
ebfd146a IR |
2643 | return false; |
2644 | ||
0136f8f0 | 2645 | gcc_checking_assert (!stmt_can_throw_internal (stmt)); |
5a2c1986 | 2646 | |
b690cc0f RG |
2647 | vectype_out = STMT_VINFO_VECTYPE (stmt_info); |
2648 | ||
ebfd146a IR |
2649 | /* Process function arguments. */ |
2650 | rhs_type = NULL_TREE; | |
b690cc0f | 2651 | vectype_in = NULL_TREE; |
ebfd146a IR |
2652 | nargs = gimple_call_num_args (stmt); |
2653 | ||
1b1562a5 MM |
2654 | /* Bail out if the function has more than three arguments; we do not have
2655 | interesting builtin functions to vectorize with more than two arguments
2656 | except for fma. A call with no arguments is not vectorizable either. */
2657 | if (nargs == 0 || nargs > 3) | |
ebfd146a IR |
2658 | return false; |
2659 | ||
74bf76ed JJ |
2660 | /* Ignore the argument of IFN_GOMP_SIMD_LANE, it is magic. */ |
2661 | if (gimple_call_internal_p (stmt) | |
2662 | && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE) | |
2663 | { | |
2664 | nargs = 0; | |
2665 | rhs_type = unsigned_type_node; | |
2666 | } | |
2667 | ||
ebfd146a IR |
2668 | for (i = 0; i < nargs; i++) |
2669 | { | |
b690cc0f RG |
2670 | tree opvectype; |
2671 | ||
ebfd146a IR |
2672 | op = gimple_call_arg (stmt, i); |
2673 | ||
2674 | /* We can only handle calls with arguments of the same type. */ | |
2675 | if (rhs_type | |
8533c9d8 | 2676 | && !types_compatible_p (rhs_type, TREE_TYPE (op))) |
ebfd146a | 2677 | { |
73fbfcad | 2678 | if (dump_enabled_p ()) |
78c60e3d | 2679 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 2680 | "argument types differ.\n"); |
ebfd146a IR |
2681 | return false; |
2682 | } | |
b690cc0f RG |
2683 | if (!rhs_type) |
2684 | rhs_type = TREE_TYPE (op); | |
ebfd146a | 2685 | |
81c40241 | 2686 | if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt[i], &opvectype)) |
ebfd146a | 2687 | { |
73fbfcad | 2688 | if (dump_enabled_p ()) |
78c60e3d | 2689 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 2690 | "use not simple.\n"); |
ebfd146a IR |
2691 | return false; |
2692 | } | |
ebfd146a | 2693 | |
b690cc0f RG |
2694 | if (!vectype_in) |
2695 | vectype_in = opvectype; | |
2696 | else if (opvectype | |
2697 | && opvectype != vectype_in) | |
2698 | { | |
73fbfcad | 2699 | if (dump_enabled_p ()) |
78c60e3d | 2700 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 2701 | "argument vector types differ.\n"); |
b690cc0f RG |
2702 | return false; |
2703 | } | |
2704 | } | |
2705 | /* If all arguments are external or constant defs, use a vector type with |
2706 | the same size as the output vector type. */ | |
ebfd146a | 2707 | if (!vectype_in) |
b690cc0f | 2708 | vectype_in = get_same_sized_vectype (rhs_type, vectype_out); |
7d8930a0 IR |
2709 | if (vec_stmt) |
2710 | gcc_assert (vectype_in); | |
2711 | if (!vectype_in) | |
2712 | { | |
73fbfcad | 2713 | if (dump_enabled_p ()) |
7d8930a0 | 2714 | { |
78c60e3d SS |
2715 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
2716 | "no vectype for scalar type "); | |
2717 | dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type); | |
e645e942 | 2718 | dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); |
7d8930a0 IR |
2719 | } |
2720 | ||
2721 | return false; | |
2722 | } | |
ebfd146a IR |
2723 | |
2724 | /* FORNOW */ | |
b690cc0f RG |
2725 | nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in); |
2726 | nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out); | |
ebfd146a IR |
2727 | if (nunits_in == nunits_out / 2) |
2728 | modifier = NARROW; | |
2729 | else if (nunits_out == nunits_in) | |
2730 | modifier = NONE; | |
2731 | else if (nunits_out == nunits_in / 2) | |
2732 | modifier = WIDEN; | |
2733 | else | |
2734 | return false; | |
2735 | ||
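/* Illustrative sketch, not from the GCC sources: the FORNOW unit-count
   test above in isolation, with hypothetical names.  For example, a call
   taking V4DF (4 units) and producing V8SI (8 units) satisfies
   nunits_in == nunits_out / 2 and is classified NARROW: two input
   vectors feed each output vector.  */

enum call_modifier { CM_NARROW, CM_NONE, CM_WIDEN, CM_UNSUPPORTED };

static enum call_modifier
classify_call_modifier (int nunits_in, int nunits_out)
{
  if (nunits_in == nunits_out / 2)
    return CM_NARROW;		/* e.g. 4 doubles -> 8 ints (2:1 pack).  */
  else if (nunits_out == nunits_in)
    return CM_NONE;		/* e.g. 4 floats -> 4 floats.  */
  else if (nunits_out == nunits_in / 2)
    return CM_WIDEN;		/* e.g. 8 shorts -> 4 ints (1:2 unpack).  */
  else
    return CM_UNSUPPORTED;	/* Other ratios are rejected for now.  */
}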
70439f0d RS |
2736 | /* We only handle functions that do not read or clobber memory. */ |
2737 | if (gimple_vuse (stmt)) | |
2738 | { | |
2739 | if (dump_enabled_p ()) | |
2740 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
2741 | "function reads from or writes to memory.\n"); | |
2742 | return false; | |
2743 | } | |
2744 | ||
ebfd146a IR |
2745 | /* For now, we only vectorize functions if a target-specific builtin |
2746 | is available. TODO -- in some cases, it might be profitable to | |
2747 | insert the calls for pieces of the vector, in order to be able | |
2748 | to vectorize other operations in the loop. */ | |
70439f0d RS |
2749 | fndecl = NULL_TREE; |
2750 | internal_fn ifn = IFN_LAST; | |
2751 | combined_fn cfn = gimple_call_combined_fn (stmt); | |
2752 | tree callee = gimple_call_fndecl (stmt); | |
2753 | ||
2754 | /* First try using an internal function. */ | |
b1b6836e RS |
2755 | tree_code convert_code = ERROR_MARK; |
2756 | if (cfn != CFN_LAST | |
2757 | && (modifier == NONE | |
2758 | || (modifier == NARROW | |
2759 | && simple_integer_narrowing (vectype_out, vectype_in, | |
2760 | &convert_code)))) | |
70439f0d RS |
2761 | ifn = vectorizable_internal_function (cfn, callee, vectype_out, |
2762 | vectype_in); | |
2763 | ||
2764 | /* If that fails, try asking for a target-specific built-in function. */ | |
2765 | if (ifn == IFN_LAST) | |
2766 | { | |
2767 | if (cfn != CFN_LAST) | |
2768 | fndecl = targetm.vectorize.builtin_vectorized_function | |
2769 | (cfn, vectype_out, vectype_in); | |
2770 | else | |
2771 | fndecl = targetm.vectorize.builtin_md_vectorized_function | |
2772 | (callee, vectype_out, vectype_in); | |
2773 | } | |
2774 | ||
2775 | if (ifn == IFN_LAST && !fndecl) | |
ebfd146a | 2776 | { |
70439f0d | 2777 | if (cfn == CFN_GOMP_SIMD_LANE |
74bf76ed JJ |
2778 | && !slp_node |
2779 | && loop_vinfo | |
2780 | && LOOP_VINFO_LOOP (loop_vinfo)->simduid | |
2781 | && TREE_CODE (gimple_call_arg (stmt, 0)) == SSA_NAME | |
2782 | && LOOP_VINFO_LOOP (loop_vinfo)->simduid | |
2783 | == SSA_NAME_VAR (gimple_call_arg (stmt, 0))) | |
2784 | { | |
2785 | /* We can handle IFN_GOMP_SIMD_LANE by returning a | |
2786 | { 0, 1, 2, ... vf - 1 } vector. */ | |
2787 | gcc_assert (nargs == 0); | |
2788 | } | |
37b14185 RB |
2789 | else if (modifier == NONE |
2790 | && (gimple_call_builtin_p (stmt, BUILT_IN_BSWAP16) | |
2791 | || gimple_call_builtin_p (stmt, BUILT_IN_BSWAP32) | |
2792 | || gimple_call_builtin_p (stmt, BUILT_IN_BSWAP64))) | |
2793 | return vectorizable_bswap (stmt, gsi, vec_stmt, slp_node, | |
2794 | vectype_in, dt); | |
74bf76ed JJ |
2795 | else |
2796 | { | |
2797 | if (dump_enabled_p ()) | |
2798 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
e645e942 | 2799 | "function is not vectorizable.\n"); |
74bf76ed JJ |
2800 | return false; |
2801 | } | |
ebfd146a IR |
2802 | } |
2803 | ||
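/* Illustrative, standalone C, not GCC internals: the lane vector that the
   IFN_GOMP_SIMD_LANE lowering further below materializes.  Copy number J
   of the vectorized stmt receives the constant vector
   { J*nunits, J*nunits + 1, ..., J*nunits + nunits - 1 }.  */

#include <stdio.h>

static void
print_simd_lane_vector (unsigned j, unsigned nunits)
{
  for (unsigned k = 0; k < nunits; ++k)
    printf ("%u%s", j * nunits + k, k + 1 == nunits ? "\n" : ", ");
}

int
main (void)
{
  print_simd_lane_vector (0, 4);	/* prints 0, 1, 2, 3 */
  print_simd_lane_vector (1, 4);	/* prints 4, 5, 6, 7 */
  return 0;
}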
fce57248 | 2804 | if (slp_node) |
190c2236 | 2805 | ncopies = 1; |
b1b6836e | 2806 | else if (modifier == NARROW && ifn == IFN_LAST) |
ebfd146a IR |
2807 | ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out; |
2808 | else | |
2809 | ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in; | |
2810 | ||
2811 | /* Sanity check: make sure that at least one copy of the vectorized stmt | |
2812 | needs to be generated. */ | |
2813 | gcc_assert (ncopies >= 1); | |
2814 | ||
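/* Worked example, not from the GCC sources, with hypothetical numbers:
   how ncopies is chosen above.  With a vectorization factor of 16 and
   4-unit input vectors, 16 / 4 = 4 vector calls are generated; a NARROW
   call lowered without an internal function divides by the output unit
   count instead (16 / 8 = 2 copies, each consuming two input vectors);
   SLP always uses a single copy.  */

static int
compute_ncopies (int slp, int narrow_without_ifn,
                 int vf, int nunits_in, int nunits_out)
{
  if (slp)
    return 1;
  return narrow_without_ifn ? vf / nunits_out : vf / nunits_in;
}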
2815 | if (!vec_stmt) /* transformation not required. */ | |
2816 | { | |
2817 | STMT_VINFO_TYPE (stmt_info) = call_vec_info_type; | |
73fbfcad | 2818 | if (dump_enabled_p ()) |
e645e942 TJ |
2819 | dump_printf_loc (MSG_NOTE, vect_location, "=== vectorizable_call ===" |
2820 | "\n"); | |
4fc5ebf1 | 2821 | vect_model_simple_cost (stmt_info, ncopies, dt, ndts, NULL, NULL); |
b1b6836e RS |
2822 | if (ifn != IFN_LAST && modifier == NARROW && !slp_node) |
2823 | add_stmt_cost (stmt_info->vinfo->target_cost_data, ncopies / 2, | |
2824 | vec_promote_demote, stmt_info, 0, vect_body); | |
2825 | ||
ebfd146a IR |
2826 | return true; |
2827 | } | |
2828 | ||
67b8dbac | 2829 | /* Transform. */ |
ebfd146a | 2830 | |
73fbfcad | 2831 | if (dump_enabled_p ()) |
e645e942 | 2832 | dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n"); |
ebfd146a IR |
2833 | |
2834 | /* Handle def. */ | |
2835 | scalar_dest = gimple_call_lhs (stmt); | |
2836 | vec_dest = vect_create_destination_var (scalar_dest, vectype_out); | |
2837 | ||
2838 | prev_stmt_info = NULL; | |
b1b6836e | 2839 | if (modifier == NONE || ifn != IFN_LAST) |
ebfd146a | 2840 | { |
b1b6836e | 2841 | tree prev_res = NULL_TREE; |
ebfd146a IR |
2842 | for (j = 0; j < ncopies; ++j) |
2843 | { | |
2844 | /* Build argument list for the vectorized call. */ | |
2845 | if (j == 0) | |
9771b263 | 2846 | vargs.create (nargs); |
ebfd146a | 2847 | else |
9771b263 | 2848 | vargs.truncate (0); |
ebfd146a | 2849 | |
190c2236 JJ |
2850 | if (slp_node) |
2851 | { | |
ef062b13 | 2852 | auto_vec<vec<tree> > vec_defs (nargs); |
9771b263 | 2853 | vec<tree> vec_oprnds0; |
190c2236 JJ |
2854 | |
2855 | for (i = 0; i < nargs; i++) | |
9771b263 | 2856 | vargs.quick_push (gimple_call_arg (stmt, i)); |
306b0c92 | 2857 | vect_get_slp_defs (vargs, slp_node, &vec_defs); |
37b5ec8f | 2858 | vec_oprnds0 = vec_defs[0]; |
190c2236 JJ |
2859 | |
2860 | /* Arguments are ready. Create the new vector stmt. */ | |
9771b263 | 2861 | FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_oprnd0) |
190c2236 JJ |
2862 | { |
2863 | size_t k; | |
2864 | for (k = 0; k < nargs; k++) | |
2865 | { | |
37b5ec8f | 2866 | vec<tree> vec_oprndsk = vec_defs[k]; |
9771b263 | 2867 | vargs[k] = vec_oprndsk[i]; |
190c2236 | 2868 | } |
b1b6836e RS |
2869 | if (modifier == NARROW) |
2870 | { | |
2871 | tree half_res = make_ssa_name (vectype_in); | |
2872 | new_stmt = gimple_build_call_internal_vec (ifn, vargs); | |
2873 | gimple_call_set_lhs (new_stmt, half_res); | |
2874 | vect_finish_stmt_generation (stmt, new_stmt, gsi); | |
2875 | if ((i & 1) == 0) | |
2876 | { | |
2877 | prev_res = half_res; | |
2878 | continue; | |
2879 | } | |
2880 | new_temp = make_ssa_name (vec_dest); | |
2881 | new_stmt = gimple_build_assign (new_temp, convert_code, | |
2882 | prev_res, half_res); | |
2883 | } | |
70439f0d | 2884 | else |
b1b6836e RS |
2885 | { |
2886 | if (ifn != IFN_LAST) | |
2887 | new_stmt = gimple_build_call_internal_vec (ifn, vargs); | |
2888 | else | |
2889 | new_stmt = gimple_build_call_vec (fndecl, vargs); | |
2890 | new_temp = make_ssa_name (vec_dest, new_stmt); | |
2891 | gimple_call_set_lhs (new_stmt, new_temp); | |
2892 | } | |
190c2236 | 2893 | vect_finish_stmt_generation (stmt, new_stmt, gsi); |
9771b263 | 2894 | SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt); |
190c2236 JJ |
2895 | } |
2896 | ||
2897 | for (i = 0; i < nargs; i++) | |
2898 | { | |
37b5ec8f | 2899 | vec<tree> vec_oprndsi = vec_defs[i]; |
9771b263 | 2900 | vec_oprndsi.release (); |
190c2236 | 2901 | } |
190c2236 JJ |
2902 | continue; |
2903 | } | |
2904 | ||
ebfd146a IR |
2905 | for (i = 0; i < nargs; i++) |
2906 | { | |
2907 | op = gimple_call_arg (stmt, i); | |
2908 | if (j == 0) | |
2909 | vec_oprnd0 | |
81c40241 | 2910 | = vect_get_vec_def_for_operand (op, stmt); |
ebfd146a | 2911 | else |
63827fb8 IR |
2912 | { |
2913 | vec_oprnd0 = gimple_call_arg (new_stmt, i); | |
2914 | vec_oprnd0 | |
2915 | = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0); | |
2916 | } | |
ebfd146a | 2917 | |
9771b263 | 2918 | vargs.quick_push (vec_oprnd0); |
ebfd146a IR |
2919 | } |
2920 | ||
74bf76ed JJ |
2921 | if (gimple_call_internal_p (stmt) |
2922 | && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE) | |
2923 | { | |
2924 | tree *v = XALLOCAVEC (tree, nunits_out); | |
2925 | int k; | |
2926 | for (k = 0; k < nunits_out; ++k) | |
2927 | v[k] = build_int_cst (unsigned_type_node, j * nunits_out + k); | |
2928 | tree cst = build_vector (vectype_out, v); | |
2929 | tree new_var | |
0e22bb5a | 2930 | = vect_get_new_ssa_name (vectype_out, vect_simple_var, "cst_"); |
355fe088 | 2931 | gimple *init_stmt = gimple_build_assign (new_var, cst); |
74bf76ed | 2932 | vect_init_vector_1 (stmt, init_stmt, NULL); |
b731b390 | 2933 | new_temp = make_ssa_name (vec_dest); |
0e22bb5a | 2934 | new_stmt = gimple_build_assign (new_temp, new_var); |
74bf76ed | 2935 | } |
b1b6836e RS |
2936 | else if (modifier == NARROW) |
2937 | { | |
2938 | tree half_res = make_ssa_name (vectype_in); | |
2939 | new_stmt = gimple_build_call_internal_vec (ifn, vargs); | |
2940 | gimple_call_set_lhs (new_stmt, half_res); | |
2941 | vect_finish_stmt_generation (stmt, new_stmt, gsi); | |
2942 | if ((j & 1) == 0) | |
2943 | { | |
2944 | prev_res = half_res; | |
2945 | continue; | |
2946 | } | |
2947 | new_temp = make_ssa_name (vec_dest); | |
2948 | new_stmt = gimple_build_assign (new_temp, convert_code, | |
2949 | prev_res, half_res); | |
2950 | } | |
74bf76ed JJ |
2951 | else |
2952 | { | |
70439f0d RS |
2953 | if (ifn != IFN_LAST) |
2954 | new_stmt = gimple_build_call_internal_vec (ifn, vargs); | |
2955 | else | |
2956 | new_stmt = gimple_build_call_vec (fndecl, vargs); | |
74bf76ed JJ |
2957 | new_temp = make_ssa_name (vec_dest, new_stmt); |
2958 | gimple_call_set_lhs (new_stmt, new_temp); | |
2959 | } | |
ebfd146a IR |
2960 | vect_finish_stmt_generation (stmt, new_stmt, gsi); |
2961 | ||
b1b6836e | 2962 | if (j == (modifier == NARROW ? 1 : 0)) |
ebfd146a IR |
2963 | STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt; |
2964 | else | |
2965 | STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt; | |
2966 | ||
2967 | prev_stmt_info = vinfo_for_stmt (new_stmt); | |
2968 | } | |
b1b6836e RS |
2969 | } |
2970 | else if (modifier == NARROW) | |
2971 | { | |
ebfd146a IR |
2972 | for (j = 0; j < ncopies; ++j) |
2973 | { | |
2974 | /* Build argument list for the vectorized call. */ | |
2975 | if (j == 0) | |
9771b263 | 2976 | vargs.create (nargs * 2); |
ebfd146a | 2977 | else |
9771b263 | 2978 | vargs.truncate (0); |
ebfd146a | 2979 | |
190c2236 JJ |
2980 | if (slp_node) |
2981 | { | |
ef062b13 | 2982 | auto_vec<vec<tree> > vec_defs (nargs); |
9771b263 | 2983 | vec<tree> vec_oprnds0; |
190c2236 JJ |
2984 | |
2985 | for (i = 0; i < nargs; i++) | |
9771b263 | 2986 | vargs.quick_push (gimple_call_arg (stmt, i)); |
306b0c92 | 2987 | vect_get_slp_defs (vargs, slp_node, &vec_defs); |
37b5ec8f | 2988 | vec_oprnds0 = vec_defs[0]; |
190c2236 JJ |
2989 | |
2990 | /* Arguments are ready. Create the new vector stmt. */ | |
9771b263 | 2991 | for (i = 0; vec_oprnds0.iterate (i, &vec_oprnd0); i += 2) |
190c2236 JJ |
2992 | { |
2993 | size_t k; | |
9771b263 | 2994 | vargs.truncate (0); |
190c2236 JJ |
2995 | for (k = 0; k < nargs; k++) |
2996 | { | |
37b5ec8f | 2997 | vec<tree> vec_oprndsk = vec_defs[k]; |
9771b263 DN |
2998 | vargs.quick_push (vec_oprndsk[i]); |
2999 | vargs.quick_push (vec_oprndsk[i + 1]); | |
190c2236 | 3000 | } |
70439f0d RS |
3001 | if (ifn != IFN_LAST) |
3002 | new_stmt = gimple_build_call_internal_vec (ifn, vargs); | |
3003 | else | |
3004 | new_stmt = gimple_build_call_vec (fndecl, vargs); | |
190c2236 JJ |
3005 | new_temp = make_ssa_name (vec_dest, new_stmt); |
3006 | gimple_call_set_lhs (new_stmt, new_temp); | |
3007 | vect_finish_stmt_generation (stmt, new_stmt, gsi); | |
9771b263 | 3008 | SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt); |
190c2236 JJ |
3009 | } |
3010 | ||
3011 | for (i = 0; i < nargs; i++) | |
3012 | { | |
37b5ec8f | 3013 | vec<tree> vec_oprndsi = vec_defs[i]; |
9771b263 | 3014 | vec_oprndsi.release (); |
190c2236 | 3015 | } |
190c2236 JJ |
3016 | continue; |
3017 | } | |
3018 | ||
ebfd146a IR |
3019 | for (i = 0; i < nargs; i++) |
3020 | { | |
3021 | op = gimple_call_arg (stmt, i); | |
3022 | if (j == 0) | |
3023 | { | |
3024 | vec_oprnd0 | |
81c40241 | 3025 | = vect_get_vec_def_for_operand (op, stmt); |
ebfd146a | 3026 | vec_oprnd1 |
63827fb8 | 3027 | = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0); |
ebfd146a IR |
3028 | } |
3029 | else | |
3030 | { | |
336ecb65 | 3031 | vec_oprnd1 = gimple_call_arg (new_stmt, 2*i + 1); |
ebfd146a | 3032 | vec_oprnd0 |
63827fb8 | 3033 | = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd1); |
ebfd146a | 3034 | vec_oprnd1 |
63827fb8 | 3035 | = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0); |
ebfd146a IR |
3036 | } |
3037 | ||
9771b263 DN |
3038 | vargs.quick_push (vec_oprnd0); |
3039 | vargs.quick_push (vec_oprnd1); | |
ebfd146a IR |
3040 | } |
3041 | ||
b1b6836e | 3042 | new_stmt = gimple_build_call_vec (fndecl, vargs); |
ebfd146a IR |
3043 | new_temp = make_ssa_name (vec_dest, new_stmt); |
3044 | gimple_call_set_lhs (new_stmt, new_temp); | |
ebfd146a IR |
3045 | vect_finish_stmt_generation (stmt, new_stmt, gsi); |
3046 | ||
3047 | if (j == 0) | |
3048 | STMT_VINFO_VEC_STMT (stmt_info) = new_stmt; | |
3049 | else | |
3050 | STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt; | |
3051 | ||
3052 | prev_stmt_info = vinfo_for_stmt (new_stmt); | |
3053 | } | |
3054 | ||
3055 | *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info); | |
ebfd146a | 3056 | } |
b1b6836e RS |
3057 | else |
3058 | /* No current target implements this case. */ | |
3059 | return false; | |
ebfd146a | 3060 | |
9771b263 | 3061 | vargs.release (); |
ebfd146a | 3062 | |
ebfd146a IR |
3063 | /* The call in STMT might prevent it from being removed in DCE. |
3064 | We cannot, however, remove it here, because of the way the SSA name | |
3065 | it defines is mapped to the new definition. So just replace the | |
3066 | rhs of the statement with something harmless. */ | |
3067 | ||
dd34c087 JJ |
3068 | if (slp_node) |
3069 | return true; | |
3070 | ||
ebfd146a | 3071 | type = TREE_TYPE (scalar_dest); |
9d5e7640 IR |
3072 | if (is_pattern_stmt_p (stmt_info)) |
3073 | lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info)); | |
3074 | else | |
3075 | lhs = gimple_call_lhs (stmt); | |
3cc2fa2a | 3076 | |
9d5e7640 | 3077 | new_stmt = gimple_build_assign (lhs, build_zero_cst (type)); |
ebfd146a | 3078 | set_vinfo_for_stmt (new_stmt, stmt_info); |
dd34c087 | 3079 | set_vinfo_for_stmt (stmt, NULL); |
ebfd146a IR |
3080 | STMT_VINFO_STMT (stmt_info) = new_stmt; |
3081 | gsi_replace (gsi, new_stmt, false); | |
ebfd146a IR |
3082 | |
3083 | return true; | |
3084 | } | |
3085 | ||
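/* Illustrative, standalone C, not GCC internals: the overall effect of
   vectorizable_call, written out by hand for an assumed vectorization
   factor of 4.  Each group of four scalar calls becomes one conceptual
   vector call (an internal function such as .SQRT when available,
   otherwise a target builtin), followed by a scalar epilogue.  */

#include <math.h>

void
scalar_loop (float *y, const float *x, int n)
{
  for (int i = 0; i < n; i++)
    y[i] = sqrtf (x[i]);
}

void
vectorized_loop (float *y, const float *x, int n)
{
  int i = 0;
  for (; i + 4 <= n; i += 4)	/* one "vector call" per 4 lanes */
    for (int l = 0; l < 4; l++)	/* stands in for vect_y = .SQRT (vect_x) */
      y[i + l] = sqrtf (x[i + l]);
  for (; i < n; i++)		/* scalar epilogue for the remainder */
    y[i] = sqrtf (x[i]);
}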
3086 | ||
0136f8f0 AH |
3087 | struct simd_call_arg_info |
3088 | { | |
3089 | tree vectype; | |
3090 | tree op; | |
0136f8f0 | 3091 | HOST_WIDE_INT linear_step; |
34e82342 | 3092 | enum vect_def_type dt; |
0136f8f0 | 3093 | unsigned int align; |
17b658af | 3094 | bool simd_lane_linear; |
0136f8f0 AH |
3095 | }; |
3096 | ||
17b658af JJ |
3097 | /* Helper function of vectorizable_simd_clone_call. If OP, an SSA_NAME, |
3098 | is linear within simd lane (but not within whole loop), note it in | |
3099 | *ARGINFO. */ | |
3100 | ||
3101 | static void | |
3102 | vect_simd_lane_linear (tree op, struct loop *loop, | |
3103 | struct simd_call_arg_info *arginfo) | |
3104 | { | |
355fe088 | 3105 | gimple *def_stmt = SSA_NAME_DEF_STMT (op); |
17b658af JJ |
3106 | |
3107 | if (!is_gimple_assign (def_stmt) | |
3108 | || gimple_assign_rhs_code (def_stmt) != POINTER_PLUS_EXPR | |
3109 | || !is_gimple_min_invariant (gimple_assign_rhs1 (def_stmt))) | |
3110 | return; | |
3111 | ||
3112 | tree base = gimple_assign_rhs1 (def_stmt); | |
3113 | HOST_WIDE_INT linear_step = 0; | |
3114 | tree v = gimple_assign_rhs2 (def_stmt); | |
3115 | while (TREE_CODE (v) == SSA_NAME) | |
3116 | { | |
3117 | tree t; | |
3118 | def_stmt = SSA_NAME_DEF_STMT (v); | |
3119 | if (is_gimple_assign (def_stmt)) | |
3120 | switch (gimple_assign_rhs_code (def_stmt)) | |
3121 | { | |
3122 | case PLUS_EXPR: | |
3123 | t = gimple_assign_rhs2 (def_stmt); | |
3124 | if (linear_step || TREE_CODE (t) != INTEGER_CST) | |
3125 | return; | |
3126 | base = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (base), base, t); | |
3127 | v = gimple_assign_rhs1 (def_stmt); | |
3128 | continue; | |
3129 | case MULT_EXPR: | |
3130 | t = gimple_assign_rhs2 (def_stmt); | |
3131 | if (linear_step || !tree_fits_shwi_p (t) || integer_zerop (t)) | |
3132 | return; | |
3133 | linear_step = tree_to_shwi (t); | |
3134 | v = gimple_assign_rhs1 (def_stmt); | |
3135 | continue; | |
3136 | CASE_CONVERT: | |
3137 | t = gimple_assign_rhs1 (def_stmt); | |
3138 | if (TREE_CODE (TREE_TYPE (t)) != INTEGER_TYPE | |
3139 | || (TYPE_PRECISION (TREE_TYPE (v)) | |
3140 | < TYPE_PRECISION (TREE_TYPE (t)))) | |
3141 | return; | |
3142 | if (!linear_step) | |
3143 | linear_step = 1; | |
3144 | v = t; | |
3145 | continue; | |
3146 | default: | |
3147 | return; | |
3148 | } | |
8e4284d0 | 3149 | else if (gimple_call_internal_p (def_stmt, IFN_GOMP_SIMD_LANE) |
17b658af JJ |
3150 | && loop->simduid |
3151 | && TREE_CODE (gimple_call_arg (def_stmt, 0)) == SSA_NAME | |
3152 | && (SSA_NAME_VAR (gimple_call_arg (def_stmt, 0)) | |
3153 | == loop->simduid)) | |
3154 | { | |
3155 | if (!linear_step) | |
3156 | linear_step = 1; | |
3157 | arginfo->linear_step = linear_step; | |
3158 | arginfo->op = base; | |
3159 | arginfo->simd_lane_linear = true; | |
3160 | return; | |
3161 | } | |
3162 | } | |
3163 | } | |
3164 | ||
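/* Illustrative sketch, not GCC internals: the statement shape
   vect_simd_lane_linear accepts.  Walking backwards from OP it peels
   POINTER_PLUS_EXPR, PLUS_EXPR by a constant, MULT_EXPR by a constant
   step and widening conversions, and succeeds when the remaining index
   is defined by .GOMP_SIMD_LANE (simduid) of the loop, i.e. when OP is
   equivalent to the function below (names hypothetical).  */

char *
lane_linear_address (char *base, unsigned lane, long step)
{
  long off = (long) lane * step;	/* MULT_EXPR by the constant step */
  return base + off;			/* POINTER_PLUS_EXPR onto the base */
}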
0136f8f0 AH |
3165 | /* Function vectorizable_simd_clone_call. |
3166 | ||
3167 | Check if STMT performs a function call that can be vectorized | |
3168 | by calling a simd clone of the function. | |
3169 | If VEC_STMT is also passed, vectorize the STMT: create a vectorized | |
3170 | stmt to replace it, put it in VEC_STMT, and insert it at BSI. | |
3171 | Return FALSE if not a vectorizable STMT, TRUE otherwise. */ | |
3172 | ||
3173 | static bool | |
355fe088 TS |
3174 | vectorizable_simd_clone_call (gimple *stmt, gimple_stmt_iterator *gsi, |
3175 | gimple **vec_stmt, slp_tree slp_node) | |
0136f8f0 AH |
3176 | { |
3177 | tree vec_dest; | |
3178 | tree scalar_dest; | |
3179 | tree op, type; | |
3180 | tree vec_oprnd0 = NULL_TREE; | |
3181 | stmt_vec_info stmt_info = vinfo_for_stmt (stmt), prev_stmt_info; | |
3182 | tree vectype; | |
3183 | unsigned int nunits; | |
3184 | loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); | |
3185 | bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info); | |
310213d4 | 3186 | vec_info *vinfo = stmt_info->vinfo; |
0136f8f0 | 3187 | struct loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL; |
81c40241 | 3188 | tree fndecl, new_temp; |
355fe088 TS |
3189 | gimple *def_stmt; |
3190 | gimple *new_stmt = NULL; | |
0136f8f0 | 3191 | int ncopies, j; |
00426f9a | 3192 | auto_vec<simd_call_arg_info> arginfo; |
0136f8f0 AH |
3193 | vec<tree> vargs = vNULL; |
3194 | size_t i, nargs; | |
3195 | tree lhs, rtype, ratype; | |
3196 | vec<constructor_elt, va_gc> *ret_ctor_elts; | |
3197 | ||
3198 | /* Is STMT a vectorizable call? */ | |
3199 | if (!is_gimple_call (stmt)) | |
3200 | return false; | |
3201 | ||
3202 | fndecl = gimple_call_fndecl (stmt); | |
3203 | if (fndecl == NULL_TREE) | |
3204 | return false; | |
3205 | ||
d52f5295 | 3206 | struct cgraph_node *node = cgraph_node::get (fndecl); |
0136f8f0 AH |
3207 | if (node == NULL || node->simd_clones == NULL) |
3208 | return false; | |
3209 | ||
3210 | if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo) | |
3211 | return false; | |
3212 | ||
66c16fd9 RB |
3213 | if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def |
3214 | && ! vec_stmt) | |
0136f8f0 AH |
3215 | return false; |
3216 | ||
3217 | if (gimple_call_lhs (stmt) | |
3218 | && TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME) | |
3219 | return false; | |
3220 | ||
3221 | gcc_checking_assert (!stmt_can_throw_internal (stmt)); | |
3222 | ||
3223 | vectype = STMT_VINFO_VECTYPE (stmt_info); | |
3224 | ||
3225 | if (loop_vinfo && nested_in_vect_loop_p (loop, stmt)) | |
3226 | return false; | |
3227 | ||
3228 | /* FORNOW */ | |
fce57248 | 3229 | if (slp_node) |
0136f8f0 AH |
3230 | return false; |
3231 | ||
3232 | /* Process function arguments. */ | |
3233 | nargs = gimple_call_num_args (stmt); | |
3234 | ||
3235 | /* Bail out if the function has zero arguments. */ | |
3236 | if (nargs == 0) | |
3237 | return false; | |
3238 | ||
00426f9a | 3239 | arginfo.reserve (nargs, true); |
0136f8f0 AH |
3240 | |
3241 | for (i = 0; i < nargs; i++) | |
3242 | { | |
3243 | simd_call_arg_info thisarginfo; | |
3244 | affine_iv iv; | |
3245 | ||
3246 | thisarginfo.linear_step = 0; | |
3247 | thisarginfo.align = 0; | |
3248 | thisarginfo.op = NULL_TREE; | |
17b658af | 3249 | thisarginfo.simd_lane_linear = false; |
0136f8f0 AH |
3250 | |
3251 | op = gimple_call_arg (stmt, i); | |
81c40241 RB |
3252 | if (!vect_is_simple_use (op, vinfo, &def_stmt, &thisarginfo.dt, |
3253 | &thisarginfo.vectype) | |
0136f8f0 AH |
3254 | || thisarginfo.dt == vect_uninitialized_def) |
3255 | { | |
3256 | if (dump_enabled_p ()) | |
3257 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
3258 | "use not simple.\n"); | |
0136f8f0 AH |
3259 | return false; |
3260 | } | |
3261 | ||
3262 | if (thisarginfo.dt == vect_constant_def | |
3263 | || thisarginfo.dt == vect_external_def) | |
3264 | gcc_assert (thisarginfo.vectype == NULL_TREE); | |
3265 | else | |
3266 | gcc_assert (thisarginfo.vectype != NULL_TREE); | |
3267 | ||
6c9e85fb JJ |
3268 | /* For linear arguments, the analysis phase should have saved |
3269 | the base and step in STMT_VINFO_SIMD_CLONE_INFO. */ | |
17b658af JJ |
3270 | if (i * 3 + 4 <= STMT_VINFO_SIMD_CLONE_INFO (stmt_info).length () |
3271 | && STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2]) | |
6c9e85fb JJ |
3272 | { |
3273 | gcc_assert (vec_stmt); | |
3274 | thisarginfo.linear_step | |
17b658af | 3275 | = tree_to_shwi (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2]); |
6c9e85fb | 3276 | thisarginfo.op |
17b658af JJ |
3277 | = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 1]; |
3278 | thisarginfo.simd_lane_linear | |
3279 | = (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 3] | |
3280 | == boolean_true_node); | |
6c9e85fb JJ |
3281 | /* If the loop has been peeled for alignment, we need to adjust it. */ |
3282 | tree n1 = LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo); | |
3283 | tree n2 = LOOP_VINFO_NITERS (loop_vinfo); | |
17b658af | 3284 | if (n1 != n2 && !thisarginfo.simd_lane_linear) |
6c9e85fb JJ |
3285 | { |
3286 | tree bias = fold_build2 (MINUS_EXPR, TREE_TYPE (n1), n1, n2); | |
17b658af | 3287 | tree step = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2]; |
6c9e85fb JJ |
3288 | tree opt = TREE_TYPE (thisarginfo.op); |
3289 | bias = fold_convert (TREE_TYPE (step), bias); | |
3290 | bias = fold_build2 (MULT_EXPR, TREE_TYPE (step), bias, step); | |
3291 | thisarginfo.op | |
3292 | = fold_build2 (POINTER_TYPE_P (opt) | |
3293 | ? POINTER_PLUS_EXPR : PLUS_EXPR, opt, | |
3294 | thisarginfo.op, bias); | |
3295 | } | |
3296 | } | |
3297 | else if (!vec_stmt | |
3298 | && thisarginfo.dt != vect_constant_def | |
3299 | && thisarginfo.dt != vect_external_def | |
3300 | && loop_vinfo | |
3301 | && TREE_CODE (op) == SSA_NAME | |
3302 | && simple_iv (loop, loop_containing_stmt (stmt), op, | |
3303 | &iv, false) | |
3304 | && tree_fits_shwi_p (iv.step)) | |
0136f8f0 AH |
3305 | { |
3306 | thisarginfo.linear_step = tree_to_shwi (iv.step); | |
3307 | thisarginfo.op = iv.base; | |
3308 | } | |
3309 | else if ((thisarginfo.dt == vect_constant_def | |
3310 | || thisarginfo.dt == vect_external_def) | |
3311 | && POINTER_TYPE_P (TREE_TYPE (op))) | |
3312 | thisarginfo.align = get_pointer_alignment (op) / BITS_PER_UNIT; | |
17b658af JJ |
3313 | /* Addresses of array elements indexed by GOMP_SIMD_LANE are |
3314 | linear too. */ | |
3315 | if (POINTER_TYPE_P (TREE_TYPE (op)) | |
3316 | && !thisarginfo.linear_step | |
3317 | && !vec_stmt | |
3318 | && thisarginfo.dt != vect_constant_def | |
3319 | && thisarginfo.dt != vect_external_def | |
3320 | && loop_vinfo | |
3321 | && !slp_node | |
3322 | && TREE_CODE (op) == SSA_NAME) | |
3323 | vect_simd_lane_linear (op, loop, &thisarginfo); | |
0136f8f0 AH |
3324 | |
3325 | arginfo.quick_push (thisarginfo); | |
3326 | } | |
3327 | ||
3328 | unsigned int badness = 0; | |
3329 | struct cgraph_node *bestn = NULL; | |
6c9e85fb JJ |
3330 | if (STMT_VINFO_SIMD_CLONE_INFO (stmt_info).exists ()) |
3331 | bestn = cgraph_node::get (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[0]); | |
0136f8f0 AH |
3332 | else |
3333 | for (struct cgraph_node *n = node->simd_clones; n != NULL; | |
3334 | n = n->simdclone->next_clone) | |
3335 | { | |
3336 | unsigned int this_badness = 0; | |
3337 | if (n->simdclone->simdlen | |
3338 | > (unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo) | |
3339 | || n->simdclone->nargs != nargs) | |
3340 | continue; | |
3341 | if (n->simdclone->simdlen | |
3342 | < (unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo)) | |
3343 | this_badness += (exact_log2 (LOOP_VINFO_VECT_FACTOR (loop_vinfo)) | |
3344 | - exact_log2 (n->simdclone->simdlen)) * 1024; | |
3345 | if (n->simdclone->inbranch) | |
3346 | this_badness += 2048; | |
3347 | int target_badness = targetm.simd_clone.usable (n); | |
3348 | if (target_badness < 0) | |
3349 | continue; | |
3350 | this_badness += target_badness * 512; | |
3351 | /* FORNOW: Have to add code to add the mask argument. */ | |
3352 | if (n->simdclone->inbranch) | |
3353 | continue; | |
3354 | for (i = 0; i < nargs; i++) | |
3355 | { | |
3356 | switch (n->simdclone->args[i].arg_type) | |
3357 | { | |
3358 | case SIMD_CLONE_ARG_TYPE_VECTOR: | |
3359 | if (!useless_type_conversion_p | |
3360 | (n->simdclone->args[i].orig_type, | |
3361 | TREE_TYPE (gimple_call_arg (stmt, i)))) | |
3362 | i = -1; | |
3363 | else if (arginfo[i].dt == vect_constant_def | |
3364 | || arginfo[i].dt == vect_external_def | |
3365 | || arginfo[i].linear_step) | |
3366 | this_badness += 64; | |
3367 | break; | |
3368 | case SIMD_CLONE_ARG_TYPE_UNIFORM: | |
3369 | if (arginfo[i].dt != vect_constant_def | |
3370 | && arginfo[i].dt != vect_external_def) | |
3371 | i = -1; | |
3372 | break; | |
3373 | case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP: | |
d9a6bd32 | 3374 | case SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP: |
0136f8f0 AH |
3375 | if (arginfo[i].dt == vect_constant_def |
3376 | || arginfo[i].dt == vect_external_def | |
3377 | || (arginfo[i].linear_step | |
3378 | != n->simdclone->args[i].linear_step)) | |
3379 | i = -1; | |
3380 | break; | |
3381 | case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP: | |
d9a6bd32 JJ |
3382 | case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP: |
3383 | case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP: | |
e01d41e5 JJ |
3384 | case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP: |
3385 | case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP: | |
3386 | case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP: | |
0136f8f0 AH |
3387 | /* FORNOW */ |
3388 | i = -1; | |
3389 | break; | |
3390 | case SIMD_CLONE_ARG_TYPE_MASK: | |
3391 | gcc_unreachable (); | |
3392 | } | |
3393 | if (i == (size_t) -1) | |
3394 | break; | |
3395 | if (n->simdclone->args[i].alignment > arginfo[i].align) | |
3396 | { | |
3397 | i = -1; | |
3398 | break; | |
3399 | } | |
3400 | if (arginfo[i].align) | |
3401 | this_badness += (exact_log2 (arginfo[i].align) | |
3402 | - exact_log2 (n->simdclone->args[i].alignment)); | |
3403 | } | |
3404 | if (i == (size_t) -1) | |
3405 | continue; | |
3406 | if (bestn == NULL || this_badness < badness) | |
3407 | { | |
3408 | bestn = n; | |
3409 | badness = this_badness; | |
3410 | } | |
3411 | } | |
3412 | ||
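/* Illustrative sketch, not GCC internals: the scoring rule the loop above
   applies when picking among a function's simd clones; lowest badness
   wins.  Names and the UINT_MAX sentinel are hypothetical: the real code
   skips unusable clones with 'continue', and FORNOW also skips inbranch
   clones entirely.  */

#include <limits.h>

static int
ilog2 (unsigned x)		/* stand-in for exact_log2 on powers of two */
{
  int l = -1;
  while (x)
    {
      x >>= 1;
      l++;
    }
  return l;
}

static unsigned
clone_badness (unsigned vf, unsigned simdlen, int inbranch, int target_badness)
{
  if (simdlen > vf || target_badness < 0)
    return UINT_MAX;				/* clone not usable */
  unsigned b = 0;
  if (simdlen < vf)
    b += (unsigned) (ilog2 (vf) - ilog2 (simdlen)) * 1024;
  if (inbranch)
    b += 2048;
  b += (unsigned) target_badness * 512;
  return b;	/* per-argument alignment/linearity terms omitted */
}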
3413 | if (bestn == NULL) | |
00426f9a | 3414 | return false; |
0136f8f0 AH |
3415 | |
3416 | for (i = 0; i < nargs; i++) | |
3417 | if ((arginfo[i].dt == vect_constant_def | |
3418 | || arginfo[i].dt == vect_external_def) | |
3419 | && bestn->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_VECTOR) | |
3420 | { | |
3421 | arginfo[i].vectype | |
3422 | = get_vectype_for_scalar_type (TREE_TYPE (gimple_call_arg (stmt, | |
3423 | i))); | |
3424 | if (arginfo[i].vectype == NULL | |
3425 | || (TYPE_VECTOR_SUBPARTS (arginfo[i].vectype) | |
3426 | > bestn->simdclone->simdlen)) | |
00426f9a | 3427 | return false; |
0136f8f0 AH |
3428 | } |
3429 | ||
3430 | fndecl = bestn->decl; | |
3431 | nunits = bestn->simdclone->simdlen; | |
3432 | ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits; | |
3433 | ||
3434 | /* If the function isn't const, only allow it in simd loops where the |
3435 | user has asserted that at least nunits consecutive iterations can be | |
3436 | performed using SIMD instructions. */ | |
3437 | if ((loop == NULL || (unsigned) loop->safelen < nunits) | |
3438 | && gimple_vuse (stmt)) | |
00426f9a | 3439 | return false; |
0136f8f0 AH |
3440 | |
3441 | /* Sanity check: make sure that at least one copy of the vectorized stmt | |
3442 | needs to be generated. */ | |
3443 | gcc_assert (ncopies >= 1); | |
3444 | ||
3445 | if (!vec_stmt) /* transformation not required. */ | |
3446 | { | |
6c9e85fb JJ |
3447 | STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (bestn->decl); |
3448 | for (i = 0; i < nargs; i++) | |
7adb26f2 JJ |
3449 | if ((bestn->simdclone->args[i].arg_type |
3450 | == SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP) | |
3451 | || (bestn->simdclone->args[i].arg_type | |
3452 | == SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP)) | |
6c9e85fb | 3453 | { |
17b658af | 3454 | STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_grow_cleared (i * 3 |
6c9e85fb JJ |
3455 | + 1); |
3456 | STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (arginfo[i].op); | |
3457 | tree lst = POINTER_TYPE_P (TREE_TYPE (arginfo[i].op)) | |
3458 | ? size_type_node : TREE_TYPE (arginfo[i].op); | |
3459 | tree ls = build_int_cst (lst, arginfo[i].linear_step); | |
3460 | STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (ls); | |
17b658af JJ |
3461 | tree sll = arginfo[i].simd_lane_linear |
3462 | ? boolean_true_node : boolean_false_node; | |
3463 | STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (sll); | |
6c9e85fb | 3464 | } |
0136f8f0 AH |
3465 | STMT_VINFO_TYPE (stmt_info) = call_simd_clone_vec_info_type; |
3466 | if (dump_enabled_p ()) | |
3467 | dump_printf_loc (MSG_NOTE, vect_location, | |
3468 | "=== vectorizable_simd_clone_call ===\n"); | |
3469 | /* vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL); */ | |
0136f8f0 AH |
3470 | return true; |
3471 | } | |
3472 | ||
67b8dbac | 3473 | /* Transform. */ |
0136f8f0 AH |
3474 | |
3475 | if (dump_enabled_p ()) | |
3476 | dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n"); | |
3477 | ||
3478 | /* Handle def. */ | |
3479 | scalar_dest = gimple_call_lhs (stmt); | |
3480 | vec_dest = NULL_TREE; | |
3481 | rtype = NULL_TREE; | |
3482 | ratype = NULL_TREE; | |
3483 | if (scalar_dest) | |
3484 | { | |
3485 | vec_dest = vect_create_destination_var (scalar_dest, vectype); | |
3486 | rtype = TREE_TYPE (TREE_TYPE (fndecl)); | |
3487 | if (TREE_CODE (rtype) == ARRAY_TYPE) | |
3488 | { | |
3489 | ratype = rtype; | |
3490 | rtype = TREE_TYPE (ratype); | |
3491 | } | |
3492 | } | |
3493 | ||
3494 | prev_stmt_info = NULL; | |
3495 | for (j = 0; j < ncopies; ++j) | |
3496 | { | |
3497 | /* Build argument list for the vectorized call. */ | |
3498 | if (j == 0) | |
3499 | vargs.create (nargs); | |
3500 | else | |
3501 | vargs.truncate (0); | |
3502 | ||
3503 | for (i = 0; i < nargs; i++) | |
3504 | { | |
3505 | unsigned int k, l, m, o; | |
3506 | tree atype; | |
3507 | op = gimple_call_arg (stmt, i); | |
3508 | switch (bestn->simdclone->args[i].arg_type) | |
3509 | { | |
3510 | case SIMD_CLONE_ARG_TYPE_VECTOR: | |
3511 | atype = bestn->simdclone->args[i].vector_type; | |
3512 | o = nunits / TYPE_VECTOR_SUBPARTS (atype); | |
3513 | for (m = j * o; m < (j + 1) * o; m++) | |
3514 | { | |
3515 | if (TYPE_VECTOR_SUBPARTS (atype) | |
3516 | < TYPE_VECTOR_SUBPARTS (arginfo[i].vectype)) | |
3517 | { | |
3518 | unsigned int prec = GET_MODE_BITSIZE (TYPE_MODE (atype)); | |
3519 | k = (TYPE_VECTOR_SUBPARTS (arginfo[i].vectype) | |
3520 | / TYPE_VECTOR_SUBPARTS (atype)); | |
3521 | gcc_assert ((k & (k - 1)) == 0); | |
3522 | if (m == 0) | |
3523 | vec_oprnd0 | |
81c40241 | 3524 | = vect_get_vec_def_for_operand (op, stmt); |
0136f8f0 AH |
3525 | else |
3526 | { | |
3527 | vec_oprnd0 = arginfo[i].op; | |
3528 | if ((m & (k - 1)) == 0) | |
3529 | vec_oprnd0 | |
3530 | = vect_get_vec_def_for_stmt_copy (arginfo[i].dt, | |
3531 | vec_oprnd0); | |
3532 | } | |
3533 | arginfo[i].op = vec_oprnd0; | |
3534 | vec_oprnd0 | |
3535 | = build3 (BIT_FIELD_REF, atype, vec_oprnd0, | |
3536 | size_int (prec), | |
3537 | bitsize_int ((m & (k - 1)) * prec)); | |
3538 | new_stmt | |
b731b390 | 3539 | = gimple_build_assign (make_ssa_name (atype), |
0136f8f0 AH |
3540 | vec_oprnd0); |
3541 | vect_finish_stmt_generation (stmt, new_stmt, gsi); | |
3542 | vargs.safe_push (gimple_assign_lhs (new_stmt)); | |
3543 | } | |
3544 | else | |
3545 | { | |
3546 | k = (TYPE_VECTOR_SUBPARTS (atype) | |
3547 | / TYPE_VECTOR_SUBPARTS (arginfo[i].vectype)); | |
3548 | gcc_assert ((k & (k - 1)) == 0); | |
3549 | vec<constructor_elt, va_gc> *ctor_elts; | |
3550 | if (k != 1) | |
3551 | vec_alloc (ctor_elts, k); | |
3552 | else | |
3553 | ctor_elts = NULL; | |
3554 | for (l = 0; l < k; l++) | |
3555 | { | |
3556 | if (m == 0 && l == 0) | |
3557 | vec_oprnd0 | |
81c40241 | 3558 | = vect_get_vec_def_for_operand (op, stmt); |
0136f8f0 AH |
3559 | else |
3560 | vec_oprnd0 | |
3561 | = vect_get_vec_def_for_stmt_copy (arginfo[i].dt, | |
3562 | arginfo[i].op); | |
3563 | arginfo[i].op = vec_oprnd0; | |
3564 | if (k == 1) | |
3565 | break; | |
3566 | CONSTRUCTOR_APPEND_ELT (ctor_elts, NULL_TREE, | |
3567 | vec_oprnd0); | |
3568 | } | |
3569 | if (k == 1) | |
3570 | vargs.safe_push (vec_oprnd0); | |
3571 | else | |
3572 | { | |
3573 | vec_oprnd0 = build_constructor (atype, ctor_elts); | |
3574 | new_stmt | |
b731b390 | 3575 | = gimple_build_assign (make_ssa_name (atype), |
0136f8f0 AH |
3576 | vec_oprnd0); |
3577 | vect_finish_stmt_generation (stmt, new_stmt, gsi); | |
3578 | vargs.safe_push (gimple_assign_lhs (new_stmt)); | |
3579 | } | |
3580 | } | |
3581 | } | |
3582 | break; | |
3583 | case SIMD_CLONE_ARG_TYPE_UNIFORM: | |
3584 | vargs.safe_push (op); | |
3585 | break; | |
3586 | case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP: | |
7adb26f2 | 3587 | case SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP: |
0136f8f0 AH |
3588 | if (j == 0) |
3589 | { | |
3590 | gimple_seq stmts; | |
3591 | arginfo[i].op | |
3592 | = force_gimple_operand (arginfo[i].op, &stmts, true, | |
3593 | NULL_TREE); | |
3594 | if (stmts != NULL) | |
3595 | { | |
3596 | basic_block new_bb; | |
3597 | edge pe = loop_preheader_edge (loop); | |
3598 | new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts); | |
3599 | gcc_assert (!new_bb); | |
3600 | } | |
17b658af JJ |
3601 | if (arginfo[i].simd_lane_linear) |
3602 | { | |
3603 | vargs.safe_push (arginfo[i].op); | |
3604 | break; | |
3605 | } | |
b731b390 | 3606 | tree phi_res = copy_ssa_name (op); |
538dd0b7 | 3607 | gphi *new_phi = create_phi_node (phi_res, loop->header); |
0136f8f0 | 3608 | set_vinfo_for_stmt (new_phi, |
310213d4 | 3609 | new_stmt_vec_info (new_phi, loop_vinfo)); |
0136f8f0 AH |
3610 | add_phi_arg (new_phi, arginfo[i].op, |
3611 | loop_preheader_edge (loop), UNKNOWN_LOCATION); | |
3612 | enum tree_code code | |
3613 | = POINTER_TYPE_P (TREE_TYPE (op)) | |
3614 | ? POINTER_PLUS_EXPR : PLUS_EXPR; | |
3615 | tree type = POINTER_TYPE_P (TREE_TYPE (op)) | |
3616 | ? sizetype : TREE_TYPE (op); | |
807e902e KZ |
3617 | widest_int cst |
3618 | = wi::mul (bestn->simdclone->args[i].linear_step, | |
3619 | ncopies * nunits); | |
3620 | tree tcst = wide_int_to_tree (type, cst); | |
b731b390 | 3621 | tree phi_arg = copy_ssa_name (op); |
0d0e4a03 JJ |
3622 | new_stmt |
3623 | = gimple_build_assign (phi_arg, code, phi_res, tcst); | |
0136f8f0 AH |
3624 | gimple_stmt_iterator si = gsi_after_labels (loop->header); |
3625 | gsi_insert_after (&si, new_stmt, GSI_NEW_STMT); | |
3626 | set_vinfo_for_stmt (new_stmt, | |
310213d4 | 3627 | new_stmt_vec_info (new_stmt, loop_vinfo)); |
0136f8f0 AH |
3628 | add_phi_arg (new_phi, phi_arg, loop_latch_edge (loop), |
3629 | UNKNOWN_LOCATION); | |
3630 | arginfo[i].op = phi_res; | |
3631 | vargs.safe_push (phi_res); | |
3632 | } | |
3633 | else | |
3634 | { | |
3635 | enum tree_code code | |
3636 | = POINTER_TYPE_P (TREE_TYPE (op)) | |
3637 | ? POINTER_PLUS_EXPR : PLUS_EXPR; | |
3638 | tree type = POINTER_TYPE_P (TREE_TYPE (op)) | |
3639 | ? sizetype : TREE_TYPE (op); | |
807e902e KZ |
3640 | widest_int cst |
3641 | = wi::mul (bestn->simdclone->args[i].linear_step, | |
3642 | j * nunits); | |
3643 | tree tcst = wide_int_to_tree (type, cst); | |
b731b390 | 3644 | new_temp = make_ssa_name (TREE_TYPE (op)); |
0d0e4a03 JJ |
3645 | new_stmt = gimple_build_assign (new_temp, code, |
3646 | arginfo[i].op, tcst); | |
0136f8f0 AH |
3647 | vect_finish_stmt_generation (stmt, new_stmt, gsi); |
3648 | vargs.safe_push (new_temp); | |
3649 | } | |
3650 | break; | |
7adb26f2 JJ |
3651 | case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP: |
3652 | case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP: | |
0136f8f0 | 3653 | case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP: |
e01d41e5 JJ |
3654 | case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP: |
3655 | case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP: | |
3656 | case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP: | |
0136f8f0 AH |
3657 | default: |
3658 | gcc_unreachable (); | |
3659 | } | |
3660 | } | |
3661 | ||
3662 | new_stmt = gimple_build_call_vec (fndecl, vargs); | |
3663 | if (vec_dest) | |
3664 | { | |
3665 | gcc_assert (ratype || TYPE_VECTOR_SUBPARTS (rtype) == nunits); | |
3666 | if (ratype) | |
b731b390 | 3667 | new_temp = create_tmp_var (ratype); |
0136f8f0 AH |
3668 | else if (TYPE_VECTOR_SUBPARTS (vectype) |
3669 | == TYPE_VECTOR_SUBPARTS (rtype)) | |
3670 | new_temp = make_ssa_name (vec_dest, new_stmt); | |
3671 | else | |
3672 | new_temp = make_ssa_name (rtype, new_stmt); | |
3673 | gimple_call_set_lhs (new_stmt, new_temp); | |
3674 | } | |
3675 | vect_finish_stmt_generation (stmt, new_stmt, gsi); | |
3676 | ||
3677 | if (vec_dest) | |
3678 | { | |
3679 | if (TYPE_VECTOR_SUBPARTS (vectype) < nunits) | |
3680 | { | |
3681 | unsigned int k, l; | |
3682 | unsigned int prec = GET_MODE_BITSIZE (TYPE_MODE (vectype)); | |
3683 | k = nunits / TYPE_VECTOR_SUBPARTS (vectype); | |
3684 | gcc_assert ((k & (k - 1)) == 0); | |
3685 | for (l = 0; l < k; l++) | |
3686 | { | |
3687 | tree t; | |
3688 | if (ratype) | |
3689 | { | |
3690 | t = build_fold_addr_expr (new_temp); | |
3691 | t = build2 (MEM_REF, vectype, t, | |
3692 | build_int_cst (TREE_TYPE (t), | |
3693 | l * prec / BITS_PER_UNIT)); | |
3694 | } | |
3695 | else | |
3696 | t = build3 (BIT_FIELD_REF, vectype, new_temp, | |
3697 | size_int (prec), bitsize_int (l * prec)); | |
3698 | new_stmt | |
b731b390 | 3699 | = gimple_build_assign (make_ssa_name (vectype), t); |
0136f8f0 AH |
3700 | vect_finish_stmt_generation (stmt, new_stmt, gsi); |
3701 | if (j == 0 && l == 0) | |
3702 | STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt; | |
3703 | else | |
3704 | STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt; | |
3705 | ||
3706 | prev_stmt_info = vinfo_for_stmt (new_stmt); | |
3707 | } | |
3708 | ||
3709 | if (ratype) | |
3710 | { | |
3711 | tree clobber = build_constructor (ratype, NULL); | |
3712 | TREE_THIS_VOLATILE (clobber) = 1; | |
3713 | new_stmt = gimple_build_assign (new_temp, clobber); | |
3714 | vect_finish_stmt_generation (stmt, new_stmt, gsi); | |
3715 | } | |
3716 | continue; | |
3717 | } | |
3718 | else if (TYPE_VECTOR_SUBPARTS (vectype) > nunits) | |
3719 | { | |
3720 | unsigned int k = (TYPE_VECTOR_SUBPARTS (vectype) | |
3721 | / TYPE_VECTOR_SUBPARTS (rtype)); | |
3722 | gcc_assert ((k & (k - 1)) == 0); | |
3723 | if ((j & (k - 1)) == 0) | |
3724 | vec_alloc (ret_ctor_elts, k); | |
3725 | if (ratype) | |
3726 | { | |
3727 | unsigned int m, o = nunits / TYPE_VECTOR_SUBPARTS (rtype); | |
3728 | for (m = 0; m < o; m++) | |
3729 | { | |
3730 | tree tem = build4 (ARRAY_REF, rtype, new_temp, | |
3731 | size_int (m), NULL_TREE, NULL_TREE); | |
3732 | new_stmt | |
b731b390 | 3733 | = gimple_build_assign (make_ssa_name (rtype), tem); |
0136f8f0 AH |
3734 | vect_finish_stmt_generation (stmt, new_stmt, gsi); |
3735 | CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE, | |
3736 | gimple_assign_lhs (new_stmt)); | |
3737 | } | |
3738 | tree clobber = build_constructor (ratype, NULL); | |
3739 | TREE_THIS_VOLATILE (clobber) = 1; | |
3740 | new_stmt = gimple_build_assign (new_temp, clobber); | |
3741 | vect_finish_stmt_generation (stmt, new_stmt, gsi); | |
3742 | } | |
3743 | else | |
3744 | CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE, new_temp); | |
3745 | if ((j & (k - 1)) != k - 1) | |
3746 | continue; | |
3747 | vec_oprnd0 = build_constructor (vectype, ret_ctor_elts); | |
3748 | new_stmt | |
b731b390 | 3749 | = gimple_build_assign (make_ssa_name (vec_dest), vec_oprnd0); |
0136f8f0 AH |
3750 | vect_finish_stmt_generation (stmt, new_stmt, gsi); |
3751 | ||
3752 | if ((unsigned) j == k - 1) | |
3753 | STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt; | |
3754 | else | |
3755 | STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt; | |
3756 | ||
3757 | prev_stmt_info = vinfo_for_stmt (new_stmt); | |
3758 | continue; | |
3759 | } | |
3760 | else if (ratype) | |
3761 | { | |
3762 | tree t = build_fold_addr_expr (new_temp); | |
3763 | t = build2 (MEM_REF, vectype, t, | |
3764 | build_int_cst (TREE_TYPE (t), 0)); | |
3765 | new_stmt | |
b731b390 | 3766 | = gimple_build_assign (make_ssa_name (vec_dest), t); |
0136f8f0 AH |
3767 | vect_finish_stmt_generation (stmt, new_stmt, gsi); |
3768 | tree clobber = build_constructor (ratype, NULL); | |
3769 | TREE_THIS_VOLATILE (clobber) = 1; | |
3770 | vect_finish_stmt_generation (stmt, | |
3771 | gimple_build_assign (new_temp, | |
3772 | clobber), gsi); | |
3773 | } | |
3774 | } | |
3775 | ||
3776 | if (j == 0) | |
3777 | STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt; | |
3778 | else | |
3779 | STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt; | |
3780 | ||
3781 | prev_stmt_info = vinfo_for_stmt (new_stmt); | |
3782 | } | |
3783 | ||
3784 | vargs.release (); | |
3785 | ||
3786 | /* The call in STMT might prevent it from being removed in DCE. |
3787 | We cannot, however, remove it here, because of the way the SSA name |
3788 | it defines is mapped to the new definition. So just replace the |
3789 | rhs of the statement with something harmless. */ |
3790 | ||
3791 | if (slp_node) | |
3792 | return true; | |
3793 | ||
3794 | if (scalar_dest) | |
3795 | { | |
3796 | type = TREE_TYPE (scalar_dest); | |
3797 | if (is_pattern_stmt_p (stmt_info)) | |
3798 | lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info)); | |
3799 | else | |
3800 | lhs = gimple_call_lhs (stmt); | |
3801 | new_stmt = gimple_build_assign (lhs, build_zero_cst (type)); | |
3802 | } | |
3803 | else | |
3804 | new_stmt = gimple_build_nop (); | |
3805 | set_vinfo_for_stmt (new_stmt, stmt_info); | |
3806 | set_vinfo_for_stmt (stmt, NULL); | |
3807 | STMT_VINFO_STMT (stmt_info) = new_stmt; | |
2865f32a | 3808 | gsi_replace (gsi, new_stmt, true); |
0136f8f0 AH |
3809 | unlink_stmt_vdef (stmt); |
3810 | ||
3811 | return true; | |
3812 | } | |
3813 | ||
3814 | ||
ebfd146a IR |
3815 | /* Function vect_gen_widened_results_half |
3816 | ||
3817 | Create a vector stmt whose code, operand type, and result |
b8698a0f | 3818 | variable are CODE, OP_TYPE, and VEC_DEST, and its arguments are |
ff802fa1 | 3819 | VEC_OPRND0 and VEC_OPRND1. The new vector stmt is to be inserted at GSI. |
ebfd146a IR |
3820 | In the case that CODE is a CALL_EXPR, this means that a call to DECL |
3821 | needs to be created (DECL is a function-decl of a target-builtin). | |
3822 | STMT is the original scalar stmt that we are vectorizing. */ | |
3823 | ||
355fe088 | 3824 | static gimple * |
ebfd146a IR |
3825 | vect_gen_widened_results_half (enum tree_code code, |
3826 | tree decl, | |
3827 | tree vec_oprnd0, tree vec_oprnd1, int op_type, | |
3828 | tree vec_dest, gimple_stmt_iterator *gsi, | |
355fe088 | 3829 | gimple *stmt) |
b8698a0f | 3830 | { |
355fe088 | 3831 | gimple *new_stmt; |
b8698a0f L |
3832 | tree new_temp; |
3833 | ||
3834 | /* Generate half of the widened result: */ | |
3835 | if (code == CALL_EXPR) | |
3836 | { | |
3837 | /* Target-specific support */ |
ebfd146a IR |
3838 | if (op_type == binary_op) |
3839 | new_stmt = gimple_build_call (decl, 2, vec_oprnd0, vec_oprnd1); | |
3840 | else | |
3841 | new_stmt = gimple_build_call (decl, 1, vec_oprnd0); | |
3842 | new_temp = make_ssa_name (vec_dest, new_stmt); | |
3843 | gimple_call_set_lhs (new_stmt, new_temp); | |
b8698a0f L |
3844 | } |
3845 | else | |
ebfd146a | 3846 | { |
b8698a0f L |
3847 | /* Generic support */ |
3848 | gcc_assert (op_type == TREE_CODE_LENGTH (code)); | |
ebfd146a IR |
3849 | if (op_type != binary_op) |
3850 | vec_oprnd1 = NULL; | |
0d0e4a03 | 3851 | new_stmt = gimple_build_assign (vec_dest, code, vec_oprnd0, vec_oprnd1); |
ebfd146a IR |
3852 | new_temp = make_ssa_name (vec_dest, new_stmt); |
3853 | gimple_assign_set_lhs (new_stmt, new_temp); | |
b8698a0f | 3854 | } |
ebfd146a IR |
3855 | vect_finish_stmt_generation (stmt, new_stmt, gsi); |
3856 | ||
ebfd146a IR |
3857 | return new_stmt; |
3858 | } | |
3859 | ||
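/* Illustrative, standalone C, not GCC internals: why a widening operation
   is generated in two halves.  Widening all N narrow lanes produces N
   wide lanes, which no longer fit in a single vector, so two stmts are
   emitted (e.g. the LO and HI variants of a widening multiply), each
   built by one call to vect_gen_widened_results_half.  Lane counts here
   are assumed: 8 shorts in, two vectors of 4 ints out.  */

void
widen_mult_halves (const short *a, const short *b, int *lo, int *hi)
{
  for (int l = 0; l < 4; l++)
    lo[l] = (int) a[l] * b[l];		/* first (LO) half of the 8 lanes */
  for (int l = 0; l < 4; l++)
    hi[l] = (int) a[4 + l] * b[4 + l];	/* second (HI) half */
}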
4a00c761 JJ |
3860 | |
3861 | /* Get vectorized definitions for loop-based vectorization. For the first | |
3862 | operand we call vect_get_vec_def_for_operand() (with OPRND containing |
3863 | the scalar operand), and for the rest we get a copy with |
3864 | vect_get_vec_def_for_stmt_copy() using the previous vector definition | |
3865 | (stored in OPRND). See vect_get_vec_def_for_stmt_copy() for details. | |
3866 | The vectors are collected into VEC_OPRNDS. */ | |
3867 | ||
3868 | static void | |
355fe088 | 3869 | vect_get_loop_based_defs (tree *oprnd, gimple *stmt, enum vect_def_type dt, |
9771b263 | 3870 | vec<tree> *vec_oprnds, int multi_step_cvt) |
4a00c761 JJ |
3871 | { |
3872 | tree vec_oprnd; | |
3873 | ||
3874 | /* Get first vector operand. */ | |
3875 | /* All the vector operands except the very first one (that is the scalar |
3876 | oprnd) are stmt copies. */ |
3877 | if (TREE_CODE (TREE_TYPE (*oprnd)) != VECTOR_TYPE) | |
81c40241 | 3878 | vec_oprnd = vect_get_vec_def_for_operand (*oprnd, stmt); |
4a00c761 JJ |
3879 | else |
3880 | vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, *oprnd); | |
3881 | ||
9771b263 | 3882 | vec_oprnds->quick_push (vec_oprnd); |
4a00c761 JJ |
3883 | |
3884 | /* Get second vector operand. */ | |
3885 | vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd); | |
9771b263 | 3886 | vec_oprnds->quick_push (vec_oprnd); |
4a00c761 JJ |
3887 | |
3888 | *oprnd = vec_oprnd; | |
3889 | ||
3890 | /* For conversion in multiple steps, continue to get operands | |
3891 | recursively. */ | |
3892 | if (multi_step_cvt) | |
3893 | vect_get_loop_based_defs (oprnd, stmt, dt, vec_oprnds, multi_step_cvt - 1); | |
3894 | } | |
3895 | ||
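/* Illustrative sketch, not GCC internals: the def-collection pattern of
   vect_get_loop_based_defs.  Each level pushes two defs and recurses on
   the second, so 2 * (multi_step_cvt + 1) defs are collected in total.
   The toy get_next_def stands in for vect_get_vec_def_for_stmt_copy.  */

typedef int tree_t;			/* toy stand-in for 'tree' */

static tree_t
get_next_def (tree_t prev)
{
  return prev + 1;			/* pretend each copy is a new def */
}

static void
collect_defs (tree_t *oprnd, tree_t *out, int *n, int multi_step_cvt)
{
  tree_t d = get_next_def (*oprnd);
  out[(*n)++] = d;			/* first def of this level */
  d = get_next_def (d);
  out[(*n)++] = d;			/* second def of this level */
  *oprnd = d;
  if (multi_step_cvt)
    collect_defs (oprnd, out, n, multi_step_cvt - 1);
}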
3896 | ||
3897 | /* Create vectorized demotion statements for vector operands from VEC_OPRNDS. | |
3898 | For multi-step conversions, store the resulting vectors and call the function |
3899 | recursively. */ | |
3900 | ||
3901 | static void | |
9771b263 | 3902 | vect_create_vectorized_demotion_stmts (vec<tree> *vec_oprnds, |
355fe088 | 3903 | int multi_step_cvt, gimple *stmt, |
9771b263 | 3904 | vec<tree> vec_dsts, |
4a00c761 JJ |
3905 | gimple_stmt_iterator *gsi, |
3906 | slp_tree slp_node, enum tree_code code, | |
3907 | stmt_vec_info *prev_stmt_info) | |
3908 | { | |
3909 | unsigned int i; | |
3910 | tree vop0, vop1, new_tmp, vec_dest; | |
355fe088 | 3911 | gimple *new_stmt; |
4a00c761 JJ |
3912 | stmt_vec_info stmt_info = vinfo_for_stmt (stmt); |
3913 | ||
9771b263 | 3914 | vec_dest = vec_dsts.pop (); |
4a00c761 | 3915 | |
9771b263 | 3916 | for (i = 0; i < vec_oprnds->length (); i += 2) |
4a00c761 JJ |
3917 | { |
3918 | /* Create demotion operation. */ | |
9771b263 DN |
3919 | vop0 = (*vec_oprnds)[i]; |
3920 | vop1 = (*vec_oprnds)[i + 1]; | |
0d0e4a03 | 3921 | new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1); |
4a00c761 JJ |
3922 | new_tmp = make_ssa_name (vec_dest, new_stmt); |
3923 | gimple_assign_set_lhs (new_stmt, new_tmp); | |
3924 | vect_finish_stmt_generation (stmt, new_stmt, gsi); | |
3925 | ||
3926 | if (multi_step_cvt) | |
3927 | /* Store the resulting vector for next recursive call. */ | |
9771b263 | 3928 | (*vec_oprnds)[i/2] = new_tmp; |
4a00c761 JJ |
3929 | else |
3930 | { | |
3931 | /* This is the last step of the conversion sequence. Store the | |
3932 | vectors in SLP_NODE or in vector info of the scalar statement | |
3933 | (or in STMT_VINFO_RELATED_STMT chain). */ | |
3934 | if (slp_node) | |
9771b263 | 3935 | SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt); |
4a00c761 | 3936 | else |
c689ce1e RB |
3937 | { |
3938 | if (!*prev_stmt_info) | |
3939 | STMT_VINFO_VEC_STMT (stmt_info) = new_stmt; | |
3940 | else | |
3941 | STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt; | |
4a00c761 | 3942 | |
c689ce1e RB |
3943 | *prev_stmt_info = vinfo_for_stmt (new_stmt); |
3944 | } | |
4a00c761 JJ |
3945 | } |
3946 | } | |
3947 | ||
3948 | /* For multi-step demotion operations we first generate demotion operations | |
3949 | from the source type to the intermediate types, and then combine the | |
3950 | results (stored in VEC_OPRNDS) in demotion operation to the destination | |
3951 | type. */ | |
3952 | if (multi_step_cvt) | |
3953 | { | |
3954 | /* At each level of recursion we have half of the operands we had at the | |
3955 | previous level. */ | |
9771b263 | 3956 | vec_oprnds->truncate ((i+1)/2); |
4a00c761 JJ |
3957 | vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1, |
3958 | stmt, vec_dsts, gsi, slp_node, | |
3959 | VEC_PACK_TRUNC_EXPR, | |
3960 | prev_stmt_info); | |
3961 | } | |
3962 | ||
9771b263 | 3963 | vec_dsts.quick_push (vec_dest); |
4a00c761 JJ |
3964 | } |
3965 | ||
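/* Illustrative, standalone C, not GCC internals: a two-step demotion of
   the kind the recursion above builds.  Converting int -> char goes via
   short: each VEC_PACK_TRUNC step combines two vectors into one, so the
   operand list is halved per level, matching the truncate ((i+1)/2)
   above.  Lane counts are assumed.  */

void
demote_int_to_char (const int *in /* 16 ints */,
                    signed char *out /* 16 chars */)
{
  short mid[16];
  for (int i = 0; i < 16; i++)	/* step 1: pack int pairs down to short */
    mid[i] = (short) in[i];
  for (int i = 0; i < 16; i++)	/* step 2: pack short pairs down to char */
    out[i] = (signed char) mid[i];
}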
3966 | ||
3967 | /* Create vectorized promotion statements for vector operands from VEC_OPRNDS0 | |
3968 | and VEC_OPRNDS1 (for binary operations). For multi-step conversions, store |
3969 | the resulting vectors and call the function recursively. */ |
3970 | ||
3971 | static void | |
9771b263 DN |
3972 | vect_create_vectorized_promotion_stmts (vec<tree> *vec_oprnds0, |
3973 | vec<tree> *vec_oprnds1, | |
355fe088 | 3974 | gimple *stmt, tree vec_dest, |
4a00c761 JJ |
3975 | gimple_stmt_iterator *gsi, |
3976 | enum tree_code code1, | |
3977 | enum tree_code code2, tree decl1, | |
3978 | tree decl2, int op_type) | |
3979 | { | |
3980 | int i; | |
3981 | tree vop0, vop1, new_tmp1, new_tmp2; | |
355fe088 | 3982 | gimple *new_stmt1, *new_stmt2; |
6e1aa848 | 3983 | vec<tree> vec_tmp = vNULL; |
4a00c761 | 3984 | |
9771b263 DN |
3985 | vec_tmp.create (vec_oprnds0->length () * 2); |
3986 | FOR_EACH_VEC_ELT (*vec_oprnds0, i, vop0) | |
4a00c761 JJ |
3987 | { |
3988 | if (op_type == binary_op) | |
9771b263 | 3989 | vop1 = (*vec_oprnds1)[i]; |
4a00c761 JJ |
3990 | else |
3991 | vop1 = NULL_TREE; | |
3992 | ||
3993 | /* Generate the two halves of promotion operation. */ | |
3994 | new_stmt1 = vect_gen_widened_results_half (code1, decl1, vop0, vop1, | |
3995 | op_type, vec_dest, gsi, stmt); | |
3996 | new_stmt2 = vect_gen_widened_results_half (code2, decl2, vop0, vop1, | |
3997 | op_type, vec_dest, gsi, stmt); | |
3998 | if (is_gimple_call (new_stmt1)) | |
3999 | { | |
4000 | new_tmp1 = gimple_call_lhs (new_stmt1); | |
4001 | new_tmp2 = gimple_call_lhs (new_stmt2); | |
4002 | } | |
4003 | else | |
4004 | { | |
4005 | new_tmp1 = gimple_assign_lhs (new_stmt1); | |
4006 | new_tmp2 = gimple_assign_lhs (new_stmt2); | |
4007 | } | |
4008 | ||
4009 | /* Store the results for the next step. */ | |
9771b263 DN |
4010 | vec_tmp.quick_push (new_tmp1); |
4011 | vec_tmp.quick_push (new_tmp2); | |
4a00c761 JJ |
4012 | } |
4013 | ||
689eaba3 | 4014 | vec_oprnds0->release (); |
4a00c761 JJ |
4015 | *vec_oprnds0 = vec_tmp; |
4016 | } | |
4017 | ||
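/* Illustrative sketch, not GCC internals: the bookkeeping done above.
   For every input vector i the two generated halves are pushed
   adjacently, so the result list reads [lo0, hi0, lo1, hi1, ...] and is
   twice as long as the input list (names hypothetical).  */

static void
store_promotion_results (int n_in, const int *lo, const int *hi, int *vec_tmp)
{
  for (int i = 0; i < n_in; i++)
    {
      vec_tmp[2 * i] = lo[i];		/* new_tmp1 for input i */
      vec_tmp[2 * i + 1] = hi[i];	/* new_tmp2 for input i */
    }
}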
4018 | ||
b8698a0f L |
4019 | /* Check if STMT performs a conversion operation that can be vectorized. |
4020 | If VEC_STMT is also passed, vectorize the STMT: create a vectorized | |
4a00c761 | 4021 | stmt to replace it, put it in VEC_STMT, and insert it at GSI. |
ebfd146a IR |
4022 | Return FALSE if not a vectorizable STMT, TRUE otherwise. */ |
4023 | ||
4024 | static bool | |
355fe088 TS |
4025 | vectorizable_conversion (gimple *stmt, gimple_stmt_iterator *gsi, |
4026 | gimple **vec_stmt, slp_tree slp_node) | |
ebfd146a IR |
4027 | { |
4028 | tree vec_dest; | |
4029 | tree scalar_dest; | |
4a00c761 | 4030 | tree op0, op1 = NULL_TREE; |
ebfd146a IR |
4031 | tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE; |
4032 | stmt_vec_info stmt_info = vinfo_for_stmt (stmt); | |
4033 | loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); | |
4034 | enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK; | |
4a00c761 | 4035 | enum tree_code codecvt1 = ERROR_MARK, codecvt2 = ERROR_MARK; |
ebfd146a IR |
4036 | tree decl1 = NULL_TREE, decl2 = NULL_TREE; |
4037 | tree new_temp; | |
355fe088 | 4038 | gimple *def_stmt; |
ebfd146a | 4039 | enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type}; |
4fc5ebf1 | 4040 | int ndts = 2; |
355fe088 | 4041 | gimple *new_stmt = NULL; |
ebfd146a IR |
4042 | stmt_vec_info prev_stmt_info; |
4043 | int nunits_in; | |
4044 | int nunits_out; | |
4045 | tree vectype_out, vectype_in; | |
4a00c761 JJ |
4046 | int ncopies, i, j; |
4047 | tree lhs_type, rhs_type; | |
ebfd146a | 4048 | enum { NARROW, NONE, WIDEN } modifier; |
6e1aa848 DN |
4049 | vec<tree> vec_oprnds0 = vNULL; |
4050 | vec<tree> vec_oprnds1 = vNULL; | |
ebfd146a | 4051 | tree vop0; |
4a00c761 | 4052 | bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info); |
310213d4 | 4053 | vec_info *vinfo = stmt_info->vinfo; |
4a00c761 | 4054 | int multi_step_cvt = 0; |
6e1aa848 | 4055 | vec<tree> interm_types = vNULL; |
4a00c761 JJ |
4056 | tree last_oprnd, intermediate_type, cvt_type = NULL_TREE; |
4057 | int op_type; | |
ef4bddc2 | 4058 | machine_mode rhs_mode; |
4a00c761 | 4059 | unsigned short fltsz; |
ebfd146a IR |
4060 | |
4061 | /* Is STMT a vectorizable conversion? */ | |
4062 | ||
4a00c761 | 4063 | if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo) |
ebfd146a IR |
4064 | return false; |
4065 | ||
66c16fd9 RB |
4066 | if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def |
4067 | && ! vec_stmt) | |
ebfd146a IR |
4068 | return false; |
4069 | ||
4070 | if (!is_gimple_assign (stmt)) | |
4071 | return false; | |
4072 | ||
4073 | if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME) | |
4074 | return false; | |
4075 | ||
4076 | code = gimple_assign_rhs_code (stmt); | |
4a00c761 JJ |
4077 | if (!CONVERT_EXPR_CODE_P (code) |
4078 | && code != FIX_TRUNC_EXPR | |
4079 | && code != FLOAT_EXPR | |
4080 | && code != WIDEN_MULT_EXPR | |
4081 | && code != WIDEN_LSHIFT_EXPR) | |
ebfd146a IR |
4082 | return false; |
4083 | ||
4a00c761 JJ |
4084 | op_type = TREE_CODE_LENGTH (code); |
4085 | ||
ebfd146a | 4086 | /* Check types of lhs and rhs. */ |
b690cc0f | 4087 | scalar_dest = gimple_assign_lhs (stmt); |
4a00c761 | 4088 | lhs_type = TREE_TYPE (scalar_dest); |
b690cc0f RG |
4089 | vectype_out = STMT_VINFO_VECTYPE (stmt_info); |
4090 | ||
ebfd146a IR |
4091 | op0 = gimple_assign_rhs1 (stmt); |
4092 | rhs_type = TREE_TYPE (op0); | |
4a00c761 JJ |
4093 | |
4094 | if ((code != FIX_TRUNC_EXPR && code != FLOAT_EXPR) | |
4095 | && !((INTEGRAL_TYPE_P (lhs_type) | |
4096 | && INTEGRAL_TYPE_P (rhs_type)) | |
4097 | || (SCALAR_FLOAT_TYPE_P (lhs_type) | |
4098 | && SCALAR_FLOAT_TYPE_P (rhs_type)))) | |
4099 | return false; | |
4100 | ||
e6f5c25d IE |
4101 | if (!VECTOR_BOOLEAN_TYPE_P (vectype_out) |
4102 | && ((INTEGRAL_TYPE_P (lhs_type) | |
4103 | && (TYPE_PRECISION (lhs_type) | |
4104 | != GET_MODE_PRECISION (TYPE_MODE (lhs_type)))) | |
4105 | || (INTEGRAL_TYPE_P (rhs_type) | |
4106 | && (TYPE_PRECISION (rhs_type) | |
4107 | != GET_MODE_PRECISION (TYPE_MODE (rhs_type)))))) | |
4a00c761 | 4108 | { |
73fbfcad | 4109 | if (dump_enabled_p ()) |
78c60e3d | 4110 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 TJ |
4111 | "type conversion to/from bit-precision unsupported." |
4112 | "\n"); | |
4a00c761 JJ |
4113 | return false; |
4114 | } | |
4115 | ||
b690cc0f | 4116 | /* Check the operands of the operation. */ |
81c40241 | 4117 | if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype_in)) |
b690cc0f | 4118 | { |
73fbfcad | 4119 | if (dump_enabled_p ()) |
78c60e3d | 4120 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 4121 | "use not simple.\n"); |
b690cc0f RG |
4122 | return false; |
4123 | } | |
4a00c761 JJ |
4124 | if (op_type == binary_op) |
4125 | { | |
4126 | bool ok; | |
4127 | ||
4128 | op1 = gimple_assign_rhs2 (stmt); | |
4129 | gcc_assert (code == WIDEN_MULT_EXPR || code == WIDEN_LSHIFT_EXPR); | |
4130 | /* For WIDEN_MULT_EXPR, if OP0 is a constant, use the type of | |
4131 | OP1. */ | |
4132 | if (CONSTANT_CLASS_P (op0)) | |
81c40241 | 4133 | ok = vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1], &vectype_in); |
4a00c761 | 4134 | else |
81c40241 | 4135 | ok = vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1]); |
4a00c761 JJ |
4136 | |
4137 | if (!ok) | |
4138 | { | |
73fbfcad | 4139 | if (dump_enabled_p ()) |
78c60e3d | 4140 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 4141 | "use not simple.\n"); |
4a00c761 JJ |
4142 | return false; |
4143 | } | |
4144 | } | |
4145 | ||
b690cc0f RG |
4146 | /* If op0 is an external or constant def, use a vector type of
4147 | the same size as the output vector type. */ | |
ebfd146a | 4148 | if (!vectype_in) |
b690cc0f | 4149 | vectype_in = get_same_sized_vectype (rhs_type, vectype_out); |
7d8930a0 IR |
4150 | if (vec_stmt) |
4151 | gcc_assert (vectype_in); | |
4152 | if (!vectype_in) | |
4153 | { | |
73fbfcad | 4154 | if (dump_enabled_p ()) |
4a00c761 | 4155 | { |
78c60e3d SS |
4156 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
4157 | "no vectype for scalar type "); | |
4158 | dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type); | |
e645e942 | 4159 | dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); |
4a00c761 | 4160 | } |
7d8930a0 IR |
4161 | |
4162 | return false; | |
4163 | } | |
ebfd146a | 4164 | |
e6f5c25d IE |
4165 | if (VECTOR_BOOLEAN_TYPE_P (vectype_out) |
4166 | && !VECTOR_BOOLEAN_TYPE_P (vectype_in)) | |
4167 | { | |
4168 | if (dump_enabled_p ()) | |
4169 | { | |
4170 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
4171 | "can't convert between boolean and non " | |
4172 | "boolean vectors"); | |
4173 | dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type); | |
4174 | dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); | |
4175 | } | |
4176 | ||
4177 | return false; | |
4178 | } | |
4179 | ||
b690cc0f RG |
4180 | nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in); |
4181 | nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out); | |
4a00c761 | 4182 | if (nunits_in < nunits_out) |
ebfd146a IR |
4183 | modifier = NARROW; |
4184 | else if (nunits_out == nunits_in) | |
4185 | modifier = NONE; | |
ebfd146a | 4186 | else |
4a00c761 | 4187 | modifier = WIDEN; |
ebfd146a | 4188 | |
ff802fa1 IR |
4189 | /* Multiple types in SLP are handled by creating the appropriate number of |
4190 | vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in | |
4191 | case of SLP. */ | |
fce57248 | 4192 | if (slp_node) |
ebfd146a | 4193 | ncopies = 1; |
4a00c761 JJ |
4194 | else if (modifier == NARROW) |
4195 | ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out; | |
4196 | else | |
4197 | ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in; | |
b8698a0f | 4198 | |
ebfd146a IR |
4199 | /* Sanity check: make sure that at least one copy of the vectorized stmt |
4200 | needs to be generated. */ | |
4201 | gcc_assert (ncopies >= 1); | |
4202 | ||
ebfd146a | 4203 | /* Supportable by target? */ |
4a00c761 | 4204 | switch (modifier) |
ebfd146a | 4205 | { |
4a00c761 JJ |
4206 | case NONE: |
4207 | if (code != FIX_TRUNC_EXPR && code != FLOAT_EXPR) | |
4208 | return false; | |
4209 | if (supportable_convert_operation (code, vectype_out, vectype_in, | |
4210 | &decl1, &code1)) | |
4211 | break; | |
4212 | /* FALLTHRU */ | |
4213 | unsupported: | |
73fbfcad | 4214 | if (dump_enabled_p ()) |
78c60e3d | 4215 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 4216 | "conversion not supported by target.\n"); |
ebfd146a | 4217 | return false; |
ebfd146a | 4218 | |
4a00c761 JJ |
4219 | case WIDEN: |
4220 | if (supportable_widening_operation (code, stmt, vectype_out, vectype_in, | |
a86ec597 RH |
4221 | &code1, &code2, &multi_step_cvt, |
4222 | &interm_types)) | |
4a00c761 JJ |
4223 | { |
4224 | /* A binary widening operation can only be supported directly by the | |
4225 | architecture. */ | |
4226 | gcc_assert (!(multi_step_cvt && op_type == binary_op)); | |
4227 | break; | |
4228 | } | |
4229 | ||
4230 | if (code != FLOAT_EXPR | |
4231 | || (GET_MODE_SIZE (TYPE_MODE (lhs_type)) | |
4232 | <= GET_MODE_SIZE (TYPE_MODE (rhs_type)))) | |
4233 | goto unsupported; | |
4234 | ||
4235 | rhs_mode = TYPE_MODE (rhs_type); | |
4236 | fltsz = GET_MODE_SIZE (TYPE_MODE (lhs_type)); | |
4237 | for (rhs_mode = GET_MODE_2XWIDER_MODE (TYPE_MODE (rhs_type)); | |
4238 | rhs_mode != VOIDmode && GET_MODE_SIZE (rhs_mode) <= fltsz; | |
4239 | rhs_mode = GET_MODE_2XWIDER_MODE (rhs_mode)) | |
4240 | { | |
4241 | cvt_type | |
4242 | = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0); | |
4243 | cvt_type = get_same_sized_vectype (cvt_type, vectype_in); | |
4244 | if (cvt_type == NULL_TREE) | |
4245 | goto unsupported; | |
4246 | ||
4247 | if (GET_MODE_SIZE (rhs_mode) == fltsz) | |
4248 | { | |
4249 | if (!supportable_convert_operation (code, vectype_out, | |
4250 | cvt_type, &decl1, &codecvt1)) | |
4251 | goto unsupported; | |
4252 | } | |
4253 | else if (!supportable_widening_operation (code, stmt, vectype_out, | |
a86ec597 RH |
4254 | cvt_type, &codecvt1, |
4255 | &codecvt2, &multi_step_cvt, | |
4a00c761 JJ |
4256 | &interm_types)) |
4257 | continue; | |
4258 | else | |
4259 | gcc_assert (multi_step_cvt == 0); | |
4260 | ||
4261 | if (supportable_widening_operation (NOP_EXPR, stmt, cvt_type, | |
a86ec597 RH |
4262 | vectype_in, &code1, &code2, |
4263 | &multi_step_cvt, &interm_types)) | |
4a00c761 JJ |
4264 | break; |
4265 | } | |
4266 | ||
4267 | if (rhs_mode == VOIDmode || GET_MODE_SIZE (rhs_mode) > fltsz) | |
4268 | goto unsupported; | |
4269 | ||
4270 | if (GET_MODE_SIZE (rhs_mode) == fltsz) | |
4271 | codecvt2 = ERROR_MARK; | |
4272 | else | |
4273 | { | |
4274 | multi_step_cvt++; | |
9771b263 | 4275 | interm_types.safe_push (cvt_type); |
4a00c761 JJ |
4276 | cvt_type = NULL_TREE; |
4277 | } | |
4278 | break; | |
4279 | ||
4280 | case NARROW: | |
4281 | gcc_assert (op_type == unary_op); | |
4282 | if (supportable_narrowing_operation (code, vectype_out, vectype_in, | |
4283 | &code1, &multi_step_cvt, | |
4284 | &interm_types)) | |
4285 | break; | |
4286 | ||
4287 | if (code != FIX_TRUNC_EXPR | |
4288 | || (GET_MODE_SIZE (TYPE_MODE (lhs_type)) | |
4289 | >= GET_MODE_SIZE (TYPE_MODE (rhs_type)))) | |
4290 | goto unsupported; | |
4291 | ||
4292 | rhs_mode = TYPE_MODE (rhs_type); | |
4293 | cvt_type | |
4294 | = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0); | |
4295 | cvt_type = get_same_sized_vectype (cvt_type, vectype_in); | |
4296 | if (cvt_type == NULL_TREE) | |
4297 | goto unsupported; | |
4298 | if (!supportable_convert_operation (code, cvt_type, vectype_in, | |
4299 | &decl1, &codecvt1)) | |
4300 | goto unsupported; | |
4301 | if (supportable_narrowing_operation (NOP_EXPR, vectype_out, cvt_type, | |
4302 | &code1, &multi_step_cvt, | |
4303 | &interm_types)) | |
4304 | break; | |
4305 | goto unsupported; | |
4306 | ||
4307 | default: | |
4308 | gcc_unreachable (); | |
ebfd146a IR |
4309 | } |
4310 | ||
4311 | if (!vec_stmt) /* transformation not required. */ | |
4312 | { | |
73fbfcad | 4313 | if (dump_enabled_p ()) |
78c60e3d | 4314 | dump_printf_loc (MSG_NOTE, vect_location, |
e645e942 | 4315 | "=== vectorizable_conversion ===\n"); |
4a00c761 | 4316 | if (code == FIX_TRUNC_EXPR || code == FLOAT_EXPR) |
8bd37302 BS |
4317 | { |
4318 | STMT_VINFO_TYPE (stmt_info) = type_conversion_vec_info_type; | |
4fc5ebf1 | 4319 | vect_model_simple_cost (stmt_info, ncopies, dt, ndts, NULL, NULL); |
8bd37302 | 4320 | } |
4a00c761 JJ |
4321 | else if (modifier == NARROW) |
4322 | { | |
4323 | STMT_VINFO_TYPE (stmt_info) = type_demotion_vec_info_type; | |
8bd37302 | 4324 | vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt); |
4a00c761 JJ |
4325 | } |
4326 | else | |
4327 | { | |
4328 | STMT_VINFO_TYPE (stmt_info) = type_promotion_vec_info_type; | |
8bd37302 | 4329 | vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt); |
4a00c761 | 4330 | } |
9771b263 | 4331 | interm_types.release (); |
ebfd146a IR |
4332 | return true; |
4333 | } | |
4334 | ||
67b8dbac | 4335 | /* Transform. */ |
73fbfcad | 4336 | if (dump_enabled_p ()) |
78c60e3d | 4337 | dump_printf_loc (MSG_NOTE, vect_location, |
e645e942 | 4338 | "transform conversion. ncopies = %d.\n", ncopies); |
ebfd146a | 4339 | |
4a00c761 JJ |
4340 | if (op_type == binary_op) |
4341 | { | |
4342 | if (CONSTANT_CLASS_P (op0)) | |
4343 | op0 = fold_convert (TREE_TYPE (op1), op0); | |
4344 | else if (CONSTANT_CLASS_P (op1)) | |
4345 | op1 = fold_convert (TREE_TYPE (op0), op1); | |
4346 | } | |
4347 | ||
4348 | /* In case of multi-step conversion, we first generate conversion operations | |
4349 | to the intermediate types, and then from those types to the final one. | |
4350 | We create vector destinations for the intermediate types (TYPES) received | |
4351 | from supportable_*_operation, and store them in the correct order | |
4352 | for future use in vect_create_vectorized_*_stmts (). */ | |
8c681247 | 4353 | auto_vec<tree> vec_dsts (multi_step_cvt + 1); |
82294ec1 JJ |
4354 | vec_dest = vect_create_destination_var (scalar_dest, |
4355 | (cvt_type && modifier == WIDEN) | |
4356 | ? cvt_type : vectype_out); | |
9771b263 | 4357 | vec_dsts.quick_push (vec_dest); |
4a00c761 JJ |
4358 | |
4359 | if (multi_step_cvt) | |
4360 | { | |
9771b263 DN |
4361 | for (i = interm_types.length () - 1; |
4362 | interm_types.iterate (i, &intermediate_type); i--) | |
4a00c761 JJ |
4363 | { |
4364 | vec_dest = vect_create_destination_var (scalar_dest, | |
4365 | intermediate_type); | |
9771b263 | 4366 | vec_dsts.quick_push (vec_dest); |
4a00c761 JJ |
4367 | } |
4368 | } | |
ebfd146a | 4369 | |
4a00c761 | 4370 | if (cvt_type) |
82294ec1 JJ |
4371 | vec_dest = vect_create_destination_var (scalar_dest, |
4372 | modifier == WIDEN | |
4373 | ? vectype_out : cvt_type); | |
4a00c761 JJ |
4374 | |
4375 | if (!slp_node) | |
4376 | { | |
30862efc | 4377 | if (modifier == WIDEN) |
4a00c761 | 4378 | { |
c3284718 | 4379 | vec_oprnds0.create (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1); |
4a00c761 | 4380 | if (op_type == binary_op) |
9771b263 | 4381 | vec_oprnds1.create (1); |
4a00c761 | 4382 | } |
30862efc | 4383 | else if (modifier == NARROW) |
9771b263 DN |
4384 | vec_oprnds0.create ( |
4385 | 2 * (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1)); | |
4a00c761 JJ |
4386 | } |
4387 | else if (code == WIDEN_LSHIFT_EXPR) | |
9771b263 | 4388 | vec_oprnds1.create (slp_node->vec_stmts_size); |
ebfd146a | 4389 | |
4a00c761 | 4390 | last_oprnd = op0; |
ebfd146a IR |
4391 | prev_stmt_info = NULL; |
4392 | switch (modifier) | |
4393 | { | |
4394 | case NONE: | |
4395 | for (j = 0; j < ncopies; j++) | |
4396 | { | |
ebfd146a | 4397 | if (j == 0) |
306b0c92 | 4398 | vect_get_vec_defs (op0, NULL, stmt, &vec_oprnds0, NULL, slp_node); |
ebfd146a IR |
4399 | else |
4400 | vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, NULL); | |
4401 | ||
9771b263 | 4402 | FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0) |
4a00c761 JJ |
4403 | { |
4404 | /* Arguments are ready, create the new vector stmt. */ | |
4405 | if (code1 == CALL_EXPR) | |
4406 | { | |
4407 | new_stmt = gimple_build_call (decl1, 1, vop0); | |
4408 | new_temp = make_ssa_name (vec_dest, new_stmt); | |
4409 | gimple_call_set_lhs (new_stmt, new_temp); | |
4410 | } | |
4411 | else | |
4412 | { | |
4413 | gcc_assert (TREE_CODE_LENGTH (code1) == unary_op); | |
0d0e4a03 | 4414 | new_stmt = gimple_build_assign (vec_dest, code1, vop0); |
4a00c761 JJ |
4415 | new_temp = make_ssa_name (vec_dest, new_stmt); |
4416 | gimple_assign_set_lhs (new_stmt, new_temp); | |
4417 | } | |
4418 | ||
4419 | vect_finish_stmt_generation (stmt, new_stmt, gsi); | |
4420 | if (slp_node) | |
9771b263 | 4421 | SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt); |
225ce44b RB |
4422 | else |
4423 | { | |
4424 | if (!prev_stmt_info) | |
4425 | STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt; | |
4426 | else | |
4427 | STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt; | |
4428 | prev_stmt_info = vinfo_for_stmt (new_stmt); | |
4429 | } | |
4a00c761 | 4430 | } |
ebfd146a IR |
4431 | } |
4432 | break; | |
4433 | ||
4434 | case WIDEN: | |
4435 | /* In case the vectorization factor (VF) is bigger than the number | |
4436 | of elements that we can fit in a vectype (nunits), we have to | |
4437 | generate more than one vector stmt - i.e., we need to "unroll" | |
4438 | the vector stmt by a factor VF/nunits. */ | |
4439 | for (j = 0; j < ncopies; j++) | |
4440 | { | |
4a00c761 | 4441 | /* Handle uses. */ |
ebfd146a | 4442 | if (j == 0) |
4a00c761 JJ |
4443 | { |
4444 | if (slp_node) | |
4445 | { | |
4446 | if (code == WIDEN_LSHIFT_EXPR) | |
4447 | { | |
4448 | unsigned int k; | |
ebfd146a | 4449 | |
4a00c761 JJ |
4450 | vec_oprnd1 = op1; |
4451 | /* Store vec_oprnd1 for every vector stmt to be created | |
4452 | for SLP_NODE. We check during the analysis that all | |
4453 | the shift arguments are the same. */ | |
4454 | for (k = 0; k < slp_node->vec_stmts_size - 1; k++) | |
9771b263 | 4455 | vec_oprnds1.quick_push (vec_oprnd1); |
4a00c761 JJ |
4456 | |
4457 | vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL, | |
306b0c92 | 4458 | slp_node); |
4a00c761 JJ |
4459 | } |
4460 | else | |
4461 | vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, | |
306b0c92 | 4462 | &vec_oprnds1, slp_node); |
4a00c761 JJ |
4463 | } |
4464 | else | |
4465 | { | |
81c40241 | 4466 | vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt); |
9771b263 | 4467 | vec_oprnds0.quick_push (vec_oprnd0); |
4a00c761 JJ |
4468 | if (op_type == binary_op) |
4469 | { | |
4470 | if (code == WIDEN_LSHIFT_EXPR) | |
4471 | vec_oprnd1 = op1; | |
4472 | else | |
81c40241 | 4473 | vec_oprnd1 = vect_get_vec_def_for_operand (op1, stmt); |
9771b263 | 4474 | vec_oprnds1.quick_push (vec_oprnd1); |
4a00c761 JJ |
4475 | } |
4476 | } | |
4477 | } | |
ebfd146a | 4478 | else |
4a00c761 JJ |
4479 | { |
4480 | vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0); | |
9771b263 DN |
4481 | vec_oprnds0.truncate (0); |
4482 | vec_oprnds0.quick_push (vec_oprnd0); | |
4a00c761 JJ |
4483 | if (op_type == binary_op) |
4484 | { | |
4485 | if (code == WIDEN_LSHIFT_EXPR) | |
4486 | vec_oprnd1 = op1; | |
4487 | else | |
4488 | vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[1], | |
4489 | vec_oprnd1); | |
9771b263 DN |
4490 | vec_oprnds1.truncate (0); |
4491 | vec_oprnds1.quick_push (vec_oprnd1); | |
4a00c761 JJ |
4492 | } |
4493 | } | |
ebfd146a | 4494 | |
4a00c761 JJ |
4495 | /* Arguments are ready. Create the new vector stmts. */ |
4496 | for (i = multi_step_cvt; i >= 0; i--) | |
4497 | { | |
9771b263 | 4498 | tree this_dest = vec_dsts[i]; |
4a00c761 JJ |
4499 | enum tree_code c1 = code1, c2 = code2; |
4500 | if (i == 0 && codecvt2 != ERROR_MARK) | |
4501 | { | |
4502 | c1 = codecvt1; | |
4503 | c2 = codecvt2; | |
4504 | } | |
4505 | vect_create_vectorized_promotion_stmts (&vec_oprnds0, | |
4506 | &vec_oprnds1, | |
4507 | stmt, this_dest, gsi, | |
4508 | c1, c2, decl1, decl2, | |
4509 | op_type); | |
4510 | } | |
4511 | ||
9771b263 | 4512 | FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0) |
4a00c761 JJ |
4513 | { |
4514 | if (cvt_type) | |
4515 | { | |
4516 | if (codecvt1 == CALL_EXPR) | |
4517 | { | |
4518 | new_stmt = gimple_build_call (decl1, 1, vop0); | |
4519 | new_temp = make_ssa_name (vec_dest, new_stmt); | |
4520 | gimple_call_set_lhs (new_stmt, new_temp); | |
4521 | } | |
4522 | else | |
4523 | { | |
4524 | gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op); | |
b731b390 | 4525 | new_temp = make_ssa_name (vec_dest); |
0d0e4a03 JJ |
4526 | new_stmt = gimple_build_assign (new_temp, codecvt1, |
4527 | vop0); | |
4a00c761 JJ |
4528 | } |
4529 | ||
4530 | vect_finish_stmt_generation (stmt, new_stmt, gsi); | |
4531 | } | |
4532 | else | |
4533 | new_stmt = SSA_NAME_DEF_STMT (vop0); | |
4534 | ||
4535 | if (slp_node) | |
9771b263 | 4536 | SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt); |
4a00c761 | 4537 | else |
c689ce1e RB |
4538 | { |
4539 | if (!prev_stmt_info) | |
4540 | STMT_VINFO_VEC_STMT (stmt_info) = new_stmt; | |
4541 | else | |
4542 | STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt; | |
4543 | prev_stmt_info = vinfo_for_stmt (new_stmt); | |
4544 | } | |
4a00c761 | 4545 | } |
ebfd146a | 4546 | } |
4a00c761 JJ |
4547 | |
4548 | *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info); | |
ebfd146a IR |
4549 | break; |
4550 | ||
4551 | case NARROW: | |
4552 | /* In case the vectorization factor (VF) is bigger than the number | |
4553 | of elements that we can fit in a vectype (nunits), we have to | |
4554 | generate more than one vector stmt - i.e., we need to "unroll" | |
4555 | the vector stmt by a factor VF/nunits. */ | |
4556 | for (j = 0; j < ncopies; j++) | |
4557 | { | |
4558 | /* Handle uses. */ | |
4a00c761 JJ |
4559 | if (slp_node) |
4560 | vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL, | |
306b0c92 | 4561 | slp_node); |
ebfd146a IR |
4562 | else |
4563 | { | |
9771b263 | 4564 | vec_oprnds0.truncate (0); |
4a00c761 JJ |
4565 | vect_get_loop_based_defs (&last_oprnd, stmt, dt[0], &vec_oprnds0, |
4566 | vect_pow2 (multi_step_cvt) - 1); | |
ebfd146a IR |
4567 | } |
4568 | ||
4a00c761 JJ |
4569 | /* Arguments are ready. Create the new vector stmts. */ |
4570 | if (cvt_type) | |
9771b263 | 4571 | FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0) |
4a00c761 JJ |
4572 | { |
4573 | if (codecvt1 == CALL_EXPR) | |
4574 | { | |
4575 | new_stmt = gimple_build_call (decl1, 1, vop0); | |
4576 | new_temp = make_ssa_name (vec_dest, new_stmt); | |
4577 | gimple_call_set_lhs (new_stmt, new_temp); | |
4578 | } | |
4579 | else | |
4580 | { | |
4581 | gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op); | |
b731b390 | 4582 | new_temp = make_ssa_name (vec_dest); |
0d0e4a03 JJ |
4583 | new_stmt = gimple_build_assign (new_temp, codecvt1, |
4584 | vop0); | |
4a00c761 | 4585 | } |
ebfd146a | 4586 | |
4a00c761 | 4587 | vect_finish_stmt_generation (stmt, new_stmt, gsi); |
9771b263 | 4588 | vec_oprnds0[i] = new_temp; |
4a00c761 | 4589 | } |
ebfd146a | 4590 | |
4a00c761 JJ |
4591 | vect_create_vectorized_demotion_stmts (&vec_oprnds0, multi_step_cvt, |
4592 | stmt, vec_dsts, gsi, | |
4593 | slp_node, code1, | |
4594 | &prev_stmt_info); | |
ebfd146a IR |
4595 | } |
4596 | ||
4597 | *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info); | |
4a00c761 | 4598 | break; |
ebfd146a IR |
4599 | } |
4600 | ||
9771b263 DN |
4601 | vec_oprnds0.release (); |
4602 | vec_oprnds1.release (); | |
9771b263 | 4603 | interm_types.release (); |
ebfd146a IR |
4604 | |
4605 | return true; | |
4606 | } | |
ff802fa1 IR |
4607 | |
4608 | ||
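/* Editor's illustrative sketch, not GCC source: a scalar model of the
   multi-step WIDEN case handled above.  A conversion such as
   short -> double usually has no single vector instruction, so it is
   decomposed into a widening integer step on an intermediate type
   (CVT_TYPE, recorded in INTERM_TYPES) followed by the FLOAT_EXPR proper.
   The 4-element shape and the names are assumptions for illustration.  */

static void
short_to_double_sketch (const short src[4], double dst[4])
{
  int tmp[4];
  int i;
  /* Step 1: widen short -> int (the NOP_EXPR halves, code1/code2).  */
  for (i = 0; i < 4; i++)
    tmp[i] = src[i];
  /* Step 2: int -> double (the FLOAT_EXPR step, codecvt1/codecvt2).  */
  for (i = 0; i < 4; i++)
    dst[i] = (double) tmp[i];
}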
ebfd146a IR |
4609 | /* Function vectorizable_assignment. |
4610 | ||
b8698a0f L |
4611 | Check if STMT performs an assignment (copy) that can be vectorized. |
4612 | If VEC_STMT is also passed, vectorize the STMT: create a vectorized | |
ebfd146a IR |
4613 | stmt to replace it, put it in VEC_STMT, and insert it at GSI. | |
4614 | Return FALSE if not a vectorizable STMT, TRUE otherwise. */ | |
4615 | ||
4616 | static bool | |
355fe088 TS |
4617 | vectorizable_assignment (gimple *stmt, gimple_stmt_iterator *gsi, |
4618 | gimple **vec_stmt, slp_tree slp_node) | |
ebfd146a IR |
4619 | { |
4620 | tree vec_dest; | |
4621 | tree scalar_dest; | |
4622 | tree op; | |
4623 | stmt_vec_info stmt_info = vinfo_for_stmt (stmt); | |
ebfd146a IR |
4624 | loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); |
4625 | tree new_temp; | |
355fe088 | 4626 | gimple *def_stmt; |
4fc5ebf1 JG |
4627 | enum vect_def_type dt[1] = {vect_unknown_def_type}; |
4628 | int ndts = 1; | |
ebfd146a | 4629 | int ncopies; |
f18b55bd | 4630 | int i, j; |
6e1aa848 | 4631 | vec<tree> vec_oprnds = vNULL; |
ebfd146a | 4632 | tree vop; |
a70d6342 | 4633 | bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info); |
310213d4 | 4634 | vec_info *vinfo = stmt_info->vinfo; |
355fe088 | 4635 | gimple *new_stmt = NULL; |
f18b55bd | 4636 | stmt_vec_info prev_stmt_info = NULL; |
fde9c428 RG |
4637 | enum tree_code code; |
4638 | tree vectype_in; | |
ebfd146a | 4639 | |
a70d6342 | 4640 | if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo) |
ebfd146a IR |
4641 | return false; |
4642 | ||
66c16fd9 RB |
4643 | if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def |
4644 | && ! vec_stmt) | |
ebfd146a IR |
4645 | return false; |
4646 | ||
4647 | /* Is vectorizable assignment? */ | |
4648 | if (!is_gimple_assign (stmt)) | |
4649 | return false; | |
4650 | ||
4651 | scalar_dest = gimple_assign_lhs (stmt); | |
4652 | if (TREE_CODE (scalar_dest) != SSA_NAME) | |
4653 | return false; | |
4654 | ||
fde9c428 | 4655 | code = gimple_assign_rhs_code (stmt); |
ebfd146a | 4656 | if (gimple_assign_single_p (stmt) |
fde9c428 RG |
4657 | || code == PAREN_EXPR |
4658 | || CONVERT_EXPR_CODE_P (code)) | |
ebfd146a IR |
4659 | op = gimple_assign_rhs1 (stmt); |
4660 | else | |
4661 | return false; | |
4662 | ||
7b7ec6c5 RG |
4663 | if (code == VIEW_CONVERT_EXPR) |
4664 | op = TREE_OPERAND (op, 0); | |
4665 | ||
465c8c19 JJ |
4666 | tree vectype = STMT_VINFO_VECTYPE (stmt_info); |
4667 | unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype); | |
4668 | ||
4669 | /* Multiple types in SLP are handled by creating the appropriate number of | |
4670 | vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in | |
4671 | case of SLP. */ | |
fce57248 | 4672 | if (slp_node) |
465c8c19 JJ |
4673 | ncopies = 1; |
4674 | else | |
4675 | ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits; | |
4676 | ||
4677 | gcc_assert (ncopies >= 1); | |
4678 | ||
81c40241 | 4679 | if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt[0], &vectype_in)) |
ebfd146a | 4680 | { |
73fbfcad | 4681 | if (dump_enabled_p ()) |
78c60e3d | 4682 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 4683 | "use not simple.\n"); |
ebfd146a IR |
4684 | return false; |
4685 | } | |
4686 | ||
fde9c428 RG |
4687 | /* We can handle NOP_EXPR conversions that do not change the number |
4688 | of elements or the vector size. */ | |
7b7ec6c5 RG |
4689 | if ((CONVERT_EXPR_CODE_P (code) |
4690 | || code == VIEW_CONVERT_EXPR) | |
fde9c428 RG |
4691 | && (!vectype_in |
4692 | || TYPE_VECTOR_SUBPARTS (vectype_in) != nunits | |
4693 | || (GET_MODE_SIZE (TYPE_MODE (vectype)) | |
4694 | != GET_MODE_SIZE (TYPE_MODE (vectype_in))))) | |
4695 | return false; | |
4696 | ||
7b7b1813 RG |
4697 | /* We do not handle bit-precision changes. */ |
4698 | if ((CONVERT_EXPR_CODE_P (code) | |
4699 | || code == VIEW_CONVERT_EXPR) | |
4700 | && INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest)) | |
4701 | && ((TYPE_PRECISION (TREE_TYPE (scalar_dest)) | |
4702 | != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest)))) | |
4703 | || ((TYPE_PRECISION (TREE_TYPE (op)) | |
4704 | != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (op)))))) | |
4705 | /* But a conversion that does not change the bit-pattern is ok. */ | |
4706 | && !((TYPE_PRECISION (TREE_TYPE (scalar_dest)) | |
4707 | > TYPE_PRECISION (TREE_TYPE (op))) | |
2dab46d5 IE |
4708 | && TYPE_UNSIGNED (TREE_TYPE (op))) |
4709 | /* Conversion between boolean types of different sizes is | |
4710 | a simple assignment in case their vectypes are the same | |
4711 | boolean vectors. */ | |
4712 | && (!VECTOR_BOOLEAN_TYPE_P (vectype) | |
4713 | || !VECTOR_BOOLEAN_TYPE_P (vectype_in))) | |
7b7b1813 | 4714 | { |
73fbfcad | 4715 | if (dump_enabled_p ()) |
78c60e3d SS |
4716 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
4717 | "type conversion to/from bit-precision " | |
e645e942 | 4718 | "unsupported.\n"); |
7b7b1813 RG |
4719 | return false; |
4720 | } | |
4721 | ||
ebfd146a IR |
4722 | if (!vec_stmt) /* transformation not required. */ |
4723 | { | |
4724 | STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type; | |
73fbfcad | 4725 | if (dump_enabled_p ()) |
78c60e3d | 4726 | dump_printf_loc (MSG_NOTE, vect_location, |
e645e942 | 4727 | "=== vectorizable_assignment ===\n"); |
4fc5ebf1 | 4728 | vect_model_simple_cost (stmt_info, ncopies, dt, ndts, NULL, NULL); |
ebfd146a IR |
4729 | return true; |
4730 | } | |
4731 | ||
67b8dbac | 4732 | /* Transform. */ |
73fbfcad | 4733 | if (dump_enabled_p ()) |
e645e942 | 4734 | dump_printf_loc (MSG_NOTE, vect_location, "transform assignment.\n"); |
ebfd146a IR |
4735 | |
4736 | /* Handle def. */ | |
4737 | vec_dest = vect_create_destination_var (scalar_dest, vectype); | |
4738 | ||
4739 | /* Handle use. */ | |
f18b55bd | 4740 | for (j = 0; j < ncopies; j++) |
ebfd146a | 4741 | { |
f18b55bd IR |
4742 | /* Handle uses. */ |
4743 | if (j == 0) | |
306b0c92 | 4744 | vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node); |
f18b55bd IR |
4745 | else |
4746 | vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds, NULL); | |
4747 | ||
4748 | /* Arguments are ready. Create the new vector stmt. */ | |
9771b263 | 4749 | FOR_EACH_VEC_ELT (vec_oprnds, i, vop) |
f18b55bd | 4750 | { |
7b7ec6c5 RG |
4751 | if (CONVERT_EXPR_CODE_P (code) |
4752 | || code == VIEW_CONVERT_EXPR) | |
4a73490d | 4753 | vop = build1 (VIEW_CONVERT_EXPR, vectype, vop); |
f18b55bd IR |
4754 | new_stmt = gimple_build_assign (vec_dest, vop); |
4755 | new_temp = make_ssa_name (vec_dest, new_stmt); | |
4756 | gimple_assign_set_lhs (new_stmt, new_temp); | |
4757 | vect_finish_stmt_generation (stmt, new_stmt, gsi); | |
4758 | if (slp_node) | |
9771b263 | 4759 | SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt); |
f18b55bd | 4760 | } |
ebfd146a IR |
4761 | |
4762 | if (slp_node) | |
f18b55bd IR |
4763 | continue; |
4764 | ||
4765 | if (j == 0) | |
4766 | STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt; | |
4767 | else | |
4768 | STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt; | |
4769 | ||
4770 | prev_stmt_info = vinfo_for_stmt (new_stmt); | |
4771 | } | |
b8698a0f | 4772 | |
9771b263 | 4773 | vec_oprnds.release (); |
ebfd146a IR |
4774 | return true; |
4775 | } | |
4776 | ||
9dc3f7de | 4777 | |
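/* Editor's illustrative sketch, not GCC source: vectorizable_assignment
   above handles plain copies and conversions that leave the bit pattern
   alone; the emitted statement wraps the operand in VIEW_CONVERT_EXPR,
   i.e. a reinterpretation, not a value change.  Scalar model with an
   assumed same-size int/unsigned pair:  */

static unsigned int
view_convert_sketch (int x)
{
  /* Same bits, new type - no conversion instruction is needed.  */
  union { int i; unsigned int u; } v;
  v.i = x;
  return v.u;
}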
1107f3ae IR |
4778 | /* Return TRUE if CODE (a shift operation) is supported for SCALAR_TYPE |
4779 | either as a shift by a scalar or by a vector. */
4780 | ||
4781 | bool | |
4782 | vect_supportable_shift (enum tree_code code, tree scalar_type) | |
4783 | { | |
4784 | ||
ef4bddc2 | 4785 | machine_mode vec_mode; |
1107f3ae IR |
4786 | optab optab; |
4787 | int icode; | |
4788 | tree vectype; | |
4789 | ||
4790 | vectype = get_vectype_for_scalar_type (scalar_type); | |
4791 | if (!vectype) | |
4792 | return false; | |
4793 | ||
4794 | optab = optab_for_tree_code (code, vectype, optab_scalar); | |
4795 | if (!optab | |
4796 | || optab_handler (optab, TYPE_MODE (vectype)) == CODE_FOR_nothing) | |
4797 | { | |
4798 | optab = optab_for_tree_code (code, vectype, optab_vector); | |
4799 | if (!optab | |
4800 | || (optab_handler (optab, TYPE_MODE (vectype)) | |
4801 | == CODE_FOR_nothing)) | |
4802 | return false; | |
4803 | } | |
4804 | ||
4805 | vec_mode = TYPE_MODE (vectype); | |
4806 | icode = (int) optab_handler (optab, vec_mode); | |
4807 | if (icode == CODE_FOR_nothing) | |
4808 | return false; | |
4809 | ||
4810 | return true; | |
4811 | } | |
4812 | ||
4813 | ||
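/* Editor's illustrative sketch, not GCC source: the two shift forms the
   function above probes for.  optab_scalar matches insns where all
   elements share one scalar amount; optab_vector matches per-element
   amounts.  The widths and the names are assumptions for illustration.  */

static void
shift_forms_sketch (int v[4], const int amounts[4], int s)
{
  int i;
  /* optab_scalar form: one shared scalar shift amount.  */
  for (i = 0; i < 4; i++)
    v[i] <<= s;
  /* optab_vector form: a distinct amount per element.  */
  for (i = 0; i < 4; i++)
    v[i] <<= amounts[i];
}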
9dc3f7de IR |
4814 | /* Function vectorizable_shift. |
4815 | ||
4816 | Check if STMT performs a shift operation that can be vectorized. | |
4817 | If VEC_STMT is also passed, vectorize the STMT: create a vectorized | |
4818 | stmt to replace it, put it in VEC_STMT, and insert it at GSI. | |
4819 | Return FALSE if not a vectorizable STMT, TRUE otherwise. */ | |
4820 | ||
4821 | static bool | |
355fe088 TS |
4822 | vectorizable_shift (gimple *stmt, gimple_stmt_iterator *gsi, |
4823 | gimple **vec_stmt, slp_tree slp_node) | |
9dc3f7de IR |
4824 | { |
4825 | tree vec_dest; | |
4826 | tree scalar_dest; | |
4827 | tree op0, op1 = NULL; | |
4828 | tree vec_oprnd1 = NULL_TREE; | |
4829 | stmt_vec_info stmt_info = vinfo_for_stmt (stmt); | |
4830 | tree vectype; | |
4831 | loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); | |
4832 | enum tree_code code; | |
ef4bddc2 | 4833 | machine_mode vec_mode; |
9dc3f7de IR |
4834 | tree new_temp; |
4835 | optab optab; | |
4836 | int icode; | |
ef4bddc2 | 4837 | machine_mode optab_op2_mode; |
355fe088 | 4838 | gimple *def_stmt; |
9dc3f7de | 4839 | enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type}; |
4fc5ebf1 | 4840 | int ndts = 2; |
355fe088 | 4841 | gimple *new_stmt = NULL; |
9dc3f7de IR |
4842 | stmt_vec_info prev_stmt_info; |
4843 | int nunits_in; | |
4844 | int nunits_out; | |
4845 | tree vectype_out; | |
cede2577 | 4846 | tree op1_vectype; |
9dc3f7de IR |
4847 | int ncopies; |
4848 | int j, i; | |
6e1aa848 DN |
4849 | vec<tree> vec_oprnds0 = vNULL; |
4850 | vec<tree> vec_oprnds1 = vNULL; | |
9dc3f7de IR |
4851 | tree vop0, vop1; |
4852 | unsigned int k; | |
49eab32e | 4853 | bool scalar_shift_arg = true; |
9dc3f7de | 4854 | bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info); |
310213d4 | 4855 | vec_info *vinfo = stmt_info->vinfo; |
9dc3f7de IR |
4856 | int vf; |
4857 | ||
4858 | if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo) | |
4859 | return false; | |
4860 | ||
66c16fd9 RB |
4861 | if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def |
4862 | && ! vec_stmt) | |
9dc3f7de IR |
4863 | return false; |
4864 | ||
4865 | /* Is STMT a vectorizable binary/unary operation? */ | |
4866 | if (!is_gimple_assign (stmt)) | |
4867 | return false; | |
4868 | ||
4869 | if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME) | |
4870 | return false; | |
4871 | ||
4872 | code = gimple_assign_rhs_code (stmt); | |
4873 | ||
4874 | if (!(code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR | |
4875 | || code == RROTATE_EXPR)) | |
4876 | return false; | |
4877 | ||
4878 | scalar_dest = gimple_assign_lhs (stmt); | |
4879 | vectype_out = STMT_VINFO_VECTYPE (stmt_info); | |
7b7b1813 RG |
4880 | if (TYPE_PRECISION (TREE_TYPE (scalar_dest)) |
4881 | != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest)))) | |
4882 | { | |
73fbfcad | 4883 | if (dump_enabled_p ()) |
78c60e3d | 4884 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 4885 | "bit-precision shifts not supported.\n"); |
7b7b1813 RG |
4886 | return false; |
4887 | } | |
9dc3f7de IR |
4888 | |
4889 | op0 = gimple_assign_rhs1 (stmt); | |
81c40241 | 4890 | if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype)) |
9dc3f7de | 4891 | { |
73fbfcad | 4892 | if (dump_enabled_p ()) |
78c60e3d | 4893 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 4894 | "use not simple.\n"); |
9dc3f7de IR |
4895 | return false; |
4896 | } | |
4897 | /* If op0 is an external or constant def, use a vector type with | |
4898 | the same size as the output vector type. */ | |
4899 | if (!vectype) | |
4900 | vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out); | |
4901 | if (vec_stmt) | |
4902 | gcc_assert (vectype); | |
4903 | if (!vectype) | |
4904 | { | |
73fbfcad | 4905 | if (dump_enabled_p ()) |
78c60e3d | 4906 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 4907 | "no vectype for scalar type\n"); |
9dc3f7de IR |
4908 | return false; |
4909 | } | |
4910 | ||
4911 | nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out); | |
4912 | nunits_in = TYPE_VECTOR_SUBPARTS (vectype); | |
4913 | if (nunits_out != nunits_in) | |
4914 | return false; | |
4915 | ||
4916 | op1 = gimple_assign_rhs2 (stmt); | |
81c40241 | 4917 | if (!vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1], &op1_vectype)) |
9dc3f7de | 4918 | { |
73fbfcad | 4919 | if (dump_enabled_p ()) |
78c60e3d | 4920 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 4921 | "use not simple.\n"); |
9dc3f7de IR |
4922 | return false; |
4923 | } | |
4924 | ||
4925 | if (loop_vinfo) | |
4926 | vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo); | |
4927 | else | |
4928 | vf = 1; | |
4929 | ||
4930 | /* Multiple types in SLP are handled by creating the appropriate number of | |
4931 | vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in | |
4932 | case of SLP. */ | |
fce57248 | 4933 | if (slp_node) |
9dc3f7de IR |
4934 | ncopies = 1; |
4935 | else | |
4936 | ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in; | |
4937 | ||
4938 | gcc_assert (ncopies >= 1); | |
4939 | ||
4940 | /* Determine whether the shift amount is a vector, or scalar. If the | |
4941 | shift/rotate amount is a vector, use the vector/vector shift optabs. */ | |
4942 | ||
dbfa87aa YR |
4943 | if ((dt[1] == vect_internal_def |
4944 | || dt[1] == vect_induction_def) | |
4945 | && !slp_node) | |
49eab32e JJ |
4946 | scalar_shift_arg = false; |
4947 | else if (dt[1] == vect_constant_def | |
4948 | || dt[1] == vect_external_def | |
4949 | || dt[1] == vect_internal_def) | |
4950 | { | |
4951 | /* In SLP, we need to check whether the shift count is the same in | |
4952 | all the stmts; in loops, if it is a constant or invariant, it is | |
4953 | always a scalar shift. */ | |
4954 | if (slp_node) | |
4955 | { | |
355fe088 TS |
4956 | vec<gimple *> stmts = SLP_TREE_SCALAR_STMTS (slp_node); |
4957 | gimple *slpstmt; | |
49eab32e | 4958 | |
9771b263 | 4959 | FOR_EACH_VEC_ELT (stmts, k, slpstmt) |
49eab32e JJ |
4960 | if (!operand_equal_p (gimple_assign_rhs2 (slpstmt), op1, 0)) |
4961 | scalar_shift_arg = false; | |
4962 | } | |
60d393e8 RB |
4963 | |
4964 | /* If the shift amount is computed by a pattern stmt we cannot | |
4965 | use the scalar amount directly, so give up and use a vector | |
4966 | shift. */ | |
4967 | if (dt[1] == vect_internal_def) | |
4968 | { | |
4969 | gimple *def = SSA_NAME_DEF_STMT (op1); | |
4970 | if (is_pattern_stmt_p (vinfo_for_stmt (def))) | |
4971 | scalar_shift_arg = false; | |
4972 | } | |
49eab32e JJ |
4973 | } |
4974 | else | |
4975 | { | |
73fbfcad | 4976 | if (dump_enabled_p ()) |
78c60e3d | 4977 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 4978 | "operand mode requires invariant argument.\n"); |
49eab32e JJ |
4979 | return false; |
4980 | } | |
4981 | ||
9dc3f7de | 4982 | /* Vector shifted by vector. */ |
49eab32e | 4983 | if (!scalar_shift_arg) |
9dc3f7de IR |
4984 | { |
4985 | optab = optab_for_tree_code (code, vectype, optab_vector); | |
73fbfcad | 4986 | if (dump_enabled_p ()) |
78c60e3d | 4987 | dump_printf_loc (MSG_NOTE, vect_location, |
e645e942 | 4988 | "vector/vector shift/rotate found.\n"); |
78c60e3d | 4989 | |
aa948027 JJ |
4990 | if (!op1_vectype) |
4991 | op1_vectype = get_same_sized_vectype (TREE_TYPE (op1), vectype_out); | |
4992 | if (op1_vectype == NULL_TREE | |
4993 | || TYPE_MODE (op1_vectype) != TYPE_MODE (vectype)) | |
cede2577 | 4994 | { |
73fbfcad | 4995 | if (dump_enabled_p ()) |
78c60e3d SS |
4996 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
4997 | "unusable type for last operand in" | |
e645e942 | 4998 | " vector/vector shift/rotate.\n"); |
cede2577 JJ |
4999 | return false; |
5000 | } | |
9dc3f7de IR |
5001 | } |
5002 | /* See if the machine has a vector-shifted-by-scalar insn, and if not,
5003 | see if it has a vector-shifted-by-vector insn. */
49eab32e | 5004 | else |
9dc3f7de IR |
5005 | { |
5006 | optab = optab_for_tree_code (code, vectype, optab_scalar); | |
5007 | if (optab | |
5008 | && optab_handler (optab, TYPE_MODE (vectype)) != CODE_FOR_nothing) | |
5009 | { | |
73fbfcad | 5010 | if (dump_enabled_p ()) |
78c60e3d | 5011 | dump_printf_loc (MSG_NOTE, vect_location, |
e645e942 | 5012 | "vector/scalar shift/rotate found.\n"); |
9dc3f7de IR |
5013 | } |
5014 | else | |
5015 | { | |
5016 | optab = optab_for_tree_code (code, vectype, optab_vector); | |
5017 | if (optab | |
5018 | && (optab_handler (optab, TYPE_MODE (vectype)) | |
5019 | != CODE_FOR_nothing)) | |
5020 | { | |
49eab32e JJ |
5021 | scalar_shift_arg = false; |
5022 | ||
73fbfcad | 5023 | if (dump_enabled_p ()) |
78c60e3d | 5024 | dump_printf_loc (MSG_NOTE, vect_location, |
e645e942 | 5025 | "vector/vector shift/rotate found.\n"); |
9dc3f7de IR |
5026 | |
5027 | /* Unlike the other binary operators, shifts/rotates have | |
5028 | an rhs of type int instead of the same type as the lhs, | |
5029 | so make sure the scalar is the right type if we are | |
aa948027 | 5030 | dealing with vectors of long long/long/short/char. */ |
9dc3f7de IR |
5031 | if (dt[1] == vect_constant_def) |
5032 | op1 = fold_convert (TREE_TYPE (vectype), op1); | |
aa948027 JJ |
5033 | else if (!useless_type_conversion_p (TREE_TYPE (vectype), |
5034 | TREE_TYPE (op1))) | |
5035 | { | |
5036 | if (slp_node | |
5037 | && TYPE_MODE (TREE_TYPE (vectype)) | |
5038 | != TYPE_MODE (TREE_TYPE (op1))) | |
5039 | { | |
73fbfcad | 5040 | if (dump_enabled_p ()) |
78c60e3d SS |
5041 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
5042 | "unusable type for last operand in" | |
e645e942 | 5043 | " vector/vector shift/rotate.\n"); |
21c0a521 | 5044 | return false; |
aa948027 JJ |
5045 | } |
5046 | if (vec_stmt && !slp_node) | |
5047 | { | |
5048 | op1 = fold_convert (TREE_TYPE (vectype), op1); | |
5049 | op1 = vect_init_vector (stmt, op1, | |
5050 | TREE_TYPE (vectype), NULL); | |
5051 | } | |
5052 | } | |
9dc3f7de IR |
5053 | } |
5054 | } | |
5055 | } | |
9dc3f7de IR |
5056 | |
5057 | /* Supportable by target? */ | |
5058 | if (!optab) | |
5059 | { | |
73fbfcad | 5060 | if (dump_enabled_p ()) |
78c60e3d | 5061 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 5062 | "no optab.\n"); |
9dc3f7de IR |
5063 | return false; |
5064 | } | |
5065 | vec_mode = TYPE_MODE (vectype); | |
5066 | icode = (int) optab_handler (optab, vec_mode); | |
5067 | if (icode == CODE_FOR_nothing) | |
5068 | { | |
73fbfcad | 5069 | if (dump_enabled_p ()) |
78c60e3d | 5070 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 5071 | "op not supported by target.\n"); |
9dc3f7de IR |
5072 | /* Check only during analysis. */ |
5073 | if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD | |
5074 | || (vf < vect_min_worthwhile_factor (code) | |
5075 | && !vec_stmt)) | |
5076 | return false; | |
73fbfcad | 5077 | if (dump_enabled_p ()) |
e645e942 TJ |
5078 | dump_printf_loc (MSG_NOTE, vect_location, |
5079 | "proceeding using word mode.\n"); | |
9dc3f7de IR |
5080 | } |
5081 | ||
5082 | /* Worthwhile without SIMD support? Check only during analysis. */ | |
5083 | if (!VECTOR_MODE_P (TYPE_MODE (vectype)) | |
5084 | && vf < vect_min_worthwhile_factor (code) | |
5085 | && !vec_stmt) | |
5086 | { | |
73fbfcad | 5087 | if (dump_enabled_p ()) |
78c60e3d | 5088 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 5089 | "not worthwhile without SIMD support.\n"); |
9dc3f7de IR |
5090 | return false; |
5091 | } | |
5092 | ||
5093 | if (!vec_stmt) /* transformation not required. */ | |
5094 | { | |
5095 | STMT_VINFO_TYPE (stmt_info) = shift_vec_info_type; | |
73fbfcad | 5096 | if (dump_enabled_p ()) |
e645e942 TJ |
5097 | dump_printf_loc (MSG_NOTE, vect_location, |
5098 | "=== vectorizable_shift ===\n"); | |
4fc5ebf1 | 5099 | vect_model_simple_cost (stmt_info, ncopies, dt, ndts, NULL, NULL); |
9dc3f7de IR |
5100 | return true; |
5101 | } | |
5102 | ||
67b8dbac | 5103 | /* Transform. */ |
9dc3f7de | 5104 | |
73fbfcad | 5105 | if (dump_enabled_p ()) |
78c60e3d | 5106 | dump_printf_loc (MSG_NOTE, vect_location, |
e645e942 | 5107 | "transform binary/unary operation.\n"); |
9dc3f7de IR |
5108 | |
5109 | /* Handle def. */ | |
5110 | vec_dest = vect_create_destination_var (scalar_dest, vectype); | |
5111 | ||
9dc3f7de IR |
5112 | prev_stmt_info = NULL; |
5113 | for (j = 0; j < ncopies; j++) | |
5114 | { | |
5115 | /* Handle uses. */ | |
5116 | if (j == 0) | |
5117 | { | |
5118 | if (scalar_shift_arg) | |
5119 | { | |
5120 | /* Vector shl and shr insn patterns can be defined with scalar | |
5121 | operand 2 (shift operand). In this case, use constant or loop | |
5122 | invariant op1 directly, without extending it to vector mode | |
5123 | first. */ | |
5124 | optab_op2_mode = insn_data[icode].operand[2].mode; | |
5125 | if (!VECTOR_MODE_P (optab_op2_mode)) | |
5126 | { | |
73fbfcad | 5127 | if (dump_enabled_p ()) |
78c60e3d | 5128 | dump_printf_loc (MSG_NOTE, vect_location, |
e645e942 | 5129 | "operand 1 using scalar mode.\n"); |
9dc3f7de | 5130 | vec_oprnd1 = op1; |
8930f723 | 5131 | vec_oprnds1.create (slp_node ? slp_node->vec_stmts_size : 1); |
9771b263 | 5132 | vec_oprnds1.quick_push (vec_oprnd1); |
9dc3f7de IR |
5133 | if (slp_node) |
5134 | { | |
5135 | /* Store vec_oprnd1 for every vector stmt to be created | |
5136 | for SLP_NODE. We check during the analysis that all | |
5137 | the shift arguments are the same. | |
5138 | TODO: Allow different constants for different vector | |
5139 | stmts generated for an SLP instance. */ | |
5140 | for (k = 0; k < slp_node->vec_stmts_size - 1; k++) | |
9771b263 | 5141 | vec_oprnds1.quick_push (vec_oprnd1); |
9dc3f7de IR |
5142 | } |
5143 | } | |
5144 | } | |
5145 | ||
5146 | /* vec_oprnd1 is available if operand 1 should be of a scalar type | |
5147 | (a special case for certain kinds of vector shifts); otherwise, | |
5148 | operand 1 should be of a vector type (the usual case). */ | |
5149 | if (vec_oprnd1) | |
5150 | vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL, | |
306b0c92 | 5151 | slp_node); |
9dc3f7de IR |
5152 | else |
5153 | vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1, | |
306b0c92 | 5154 | slp_node); |
9dc3f7de IR |
5155 | } |
5156 | else | |
5157 | vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1); | |
5158 | ||
5159 | /* Arguments are ready. Create the new vector stmt. */ | |
9771b263 | 5160 | FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0) |
9dc3f7de | 5161 | { |
9771b263 | 5162 | vop1 = vec_oprnds1[i]; |
0d0e4a03 | 5163 | new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1); |
9dc3f7de IR |
5164 | new_temp = make_ssa_name (vec_dest, new_stmt); |
5165 | gimple_assign_set_lhs (new_stmt, new_temp); | |
5166 | vect_finish_stmt_generation (stmt, new_stmt, gsi); | |
5167 | if (slp_node) | |
9771b263 | 5168 | SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt); |
9dc3f7de IR |
5169 | } |
5170 | ||
5171 | if (slp_node) | |
5172 | continue; | |
5173 | ||
5174 | if (j == 0) | |
5175 | STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt; | |
5176 | else | |
5177 | STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt; | |
5178 | prev_stmt_info = vinfo_for_stmt (new_stmt); | |
5179 | } | |
5180 | ||
9771b263 DN |
5181 | vec_oprnds0.release (); |
5182 | vec_oprnds1.release (); | |
9dc3f7de IR |
5183 | |
5184 | return true; | |
5185 | } | |
5186 | ||
5187 | ||
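/* Editor's illustrative sketch, not GCC source: why vectorizable_shift
   above may fold_convert the shift amount.  In GIMPLE the amount of
   "long long x << 3" has type int, but a vector/vector shift wants the
   amount as elements of the shifted type, so the scalar is first
   converted and then broadcast (vect_init_vector).  Scalar model with
   assumed names and a two-element width:  */

static void
shift_amount_convert_sketch (long long v[2])
{
  int i;
  /* The int constant 3 becomes a long long before being replicated
     into the per-element amount vector.  */
  long long amt = 3;
  for (i = 0; i < 2; i++)
    v[i] <<= amt;
}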
ebfd146a IR |
5188 | /* Function vectorizable_operation. |
5189 | ||
16949072 RG |
5190 | Check if STMT performs a binary, unary or ternary operation that can |
5191 | be vectorized. | |
b8698a0f | 5192 | If VEC_STMT is also passed, vectorize the STMT: create a vectorized |
ebfd146a IR |
5193 | stmt to replace it, put it in VEC_STMT, and insert it at GSI. | |
5194 | Return FALSE if not a vectorizable STMT, TRUE otherwise. */ | |
5195 | ||
5196 | static bool | |
355fe088 TS |
5197 | vectorizable_operation (gimple *stmt, gimple_stmt_iterator *gsi, |
5198 | gimple **vec_stmt, slp_tree slp_node) | |
ebfd146a | 5199 | { |
00f07b86 | 5200 | tree vec_dest; |
ebfd146a | 5201 | tree scalar_dest; |
16949072 | 5202 | tree op0, op1 = NULL_TREE, op2 = NULL_TREE; |
ebfd146a | 5203 | stmt_vec_info stmt_info = vinfo_for_stmt (stmt); |
00f07b86 | 5204 | tree vectype; |
ebfd146a IR |
5205 | loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); |
5206 | enum tree_code code; | |
ef4bddc2 | 5207 | machine_mode vec_mode; |
ebfd146a IR |
5208 | tree new_temp; |
5209 | int op_type; | |
00f07b86 | 5210 | optab optab; |
523ba738 | 5211 | bool target_support_p; |
355fe088 | 5212 | gimple *def_stmt; |
16949072 RG |
5213 | enum vect_def_type dt[3] |
5214 | = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type}; | |
4fc5ebf1 | 5215 | int ndts = 3; |
355fe088 | 5216 | gimple *new_stmt = NULL; |
ebfd146a | 5217 | stmt_vec_info prev_stmt_info; |
b690cc0f | 5218 | int nunits_in; |
ebfd146a IR |
5219 | int nunits_out; |
5220 | tree vectype_out; | |
5221 | int ncopies; | |
5222 | int j, i; | |
6e1aa848 DN |
5223 | vec<tree> vec_oprnds0 = vNULL; |
5224 | vec<tree> vec_oprnds1 = vNULL; | |
5225 | vec<tree> vec_oprnds2 = vNULL; | |
16949072 | 5226 | tree vop0, vop1, vop2; |
a70d6342 | 5227 | bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info); |
310213d4 | 5228 | vec_info *vinfo = stmt_info->vinfo; |
a70d6342 IR |
5229 | int vf; |
5230 | ||
a70d6342 | 5231 | if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo) |
ebfd146a IR |
5232 | return false; |
5233 | ||
66c16fd9 RB |
5234 | if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def |
5235 | && ! vec_stmt) | |
ebfd146a IR |
5236 | return false; |
5237 | ||
5238 | /* Is STMT a vectorizable binary/unary operation? */ | |
5239 | if (!is_gimple_assign (stmt)) | |
5240 | return false; | |
5241 | ||
5242 | if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME) | |
5243 | return false; | |
5244 | ||
ebfd146a IR |
5245 | code = gimple_assign_rhs_code (stmt); |
5246 | ||
5247 | /* For pointer addition, we should use the normal plus for | |
5248 | the vector addition. */ | |
5249 | if (code == POINTER_PLUS_EXPR) | |
5250 | code = PLUS_EXPR; | |
5251 | ||
5252 | /* Support only unary, binary or ternary operations. */ | |
5253 | op_type = TREE_CODE_LENGTH (code); | |
16949072 | 5254 | if (op_type != unary_op && op_type != binary_op && op_type != ternary_op) |
ebfd146a | 5255 | { |
73fbfcad | 5256 | if (dump_enabled_p ()) |
78c60e3d | 5257 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 5258 | "num. args = %d (not unary/binary/ternary op).\n", |
78c60e3d | 5259 | op_type); |
ebfd146a IR |
5260 | return false; |
5261 | } | |
5262 | ||
b690cc0f RG |
5263 | scalar_dest = gimple_assign_lhs (stmt); |
5264 | vectype_out = STMT_VINFO_VECTYPE (stmt_info); | |
5265 | ||
7b7b1813 RG |
5266 | /* Most operations cannot handle bit-precision types without extra |
5267 | truncations. */ | |
045c1278 IE |
5268 | if (!VECTOR_BOOLEAN_TYPE_P (vectype_out) |
5269 | && (TYPE_PRECISION (TREE_TYPE (scalar_dest)) | |
5270 | != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest)))) | |
7b7b1813 RG |
5271 | /* Exceptions are the bitwise binary operations. */ | |
5272 | && code != BIT_IOR_EXPR | |
5273 | && code != BIT_XOR_EXPR | |
5274 | && code != BIT_AND_EXPR) | |
5275 | { | |
73fbfcad | 5276 | if (dump_enabled_p ()) |
78c60e3d | 5277 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 5278 | "bit-precision arithmetic not supported.\n"); |
7b7b1813 RG |
5279 | return false; |
5280 | } | |
5281 | ||
ebfd146a | 5282 | op0 = gimple_assign_rhs1 (stmt); |
81c40241 | 5283 | if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype)) |
ebfd146a | 5284 | { |
73fbfcad | 5285 | if (dump_enabled_p ()) |
78c60e3d | 5286 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 5287 | "use not simple.\n"); |
ebfd146a IR |
5288 | return false; |
5289 | } | |
b690cc0f RG |
5290 | /* If op0 is an external or constant def, use a vector type with | |
5291 | the same size as the output vector type. */ | |
5292 | if (!vectype) | |
b036c6c5 IE |
5293 | { |
5294 | /* For a boolean type we cannot determine the vectype from an | |
5295 | invariant value (we don't know whether it is a vector | |
5296 | of booleans or a vector of integers). We use the output | |
5297 | vectype because operations on booleans don't change | |
5298 | the type. */ | |
2568d8a1 | 5299 | if (VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (op0))) |
b036c6c5 | 5300 | { |
2568d8a1 | 5301 | if (!VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (scalar_dest))) |
b036c6c5 IE |
5302 | { |
5303 | if (dump_enabled_p ()) | |
5304 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
5305 | "not supported operation on bool value.\n"); | |
5306 | return false; | |
5307 | } | |
5308 | vectype = vectype_out; | |
5309 | } | |
5310 | else | |
5311 | vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out); | |
5312 | } | |
7d8930a0 IR |
5313 | if (vec_stmt) |
5314 | gcc_assert (vectype); | |
5315 | if (!vectype) | |
5316 | { | |
73fbfcad | 5317 | if (dump_enabled_p ()) |
7d8930a0 | 5318 | { |
78c60e3d SS |
5319 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
5320 | "no vectype for scalar type "); | |
5321 | dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, | |
5322 | TREE_TYPE (op0)); | |
e645e942 | 5323 | dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); |
7d8930a0 IR |
5324 | } |
5325 | ||
5326 | return false; | |
5327 | } | |
b690cc0f RG |
5328 | |
5329 | nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out); | |
5330 | nunits_in = TYPE_VECTOR_SUBPARTS (vectype); | |
5331 | if (nunits_out != nunits_in) | |
5332 | return false; | |
ebfd146a | 5333 | |
16949072 | 5334 | if (op_type == binary_op || op_type == ternary_op) |
ebfd146a IR |
5335 | { |
5336 | op1 = gimple_assign_rhs2 (stmt); | |
81c40241 | 5337 | if (!vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1])) |
ebfd146a | 5338 | { |
73fbfcad | 5339 | if (dump_enabled_p ()) |
78c60e3d | 5340 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 5341 | "use not simple.\n"); |
ebfd146a IR |
5342 | return false; |
5343 | } | |
5344 | } | |
16949072 RG |
5345 | if (op_type == ternary_op) |
5346 | { | |
5347 | op2 = gimple_assign_rhs3 (stmt); | |
81c40241 | 5348 | if (!vect_is_simple_use (op2, vinfo, &def_stmt, &dt[2])) |
16949072 | 5349 | { |
73fbfcad | 5350 | if (dump_enabled_p ()) |
78c60e3d | 5351 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 5352 | "use not simple.\n"); |
16949072 RG |
5353 | return false; |
5354 | } | |
5355 | } | |
ebfd146a | 5356 | |
b690cc0f RG |
5357 | if (loop_vinfo) |
5358 | vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo); | |
5359 | else | |
5360 | vf = 1; | |
5361 | ||
5362 | /* Multiple types in SLP are handled by creating the appropriate number of | |
ff802fa1 | 5363 | vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in |
b690cc0f | 5364 | case of SLP. */ |
fce57248 | 5365 | if (slp_node) |
b690cc0f RG |
5366 | ncopies = 1; |
5367 | else | |
5368 | ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in; | |
5369 | ||
5370 | gcc_assert (ncopies >= 1); | |
5371 | ||
9dc3f7de | 5372 | /* Shifts are handled in vectorizable_shift (). */ |
ebfd146a IR |
5373 | if (code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR |
5374 | || code == RROTATE_EXPR) | |
9dc3f7de | 5375 | return false; |
ebfd146a | 5376 | |
ebfd146a | 5377 | /* Supportable by target? */ |
00f07b86 RH |
5378 | |
5379 | vec_mode = TYPE_MODE (vectype); | |
5380 | if (code == MULT_HIGHPART_EXPR) | |
523ba738 | 5381 | target_support_p = can_mult_highpart_p (vec_mode, TYPE_UNSIGNED (vectype)); |
00f07b86 RH |
5382 | else |
5383 | { | |
5384 | optab = optab_for_tree_code (code, vectype, optab_default); | |
5385 | if (!optab) | |
5deb57cb | 5386 | { |
73fbfcad | 5387 | if (dump_enabled_p ()) |
78c60e3d | 5388 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 5389 | "no optab.\n"); |
00f07b86 | 5390 | return false; |
5deb57cb | 5391 | } |
523ba738 RS |
5392 | target_support_p = (optab_handler (optab, vec_mode) |
5393 | != CODE_FOR_nothing); | |
5deb57cb JJ |
5394 | } |
5395 | ||
523ba738 | 5396 | if (!target_support_p) |
ebfd146a | 5397 | { |
73fbfcad | 5398 | if (dump_enabled_p ()) |
78c60e3d | 5399 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 5400 | "op not supported by target.\n"); |
ebfd146a IR |
5401 | /* Check only during analysis. */ |
5402 | if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD | |
5deb57cb | 5403 | || (!vec_stmt && vf < vect_min_worthwhile_factor (code))) |
ebfd146a | 5404 | return false; |
73fbfcad | 5405 | if (dump_enabled_p ()) |
e645e942 TJ |
5406 | dump_printf_loc (MSG_NOTE, vect_location, |
5407 | "proceeding using word mode.\n"); | |
383d9c83 IR |
5408 | } |
5409 | ||
4a00c761 | 5410 | /* Worthwhile without SIMD support? Check only during analysis. */ |
5deb57cb JJ |
5411 | if (!VECTOR_MODE_P (vec_mode) |
5412 | && !vec_stmt | |
5413 | && vf < vect_min_worthwhile_factor (code)) | |
7d8930a0 | 5414 | { |
73fbfcad | 5415 | if (dump_enabled_p ()) |
78c60e3d | 5416 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 5417 | "not worthwhile without SIMD support.\n"); |
e34842c6 | 5418 | return false; |
7d8930a0 | 5419 | } |
ebfd146a | 5420 | |
ebfd146a IR |
5421 | if (!vec_stmt) /* transformation not required. */ |
5422 | { | |
4a00c761 | 5423 | STMT_VINFO_TYPE (stmt_info) = op_vec_info_type; |
73fbfcad | 5424 | if (dump_enabled_p ()) |
78c60e3d | 5425 | dump_printf_loc (MSG_NOTE, vect_location, |
e645e942 | 5426 | "=== vectorizable_operation ===\n"); |
4fc5ebf1 | 5427 | vect_model_simple_cost (stmt_info, ncopies, dt, ndts, NULL, NULL); |
ebfd146a IR |
5428 | return true; |
5429 | } | |
5430 | ||
67b8dbac | 5431 | /* Transform. */ |
ebfd146a | 5432 | |
73fbfcad | 5433 | if (dump_enabled_p ()) |
78c60e3d | 5434 | dump_printf_loc (MSG_NOTE, vect_location, |
e645e942 | 5435 | "transform binary/unary operation.\n"); |
383d9c83 | 5436 | |
ebfd146a | 5437 | /* Handle def. */ |
00f07b86 | 5438 | vec_dest = vect_create_destination_var (scalar_dest, vectype); |
b8698a0f | 5439 | |
ebfd146a IR |
5440 | /* In case the vectorization factor (VF) is bigger than the number |
5441 | of elements that we can fit in a vectype (nunits), we have to generate | |
5442 | more than one vector stmt - i.e., we need to "unroll" the | |
4a00c761 JJ |
5443 | vector stmt by a factor VF/nunits. In doing so, we record a pointer |
5444 | from one copy of the vector stmt to the next, in the field | |
5445 | STMT_VINFO_RELATED_STMT. This is necessary in order to allow following | |
5446 | stages to find the correct vector defs to be used when vectorizing | |
5447 | stmts that use the defs of the current stmt. The example below | |
5448 | illustrates the vectorization process when VF=16 and nunits=4 (i.e., | |
5449 | we need to create 4 vectorized stmts): | |
5450 | ||
5451 | before vectorization: | |
5452 | RELATED_STMT VEC_STMT | |
5453 | S1: x = memref - - | |
5454 | S2: z = x + 1 - - | |
5455 | ||
5456 | step 1: vectorize stmt S1 (done in vectorizable_load. See more details | |
5457 | there): | |
5458 | RELATED_STMT VEC_STMT | |
5459 | VS1_0: vx0 = memref0 VS1_1 - | |
5460 | VS1_1: vx1 = memref1 VS1_2 - | |
5461 | VS1_2: vx2 = memref2 VS1_3 - | |
5462 | VS1_3: vx3 = memref3 - - | |
5463 | S1: x = load - VS1_0 | |
5464 | S2: z = x + 1 - - | |
5465 | ||
5466 | step 2: vectorize stmt S2 (done here): |
5467 | To vectorize stmt S2 we first need to find the relevant vector | |
5468 | def for the first operand 'x'. This is, as usual, obtained from | |
5469 | the vector stmt recorded in the STMT_VINFO_VEC_STMT of the stmt | |
5470 | that defines 'x' (S1). This way we find the stmt VS1_0, and the | |
5471 | relevant vector def 'vx0'. Having found 'vx0' we can generate | |
5472 | the vector stmt VS2_0, and as usual, record it in the | |
5473 | STMT_VINFO_VEC_STMT of stmt S2. | |
5474 | When creating the second copy (VS2_1), we obtain the relevant vector | |
5475 | def from the vector stmt recorded in the STMT_VINFO_RELATED_STMT of | |
5476 | stmt VS1_0. This way we find the stmt VS1_1 and the relevant | |
5477 | vector def 'vx1'. Using 'vx1' we create stmt VS2_1 and record a | |
5478 | pointer to it in the STMT_VINFO_RELATED_STMT of the vector stmt VS2_0. | |
5479 | Similarly when creating stmts VS2_2 and VS2_3. This is the resulting | |
5480 | chain of stmts and pointers: | |
5481 | RELATED_STMT VEC_STMT | |
5482 | VS1_0: vx0 = memref0 VS1_1 - | |
5483 | VS1_1: vx1 = memref1 VS1_2 - | |
5484 | VS1_2: vx2 = memref2 VS1_3 - | |
5485 | VS1_3: vx3 = memref3 - - | |
5486 | S1: x = load - VS1_0 | |
5487 | VS2_0: vz0 = vx0 + v1 VS2_1 - | |
5488 | VS2_1: vz1 = vx1 + v1 VS2_2 - | |
5489 | VS2_2: vz2 = vx2 + v1 VS2_3 - | |
5490 | VS2_3: vz3 = vx3 + v1 - - | |
5491 | S2: z = x + 1 - VS2_0 */ | |
ebfd146a IR |
5492 | |
5493 | prev_stmt_info = NULL; | |
5494 | for (j = 0; j < ncopies; j++) | |
5495 | { | |
5496 | /* Handle uses. */ | |
5497 | if (j == 0) | |
4a00c761 JJ |
5498 | { |
5499 | if (op_type == binary_op || op_type == ternary_op) | |
5500 | vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1, | |
306b0c92 | 5501 | slp_node); |
4a00c761 JJ |
5502 | else |
5503 | vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL, | |
306b0c92 | 5504 | slp_node); |
4a00c761 | 5505 | if (op_type == ternary_op) |
c392943c | 5506 | vect_get_vec_defs (op2, NULL_TREE, stmt, &vec_oprnds2, NULL, |
306b0c92 | 5507 | slp_node); |
4a00c761 | 5508 | } |
ebfd146a | 5509 | else |
4a00c761 JJ |
5510 | { |
5511 | vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1); | |
5512 | if (op_type == ternary_op) | |
5513 | { | |
9771b263 DN |
5514 | tree vec_oprnd = vec_oprnds2.pop (); |
5515 | vec_oprnds2.quick_push (vect_get_vec_def_for_stmt_copy (dt[2], | |
5516 | vec_oprnd)); | |
4a00c761 JJ |
5517 | } |
5518 | } | |
5519 | ||
5520 | /* Arguments are ready. Create the new vector stmt. */ | |
9771b263 | 5521 | FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0) |
ebfd146a | 5522 | { |
4a00c761 | 5523 | vop1 = ((op_type == binary_op || op_type == ternary_op) |
9771b263 | 5524 | ? vec_oprnds1[i] : NULL_TREE); |
4a00c761 | 5525 | vop2 = ((op_type == ternary_op) |
9771b263 | 5526 | ? vec_oprnds2[i] : NULL_TREE); |
0d0e4a03 | 5527 | new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1, vop2); |
4a00c761 JJ |
5528 | new_temp = make_ssa_name (vec_dest, new_stmt); |
5529 | gimple_assign_set_lhs (new_stmt, new_temp); | |
5530 | vect_finish_stmt_generation (stmt, new_stmt, gsi); | |
5531 | if (slp_node) | |
9771b263 | 5532 | SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt); |
ebfd146a IR |
5533 | } |
5534 | ||
4a00c761 JJ |
5535 | if (slp_node) |
5536 | continue; | |
5537 | ||
5538 | if (j == 0) | |
5539 | STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt; | |
5540 | else | |
5541 | STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt; | |
5542 | prev_stmt_info = vinfo_for_stmt (new_stmt); | |
ebfd146a IR |
5543 | } |
5544 | ||
9771b263 DN |
5545 | vec_oprnds0.release (); |
5546 | vec_oprnds1.release (); | |
5547 | vec_oprnds2.release (); | |
ebfd146a | 5548 | |
ebfd146a IR |
5549 | return true; |
5550 | } | |
5551 | ||
c716e67f XDL |
5552 | /* A helper function to ensure data reference DR's base alignment |
5553 | for STMT_INFO. */ | |
5554 | ||
5555 | static void | |
5556 | ensure_base_align (stmt_vec_info stmt_info, struct data_reference *dr) | |
5557 | { | |
5558 | if (!dr->aux) | |
5559 | return; | |
5560 | ||
52639a61 | 5561 | if (DR_VECT_AUX (dr)->base_misaligned) |
c716e67f XDL |
5562 | { |
5563 | tree vectype = STMT_VINFO_VECTYPE (stmt_info); | |
52639a61 | 5564 | tree base_decl = DR_VECT_AUX (dr)->base_decl; |
c716e67f | 5565 | |
428f0c67 JH |
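      /* A decl visible to the symbol table must have its alignment raised
	 through its symtab node so that every use agrees, while a purely
	 local decl can be realigned directly.  */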
5566 | if (decl_in_symtab_p (base_decl)) |
5567 | symtab_node::get (base_decl)->increase_alignment (TYPE_ALIGN (vectype)); | |
5568 | else | |
5569 | { | |
fe37c7af | 5570 | SET_DECL_ALIGN (base_decl, TYPE_ALIGN (vectype)); |
428f0c67 JH |
5571 | DECL_USER_ALIGN (base_decl) = 1; |
5572 | } | |
52639a61 | 5573 | DR_VECT_AUX (dr)->base_misaligned = false; |
c716e67f XDL |
5574 | } |
5575 | } | |
5576 | ||
ebfd146a | 5577 | |
44fc7854 BE |
5578 | /* Function get_group_alias_ptr_type. |
5579 | ||
5580 | Return the alias type for the group starting at FIRST_STMT. */ | |
5581 | ||
5582 | static tree | |
5583 | get_group_alias_ptr_type (gimple *first_stmt) | |
5584 | { | |
5585 | struct data_reference *first_dr, *next_dr; | |
5586 | gimple *next_stmt; | |
5587 | ||
5588 | first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt)); | |
5589 | next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (first_stmt)); | |
5590 | while (next_stmt) | |
5591 | { | |
5592 | next_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (next_stmt)); | |
5593 | if (get_alias_set (DR_REF (first_dr)) | |
5594 | != get_alias_set (DR_REF (next_dr))) | |
5595 | { | |
5596 | if (dump_enabled_p ()) | |
5597 | dump_printf_loc (MSG_NOTE, vect_location, | |
5598 | "conflicting alias set types.\n"); | |
5599 | return ptr_type_node; | |
5600 | } | |
5601 | next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt)); | |
5602 | } | |
5603 | return reference_alias_ptr_type (DR_REF (first_dr)); | |
5604 | } | |
5605 | ||
5606 | ||
ebfd146a IR |
5607 | /* Function vectorizable_store. |
5608 | ||
b8698a0f L |
5609 | Check if STMT defines a non-scalar data-ref (array/pointer/structure) that |
5610 | can be vectorized. | |
5611 | If VEC_STMT is also passed, vectorize the STMT: create a vectorized | |
ebfd146a IR |
5612 | stmt to replace it, put it in VEC_STMT, and insert it at BSI. |
5613 | Return FALSE if not a vectorizable STMT, TRUE otherwise. */ | |
5614 | ||
5615 | static bool | |
355fe088 | 5616 | vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt, |
c716e67f | 5617 | slp_tree slp_node) |
ebfd146a IR |
5618 | { |
5619 | tree scalar_dest; | |
5620 | tree data_ref; | |
5621 | tree op; | |
5622 | tree vec_oprnd = NULL_TREE; | |
5623 | stmt_vec_info stmt_info = vinfo_for_stmt (stmt); | |
5624 | struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL; | |
272c6793 | 5625 | tree elem_type; |
ebfd146a | 5626 | loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); |
a70d6342 | 5627 | struct loop *loop = NULL; |
ef4bddc2 | 5628 | machine_mode vec_mode; |
ebfd146a IR |
5629 | tree dummy; |
5630 | enum dr_alignment_support alignment_support_scheme; | |
355fe088 | 5631 | gimple *def_stmt; |
ebfd146a IR |
5632 | enum vect_def_type dt; |
5633 | stmt_vec_info prev_stmt_info = NULL; | |
5634 | tree dataref_ptr = NULL_TREE; | |
74bf76ed | 5635 | tree dataref_offset = NULL_TREE; |
355fe088 | 5636 | gimple *ptr_incr = NULL; |
ebfd146a IR |
5637 | int ncopies; |
5638 | int j; | |
2de001ee RS |
5639 | gimple *next_stmt, *first_stmt; |
5640 | bool grouped_store; | |
ebfd146a | 5641 | unsigned int group_size, i; |
6e1aa848 DN |
5642 | vec<tree> oprnds = vNULL; |
5643 | vec<tree> result_chain = vNULL; | |
ebfd146a | 5644 | bool inv_p; |
09dfa495 | 5645 | tree offset = NULL_TREE; |
6e1aa848 | 5646 | vec<tree> vec_oprnds = vNULL; |
ebfd146a | 5647 | bool slp = (slp_node != NULL); |
ebfd146a | 5648 | unsigned int vec_num; |
a70d6342 | 5649 | bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info); |
310213d4 | 5650 | vec_info *vinfo = stmt_info->vinfo; |
272c6793 | 5651 | tree aggr_type; |
134c85ca | 5652 | gather_scatter_info gs_info; |
3bab6342 | 5653 | enum vect_def_type scatter_src_dt = vect_unknown_def_type; |
355fe088 | 5654 | gimple *new_stmt; |
b17dc4d4 | 5655 | int vf; |
2de001ee | 5656 | vec_load_store_type vls_type; |
44fc7854 | 5657 | tree ref_type; |
a70d6342 | 5658 | |
a70d6342 | 5659 | if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo) |
ebfd146a IR |
5660 | return false; |
5661 | ||
66c16fd9 RB |
5662 | if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def |
5663 | && ! vec_stmt) | |
ebfd146a IR |
5664 | return false; |
5665 | ||
5666 | /* Is vectorizable store? */ | |
5667 | ||
5668 | if (!is_gimple_assign (stmt)) | |
5669 | return false; | |
5670 | ||
5671 | scalar_dest = gimple_assign_lhs (stmt); | |
ab0ef706 JJ |
5672 | if (TREE_CODE (scalar_dest) == VIEW_CONVERT_EXPR |
5673 | && is_pattern_stmt_p (stmt_info)) | |
5674 | scalar_dest = TREE_OPERAND (scalar_dest, 0); | |
ebfd146a | 5675 | if (TREE_CODE (scalar_dest) != ARRAY_REF |
38000232 | 5676 | && TREE_CODE (scalar_dest) != BIT_FIELD_REF |
ebfd146a | 5677 | && TREE_CODE (scalar_dest) != INDIRECT_REF |
e9dbe7bb IR |
5678 | && TREE_CODE (scalar_dest) != COMPONENT_REF |
5679 | && TREE_CODE (scalar_dest) != IMAGPART_EXPR | |
70f34814 RG |
5680 | && TREE_CODE (scalar_dest) != REALPART_EXPR |
5681 | && TREE_CODE (scalar_dest) != MEM_REF) | |
ebfd146a IR |
5682 | return false; |
5683 | ||
fce57248 RS |
5684 | /* Cannot have hybrid store SLP -- that would mean storing to the |
5685 | same location twice. */ | |
5686 | gcc_assert (slp == PURE_SLP_STMT (stmt_info)); | |
5687 | ||
ebfd146a | 5688 | gcc_assert (gimple_assign_single_p (stmt)); |
465c8c19 | 5689 | |
f4d09712 | 5690 | tree vectype = STMT_VINFO_VECTYPE (stmt_info), rhs_vectype = NULL_TREE; |
465c8c19 JJ |
5691 | unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype); |
5692 | ||
5693 | if (loop_vinfo) | |
b17dc4d4 RB |
5694 | { |
5695 | loop = LOOP_VINFO_LOOP (loop_vinfo); | |
5696 | vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo); | |
5697 | } | |
5698 | else | |
5699 | vf = 1; | |
465c8c19 JJ |
5700 | |
5701 | /* Multiple types in SLP are handled by creating the appropriate number of | |
5702 | vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in | |
5703 | case of SLP. */ | |
fce57248 | 5704 | if (slp) |
465c8c19 JJ |
5705 | ncopies = 1; |
5706 | else | |
5707 | ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits; | |
5708 | ||
5709 | gcc_assert (ncopies >= 1); | |
5710 | ||
5711 | /* FORNOW. This restriction should be relaxed. */ | |
5712 | if (loop && nested_in_vect_loop_p (loop, stmt) && ncopies > 1) | |
5713 | { | |
5714 | if (dump_enabled_p ()) | |
5715 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
5716 | "multiple types in nested loop.\n"); | |
5717 | return false; | |
5718 | } | |
5719 | ||
ebfd146a | 5720 | op = gimple_assign_rhs1 (stmt); |
f4d09712 KY |
5721 | |
5722 | if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt, &rhs_vectype)) | |
ebfd146a | 5723 | { |
73fbfcad | 5724 | if (dump_enabled_p ()) |
78c60e3d | 5725 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 5726 | "use not simple.\n"); |
ebfd146a IR |
5727 | return false; |
5728 | } | |
5729 | ||
2de001ee RS |
5730 | if (dt == vect_constant_def || dt == vect_external_def) |
5731 | vls_type = VLS_STORE_INVARIANT; | |
5732 | else | |
5733 | vls_type = VLS_STORE; | |
5734 | ||
f4d09712 KY |
5735 | if (rhs_vectype && !useless_type_conversion_p (vectype, rhs_vectype)) |
5736 | return false; | |
5737 | ||
272c6793 | 5738 | elem_type = TREE_TYPE (vectype); |
ebfd146a | 5739 | vec_mode = TYPE_MODE (vectype); |
7b7b1813 | 5740 | |
ebfd146a IR |
5741 | /* FORNOW. In some cases can vectorize even if data-type not supported |
5742 | (e.g., array initialization with 0). */ |
947131ba | 5743 | if (optab_handler (mov_optab, vec_mode) == CODE_FOR_nothing) |
ebfd146a IR |
5744 | return false; |
5745 | ||
5746 | if (!STMT_VINFO_DATA_REF (stmt_info)) | |
5747 | return false; | |
5748 | ||
2de001ee | 5749 | vect_memory_access_type memory_access_type; |
62da9e14 | 5750 | if (!get_load_store_type (stmt, vectype, slp, vls_type, ncopies, |
2de001ee RS |
5751 | &memory_access_type, &gs_info)) |
5752 | return false; | |
3bab6342 | 5753 | |
ebfd146a IR |
5754 | if (!vec_stmt) /* transformation not required. */ |
5755 | { | |
2de001ee | 5756 | STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info) = memory_access_type; |
ebfd146a | 5757 | STMT_VINFO_TYPE (stmt_info) = store_vec_info_type; |
2e8ab70c RB |
5758 | /* The SLP costs are calculated during SLP analysis. */ |
5759 | if (!PURE_SLP_STMT (stmt_info)) | |
2de001ee | 5760 | vect_model_store_cost (stmt_info, ncopies, memory_access_type, dt, |
2e8ab70c | 5761 | NULL, NULL, NULL); |
ebfd146a IR |
5762 | return true; |
5763 | } | |
2de001ee | 5764 | gcc_assert (memory_access_type == STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info)); |
ebfd146a | 5765 | |
67b8dbac | 5766 | /* Transform. */ |
ebfd146a | 5767 | |
c716e67f XDL |
5768 | ensure_base_align (stmt_info, dr); |
5769 | ||
2de001ee | 5770 | if (memory_access_type == VMAT_GATHER_SCATTER) |
3bab6342 AT |
5771 | { |
5772 | tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE, op, src; | |
134c85ca | 5773 | tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gs_info.decl)); |
3bab6342 AT |
5774 | tree rettype, srctype, ptrtype, idxtype, masktype, scaletype; |
5775 | tree ptr, mask, var, scale, perm_mask = NULL_TREE; | |
5776 | edge pe = loop_preheader_edge (loop); | |
5777 | gimple_seq seq; | |
5778 | basic_block new_bb; | |
5779 | enum { NARROW, NONE, WIDEN } modifier; | |
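      /* The modifier records how the element count of the offset vector
	 (scatter_off_nunits) relates to that of the data vector (nunits):
	 NONE when they match; WIDEN when one offset vector covers two data
	 vectors, so odd copies reuse its permuted high half; NARROW when
	 each data vector needs two scatter calls, hence ncopies is doubled
	 below.  */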
134c85ca | 5780 | int scatter_off_nunits = TYPE_VECTOR_SUBPARTS (gs_info.offset_vectype); |
3bab6342 AT |
5781 | |
5782 | if (nunits == (unsigned int) scatter_off_nunits) | |
5783 | modifier = NONE; | |
5784 | else if (nunits == (unsigned int) scatter_off_nunits / 2) | |
5785 | { | |
5786 | unsigned char *sel = XALLOCAVEC (unsigned char, scatter_off_nunits); | |
5787 | modifier = WIDEN; | |
5788 | ||
5789 | for (i = 0; i < (unsigned int) scatter_off_nunits; ++i) | |
5790 | sel[i] = i | nunits; | |
5791 | ||
134c85ca | 5792 | perm_mask = vect_gen_perm_mask_checked (gs_info.offset_vectype, sel); |
3bab6342 AT |
5793 | gcc_assert (perm_mask != NULL_TREE); |
5794 | } | |
5795 | else if (nunits == (unsigned int) scatter_off_nunits * 2) | |
5796 | { | |
5797 | unsigned char *sel = XALLOCAVEC (unsigned char, nunits); | |
5798 | modifier = NARROW; | |
5799 | ||
5800 | for (i = 0; i < (unsigned int) nunits; ++i) | |
5801 | sel[i] = i | scatter_off_nunits; | |
5802 | ||
5803 | perm_mask = vect_gen_perm_mask_checked (vectype, sel); | |
5804 | gcc_assert (perm_mask != NULL_TREE); | |
5805 | ncopies *= 2; | |
5806 | } | |
5807 | else | |
5808 | gcc_unreachable (); | |
5809 | ||
134c85ca | 5810 | rettype = TREE_TYPE (TREE_TYPE (gs_info.decl)); |
3bab6342 AT |
5811 | ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist); |
5812 | masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist); | |
5813 | idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist); | |
5814 | srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist); | |
5815 | scaletype = TREE_VALUE (arglist); | |
5816 | ||
5817 | gcc_checking_assert (TREE_CODE (masktype) == INTEGER_TYPE | |
5818 | && TREE_CODE (rettype) == VOID_TYPE); | |
5819 | ||
134c85ca | 5820 | ptr = fold_convert (ptrtype, gs_info.base); |
3bab6342 AT |
5821 | if (!is_gimple_min_invariant (ptr)) |
5822 | { | |
5823 | ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE); | |
5824 | new_bb = gsi_insert_seq_on_edge_immediate (pe, seq); | |
5825 | gcc_assert (!new_bb); | |
5826 | } | |
5827 | ||
5828 | /* Currently we support only unconditional scatter stores, | |
5829 | so mask should be all ones. */ | |
5830 | mask = build_int_cst (masktype, -1); | |
5831 | mask = vect_init_vector (stmt, mask, masktype, NULL); | |
5832 | ||
134c85ca | 5833 | scale = build_int_cst (scaletype, gs_info.scale); |
3bab6342 AT |
5834 | |
5835 | prev_stmt_info = NULL; | |
5836 | for (j = 0; j < ncopies; ++j) | |
5837 | { | |
5838 | if (j == 0) | |
5839 | { | |
5840 | src = vec_oprnd1 | |
81c40241 | 5841 | = vect_get_vec_def_for_operand (gimple_assign_rhs1 (stmt), stmt); |
3bab6342 | 5842 | op = vec_oprnd0 |
134c85ca | 5843 | = vect_get_vec_def_for_operand (gs_info.offset, stmt); |
3bab6342 AT |
5844 | } |
5845 | else if (modifier != NONE && (j & 1)) | |
5846 | { | |
5847 | if (modifier == WIDEN) | |
5848 | { | |
5849 | src = vec_oprnd1 | |
5850 | = vect_get_vec_def_for_stmt_copy (scatter_src_dt, vec_oprnd1); | |
5851 | op = permute_vec_elements (vec_oprnd0, vec_oprnd0, perm_mask, | |
5852 | stmt, gsi); | |
5853 | } | |
5854 | else if (modifier == NARROW) | |
5855 | { | |
5856 | src = permute_vec_elements (vec_oprnd1, vec_oprnd1, perm_mask, | |
5857 | stmt, gsi); | |
5858 | op = vec_oprnd0 | |
134c85ca RS |
5859 | = vect_get_vec_def_for_stmt_copy (gs_info.offset_dt, |
5860 | vec_oprnd0); | |
3bab6342 AT |
5861 | } |
5862 | else | |
5863 | gcc_unreachable (); | |
5864 | } | |
5865 | else | |
5866 | { | |
5867 | src = vec_oprnd1 | |
5868 | = vect_get_vec_def_for_stmt_copy (scatter_src_dt, vec_oprnd1); | |
5869 | op = vec_oprnd0 | |
134c85ca RS |
5870 | = vect_get_vec_def_for_stmt_copy (gs_info.offset_dt, |
5871 | vec_oprnd0); | |
3bab6342 AT |
5872 | } |
5873 | ||
5874 | if (!useless_type_conversion_p (srctype, TREE_TYPE (src))) | |
5875 | { | |
5876 | gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (src)) | |
5877 | == TYPE_VECTOR_SUBPARTS (srctype)); | |
0e22bb5a | 5878 | var = vect_get_new_ssa_name (srctype, vect_simple_var); |
3bab6342 AT |
5879 | src = build1 (VIEW_CONVERT_EXPR, srctype, src); |
5880 | new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, src); | |
5881 | vect_finish_stmt_generation (stmt, new_stmt, gsi); | |
5882 | src = var; | |
5883 | } | |
5884 | ||
5885 | if (!useless_type_conversion_p (idxtype, TREE_TYPE (op))) | |
5886 | { | |
5887 | gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op)) | |
5888 | == TYPE_VECTOR_SUBPARTS (idxtype)); | |
0e22bb5a | 5889 | var = vect_get_new_ssa_name (idxtype, vect_simple_var); |
3bab6342 AT |
5890 | op = build1 (VIEW_CONVERT_EXPR, idxtype, op); |
5891 | new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op); | |
5892 | vect_finish_stmt_generation (stmt, new_stmt, gsi); | |
5893 | op = var; | |
5894 | } | |
5895 | ||
5896 | new_stmt | |
134c85ca | 5897 | = gimple_build_call (gs_info.decl, 5, ptr, mask, op, src, scale); |
3bab6342 AT |
5898 | |
5899 | vect_finish_stmt_generation (stmt, new_stmt, gsi); | |
5900 | ||
5901 | if (prev_stmt_info == NULL) | |
5902 | STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt; | |
5903 | else | |
5904 | STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt; | |
5905 | prev_stmt_info = vinfo_for_stmt (new_stmt); | |
5906 | } | |
5907 | return true; | |
5908 | } | |
5909 | ||
2de001ee | 5910 | grouped_store = STMT_VINFO_GROUPED_ACCESS (stmt_info); |
0d0293ac | 5911 | if (grouped_store) |
ebfd146a | 5912 | { |
2de001ee | 5913 | first_stmt = GROUP_FIRST_ELEMENT (stmt_info); |
ebfd146a | 5914 | first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt)); |
e14c1050 | 5915 | group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt)); |
ebfd146a | 5916 | |
e14c1050 | 5917 | GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))++; |
ebfd146a IR |
5918 | |
5919 | /* FORNOW */ | |
a70d6342 | 5920 | gcc_assert (!loop || !nested_in_vect_loop_p (loop, stmt)); |
ebfd146a IR |
5921 | |
5922 | /* We vectorize all the stmts of the interleaving group when we | |
5923 | reach the last stmt in the group. */ | |
e14c1050 IR |
5924 | if (GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt)) |
5925 | < GROUP_SIZE (vinfo_for_stmt (first_stmt)) | |
ebfd146a IR |
5926 | && !slp) |
5927 | { | |
5928 | *vec_stmt = NULL; | |
5929 | return true; | |
5930 | } | |
5931 | ||
5932 | if (slp) | |
4b5caab7 | 5933 | { |
0d0293ac | 5934 | grouped_store = false; |
4b5caab7 IR |
5935 | /* VEC_NUM is the number of vect stmts to be created for this |
5936 | group. */ | |
5937 | vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node); | |
9771b263 | 5938 | first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0]; |
52eab378 | 5939 | gcc_assert (GROUP_FIRST_ELEMENT (vinfo_for_stmt (first_stmt)) == first_stmt); |
4b5caab7 | 5940 | first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt)); |
d092494c | 5941 | op = gimple_assign_rhs1 (first_stmt); |
4b5caab7 | 5942 | } |
ebfd146a | 5943 | else |
4b5caab7 IR |
5944 | /* VEC_NUM is the number of vect stmts to be created for this |
5945 | group. */ | |
ebfd146a | 5946 | vec_num = group_size; |
44fc7854 BE |
5947 | |
5948 | ref_type = get_group_alias_ptr_type (first_stmt); | |
ebfd146a | 5949 | } |
b8698a0f | 5950 | else |
ebfd146a IR |
5951 | { |
5952 | first_stmt = stmt; | |
5953 | first_dr = dr; | |
5954 | group_size = vec_num = 1; | |
44fc7854 | 5955 | ref_type = reference_alias_ptr_type (DR_REF (first_dr)); |
ebfd146a | 5956 | } |
b8698a0f | 5957 | |
73fbfcad | 5958 | if (dump_enabled_p ()) |
78c60e3d | 5959 | dump_printf_loc (MSG_NOTE, vect_location, |
e645e942 | 5960 | "transform store. ncopies = %d\n", ncopies); |
ebfd146a | 5961 | |
2de001ee RS |
5962 | if (memory_access_type == VMAT_ELEMENTWISE |
5963 | || memory_access_type == VMAT_STRIDED_SLP) | |
f2e2a985 MM |
5964 | { |
5965 | gimple_stmt_iterator incr_gsi; | |
5966 | bool insert_after; | |
355fe088 | 5967 | gimple *incr; |
f2e2a985 MM |
5968 | tree offvar; |
5969 | tree ivstep; | |
5970 | tree running_off; | |
5971 | gimple_seq stmts = NULL; | |
5972 | tree stride_base, stride_step, alias_off; | |
5973 | tree vec_oprnd; | |
f502d50e | 5974 | unsigned int g; |
f2e2a985 MM |
5975 | |
5976 | gcc_assert (!nested_in_vect_loop_p (loop, stmt)); | |
5977 | ||
5978 | stride_base | |
5979 | = fold_build_pointer_plus | |
f502d50e | 5980 | (unshare_expr (DR_BASE_ADDRESS (first_dr)), |
f2e2a985 | 5981 | size_binop (PLUS_EXPR, |
f502d50e | 5982 | convert_to_ptrofftype (unshare_expr (DR_OFFSET (first_dr))), |
44fc7854 | 5983 | convert_to_ptrofftype (DR_INIT (first_dr)))); |
f502d50e | 5984 | stride_step = fold_convert (sizetype, unshare_expr (DR_STEP (first_dr))); |
f2e2a985 MM |
5985 | |
5986 | /* For a store with a loop-invariant (but non-power-of-2) |
5987 | stride (i.e. not a grouped access) like so: | |
5988 | ||
5989 | for (i = 0; i < n; i += stride) | |
5990 | array[i] = ...; | |
5991 | ||
5992 | we generate a new induction variable and new stores from | |
5993 | the components of the (vectorized) rhs: | |
5994 | ||
5995 | for (j = 0; ; j += VF*stride) | |
5996 | vectemp = ...; | |
5997 | tmp1 = vectemp[0]; | |
5998 | array[j] = tmp1; | |
5999 | tmp2 = vectemp[1]; | |
6000 | array[j + stride] = tmp2; | |
6001 | ... | |
6002 | */ | |
6003 | ||
cee62fee | 6004 | unsigned nstores = nunits; |
b17dc4d4 | 6005 | unsigned lnel = 1; |
cee62fee MM |
6006 | tree ltype = elem_type; |
6007 | if (slp) | |
6008 | { | |
b17dc4d4 RB |
6009 | if (group_size < nunits |
6010 | && nunits % group_size == 0) | |
6011 | { | |
6012 | nstores = nunits / group_size; | |
6013 | lnel = group_size; | |
6014 | ltype = build_vector_type (elem_type, group_size); | |
6015 | } | |
6016 | else if (group_size >= nunits | |
6017 | && group_size % nunits == 0) | |
6018 | { | |
6019 | nstores = 1; | |
6020 | lnel = nunits; | |
6021 | ltype = vectype; | |
6022 | } | |
cee62fee MM |
6023 | ltype = build_aligned_type (ltype, TYPE_ALIGN (elem_type)); |
6024 | ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node); | |
6025 | } | |
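      /* For example, with nunits == 4 and group_size == 2 each vector
	 statement stores two 2-element subvectors (nstores == 2, lnel == 2),
	 while a group_size that is a multiple of nunits stores the whole
	 vector at once (nstores == 1, ltype == vectype).  */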
6026 | ||
f2e2a985 MM |
6027 | ivstep = stride_step; |
6028 | ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (ivstep), ivstep, | |
b17dc4d4 | 6029 | build_int_cst (TREE_TYPE (ivstep), vf)); |
f2e2a985 MM |
6030 | |
6031 | standard_iv_increment_position (loop, &incr_gsi, &insert_after); | |
6032 | ||
6033 | create_iv (stride_base, ivstep, NULL, | |
6034 | loop, &incr_gsi, insert_after, | |
6035 | &offvar, NULL); | |
6036 | incr = gsi_stmt (incr_gsi); | |
310213d4 | 6037 | set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo)); |
f2e2a985 MM |
6038 | |
6039 | stride_step = force_gimple_operand (stride_step, &stmts, true, NULL_TREE); | |
6040 | if (stmts) | |
6041 | gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts); | |
6042 | ||
6043 | prev_stmt_info = NULL; | |
44fc7854 | 6044 | alias_off = build_int_cst (ref_type, 0); |
f502d50e MM |
6045 | next_stmt = first_stmt; |
6046 | for (g = 0; g < group_size; g++) | |
f2e2a985 | 6047 | { |
f502d50e MM |
6048 | running_off = offvar; |
6049 | if (g) | |
f2e2a985 | 6050 | { |
f502d50e MM |
6051 | tree size = TYPE_SIZE_UNIT (ltype); |
6052 | tree pos = fold_build2 (MULT_EXPR, sizetype, size_int (g), | |
f2e2a985 | 6053 | size); |
f502d50e | 6054 | tree newoff = copy_ssa_name (running_off, NULL); |
f2e2a985 | 6055 | incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR, |
f502d50e | 6056 | running_off, pos); |
f2e2a985 | 6057 | vect_finish_stmt_generation (stmt, incr, gsi); |
f2e2a985 | 6058 | running_off = newoff; |
f502d50e | 6059 | } |
b17dc4d4 RB |
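	  /* GROUP_EL counts the scalar elements of the current group stored
	     so far, so GROUP_EL * ELSZ (the element size in bytes) is the
	     byte offset of the next element within the group.  */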
6060 | unsigned int group_el = 0; |
6061 | unsigned HOST_WIDE_INT | |
6062 | elsz = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (vectype))); | |
f502d50e MM |
6063 | for (j = 0; j < ncopies; j++) |
6064 | { | |
6065 | /* We've set op and dt above, from gimple_assign_rhs1(stmt), | |
6066 | and first_stmt == stmt. */ | |
6067 | if (j == 0) | |
6068 | { | |
6069 | if (slp) | |
6070 | { | |
6071 | vect_get_vec_defs (op, NULL_TREE, stmt, &vec_oprnds, NULL, | |
306b0c92 | 6072 | slp_node); |
f502d50e MM |
6073 | vec_oprnd = vec_oprnds[0]; |
6074 | } | |
6075 | else | |
6076 | { | |
6077 | gcc_assert (gimple_assign_single_p (next_stmt)); | |
6078 | op = gimple_assign_rhs1 (next_stmt); | |
81c40241 | 6079 | vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt); |
f502d50e MM |
6080 | } |
6081 | } | |
f2e2a985 | 6082 | else |
f502d50e MM |
6083 | { |
6084 | if (slp) | |
6085 | vec_oprnd = vec_oprnds[j]; | |
6086 | else | |
c079cbac | 6087 | { |
81c40241 | 6088 | vect_is_simple_use (vec_oprnd, vinfo, &def_stmt, &dt); |
c079cbac RB |
6089 | vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd); |
6090 | } | |
f502d50e MM |
6091 | } |
6092 | ||
6093 | for (i = 0; i < nstores; i++) | |
6094 | { | |
6095 | tree newref, newoff; | |
355fe088 | 6096 | gimple *incr, *assign; |
f502d50e MM |
6097 | tree size = TYPE_SIZE (ltype); |
6098 | /* Extract the i'th component. */ | |
6099 | tree pos = fold_build2 (MULT_EXPR, bitsizetype, | |
6100 | bitsize_int (i), size); | |
6101 | tree elem = fold_build3 (BIT_FIELD_REF, ltype, vec_oprnd, | |
6102 | size, pos); | |
6103 | ||
6104 | elem = force_gimple_operand_gsi (gsi, elem, true, | |
6105 | NULL_TREE, true, | |
6106 | GSI_SAME_STMT); | |
6107 | ||
b17dc4d4 RB |
6108 | tree this_off = build_int_cst (TREE_TYPE (alias_off), |
6109 | group_el * elsz); | |
f502d50e | 6110 | newref = build2 (MEM_REF, ltype, |
b17dc4d4 | 6111 | running_off, this_off); |
f502d50e MM |
6112 | |
6113 | /* And store it to *running_off. */ | |
6114 | assign = gimple_build_assign (newref, elem); | |
6115 | vect_finish_stmt_generation (stmt, assign, gsi); | |
6116 | ||
b17dc4d4 RB |
6117 | group_el += lnel; |
6118 | if (! slp | |
6119 | || group_el == group_size) | |
6120 | { | |
6121 | newoff = copy_ssa_name (running_off, NULL); | |
6122 | incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR, | |
6123 | running_off, stride_step); | |
6124 | vect_finish_stmt_generation (stmt, incr, gsi); | |
f502d50e | 6125 | |
b17dc4d4 RB |
6126 | running_off = newoff; |
6127 | group_el = 0; | |
6128 | } | |
225ce44b RB |
6129 | if (g == group_size - 1 |
6130 | && !slp) | |
f502d50e MM |
6131 | { |
6132 | if (j == 0 && i == 0) | |
225ce44b RB |
6133 | STMT_VINFO_VEC_STMT (stmt_info) |
6134 | = *vec_stmt = assign; | |
f502d50e MM |
6135 | else |
6136 | STMT_VINFO_RELATED_STMT (prev_stmt_info) = assign; | |
6137 | prev_stmt_info = vinfo_for_stmt (assign); | |
6138 | } | |
6139 | } | |
f2e2a985 | 6140 | } |
f502d50e | 6141 | next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt)); |
b17dc4d4 RB |
6142 | if (slp) |
6143 | break; | |
f2e2a985 | 6144 | } |
778dd3b6 RB |
6145 | |
6146 | vec_oprnds.release (); | |
f2e2a985 MM |
6147 | return true; |
6148 | } | |
6149 | ||
8c681247 | 6150 | auto_vec<tree> dr_chain (group_size); |
9771b263 | 6151 | oprnds.create (group_size); |
ebfd146a | 6152 | |
720f5239 | 6153 | alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false); |
ebfd146a | 6154 | gcc_assert (alignment_support_scheme); |
272c6793 RS |
6155 | /* Targets with store-lane instructions must not require explicit |
6156 | realignment. */ | |
2de001ee | 6157 | gcc_assert (memory_access_type != VMAT_LOAD_STORE_LANES |
272c6793 RS |
6158 | || alignment_support_scheme == dr_aligned |
6159 | || alignment_support_scheme == dr_unaligned_supported); | |
6160 | ||
62da9e14 RS |
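  /* For accesses that run downwards in memory the data-ref pointer must
     start nunits - 1 elements below the first scalar access, so pass a
     negative element offset when creating it.  */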
6161 | if (memory_access_type == VMAT_CONTIGUOUS_DOWN |
6162 | || memory_access_type == VMAT_CONTIGUOUS_REVERSE) | |
09dfa495 BM |
6163 | offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1); |
6164 | ||
2de001ee | 6165 | if (memory_access_type == VMAT_LOAD_STORE_LANES) |
272c6793 RS |
6166 | aggr_type = build_array_type_nelts (elem_type, vec_num * nunits); |
6167 | else | |
6168 | aggr_type = vectype; | |
ebfd146a IR |
6169 | |
6170 | /* In case the vectorization factor (VF) is bigger than the number | |
6171 | of elements that we can fit in a vectype (nunits), we have to generate | |
6172 | more than one vector stmt, i.e., we need to "unroll" the |
b8698a0f | 6173 | vector stmt by a factor VF/nunits. For more details see documentation in |
ebfd146a IR |
6174 | vect_get_vec_def_for_copy_stmt. */ |
6175 | ||
0d0293ac | 6176 | /* In case of interleaving (non-unit grouped access): |
ebfd146a IR |
6177 | |
6178 | S1: &base + 2 = x2 | |
6179 | S2: &base = x0 | |
6180 | S3: &base + 1 = x1 | |
6181 | S4: &base + 3 = x3 | |
6182 | ||
6183 | We create vectorized stores starting from the base address (the access |
6184 | of the first stmt in the chain, S2 in the above example) when the last |
6185 | store stmt of the chain (S4) is reached: |
6186 | ||
6187 | VS1: &base = vx2 | |
6188 | VS2: &base + vec_size*1 = vx0 | |
6189 | VS3: &base + vec_size*2 = vx1 | |
6190 | VS4: &base + vec_size*3 = vx3 | |
6191 | ||
6192 | Then permutation statements are generated: | |
6193 | ||
3fcc1b55 JJ |
6194 | VS5: vx5 = VEC_PERM_EXPR < vx0, vx3, {0, 8, 1, 9, 2, 10, 3, 11} > |
6195 | VS6: vx6 = VEC_PERM_EXPR < vx0, vx3, {4, 12, 5, 13, 6, 14, 7, 15} > | |
ebfd146a | 6196 | ... |
b8698a0f | 6197 | |
ebfd146a IR |
6198 | And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts |
6199 | (the order of the data-refs in the output of vect_permute_store_chain | |
6200 | corresponds to the order of scalar stmts in the interleaving chain - see | |
6201 | the documentation of vect_permute_store_chain()). | |
6202 | ||
6203 | In case of both multiple types and interleaving, above vector stores and | |
ff802fa1 | 6204 | permutation stmts are created for every copy. The result vector stmts are |
ebfd146a | 6205 | put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding |
b8698a0f | 6206 | STMT_VINFO_RELATED_STMT for the next copies. |
ebfd146a IR |
6207 | */ |
6208 | ||
6209 | prev_stmt_info = NULL; | |
6210 | for (j = 0; j < ncopies; j++) | |
6211 | { | |
ebfd146a IR |
6212 | |
6213 | if (j == 0) | |
6214 | { | |
6215 | if (slp) | |
6216 | { | |
6217 | /* Get vectorized arguments for SLP_NODE. */ | |
d092494c | 6218 | vect_get_vec_defs (op, NULL_TREE, stmt, &vec_oprnds, |
306b0c92 | 6219 | NULL, slp_node); |
ebfd146a | 6220 | |
9771b263 | 6221 | vec_oprnd = vec_oprnds[0]; |
ebfd146a IR |
6222 | } |
6223 | else | |
6224 | { | |
b8698a0f L |
6225 | /* For interleaved stores we collect vectorized defs for all the |
6226 | stores in the group in DR_CHAIN and OPRNDS. DR_CHAIN is then | |
6227 | used as an input to vect_permute_store_chain(), and OPRNDS as | |
ebfd146a IR |
6228 | an input to vect_get_vec_def_for_stmt_copy() for the next copy. |
6229 | ||
0d0293ac | 6230 | If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and |
ebfd146a | 6231 | OPRNDS are of size 1. */ |
b8698a0f | 6232 | next_stmt = first_stmt; |
ebfd146a IR |
6233 | for (i = 0; i < group_size; i++) |
6234 | { | |
b8698a0f L |
6235 | /* Since gaps are not supported for interleaved stores, |
6236 | GROUP_SIZE is the exact number of stmts in the chain. | |
6237 | Therefore, NEXT_STMT can't be NULL. If there is |
6238 | no interleaving, GROUP_SIZE is 1, and only one |
ebfd146a IR |
6239 | iteration of the loop will be executed. */ |
6240 | gcc_assert (next_stmt | |
6241 | && gimple_assign_single_p (next_stmt)); | |
6242 | op = gimple_assign_rhs1 (next_stmt); | |
6243 | ||
81c40241 | 6244 | vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt); |
9771b263 DN |
6245 | dr_chain.quick_push (vec_oprnd); |
6246 | oprnds.quick_push (vec_oprnd); | |
e14c1050 | 6247 | next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt)); |
ebfd146a IR |
6248 | } |
6249 | } | |
6250 | ||
6251 | /* We should have caught mismatched types earlier. */ |
6252 | gcc_assert (useless_type_conversion_p (vectype, | |
6253 | TREE_TYPE (vec_oprnd))); | |
74bf76ed JJ |
6254 | bool simd_lane_access_p |
6255 | = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info); | |
6256 | if (simd_lane_access_p | |
6257 | && TREE_CODE (DR_BASE_ADDRESS (first_dr)) == ADDR_EXPR | |
6258 | && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr), 0)) | |
6259 | && integer_zerop (DR_OFFSET (first_dr)) | |
6260 | && integer_zerop (DR_INIT (first_dr)) | |
6261 | && alias_sets_conflict_p (get_alias_set (aggr_type), | |
44fc7854 | 6262 | get_alias_set (TREE_TYPE (ref_type)))) |
74bf76ed JJ |
6263 | { |
6264 | dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr)); | |
44fc7854 | 6265 | dataref_offset = build_int_cst (ref_type, 0); |
8928eff3 | 6266 | inv_p = false; |
74bf76ed JJ |
6267 | } |
6268 | else | |
6269 | dataref_ptr | |
6270 | = vect_create_data_ref_ptr (first_stmt, aggr_type, | |
6271 | simd_lane_access_p ? loop : NULL, | |
09dfa495 | 6272 | offset, &dummy, gsi, &ptr_incr, |
74bf76ed | 6273 | simd_lane_access_p, &inv_p); |
a70d6342 | 6274 | gcc_assert (bb_vinfo || !inv_p); |
ebfd146a | 6275 | } |
b8698a0f | 6276 | else |
ebfd146a | 6277 | { |
b8698a0f L |
6278 | /* For interleaved stores we created vectorized defs for all the |
6279 | defs stored in OPRNDS in the previous iteration (previous copy). | |
6280 | DR_CHAIN is then used as an input to vect_permute_store_chain(), | |
ebfd146a IR |
6281 | and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the |
6282 | next copy. | |
0d0293ac | 6283 | If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and |
ebfd146a IR |
6284 | OPRNDS are of size 1. */ |
6285 | for (i = 0; i < group_size; i++) | |
6286 | { | |
9771b263 | 6287 | op = oprnds[i]; |
81c40241 | 6288 | vect_is_simple_use (op, vinfo, &def_stmt, &dt); |
b8698a0f | 6289 | vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, op); |
9771b263 DN |
6290 | dr_chain[i] = vec_oprnd; |
6291 | oprnds[i] = vec_oprnd; | |
ebfd146a | 6292 | } |
74bf76ed JJ |
6293 | if (dataref_offset) |
6294 | dataref_offset | |
6295 | = int_const_binop (PLUS_EXPR, dataref_offset, | |
6296 | TYPE_SIZE_UNIT (aggr_type)); | |
6297 | else | |
6298 | dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt, | |
6299 | TYPE_SIZE_UNIT (aggr_type)); | |
ebfd146a IR |
6300 | } |
6301 | ||
2de001ee | 6302 | if (memory_access_type == VMAT_LOAD_STORE_LANES) |
ebfd146a | 6303 | { |
272c6793 | 6304 | tree vec_array; |
267d3070 | 6305 | |
272c6793 RS |
6306 | /* Combine all the vectors into an array. */ |
6307 | vec_array = create_vector_array (vectype, vec_num); | |
6308 | for (i = 0; i < vec_num; i++) | |
c2d7ab2a | 6309 | { |
9771b263 | 6310 | vec_oprnd = dr_chain[i]; |
272c6793 | 6311 | write_vector_array (stmt, gsi, vec_oprnd, vec_array, i); |
267d3070 | 6312 | } |
b8698a0f | 6313 | |
272c6793 RS |
6314 | /* Emit: |
6315 | MEM_REF[...all elements...] = STORE_LANES (VEC_ARRAY). */ | |
44fc7854 | 6316 | data_ref = create_array_ref (aggr_type, dataref_ptr, ref_type); |
272c6793 RS |
6317 | new_stmt = gimple_build_call_internal (IFN_STORE_LANES, 1, vec_array); |
6318 | gimple_call_set_lhs (new_stmt, data_ref); | |
267d3070 | 6319 | vect_finish_stmt_generation (stmt, new_stmt, gsi); |
272c6793 RS |
6320 | } |
6321 | else | |
6322 | { | |
6323 | new_stmt = NULL; | |
0d0293ac | 6324 | if (grouped_store) |
272c6793 | 6325 | { |
b6b9227d JJ |
6326 | if (j == 0) |
6327 | result_chain.create (group_size); | |
272c6793 RS |
6328 | /* Permute. */ |
6329 | vect_permute_store_chain (dr_chain, group_size, stmt, gsi, | |
6330 | &result_chain); | |
6331 | } | |
c2d7ab2a | 6332 | |
272c6793 RS |
6333 | next_stmt = first_stmt; |
6334 | for (i = 0; i < vec_num; i++) | |
6335 | { | |
644ffefd | 6336 | unsigned align, misalign; |
272c6793 RS |
6337 | |
6338 | if (i > 0) | |
6339 | /* Bump the vector pointer. */ | |
6340 | dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, | |
6341 | stmt, NULL_TREE); | |
6342 | ||
6343 | if (slp) | |
9771b263 | 6344 | vec_oprnd = vec_oprnds[i]; |
0d0293ac MM |
6345 | else if (grouped_store) |
6346 | /* For grouped stores vectorized defs are interleaved in | |
272c6793 | 6347 | vect_permute_store_chain(). */ |
9771b263 | 6348 | vec_oprnd = result_chain[i]; |
272c6793 | 6349 | |
69a2e8a1 | 6350 | data_ref = fold_build2 (MEM_REF, vectype, |
aed93b23 RB |
6351 | dataref_ptr, |
6352 | dataref_offset | |
6353 | ? dataref_offset | |
44fc7854 | 6354 | : build_int_cst (ref_type, 0)); |
644ffefd | 6355 | align = TYPE_ALIGN_UNIT (vectype); |
272c6793 | 6356 | if (aligned_access_p (first_dr)) |
644ffefd | 6357 | misalign = 0; |
272c6793 RS |
6358 | else if (DR_MISALIGNMENT (first_dr) == -1) |
6359 | { | |
52639a61 RB |
6360 | if (DR_VECT_AUX (first_dr)->base_element_aligned) |
6361 | align = TYPE_ALIGN_UNIT (elem_type); | |
6362 | else | |
6363 | align = get_object_alignment (DR_REF (first_dr)) | |
6364 | / BITS_PER_UNIT; | |
6365 | misalign = 0; | |
272c6793 RS |
6366 | TREE_TYPE (data_ref) |
6367 | = build_aligned_type (TREE_TYPE (data_ref), | |
52639a61 | 6368 | align * BITS_PER_UNIT); |
272c6793 RS |
6369 | } |
6370 | else | |
6371 | { | |
6372 | TREE_TYPE (data_ref) | |
6373 | = build_aligned_type (TREE_TYPE (data_ref), | |
6374 | TYPE_ALIGN (elem_type)); | |
644ffefd | 6375 | misalign = DR_MISALIGNMENT (first_dr); |
272c6793 | 6376 | } |
aed93b23 RB |
6377 | if (dataref_offset == NULL_TREE |
6378 | && TREE_CODE (dataref_ptr) == SSA_NAME) | |
74bf76ed JJ |
6379 | set_ptr_info_alignment (get_ptr_info (dataref_ptr), align, |
6380 | misalign); | |
c2d7ab2a | 6381 | |
62da9e14 | 6382 | if (memory_access_type == VMAT_CONTIGUOUS_REVERSE) |
09dfa495 BM |
6383 | { |
6384 | tree perm_mask = perm_mask_for_reverse (vectype); | |
6385 | tree perm_dest | |
6386 | = vect_create_destination_var (gimple_assign_rhs1 (stmt), | |
6387 | vectype); | |
b731b390 | 6388 | tree new_temp = make_ssa_name (perm_dest); |
09dfa495 BM |
6389 | |
6390 | /* Generate the permute statement. */ | |
355fe088 | 6391 | gimple *perm_stmt |
0d0e4a03 JJ |
6392 | = gimple_build_assign (new_temp, VEC_PERM_EXPR, vec_oprnd, |
6393 | vec_oprnd, perm_mask); | |
09dfa495 BM |
6394 | vect_finish_stmt_generation (stmt, perm_stmt, gsi); |
6395 | ||
6396 | perm_stmt = SSA_NAME_DEF_STMT (new_temp); | |
6397 | vec_oprnd = new_temp; | |
6398 | } | |
6399 | ||
272c6793 RS |
6400 | /* Arguments are ready. Create the new vector stmt. */ |
6401 | new_stmt = gimple_build_assign (data_ref, vec_oprnd); | |
6402 | vect_finish_stmt_generation (stmt, new_stmt, gsi); | |
272c6793 RS |
6403 | |
6404 | if (slp) | |
6405 | continue; | |
6406 | ||
e14c1050 | 6407 | next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt)); |
272c6793 RS |
6408 | if (!next_stmt) |
6409 | break; | |
6410 | } | |
ebfd146a | 6411 | } |
1da0876c RS |
6412 | if (!slp) |
6413 | { | |
6414 | if (j == 0) | |
6415 | STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt; | |
6416 | else | |
6417 | STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt; | |
6418 | prev_stmt_info = vinfo_for_stmt (new_stmt); | |
6419 | } | |
ebfd146a IR |
6420 | } |
6421 | ||
9771b263 DN |
6422 | oprnds.release (); |
6423 | result_chain.release (); | |
6424 | vec_oprnds.release (); | |
ebfd146a IR |
6425 | |
6426 | return true; | |
6427 | } | |
6428 | ||
557be5a8 AL |
6429 | /* Given a vector type VECTYPE, turns permutation SEL into the equivalent |
6430 | VECTOR_CST mask. No checks are made that the target platform supports the | |
6431 | mask, so callers may wish to test can_vec_perm_p separately, or use | |
6432 | vect_gen_perm_mask_checked. */ | |
a1e53f3f | 6433 | |
3fcc1b55 | 6434 | tree |
557be5a8 | 6435 | vect_gen_perm_mask_any (tree vectype, const unsigned char *sel) |
a1e53f3f | 6436 | { |
d2a12ae7 | 6437 | tree mask_elt_type, mask_type, mask_vec, *mask_elts; |
2635892a | 6438 | int i, nunits; |
a1e53f3f | 6439 | |
22e4dee7 | 6440 | nunits = TYPE_VECTOR_SUBPARTS (vectype); |
22e4dee7 | 6441 | |
96f9265a RG |
6442 | mask_elt_type = lang_hooks.types.type_for_mode |
6443 | (int_mode_for_mode (TYPE_MODE (TREE_TYPE (vectype))), 1); | |
22e4dee7 | 6444 | mask_type = get_vectype_for_scalar_type (mask_elt_type); |
a1e53f3f | 6445 | |
d2a12ae7 | 6446 | mask_elts = XALLOCAVEC (tree, nunits); |
aec7ae7d | 6447 | for (i = nunits - 1; i >= 0; i--) |
d2a12ae7 RG |
6448 | mask_elts[i] = build_int_cst (mask_elt_type, sel[i]); |
6449 | mask_vec = build_vector (mask_type, mask_elts); | |
a1e53f3f | 6450 | |
2635892a | 6451 | return mask_vec; |
a1e53f3f L |
6452 | } |
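/* For example, for a four-element vector type, SEL = {0, 4, 1, 5} yields
   a mask whose VEC_PERM_EXPR interleaves the low halves of its two input
   vectors; indices of NUNITS or more select from the second input.  */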
6453 | ||
cf7aa6a3 AL |
6454 | /* Checked version of vect_gen_perm_mask_any. Asserts can_vec_perm_p, |
6455 | i.e. that the target supports the pattern _for arbitrary input vectors_. */ | |
557be5a8 AL |
6456 | |
6457 | tree | |
6458 | vect_gen_perm_mask_checked (tree vectype, const unsigned char *sel) | |
6459 | { | |
6460 | gcc_assert (can_vec_perm_p (TYPE_MODE (vectype), false, sel)); | |
6461 | return vect_gen_perm_mask_any (vectype, sel); | |
6462 | } | |
6463 | ||
aec7ae7d JJ |
6464 | /* Given vector variables X and Y that were generated for the scalar |
6465 | STMT, generate instructions to permute the vector elements of X and Y | |
6466 | using permutation mask MASK_VEC, insert them at *GSI and return the | |
6467 | permuted vector variable. */ | |
a1e53f3f L |
6468 | |
6469 | static tree | |
355fe088 | 6470 | permute_vec_elements (tree x, tree y, tree mask_vec, gimple *stmt, |
aec7ae7d | 6471 | gimple_stmt_iterator *gsi) |
a1e53f3f L |
6472 | { |
6473 | tree vectype = TREE_TYPE (x); | |
aec7ae7d | 6474 | tree perm_dest, data_ref; |
355fe088 | 6475 | gimple *perm_stmt; |
a1e53f3f | 6476 | |
acdcd61b | 6477 | perm_dest = vect_create_destination_var (gimple_get_lhs (stmt), vectype); |
b731b390 | 6478 | data_ref = make_ssa_name (perm_dest); |
a1e53f3f L |
6479 | |
6480 | /* Generate the permute statement. */ | |
0d0e4a03 | 6481 | perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, x, y, mask_vec); |
a1e53f3f L |
6482 | vect_finish_stmt_generation (stmt, perm_stmt, gsi); |
6483 | ||
6484 | return data_ref; | |
6485 | } | |
6486 | ||
6b916b36 RB |
6487 | /* Hoist the definitions of all SSA uses on STMT out of the loop LOOP, |
6488 | inserting them on the loop's preheader edge. Returns true if we |
6489 | were successful in doing so (and thus STMT can then be moved), |
6490 | otherwise returns false. */ | |
6491 | ||
6492 | static bool | |
355fe088 | 6493 | hoist_defs_of_uses (gimple *stmt, struct loop *loop) |
6b916b36 RB |
6494 | { |
6495 | ssa_op_iter i; | |
6496 | tree op; | |
6497 | bool any = false; | |
6498 | ||
6499 | FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE) | |
6500 | { | |
355fe088 | 6501 | gimple *def_stmt = SSA_NAME_DEF_STMT (op); |
6b916b36 RB |
6502 | if (!gimple_nop_p (def_stmt) |
6503 | && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))) | |
6504 | { | |
6505 | /* Make sure we don't need to recurse. While we could do |
6506 | so in simple cases, for more complex use webs we don't |
6507 | have an easy way to preserve stmt order so as to fulfil |
6508 | dependencies within them. */ | |
6509 | tree op2; | |
6510 | ssa_op_iter i2; | |
d1417442 JJ |
6511 | if (gimple_code (def_stmt) == GIMPLE_PHI) |
6512 | return false; | |
6b916b36 RB |
6513 | FOR_EACH_SSA_TREE_OPERAND (op2, def_stmt, i2, SSA_OP_USE) |
6514 | { | |
355fe088 | 6515 | gimple *def_stmt2 = SSA_NAME_DEF_STMT (op2); |
6b916b36 RB |
6516 | if (!gimple_nop_p (def_stmt2) |
6517 | && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt2))) | |
6518 | return false; | |
6519 | } | |
6520 | any = true; | |
6521 | } | |
6522 | } | |
6523 | ||
6524 | if (!any) | |
6525 | return true; | |
6526 | ||
6527 | FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE) | |
6528 | { | |
355fe088 | 6529 | gimple *def_stmt = SSA_NAME_DEF_STMT (op); |
6b916b36 RB |
6530 | if (!gimple_nop_p (def_stmt) |
6531 | && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))) | |
6532 | { | |
6533 | gimple_stmt_iterator gsi = gsi_for_stmt (def_stmt); | |
6534 | gsi_remove (&gsi, false); | |
6535 | gsi_insert_on_edge_immediate (loop_preheader_edge (loop), def_stmt); | |
6536 | } | |
6537 | } | |
6538 | ||
6539 | return true; | |
6540 | } | |
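/* As an illustration: if an invariant load's address is computed by
   "addr_1 = &a[x_2]" inside the loop but x_2 is defined outside of it,
   addr_1 can be moved to the preheader, after which the load itself no
   longer depends on any in-loop definition.  */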
6541 | ||
ebfd146a IR |
6542 | /* vectorizable_load. |
6543 | ||
b8698a0f L |
6544 | Check if STMT reads a non-scalar data-ref (array/pointer/structure) that |
6545 | can be vectorized. | |
6546 | If VEC_STMT is also passed, vectorize the STMT: create a vectorized | |
ebfd146a IR |
6547 | stmt to replace it, put it in VEC_STMT, and insert it at BSI. |
6548 | Return FALSE if not a vectorizable STMT, TRUE otherwise. */ | |
6549 | ||
6550 | static bool | |
355fe088 | 6551 | vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt, |
c716e67f | 6552 | slp_tree slp_node, slp_instance slp_node_instance) |
ebfd146a IR |
6553 | { |
6554 | tree scalar_dest; | |
6555 | tree vec_dest = NULL; | |
6556 | tree data_ref = NULL; | |
6557 | stmt_vec_info stmt_info = vinfo_for_stmt (stmt); | |
b8698a0f | 6558 | stmt_vec_info prev_stmt_info; |
ebfd146a | 6559 | loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); |
a70d6342 | 6560 | struct loop *loop = NULL; |
ebfd146a | 6561 | struct loop *containing_loop = (gimple_bb (stmt))->loop_father; |
a70d6342 | 6562 | bool nested_in_vect_loop = false; |
c716e67f | 6563 | struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL; |
272c6793 | 6564 | tree elem_type; |
ebfd146a | 6565 | tree new_temp; |
ef4bddc2 | 6566 | machine_mode mode; |
355fe088 | 6567 | gimple *new_stmt = NULL; |
ebfd146a IR |
6568 | tree dummy; |
6569 | enum dr_alignment_support alignment_support_scheme; | |
6570 | tree dataref_ptr = NULL_TREE; | |
74bf76ed | 6571 | tree dataref_offset = NULL_TREE; |
355fe088 | 6572 | gimple *ptr_incr = NULL; |
ebfd146a | 6573 | int ncopies; |
44fc7854 | 6574 | int i, j, group_size, group_gap_adj; |
ebfd146a IR |
6575 | tree msq = NULL_TREE, lsq; |
6576 | tree offset = NULL_TREE; | |
356bbc4c | 6577 | tree byte_offset = NULL_TREE; |
ebfd146a | 6578 | tree realignment_token = NULL_TREE; |
538dd0b7 | 6579 | gphi *phi = NULL; |
6e1aa848 | 6580 | vec<tree> dr_chain = vNULL; |
0d0293ac | 6581 | bool grouped_load = false; |
355fe088 | 6582 | gimple *first_stmt; |
4f0a0218 | 6583 | gimple *first_stmt_for_drptr = NULL; |
ebfd146a IR |
6584 | bool inv_p; |
6585 | bool compute_in_loop = false; | |
6586 | struct loop *at_loop; | |
6587 | int vec_num; | |
6588 | bool slp = (slp_node != NULL); | |
6589 | bool slp_perm = false; | |
6590 | enum tree_code code; | |
a70d6342 IR |
6591 | bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info); |
6592 | int vf; | |
272c6793 | 6593 | tree aggr_type; |
134c85ca | 6594 | gather_scatter_info gs_info; |
310213d4 | 6595 | vec_info *vinfo = stmt_info->vinfo; |
44fc7854 | 6596 | tree ref_type; |
a70d6342 | 6597 | |
465c8c19 JJ |
6598 | if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo) |
6599 | return false; | |
6600 | ||
66c16fd9 RB |
6601 | if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def |
6602 | && ! vec_stmt) | |
465c8c19 JJ |
6603 | return false; |
6604 | ||
6605 | /* Is vectorizable load? */ | |
6606 | if (!is_gimple_assign (stmt)) | |
6607 | return false; | |
6608 | ||
6609 | scalar_dest = gimple_assign_lhs (stmt); | |
6610 | if (TREE_CODE (scalar_dest) != SSA_NAME) | |
6611 | return false; | |
6612 | ||
6613 | code = gimple_assign_rhs_code (stmt); | |
6614 | if (code != ARRAY_REF | |
6615 | && code != BIT_FIELD_REF | |
6616 | && code != INDIRECT_REF | |
6617 | && code != COMPONENT_REF | |
6618 | && code != IMAGPART_EXPR | |
6619 | && code != REALPART_EXPR | |
6620 | && code != MEM_REF | |
6621 | && TREE_CODE_CLASS (code) != tcc_declaration) | |
6622 | return false; | |
6623 | ||
6624 | if (!STMT_VINFO_DATA_REF (stmt_info)) | |
6625 | return false; | |
6626 | ||
6627 | tree vectype = STMT_VINFO_VECTYPE (stmt_info); | |
6628 | int nunits = TYPE_VECTOR_SUBPARTS (vectype); | |
6629 | ||
a70d6342 IR |
6630 | if (loop_vinfo) |
6631 | { | |
6632 | loop = LOOP_VINFO_LOOP (loop_vinfo); | |
6633 | nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt); | |
6634 | vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo); | |
6635 | } | |
6636 | else | |
3533e503 | 6637 | vf = 1; |
ebfd146a IR |
6638 | |
6639 | /* Multiple types in SLP are handled by creating the appropriate number of | |
ff802fa1 | 6640 | vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in |
ebfd146a | 6641 | case of SLP. */ |
fce57248 | 6642 | if (slp) |
ebfd146a IR |
6643 | ncopies = 1; |
6644 | else | |
6645 | ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits; | |
6646 | ||
6647 | gcc_assert (ncopies >= 1); | |
6648 | ||
6649 | /* FORNOW. This restriction should be relaxed. */ | |
6650 | if (nested_in_vect_loop && ncopies > 1) | |
6651 | { | |
73fbfcad | 6652 | if (dump_enabled_p ()) |
78c60e3d | 6653 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 6654 | "multiple types in nested loop.\n"); |
ebfd146a IR |
6655 | return false; |
6656 | } | |
6657 | ||
f2556b68 RB |
6658 | /* Invalidate assumptions made by dependence analysis when vectorization |
6659 | on the unrolled body effectively re-orders stmts. */ | |
6660 | if (ncopies > 1 | |
6661 | && STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0 | |
6662 | && ((unsigned)LOOP_VINFO_VECT_FACTOR (loop_vinfo) | |
6663 | > STMT_VINFO_MIN_NEG_DIST (stmt_info))) | |
6664 | { | |
6665 | if (dump_enabled_p ()) | |
6666 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
6667 | "cannot perform implicit CSE when unrolling " | |
6668 | "with negative dependence distance\n"); | |
6669 | return false; | |
6670 | } | |
6671 | ||
7b7b1813 | 6672 | elem_type = TREE_TYPE (vectype); |
947131ba | 6673 | mode = TYPE_MODE (vectype); |
ebfd146a IR |
6674 | |
6675 | /* FORNOW. In some cases can vectorize even if data-type not supported | |
6676 | (e.g., data copies). */ |
947131ba | 6677 | if (optab_handler (mov_optab, mode) == CODE_FOR_nothing) |
ebfd146a | 6678 | { |
73fbfcad | 6679 | if (dump_enabled_p ()) |
78c60e3d | 6680 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 6681 | "Aligned load, but unsupported type.\n"); |
ebfd146a IR |
6682 | return false; |
6683 | } | |
6684 | ||
ebfd146a | 6685 | /* Check if the load is a part of an interleaving chain. */ |
0d0293ac | 6686 | if (STMT_VINFO_GROUPED_ACCESS (stmt_info)) |
ebfd146a | 6687 | { |
0d0293ac | 6688 | grouped_load = true; |
ebfd146a | 6689 | /* FORNOW */ |
2de001ee RS |
6690 | gcc_assert (!nested_in_vect_loop); |
6691 | gcc_assert (!STMT_VINFO_GATHER_SCATTER_P (stmt_info)); | |
ebfd146a | 6692 | |
e14c1050 | 6693 | first_stmt = GROUP_FIRST_ELEMENT (stmt_info); |
d3465d72 | 6694 | group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt)); |
d5f035ea | 6695 | |
b1af7da6 RB |
6696 | if (slp && SLP_TREE_LOAD_PERMUTATION (slp_node).exists ()) |
6697 | slp_perm = true; | |
6698 | ||
f2556b68 RB |
6699 | /* Invalidate assumptions made by dependence analysis when vectorization |
6700 | on the unrolled body effectively re-orders stmts. */ | |
6701 | if (!PURE_SLP_STMT (stmt_info) | |
6702 | && STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0 | |
6703 | && ((unsigned)LOOP_VINFO_VECT_FACTOR (loop_vinfo) | |
6704 | > STMT_VINFO_MIN_NEG_DIST (stmt_info))) | |
6705 | { | |
6706 | if (dump_enabled_p ()) | |
6707 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
6708 | "cannot perform implicit CSE when performing " | |
6709 | "group loads with negative dependence distance\n"); | |
6710 | return false; | |
6711 | } | |
96bb56b2 RB |
6712 | |
6713 | /* Similarly, when the stmt is a load that is both part of an SLP |
6714 | instance and a loop-vectorized stmt via the same-dr mechanism, |
6715 | we have to give up. */ |
6716 | if (STMT_VINFO_GROUP_SAME_DR_STMT (stmt_info) | |
6717 | && (STMT_SLP_TYPE (stmt_info) | |
6718 | != STMT_SLP_TYPE (vinfo_for_stmt | |
6719 | (STMT_VINFO_GROUP_SAME_DR_STMT (stmt_info))))) | |
6720 | { | |
6721 | if (dump_enabled_p ()) | |
6722 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
6723 | "conflicting SLP types for CSEd load\n"); | |
6724 | return false; | |
6725 | } | |
ebfd146a IR |
6726 | } |
6727 | ||
2de001ee | 6728 | vect_memory_access_type memory_access_type; |
62da9e14 | 6729 | if (!get_load_store_type (stmt, vectype, slp, VLS_LOAD, ncopies, |
2de001ee RS |
6730 | &memory_access_type, &gs_info)) |
6731 | return false; | |
a1e53f3f | 6732 | |
ebfd146a IR |
6733 | if (!vec_stmt) /* transformation not required. */ |
6734 | { | |
2de001ee RS |
6735 | if (!slp) |
6736 | STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info) = memory_access_type; | |
ebfd146a | 6737 | STMT_VINFO_TYPE (stmt_info) = load_vec_info_type; |
2e8ab70c RB |
6738 | /* The SLP costs are calculated during SLP analysis. */ |
6739 | if (!PURE_SLP_STMT (stmt_info)) | |
2de001ee | 6740 | vect_model_load_cost (stmt_info, ncopies, memory_access_type, |
2e8ab70c | 6741 | NULL, NULL, NULL); |
ebfd146a IR |
6742 | return true; |
6743 | } | |
6744 | ||
2de001ee RS |
6745 | if (!slp) |
6746 | gcc_assert (memory_access_type | |
6747 | == STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info)); | |
6748 | ||
73fbfcad | 6749 | if (dump_enabled_p ()) |
78c60e3d | 6750 | dump_printf_loc (MSG_NOTE, vect_location, |
e645e942 | 6751 | "transform load. ncopies = %d\n", ncopies); |
ebfd146a | 6752 | |
67b8dbac | 6753 | /* Transform. */ |
ebfd146a | 6754 | |
c716e67f XDL |
6755 | ensure_base_align (stmt_info, dr); |
6756 | ||
2de001ee | 6757 | if (memory_access_type == VMAT_GATHER_SCATTER) |
aec7ae7d JJ |
6758 | { |
6759 | tree vec_oprnd0 = NULL_TREE, op; | |
134c85ca | 6760 | tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gs_info.decl)); |
aec7ae7d | 6761 | tree rettype, srctype, ptrtype, idxtype, masktype, scaletype; |
d3c2fee0 | 6762 | tree ptr, mask, var, scale, merge, perm_mask = NULL_TREE, prev_res = NULL_TREE; |
aec7ae7d JJ |
6763 | edge pe = loop_preheader_edge (loop); |
6764 | gimple_seq seq; | |
6765 | basic_block new_bb; | |
6766 | enum { NARROW, NONE, WIDEN } modifier; | |
134c85ca | 6767 | int gather_off_nunits = TYPE_VECTOR_SUBPARTS (gs_info.offset_vectype); |
aec7ae7d JJ |
6768 | |
6769 | if (nunits == gather_off_nunits) | |
6770 | modifier = NONE; | |
6771 | else if (nunits == gather_off_nunits / 2) | |
6772 | { | |
6773 | unsigned char *sel = XALLOCAVEC (unsigned char, gather_off_nunits); | |
6774 | modifier = WIDEN; | |
6775 | ||
6776 | for (i = 0; i < gather_off_nunits; ++i) | |
6777 | sel[i] = i | nunits; | |
6778 | ||
134c85ca | 6779 | perm_mask = vect_gen_perm_mask_checked (gs_info.offset_vectype, sel); |
aec7ae7d JJ |
6780 | } |
6781 | else if (nunits == gather_off_nunits * 2) | |
6782 | { | |
6783 | unsigned char *sel = XALLOCAVEC (unsigned char, nunits); | |
6784 | modifier = NARROW; | |
6785 | ||
6786 | for (i = 0; i < nunits; ++i) | |
6787 | sel[i] = i < gather_off_nunits | |
6788 | ? i : i + nunits - gather_off_nunits; | |
6789 | ||
557be5a8 | 6790 | perm_mask = vect_gen_perm_mask_checked (vectype, sel); |
aec7ae7d JJ |
6791 | ncopies *= 2; |
6792 | } | |
6793 | else | |
6794 | gcc_unreachable (); | |

      rettype = TREE_TYPE (TREE_TYPE (gs_info.decl));
      srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      scaletype = TREE_VALUE (arglist);
      gcc_checking_assert (types_compatible_p (srctype, rettype));

      vec_dest = vect_create_destination_var (scalar_dest, vectype);

      ptr = fold_convert (ptrtype, gs_info.base);
      if (!is_gimple_min_invariant (ptr))
	{
	  ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
	  new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
	  gcc_assert (!new_bb);
	}

      /* Currently we support only unconditional gather loads,
	 so mask should be all ones.  */
      if (TREE_CODE (masktype) == INTEGER_TYPE)
	mask = build_int_cst (masktype, -1);
      else if (TREE_CODE (TREE_TYPE (masktype)) == INTEGER_TYPE)
	{
	  mask = build_int_cst (TREE_TYPE (masktype), -1);
	  mask = build_vector_from_val (masktype, mask);
	  mask = vect_init_vector (stmt, mask, masktype, NULL);
	}
      else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (masktype)))
	{
	  REAL_VALUE_TYPE r;
	  long tmp[6];
	  for (j = 0; j < 6; ++j)
	    tmp[j] = -1;
	  real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (masktype)));
	  mask = build_real (TREE_TYPE (masktype), r);
	  mask = build_vector_from_val (masktype, mask);
	  mask = vect_init_vector (stmt, mask, masktype, NULL);
	}
      else
	gcc_unreachable ();

      scale = build_int_cst (scaletype, gs_info.scale);

      if (TREE_CODE (TREE_TYPE (rettype)) == INTEGER_TYPE)
	merge = build_int_cst (TREE_TYPE (rettype), 0);
      else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (rettype)))
	{
	  REAL_VALUE_TYPE r;
	  long tmp[6];
	  for (j = 0; j < 6; ++j)
	    tmp[j] = 0;
	  real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (rettype)));
	  merge = build_real (TREE_TYPE (rettype), r);
	}
      else
	gcc_unreachable ();
      merge = build_vector_from_val (rettype, merge);
      merge = vect_init_vector (stmt, merge, rettype, NULL);
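      /* MERGE is the gather builtin's pass-through operand; since the mask
	 built above is all ones no lane ever takes it, so a zero vector
	 (or the equivalent all-zero bit pattern for floats) suffices.  */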

      prev_stmt_info = NULL;
      for (j = 0; j < ncopies; ++j)
	{
	  if (modifier == WIDEN && (j & 1))
	    op = permute_vec_elements (vec_oprnd0, vec_oprnd0,
				       perm_mask, stmt, gsi);
	  else if (j == 0)
	    op = vec_oprnd0
	      = vect_get_vec_def_for_operand (gs_info.offset, stmt);
	  else
	    op = vec_oprnd0
	      = vect_get_vec_def_for_stmt_copy (gs_info.offset_dt, vec_oprnd0);

	  if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
	    {
	      gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op))
			  == TYPE_VECTOR_SUBPARTS (idxtype));
	      var = vect_get_new_ssa_name (idxtype, vect_simple_var);
	      op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
	      new_stmt
		= gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
	      op = var;
	    }

	  new_stmt
	    = gimple_build_call (gs_info.decl, 5, merge, ptr, op, mask, scale);

	  if (!useless_type_conversion_p (vectype, rettype))
	    {
	      gcc_assert (TYPE_VECTOR_SUBPARTS (vectype)
			  == TYPE_VECTOR_SUBPARTS (rettype));
	      op = vect_get_new_ssa_name (rettype, vect_simple_var);
	      gimple_call_set_lhs (new_stmt, op);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
	      var = make_ssa_name (vec_dest);
	      op = build1 (VIEW_CONVERT_EXPR, vectype, op);
	      new_stmt
		= gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
	    }
	  else
	    {
	      var = make_ssa_name (vec_dest, new_stmt);
	      gimple_call_set_lhs (new_stmt, var);
	    }

	  vect_finish_stmt_generation (stmt, new_stmt, gsi);

	  if (modifier == NARROW)
	    {
	      if ((j & 1) == 0)
		{
		  prev_res = var;
		  continue;
		}
	      var = permute_vec_elements (prev_res, var,
					  perm_mask, stmt, gsi);
	      new_stmt = SSA_NAME_DEF_STMT (var);
	    }

	  if (prev_stmt_info == NULL)
	    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	  else
	    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
	  prev_stmt_info = vinfo_for_stmt (new_stmt);
	}
      return true;
    }

  if (memory_access_type == VMAT_ELEMENTWISE
      || memory_access_type == VMAT_STRIDED_SLP)
    {
      gimple_stmt_iterator incr_gsi;
      bool insert_after;
      gimple *incr;
      tree offvar;
      tree ivstep;
      tree running_off;
      vec<constructor_elt, va_gc> *v = NULL;
      gimple_seq stmts = NULL;
      tree stride_base, stride_step, alias_off;

      gcc_assert (!nested_in_vect_loop);

      if (slp && grouped_load)
	{
	  first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
	  first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
	  group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
	  ref_type = get_group_alias_ptr_type (first_stmt);
	}
      else
	{
	  first_stmt = stmt;
	  first_dr = dr;
	  group_size = 1;
	  ref_type = reference_alias_ptr_type (DR_REF (first_dr));
	}

      stride_base
	= fold_build_pointer_plus
	    (DR_BASE_ADDRESS (first_dr),
	     size_binop (PLUS_EXPR,
			 convert_to_ptrofftype (DR_OFFSET (first_dr)),
			 convert_to_ptrofftype (DR_INIT (first_dr))));
      stride_step = fold_convert (sizetype, DR_STEP (first_dr));

      /* For a load with loop-invariant (but other than power-of-2)
	 stride (i.e. not a grouped access) like so:

	   for (i = 0; i < n; i += stride)
	     ... = array[i];

	 we generate a new induction variable and new accesses to
	 form a new vector (or vectors, depending on ncopies):

	   for (j = 0; ; j += VF*stride)
	     tmp1 = array[j];
	     tmp2 = array[j + stride];
	     ...
	     vectemp = {tmp1, tmp2, ...}
      */
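      /* As a concrete sketch (numbers assumed): with VF == 4, nunits == 4
	 and a runtime stride S, each transformed iteration loads array[j],
	 array[j + S], array[j + 2*S] and array[j + 3*S], collects them in a
	 CONSTRUCTOR and advances j by 4*S via the induction variable
	 created below.  */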

      ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (stride_step), stride_step,
			    build_int_cst (TREE_TYPE (stride_step), vf));

      standard_iv_increment_position (loop, &incr_gsi, &insert_after);

      create_iv (unshare_expr (stride_base), unshare_expr (ivstep), NULL,
		 loop, &incr_gsi, insert_after,
		 &offvar, NULL);
      incr = gsi_stmt (incr_gsi);
      set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo));

      stride_step = force_gimple_operand (unshare_expr (stride_step),
					  &stmts, true, NULL_TREE);
      if (stmts)
	gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);

      prev_stmt_info = NULL;
      running_off = offvar;
      alias_off = build_int_cst (ref_type, 0);
      int nloads = nunits;
      int lnel = 1;
      tree ltype = TREE_TYPE (vectype);
      tree lvectype = vectype;
      auto_vec<tree> dr_chain;
      if (memory_access_type == VMAT_STRIDED_SLP)
	{
	  if (group_size < nunits)
	    {
	      /* Avoid emitting a constructor of vector elements by performing
		 the loads using an integer type of the same size,
		 constructing a vector of those and then re-interpreting it
		 as the original vector type.  This works around the fact
		 that the vec_init optab was only designed for scalar
		 element modes and thus expansion goes through memory.
		 This avoids a huge runtime penalty due to the general
		 inability to perform store forwarding from smaller stores
		 to a larger load.  */
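	      /* A worked instance (configuration assumed): for a V4SI
		 vector with group_size == 2, lsize is 2 * 32 == 64 bits,
		 so each pair of SImode group members is loaded as one
		 64-bit integer, two such integers form a V2DI vector,
		 and that vector is re-interpreted as V4SI below.  */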
	      unsigned lsize
		= group_size * TYPE_PRECISION (TREE_TYPE (vectype));
	      enum machine_mode elmode = mode_for_size (lsize, MODE_INT, 0);
	      enum machine_mode vmode = mode_for_vector (elmode,
							 nunits / group_size);
	      /* If we can't construct such a vector fall back to
		 element loads of the original vector type.  */
	      if (VECTOR_MODE_P (vmode)
		  && optab_handler (vec_init_optab, vmode) != CODE_FOR_nothing)
		{
		  nloads = nunits / group_size;
		  lnel = group_size;
		  ltype = build_nonstandard_integer_type (lsize, 1);
		  lvectype = build_vector_type (ltype, nloads);
		}
	    }
	  else
	    {
	      nloads = 1;
	      lnel = nunits;
	      ltype = vectype;
	    }
	  ltype = build_aligned_type (ltype, TYPE_ALIGN (TREE_TYPE (vectype)));
	}
      if (slp)
	{
	  /* For SLP permutation support we need to load the whole group,
	     not only the number of vector stmts the permutation result
	     fits in.  */
	  if (slp_perm)
	    {
	      ncopies = (group_size * vf + nunits - 1) / nunits;
	      dr_chain.create (ncopies);
	    }
	  else
	    ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
	}
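      /* E.g. (numbers assumed): group_size == 3, vf == 4 and nunits == 4
	 give ncopies == (12 + 3) / 4 == 3 vectors, enough to cover all
	 twelve scalar group elements feeding the permutation.  */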
      int group_el = 0;
      unsigned HOST_WIDE_INT
	elsz = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (vectype)));
      for (j = 0; j < ncopies; j++)
	{
	  if (nloads > 1)
	    vec_alloc (v, nloads);
	  for (i = 0; i < nloads; i++)
	    {
	      tree this_off = build_int_cst (TREE_TYPE (alias_off),
					     group_el * elsz);
	      new_stmt = gimple_build_assign (make_ssa_name (ltype),
					      build2 (MEM_REF, ltype,
						      running_off, this_off));
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
	      if (nloads > 1)
		CONSTRUCTOR_APPEND_ELT (v, NULL_TREE,
					gimple_assign_lhs (new_stmt));

	      group_el += lnel;
	      if (! slp
		  || group_el == group_size)
		{
		  tree newoff = copy_ssa_name (running_off);
		  gimple *incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
						      running_off, stride_step);
		  vect_finish_stmt_generation (stmt, incr, gsi);

		  running_off = newoff;
		  group_el = 0;
		}
	    }
	  if (nloads > 1)
	    {
	      tree vec_inv = build_constructor (lvectype, v);
	      new_temp = vect_init_vector (stmt, vec_inv, lvectype, gsi);
	      new_stmt = SSA_NAME_DEF_STMT (new_temp);
	      if (lvectype != vectype)
		{
		  new_stmt = gimple_build_assign (make_ssa_name (vectype),
						  VIEW_CONVERT_EXPR,
						  build1 (VIEW_CONVERT_EXPR,
							  vectype, new_temp));
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		}
	    }

	  if (slp)
	    {
	      if (slp_perm)
		dr_chain.quick_push (gimple_assign_lhs (new_stmt));
	      else
		SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	    }
	  else
	    {
	      if (j == 0)
		STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	      else
		STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
	      prev_stmt_info = vinfo_for_stmt (new_stmt);
	    }
	}
      if (slp_perm)
	{
	  unsigned n_perms;
	  vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf,
					slp_node_instance, false, &n_perms);
	}
      return true;
    }

  if (grouped_load)
    {
      first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
      group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
      /* For SLP vectorization we directly vectorize a subchain
	 without permutation.  */
      if (slp && ! SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
	first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
      /* For BB vectorization always use the first stmt to base
	 the data ref pointer on.  */
      if (bb_vinfo)
	first_stmt_for_drptr = SLP_TREE_SCALAR_STMTS (slp_node)[0];

      /* Check if the chain of loads is already vectorized.  */
      if (STMT_VINFO_VEC_STMT (vinfo_for_stmt (first_stmt))
	  /* For SLP we would need to copy over SLP_TREE_VEC_STMTS.
	     ??? But we can only do so if there is exactly one
	     as we have no way to get at the rest.  Leave the CSE
	     opportunity alone.
	     ??? With the group load eventually participating
	     in multiple different permutations (having multiple
	     slp nodes which refer to the same group) the CSE
	     is even wrong code.  See PR56270.  */
	  && !slp)
	{
	  *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
	  return true;
	}
      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
      group_gap_adj = 0;

      /* VEC_NUM is the number of vect stmts to be created for this group.  */
      if (slp)
	{
	  grouped_load = false;
	  /* For SLP permutation support we need to load the whole group,
	     not only the number of vector stmts the permutation result
	     fits in.  */
	  if (slp_perm)
	    vec_num = (group_size * vf + nunits - 1) / nunits;
	  else
	    vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
	  group_gap_adj = vf * group_size - nunits * vec_num;
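	  /* E.g. (numbers assumed for illustration): with vf == 4,
	     group_size == 3, nunits == 4 and an SLP node producing
	     vec_num == 2 vector stmts, group_gap_adj == 12 - 8 == 4
	     scalar elements remain, which the pointer bump at the end
	     of the copy loop skips over.  */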
	}
      else
	vec_num = group_size;

      ref_type = get_group_alias_ptr_type (first_stmt);
    }
  else
    {
      first_stmt = stmt;
      first_dr = dr;
      group_size = vec_num = 1;
      group_gap_adj = 0;
      ref_type = reference_alias_ptr_type (DR_REF (first_dr));
    }

  alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
  gcc_assert (alignment_support_scheme);
  /* Targets with load-lane instructions must not require explicit
     realignment.  */
  gcc_assert (memory_access_type != VMAT_LOAD_STORE_LANES
	      || alignment_support_scheme == dr_aligned
	      || alignment_support_scheme == dr_unaligned_supported);

  /* In case the vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  In doing so, we record a pointer
     from one copy of the vector stmt to the next, in the field
     STMT_VINFO_RELATED_STMT.  This is necessary in order to allow following
     stages to find the correct vector defs to be used when vectorizing
     stmts that use the defs of the current stmt.  The example below
     illustrates the vectorization process when VF=16 and nunits=4 (i.e., we
     need to create 4 vectorized stmts):

     before vectorization:
                                RELATED_STMT    VEC_STMT
        S1:     x = memref      -               -
        S2:     z = x + 1       -               -

     step 1: vectorize stmt S1:
        We first create the vector stmt VS1_0, and, as usual, record a
        pointer to it in the STMT_VINFO_VEC_STMT of the scalar stmt S1.
        Next, we create the vector stmt VS1_1, and record a pointer to
        it in the STMT_VINFO_RELATED_STMT of the vector stmt VS1_0.
        Similarly, for VS1_2 and VS1_3.  This is the resulting chain of
        stmts and pointers:
                                RELATED_STMT    VEC_STMT
        VS1_0:  vx0 = memref0   VS1_1           -
        VS1_1:  vx1 = memref1   VS1_2           -
        VS1_2:  vx2 = memref2   VS1_3           -
        VS1_3:  vx3 = memref3   -               -
        S1:     x = load        -               VS1_0
        S2:     z = x + 1       -               -

     See the documentation of vect_get_vec_def_for_stmt_copy for how the
     information recorded in the RELATED_STMT field is used to vectorize
     stmt S2.  */

  /* In case of interleaving (non-unit grouped access):

       S1:  x2 = &base + 2
       S2:  x0 = &base
       S3:  x1 = &base + 1
       S4:  x3 = &base + 3

     Vectorized loads are created in the order of memory accesses
     starting from the access of the first stmt of the chain:

       VS1: vx0 = &base
       VS2: vx1 = &base + vec_size*1
       VS3: vx3 = &base + vec_size*2
       VS4: vx4 = &base + vec_size*3

     Then permutation statements are generated:

       VS5: vx5 = VEC_PERM_EXPR < vx0, vx1, { 0, 2, ..., i*2 } >
       VS6: vx6 = VEC_PERM_EXPR < vx0, vx1, { 1, 3, ..., i*2+1 } >
       ...

     And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
     (the order of the data-refs in the output of vect_permute_load_chain
     corresponds to the order of scalar stmts in the interleaving chain - see
     the documentation of vect_permute_load_chain()).
     The generation of permutation stmts and recording them in
     STMT_VINFO_VEC_STMT is done in vect_transform_grouped_load().

     In case of both multiple types and interleaving, the vector loads and
     permutation stmts above are created for every copy.  The result vector
     stmts are put in STMT_VINFO_VEC_STMT for the first copy and in the
     corresponding STMT_VINFO_RELATED_STMT for the next copies.  */

  /* If the data reference is aligned (dr_aligned) or potentially unaligned
     on a target that supports unaligned accesses (dr_unaligned_supported)
     we generate the following code:
         p = initial_addr;
         indx = 0;
         loop {
           p = p + indx * vectype_size;
           vec_dest = *(p);
           indx = indx + 1;
         }

     Otherwise, the data reference is potentially unaligned on a target that
     does not support unaligned accesses (dr_explicit_realign_optimized) -
     then generate the following code, in which the data in each iteration is
     obtained by two vector loads, one from the previous iteration, and one
     from the current iteration:
         p1 = initial_addr;
         msq_init = *(floor(p1))
         p2 = initial_addr + VS - 1;
         realignment_token = call target_builtin;
         indx = 0;
         loop {
           p2 = p2 + indx * vectype_size
           lsq = *(floor(p2))
           vec_dest = realign_load (msq, lsq, realignment_token)
           indx = indx + 1;
           msq = lsq;
         }   */

  /* If the misalignment remains the same throughout the execution of the
     loop, we can create the init_addr and permutation mask at the loop
     preheader.  Otherwise, it needs to be created inside the loop.
     This can only occur when vectorizing memory accesses in the inner-loop
     nested within an outer-loop that is being vectorized.  */

  if (nested_in_vect_loop
      && (TREE_INT_CST_LOW (DR_STEP (dr))
	  % GET_MODE_SIZE (TYPE_MODE (vectype)) != 0))
    {
      gcc_assert (alignment_support_scheme != dr_explicit_realign_optimized);
      compute_in_loop = true;
    }

  if ((alignment_support_scheme == dr_explicit_realign_optimized
       || alignment_support_scheme == dr_explicit_realign)
      && !compute_in_loop)
    {
      msq = vect_setup_realignment (first_stmt, gsi, &realignment_token,
				    alignment_support_scheme, NULL_TREE,
				    &at_loop);
      if (alignment_support_scheme == dr_explicit_realign_optimized)
	{
	  phi = as_a <gphi *> (SSA_NAME_DEF_STMT (msq));
	  byte_offset = size_binop (MINUS_EXPR, TYPE_SIZE_UNIT (vectype),
				    size_one_node);
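	  /* Offsetting the second load by VS - 1 bytes guarantees that
	     flooring its address to the vector alignment yields the next
	     aligned chunk after MSQ, whatever the actual misalignment.  */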
	}
    }
  else
    at_loop = loop;

  if (memory_access_type == VMAT_CONTIGUOUS_REVERSE)
    offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);

  if (memory_access_type == VMAT_LOAD_STORE_LANES)
    aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
  else
    aggr_type = vectype;

  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; j++)
    {
      /* 1. Create the vector or array pointer update chain.  */
      if (j == 0)
	{
	  bool simd_lane_access_p
	    = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info);
	  if (simd_lane_access_p
	      && TREE_CODE (DR_BASE_ADDRESS (first_dr)) == ADDR_EXPR
	      && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr), 0))
	      && integer_zerop (DR_OFFSET (first_dr))
	      && integer_zerop (DR_INIT (first_dr))
	      && alias_sets_conflict_p (get_alias_set (aggr_type),
					get_alias_set (TREE_TYPE (ref_type)))
	      && (alignment_support_scheme == dr_aligned
		  || alignment_support_scheme == dr_unaligned_supported))
	    {
	      dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr));
	      dataref_offset = build_int_cst (ref_type, 0);
	      inv_p = false;
	    }
	  else if (first_stmt_for_drptr
		   && first_stmt != first_stmt_for_drptr)
	    {
	      dataref_ptr
		= vect_create_data_ref_ptr (first_stmt_for_drptr, aggr_type,
					    at_loop, offset, &dummy, gsi,
					    &ptr_incr, simd_lane_access_p,
					    &inv_p, byte_offset);
	      /* Adjust the pointer by the difference to first_stmt.  */
	      data_reference_p ptrdr
		= STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt_for_drptr));
	      tree diff = fold_convert (sizetype,
					size_binop (MINUS_EXPR,
						    DR_INIT (first_dr),
						    DR_INIT (ptrdr)));
	      dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
					     stmt, diff);
	    }
	  else
	    dataref_ptr
	      = vect_create_data_ref_ptr (first_stmt, aggr_type, at_loop,
					  offset, &dummy, gsi, &ptr_incr,
					  simd_lane_access_p, &inv_p,
					  byte_offset);
	}
      else if (dataref_offset)
	dataref_offset = int_const_binop (PLUS_EXPR, dataref_offset,
					  TYPE_SIZE_UNIT (aggr_type));
      else
	dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
				       TYPE_SIZE_UNIT (aggr_type));

      if (grouped_load || slp_perm)
	dr_chain.create (vec_num);

      if (memory_access_type == VMAT_LOAD_STORE_LANES)
	{
	  tree vec_array;

	  vec_array = create_vector_array (vectype, vec_num);

	  /* Emit:
	       VEC_ARRAY = LOAD_LANES (MEM_REF[...all elements...]).  */
	  data_ref = create_array_ref (aggr_type, dataref_ptr, ref_type);
	  new_stmt = gimple_build_call_internal (IFN_LOAD_LANES, 1, data_ref);
	  gimple_call_set_lhs (new_stmt, vec_array);
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);

	  /* Extract each vector into an SSA_NAME.  */
	  for (i = 0; i < vec_num; i++)
	    {
	      new_temp = read_vector_array (stmt, gsi, scalar_dest,
					    vec_array, i);
	      dr_chain.quick_push (new_temp);
	    }

	  /* Record the mapping between SSA_NAMEs and statements.  */
	  vect_record_grouped_load_vectors (stmt, dr_chain);
	}
      else
	{
	  for (i = 0; i < vec_num; i++)
	    {
	      if (i > 0)
		dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
					       stmt, NULL_TREE);

	      /* 2. Create the vector-load in the loop.  */
	      switch (alignment_support_scheme)
		{
		case dr_aligned:
		case dr_unaligned_supported:
		  {
		    unsigned int align, misalign;

		    data_ref
		      = fold_build2 (MEM_REF, vectype, dataref_ptr,
				     dataref_offset
				     ? dataref_offset
				     : build_int_cst (ref_type, 0));
		    align = TYPE_ALIGN_UNIT (vectype);
		    if (alignment_support_scheme == dr_aligned)
		      {
			gcc_assert (aligned_access_p (first_dr));
			misalign = 0;
		      }
		    else if (DR_MISALIGNMENT (first_dr) == -1)
		      {
			if (DR_VECT_AUX (first_dr)->base_element_aligned)
			  align = TYPE_ALIGN_UNIT (elem_type);
			else
			  align = (get_object_alignment (DR_REF (first_dr))
				   / BITS_PER_UNIT);
			misalign = 0;
			TREE_TYPE (data_ref)
			  = build_aligned_type (TREE_TYPE (data_ref),
						align * BITS_PER_UNIT);
		      }
		    else
		      {
			TREE_TYPE (data_ref)
			  = build_aligned_type (TREE_TYPE (data_ref),
						TYPE_ALIGN (elem_type));
			misalign = DR_MISALIGNMENT (first_dr);
		      }
		    if (dataref_offset == NULL_TREE
			&& TREE_CODE (dataref_ptr) == SSA_NAME)
		      set_ptr_info_alignment (get_ptr_info (dataref_ptr),
					      align, misalign);
		    break;
		  }
		case dr_explicit_realign:
		  {
		    tree ptr, bump;

		    tree vs = size_int (TYPE_VECTOR_SUBPARTS (vectype));

		    if (compute_in_loop)
		      msq = vect_setup_realignment (first_stmt, gsi,
						    &realignment_token,
						    dr_explicit_realign,
						    dataref_ptr, NULL);

		    if (TREE_CODE (dataref_ptr) == SSA_NAME)
		      ptr = copy_ssa_name (dataref_ptr);
		    else
		      ptr = make_ssa_name (TREE_TYPE (dataref_ptr));
		    new_stmt = gimple_build_assign
				 (ptr, BIT_AND_EXPR, dataref_ptr,
				  build_int_cst
				  (TREE_TYPE (dataref_ptr),
				   -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
		    vect_finish_stmt_generation (stmt, new_stmt, gsi);
		    data_ref
		      = build2 (MEM_REF, vectype, ptr,
				build_int_cst (ref_type, 0));
		    vec_dest = vect_create_destination_var (scalar_dest,
							    vectype);
		    new_stmt = gimple_build_assign (vec_dest, data_ref);
		    new_temp = make_ssa_name (vec_dest, new_stmt);
		    gimple_assign_set_lhs (new_stmt, new_temp);
		    gimple_set_vdef (new_stmt, gimple_vdef (stmt));
		    gimple_set_vuse (new_stmt, gimple_vuse (stmt));
		    vect_finish_stmt_generation (stmt, new_stmt, gsi);
		    msq = new_temp;

		    bump = size_binop (MULT_EXPR, vs,
				       TYPE_SIZE_UNIT (elem_type));
		    bump = size_binop (MINUS_EXPR, bump, size_one_node);
		    ptr = bump_vector_ptr (dataref_ptr, NULL, gsi, stmt, bump);
		    new_stmt = gimple_build_assign
				 (NULL_TREE, BIT_AND_EXPR, ptr,
				  build_int_cst
				  (TREE_TYPE (ptr),
				   -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
		    ptr = copy_ssa_name (ptr, new_stmt);
		    gimple_assign_set_lhs (new_stmt, ptr);
		    vect_finish_stmt_generation (stmt, new_stmt, gsi);
		    data_ref
		      = build2 (MEM_REF, vectype, ptr,
				build_int_cst (ref_type, 0));
		    break;
		  }
		case dr_explicit_realign_optimized:
		  if (TREE_CODE (dataref_ptr) == SSA_NAME)
		    new_temp = copy_ssa_name (dataref_ptr);
		  else
		    new_temp = make_ssa_name (TREE_TYPE (dataref_ptr));
		  new_stmt = gimple_build_assign
			       (new_temp, BIT_AND_EXPR, dataref_ptr,
				build_int_cst
				(TREE_TYPE (dataref_ptr),
				 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		  data_ref
		    = build2 (MEM_REF, vectype, new_temp,
			      build_int_cst (ref_type, 0));
		  break;
		default:
		  gcc_unreachable ();
		}
	      vec_dest = vect_create_destination_var (scalar_dest, vectype);
	      new_stmt = gimple_build_assign (vec_dest, data_ref);
	      new_temp = make_ssa_name (vec_dest, new_stmt);
	      gimple_assign_set_lhs (new_stmt, new_temp);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);

	      /* 3. Handle explicit realignment if necessary/supported.
		 Create in loop:
		   vec_dest = realign_load (msq, lsq, realignment_token)  */
	      if (alignment_support_scheme == dr_explicit_realign_optimized
		  || alignment_support_scheme == dr_explicit_realign)
		{
		  lsq = gimple_assign_lhs (new_stmt);
		  if (!realignment_token)
		    realignment_token = dataref_ptr;
		  vec_dest = vect_create_destination_var (scalar_dest, vectype);
		  new_stmt = gimple_build_assign (vec_dest, REALIGN_LOAD_EXPR,
						  msq, lsq, realignment_token);
		  new_temp = make_ssa_name (vec_dest, new_stmt);
		  gimple_assign_set_lhs (new_stmt, new_temp);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);

		  if (alignment_support_scheme == dr_explicit_realign_optimized)
		    {
		      gcc_assert (phi);
		      if (i == vec_num - 1 && j == ncopies - 1)
			add_phi_arg (phi, lsq,
				     loop_latch_edge (containing_loop),
				     UNKNOWN_LOCATION);
		      msq = lsq;
		    }
		}

	      /* 4. Handle invariant-load.  */
	      if (inv_p && !bb_vinfo)
		{
		  gcc_assert (!grouped_load);
		  /* If we have versioned for aliasing or the loop doesn't
		     have any data dependencies that would preclude this,
		     then we are sure this is a loop invariant load and
		     thus we can insert it on the preheader edge.  */
		  if (LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo)
		      && !nested_in_vect_loop
		      && hoist_defs_of_uses (stmt, loop))
		    {
		      if (dump_enabled_p ())
			{
			  dump_printf_loc (MSG_NOTE, vect_location,
					   "hoisting out of the vectorized "
					   "loop: ");
			  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
			}
		      tree tem = copy_ssa_name (scalar_dest);
		      gsi_insert_on_edge_immediate
			(loop_preheader_edge (loop),
			 gimple_build_assign (tem,
					      unshare_expr
						(gimple_assign_rhs1 (stmt))));
		      new_temp = vect_init_vector (stmt, tem, vectype, NULL);
		      new_stmt = SSA_NAME_DEF_STMT (new_temp);
		      set_vinfo_for_stmt (new_stmt,
					  new_stmt_vec_info (new_stmt, vinfo));
		    }
		  else
		    {
		      gimple_stmt_iterator gsi2 = *gsi;
		      gsi_next (&gsi2);
		      new_temp = vect_init_vector (stmt, scalar_dest,
						   vectype, &gsi2);
		      new_stmt = SSA_NAME_DEF_STMT (new_temp);
		    }
		}

	      if (memory_access_type == VMAT_CONTIGUOUS_REVERSE)
		{
		  tree perm_mask = perm_mask_for_reverse (vectype);
		  new_temp = permute_vec_elements (new_temp, new_temp,
						   perm_mask, stmt, gsi);
		  new_stmt = SSA_NAME_DEF_STMT (new_temp);
		}

	      /* Collect vector loads and later create their permutation in
		 vect_transform_grouped_load ().  */
	      if (grouped_load || slp_perm)
		dr_chain.quick_push (new_temp);

	      /* Store vector loads in the corresponding SLP_NODE.  */
	      if (slp && !slp_perm)
		SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	    }
	  /* Bump the vector pointer to account for a gap or for excess
	     elements loaded for a permuted SLP load.  */
	  if (group_gap_adj != 0)
	    {
	      bool ovf;
	      tree bump
		= wide_int_to_tree (sizetype,
				    wi::smul (TYPE_SIZE_UNIT (elem_type),
					      group_gap_adj, &ovf));
	      dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
					     stmt, bump);
	    }
	}

      if (slp && !slp_perm)
	continue;

      if (slp_perm)
	{
	  unsigned n_perms;
	  if (!vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf,
					     slp_node_instance, false,
					     &n_perms))
	    {
	      dr_chain.release ();
	      return false;
	    }
	}
      else
	{
	  if (grouped_load)
	    {
	      if (memory_access_type != VMAT_LOAD_STORE_LANES)
		vect_transform_grouped_load (stmt, dr_chain, group_size, gsi);
	      *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
	    }
	  else
	    {
	      if (j == 0)
		STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	      else
		STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
	      prev_stmt_info = vinfo_for_stmt (new_stmt);
	    }
	}
      dr_chain.release ();
    }

  return true;
}

/* Function vect_is_simple_cond.

   Input:
   LOOP - the loop that is being vectorized.
   COND - Condition that is checked for simple use.

   Output:
   *COMP_VECTYPE - the vector type for the comparison.
   *DTS - The def types for the arguments of the comparison.

   Returns whether a COND can be vectorized.  Checks whether
   condition operands are supportable using vect_is_simple_use.  */
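/* For example (illustrative forms): a_5 < b_7 with two SSA operands is
   simple, as is x_3 != 0 with one constant operand; a bare mask SSA name
   of scalar boolean type is accepted through the mask case handled first
   in the function body.  */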

static bool
vect_is_simple_cond (tree cond, vec_info *vinfo,
		     tree *comp_vectype, enum vect_def_type *dts)
{
  tree lhs, rhs;
  tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;

  /* Mask case.  */
  if (TREE_CODE (cond) == SSA_NAME
      && VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (cond)))
    {
      gimple *lhs_def_stmt = SSA_NAME_DEF_STMT (cond);
      if (!vect_is_simple_use (cond, vinfo, &lhs_def_stmt,
			       &dts[0], comp_vectype)
	  || !*comp_vectype
	  || !VECTOR_BOOLEAN_TYPE_P (*comp_vectype))
	return false;
      return true;
    }

  if (!COMPARISON_CLASS_P (cond))
    return false;

  lhs = TREE_OPERAND (cond, 0);
  rhs = TREE_OPERAND (cond, 1);

  if (TREE_CODE (lhs) == SSA_NAME)
    {
      gimple *lhs_def_stmt = SSA_NAME_DEF_STMT (lhs);
      if (!vect_is_simple_use (lhs, vinfo, &lhs_def_stmt, &dts[0], &vectype1))
	return false;
    }
  else if (TREE_CODE (lhs) == INTEGER_CST || TREE_CODE (lhs) == REAL_CST
	   || TREE_CODE (lhs) == FIXED_CST)
    dts[0] = vect_constant_def;
  else
    return false;

  if (TREE_CODE (rhs) == SSA_NAME)
    {
      gimple *rhs_def_stmt = SSA_NAME_DEF_STMT (rhs);
      if (!vect_is_simple_use (rhs, vinfo, &rhs_def_stmt, &dts[1], &vectype2))
	return false;
    }
  else if (TREE_CODE (rhs) == INTEGER_CST || TREE_CODE (rhs) == REAL_CST
	   || TREE_CODE (rhs) == FIXED_CST)
    dts[1] = vect_constant_def;
  else
    return false;

  if (vectype1 && vectype2
      && TYPE_VECTOR_SUBPARTS (vectype1) != TYPE_VECTOR_SUBPARTS (vectype2))
    return false;

  *comp_vectype = vectype1 ? vectype1 : vectype2;
  return true;
}

/* vectorizable_condition.

   Check if STMT is a conditional modify expression that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT, and insert it
   at GSI.

   When STMT is vectorized as a nested cycle, REDUC_DEF is the vector variable
   to be used at REDUC_INDEX (in the then clause if REDUC_INDEX is 1, and in
   the else clause if it is 2).

   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

bool
vectorizable_condition (gimple *stmt, gimple_stmt_iterator *gsi,
			gimple **vec_stmt, tree reduc_def, int reduc_index,
			slp_tree slp_node)
{
  tree scalar_dest = NULL_TREE;
  tree vec_dest = NULL_TREE;
  tree cond_expr, cond_expr0 = NULL_TREE, cond_expr1 = NULL_TREE;
  tree then_clause, else_clause;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree comp_vectype = NULL_TREE;
  tree vec_cond_lhs = NULL_TREE, vec_cond_rhs = NULL_TREE;
  tree vec_then_clause = NULL_TREE, vec_else_clause = NULL_TREE;
  tree vec_compare;
  tree new_temp;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum vect_def_type dts[4]
    = {vect_unknown_def_type, vect_unknown_def_type,
       vect_unknown_def_type, vect_unknown_def_type};
  int ndts = 4;
  int ncopies;
  enum tree_code code, cond_code, bitop1 = NOP_EXPR, bitop2 = NOP_EXPR;
  stmt_vec_info prev_stmt_info = NULL;
  int i, j;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec<tree> vec_oprnds0 = vNULL;
  vec<tree> vec_oprnds1 = vNULL;
  vec<tree> vec_oprnds2 = vNULL;
  vec<tree> vec_oprnds3 = vNULL;
  tree vec_cmp_type;
  bool masked = false;

  if (reduc_index && STMT_SLP_TYPE (stmt_info))
    return false;

  if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == TREE_CODE_REDUCTION)
    {
      if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
	return false;

      if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
	  && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
	       && reduc_def))
	return false;

      /* FORNOW: not yet supported.  */
      if (STMT_VINFO_LIVE_P (stmt_info))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "value used after loop.\n");
	  return false;
	}
    }

  /* Is this a vectorizable conditional operation?  */
  if (!is_gimple_assign (stmt))
    return false;

  code = gimple_assign_rhs_code (stmt);

  if (code != COND_EXPR)
    return false;

  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  int nunits = TYPE_VECTOR_SUBPARTS (vectype);
  tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;

  if (slp_node)
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;

  gcc_assert (ncopies >= 1);
  if (reduc_index && ncopies > 1)
    return false; /* FORNOW */

  cond_expr = gimple_assign_rhs1 (stmt);
  then_clause = gimple_assign_rhs2 (stmt);
  else_clause = gimple_assign_rhs3 (stmt);

  if (!vect_is_simple_cond (cond_expr, stmt_info->vinfo,
			    &comp_vectype, &dts[0])
      || !comp_vectype)
    return false;

  gimple *def_stmt;
  if (!vect_is_simple_use (then_clause, stmt_info->vinfo, &def_stmt, &dts[2],
			   &vectype1))
    return false;
  if (!vect_is_simple_use (else_clause, stmt_info->vinfo, &def_stmt, &dts[3],
			   &vectype2))
    return false;

  if (vectype1 && !useless_type_conversion_p (vectype, vectype1))
    return false;

  if (vectype2 && !useless_type_conversion_p (vectype, vectype2))
    return false;

  masked = !COMPARISON_CLASS_P (cond_expr);
  vec_cmp_type = build_same_sized_truth_vector_type (comp_vectype);

  if (vec_cmp_type == NULL_TREE)
    return false;

  cond_code = TREE_CODE (cond_expr);
  if (!masked)
    {
      cond_expr0 = TREE_OPERAND (cond_expr, 0);
      cond_expr1 = TREE_OPERAND (cond_expr, 1);
    }

  if (!masked && VECTOR_BOOLEAN_TYPE_P (comp_vectype))
    {
      /* Boolean values may have another representation in vectors
	 and therefore we prefer bit operations over comparison for
	 them (which also works for scalar masks).  We store opcodes
	 to use in bitop1 and bitop2.  Statement is vectorized as
	 BITOP2 (rhs1 BITOP1 rhs2) or rhs1 BITOP2 (BITOP1 rhs2)
	 depending on bitop1 and bitop2 arity.  */
      switch (cond_code)
	{
	case GT_EXPR:
	  bitop1 = BIT_NOT_EXPR;
	  bitop2 = BIT_AND_EXPR;
	  break;
	case GE_EXPR:
	  bitop1 = BIT_NOT_EXPR;
	  bitop2 = BIT_IOR_EXPR;
	  break;
	case LT_EXPR:
	  bitop1 = BIT_NOT_EXPR;
	  bitop2 = BIT_AND_EXPR;
	  std::swap (cond_expr0, cond_expr1);
	  break;
	case LE_EXPR:
	  bitop1 = BIT_NOT_EXPR;
	  bitop2 = BIT_IOR_EXPR;
	  std::swap (cond_expr0, cond_expr1);
	  break;
	case NE_EXPR:
	  bitop1 = BIT_XOR_EXPR;
	  break;
	case EQ_EXPR:
	  bitop1 = BIT_XOR_EXPR;
	  bitop2 = BIT_NOT_EXPR;
	  break;
	default:
	  return false;
	}
      cond_code = SSA_NAME;
    }
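  /* A worked instance (boolean semantics assumed): for x < y on mask
     operands the operands are swapped and the compare is emitted as
     y & ~x, which on values in {0,1} is exactly x < y; for x == y we
     emit x ^ y and, rather than negating it, swap the then and else
     clauses during the transform below.  */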

  if (!vec_stmt)
    {
      STMT_VINFO_TYPE (stmt_info) = condition_vec_info_type;
      if (bitop1 != NOP_EXPR)
	{
	  machine_mode mode = TYPE_MODE (comp_vectype);
	  optab optab;

	  optab = optab_for_tree_code (bitop1, comp_vectype, optab_default);
	  if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing)
	    return false;

	  if (bitop2 != NOP_EXPR)
	    {
	      optab = optab_for_tree_code (bitop2, comp_vectype,
					   optab_default);
	      if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing)
		return false;
	    }
	}
      if (expand_vec_cond_expr_p (vectype, comp_vectype,
				  cond_code))
	{
	  vect_model_simple_cost (stmt_info, ncopies, dts, ndts, NULL, NULL);
	  return true;
	}
      return false;
    }

  /* Transform.  */

  if (!slp_node)
    {
      vec_oprnds0.create (1);
      vec_oprnds1.create (1);
      vec_oprnds2.create (1);
      vec_oprnds3.create (1);
    }

  /* Handle def.  */
  scalar_dest = gimple_assign_lhs (stmt);
  vec_dest = vect_create_destination_var (scalar_dest, vectype);

  /* Handle cond expr.  */
  for (j = 0; j < ncopies; j++)
    {
      gassign *new_stmt = NULL;
      if (j == 0)
	{
	  if (slp_node)
	    {
	      auto_vec<tree, 4> ops;
	      auto_vec<vec<tree>, 4> vec_defs;

	      if (masked)
		ops.safe_push (cond_expr);
	      else
		{
		  ops.safe_push (cond_expr0);
		  ops.safe_push (cond_expr1);
		}
	      ops.safe_push (then_clause);
	      ops.safe_push (else_clause);
	      vect_get_slp_defs (ops, slp_node, &vec_defs);
	      vec_oprnds3 = vec_defs.pop ();
	      vec_oprnds2 = vec_defs.pop ();
	      if (!masked)
		vec_oprnds1 = vec_defs.pop ();
	      vec_oprnds0 = vec_defs.pop ();
	    }
	  else
	    {
	      gimple *gtemp;
	      if (masked)
		{
		  vec_cond_lhs
		    = vect_get_vec_def_for_operand (cond_expr, stmt,
						    comp_vectype);
		  vect_is_simple_use (cond_expr, stmt_info->vinfo,
				      &gtemp, &dts[0]);
		}
	      else
		{
		  vec_cond_lhs
		    = vect_get_vec_def_for_operand (cond_expr0,
						    stmt, comp_vectype);
		  vect_is_simple_use (cond_expr0, loop_vinfo, &gtemp, &dts[0]);

		  vec_cond_rhs
		    = vect_get_vec_def_for_operand (cond_expr1,
						    stmt, comp_vectype);
		  vect_is_simple_use (cond_expr1, loop_vinfo, &gtemp, &dts[1]);
		}
	      if (reduc_index == 1)
		vec_then_clause = reduc_def;
	      else
		{
		  vec_then_clause = vect_get_vec_def_for_operand (then_clause,
								  stmt);
		  vect_is_simple_use (then_clause, loop_vinfo,
				      &gtemp, &dts[2]);
		}
	      if (reduc_index == 2)
		vec_else_clause = reduc_def;
	      else
		{
		  vec_else_clause = vect_get_vec_def_for_operand (else_clause,
								  stmt);
		  vect_is_simple_use (else_clause, loop_vinfo, &gtemp, &dts[3]);
		}
	    }
	}
      else
	{
	  vec_cond_lhs
	    = vect_get_vec_def_for_stmt_copy (dts[0],
					      vec_oprnds0.pop ());
	  if (!masked)
	    vec_cond_rhs
	      = vect_get_vec_def_for_stmt_copy (dts[1],
						vec_oprnds1.pop ());

	  vec_then_clause = vect_get_vec_def_for_stmt_copy (dts[2],
							    vec_oprnds2.pop ());
	  vec_else_clause = vect_get_vec_def_for_stmt_copy (dts[3],
							    vec_oprnds3.pop ());
	}

      if (!slp_node)
	{
	  vec_oprnds0.quick_push (vec_cond_lhs);
	  if (!masked)
	    vec_oprnds1.quick_push (vec_cond_rhs);
	  vec_oprnds2.quick_push (vec_then_clause);
	  vec_oprnds3.quick_push (vec_else_clause);
	}

      /* Arguments are ready.  Create the new vector stmt.  */
      FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_cond_lhs)
	{
	  vec_then_clause = vec_oprnds2[i];
	  vec_else_clause = vec_oprnds3[i];

	  if (masked)
	    vec_compare = vec_cond_lhs;
	  else
	    {
	      vec_cond_rhs = vec_oprnds1[i];
	      if (bitop1 == NOP_EXPR)
		vec_compare = build2 (cond_code, vec_cmp_type,
				      vec_cond_lhs, vec_cond_rhs);
	      else
		{
		  new_temp = make_ssa_name (vec_cmp_type);
		  if (bitop1 == BIT_NOT_EXPR)
		    new_stmt = gimple_build_assign (new_temp, bitop1,
						    vec_cond_rhs);
		  else
		    new_stmt
		      = gimple_build_assign (new_temp, bitop1, vec_cond_lhs,
					     vec_cond_rhs);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		  if (bitop2 == NOP_EXPR)
		    vec_compare = new_temp;
		  else if (bitop2 == BIT_NOT_EXPR)
		    {
		      /* Instead of doing ~x ? y : z do x ? z : y.  */
		      vec_compare = new_temp;
		      std::swap (vec_then_clause, vec_else_clause);
		    }
		  else
		    {
		      vec_compare = make_ssa_name (vec_cmp_type);
		      new_stmt
			= gimple_build_assign (vec_compare, bitop2,
					       vec_cond_lhs, new_temp);
		      vect_finish_stmt_generation (stmt, new_stmt, gsi);
		    }
		}
	    }
	  new_temp = make_ssa_name (vec_dest);
	  new_stmt = gimple_build_assign (new_temp, VEC_COND_EXPR,
					  vec_compare, vec_then_clause,
					  vec_else_clause);
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
	  if (slp_node)
	    SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	}

      if (slp_node)
	continue;

      if (j == 0)
	STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
	STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  vec_oprnds0.release ();
  vec_oprnds1.release ();
  vec_oprnds2.release ();
  vec_oprnds3.release ();

  return true;
}

/* vectorizable_comparison.

   Check if STMT is a comparison expression that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   comparison, put it in VEC_STMT, and insert it at GSI.

   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_comparison (gimple *stmt, gimple_stmt_iterator *gsi,
			 gimple **vec_stmt, tree reduc_def,
			 slp_tree slp_node)
{
  tree lhs, rhs1, rhs2;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  tree vec_rhs1 = NULL_TREE, vec_rhs2 = NULL_TREE;
  tree new_temp;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum vect_def_type dts[2] = {vect_unknown_def_type, vect_unknown_def_type};
  int ndts = 2;
  unsigned nunits;
  int ncopies;
  enum tree_code code, bitop1 = NOP_EXPR, bitop2 = NOP_EXPR;
  stmt_vec_info prev_stmt_info = NULL;
  int i, j;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec<tree> vec_oprnds0 = vNULL;
  vec<tree> vec_oprnds1 = vNULL;
  gimple *def_stmt;
  tree mask_type;
  tree mask;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (!vectype || !VECTOR_BOOLEAN_TYPE_P (vectype))
    return false;

  mask_type = vectype;
  nunits = TYPE_VECTOR_SUBPARTS (vectype);

  if (slp_node)
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;

  gcc_assert (ncopies >= 1);
  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
      && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
	   && reduc_def))
    return false;

  if (STMT_VINFO_LIVE_P (stmt_info))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "value used after loop.\n");
      return false;
    }

  if (!is_gimple_assign (stmt))
    return false;

  code = gimple_assign_rhs_code (stmt);

  if (TREE_CODE_CLASS (code) != tcc_comparison)
    return false;

  rhs1 = gimple_assign_rhs1 (stmt);
  rhs2 = gimple_assign_rhs2 (stmt);

  if (!vect_is_simple_use (rhs1, stmt_info->vinfo, &def_stmt,
			   &dts[0], &vectype1))
    return false;

  if (!vect_is_simple_use (rhs2, stmt_info->vinfo, &def_stmt,
			   &dts[1], &vectype2))
    return false;

  if (vectype1 && vectype2
      && TYPE_VECTOR_SUBPARTS (vectype1) != TYPE_VECTOR_SUBPARTS (vectype2))
    return false;

  vectype = vectype1 ? vectype1 : vectype2;

  /* Invariant comparison.  */
  if (!vectype)
    {
      vectype = get_vectype_for_scalar_type (TREE_TYPE (rhs1));
      if (TYPE_VECTOR_SUBPARTS (vectype) != nunits)
	return false;
    }
  else if (nunits != TYPE_VECTOR_SUBPARTS (vectype))
    return false;
8213 | ||
49e76ff1 IE |
8214 | /* Can't compare mask and non-mask types. */ |
8215 | if (vectype1 && vectype2 | |
8216 | && (VECTOR_BOOLEAN_TYPE_P (vectype1) ^ VECTOR_BOOLEAN_TYPE_P (vectype2))) | |
8217 | return false; | |
8218 | ||
8219 | /* Boolean values may have another representation in vectors | |
8220 | and therefore we prefer bit operations over comparison for | |
8221 | them (which also works for scalar masks). We store opcodes | |
8222 | to use in bitop1 and bitop2. The statement is vectorized as |
8223 | BITOP2 (rhs1 BITOP1 rhs2) or | |
8224 | rhs1 BITOP2 (BITOP1 rhs2) | |
8225 | depending on bitop1 and bitop2 arity. */ | |
8226 | if (VECTOR_BOOLEAN_TYPE_P (vectype)) | |
8227 | { | |
8228 | if (code == GT_EXPR) | |
8229 | { | |
8230 | bitop1 = BIT_NOT_EXPR; | |
8231 | bitop2 = BIT_AND_EXPR; | |
8232 | } | |
8233 | else if (code == GE_EXPR) | |
8234 | { | |
8235 | bitop1 = BIT_NOT_EXPR; | |
8236 | bitop2 = BIT_IOR_EXPR; | |
8237 | } | |
8238 | else if (code == LT_EXPR) | |
8239 | { | |
8240 | bitop1 = BIT_NOT_EXPR; | |
8241 | bitop2 = BIT_AND_EXPR; | |
8242 | std::swap (rhs1, rhs2); | |
264d951a | 8243 | std::swap (dts[0], dts[1]); |
49e76ff1 IE |
8244 | } |
8245 | else if (code == LE_EXPR) | |
8246 | { | |
8247 | bitop1 = BIT_NOT_EXPR; | |
8248 | bitop2 = BIT_IOR_EXPR; | |
8249 | std::swap (rhs1, rhs2); | |
264d951a | 8250 | std::swap (dts[0], dts[1]); |
49e76ff1 IE |
8251 | } |
8252 | else | |
8253 | { | |
8254 | bitop1 = BIT_XOR_EXPR; | |
8255 | if (code == EQ_EXPR) | |
8256 | bitop2 = BIT_NOT_EXPR; | |
8257 | } | |
8258 | } | |
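  /* Editor's illustration (not part of the original source): for one-bit
     booleans the mapping above follows from the identities

       a >  b  ==  (~b) & a    (bitop1 = BIT_NOT_EXPR, bitop2 = BIT_AND_EXPR)
       a >= b  ==  (~b) | a    (bitop1 = BIT_NOT_EXPR, bitop2 = BIT_IOR_EXPR)
       a == b  ==  ~(a ^ b)    (bitop1 = BIT_XOR_EXPR, bitop2 = BIT_NOT_EXPR)

     which can be checked exhaustively over a, b in {0, 1}:

       for (int a = 0; a <= 1; a++)
	 for (int b = 0; b <= 1; b++)
	   {
	     gcc_assert ((a > b)  == ((~b & a) & 1));
	     gcc_assert ((a >= b) == ((~b | a) & 1));
	     gcc_assert ((a == b) == (~(a ^ b) & 1));
	   }

     LT and LE reuse the GT and GE forms with rhs1 and rhs2 swapped,
     exactly as done above.  */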
8259 | ||
42fd8198 IE |
8260 | if (!vec_stmt) |
8261 | { | |
8262 | STMT_VINFO_TYPE (stmt_info) = comparison_vec_info_type; | |
49e76ff1 | 8263 | vect_model_simple_cost (stmt_info, ncopies * (1 + (bitop2 != NOP_EXPR)), |
4fc5ebf1 | 8264 | dts, ndts, NULL, NULL); |
49e76ff1 | 8265 | if (bitop1 == NOP_EXPR) |
96592eed | 8266 | return expand_vec_cmp_expr_p (vectype, mask_type, code); |
49e76ff1 IE |
8267 | else |
8268 | { | |
8269 | machine_mode mode = TYPE_MODE (vectype); | |
8270 | optab optab; | |
8271 | ||
8272 | optab = optab_for_tree_code (bitop1, vectype, optab_default); | |
8273 | if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing) | |
8274 | return false; | |
8275 | ||
8276 | if (bitop2 != NOP_EXPR) | |
8277 | { | |
8278 | optab = optab_for_tree_code (bitop2, vectype, optab_default); | |
8279 | if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing) | |
8280 | return false; | |
8281 | } | |
8282 | return true; | |
8283 | } | |
42fd8198 IE |
8284 | } |
8285 | ||
8286 | /* Transform. */ | |
8287 | if (!slp_node) | |
8288 | { | |
8289 | vec_oprnds0.create (1); | |
8290 | vec_oprnds1.create (1); | |
8291 | } | |
8292 | ||
8293 | /* Handle def. */ | |
8294 | lhs = gimple_assign_lhs (stmt); | |
8295 | mask = vect_create_destination_var (lhs, mask_type); | |
8296 | ||
8297 | /* Handle cmp expr. */ | |
8298 | for (j = 0; j < ncopies; j++) | |
8299 | { | |
8300 | gassign *new_stmt = NULL; | |
8301 | if (j == 0) | |
8302 | { | |
8303 | if (slp_node) | |
8304 | { | |
8305 | auto_vec<tree, 2> ops; | |
8306 | auto_vec<vec<tree>, 2> vec_defs; | |
8307 | ||
8308 | ops.safe_push (rhs1); | |
8309 | ops.safe_push (rhs2); | |
306b0c92 | 8310 | vect_get_slp_defs (ops, slp_node, &vec_defs); |
42fd8198 IE |
8311 | vec_oprnds1 = vec_defs.pop (); |
8312 | vec_oprnds0 = vec_defs.pop (); | |
8313 | } | |
8314 | else | |
8315 | { | |
e4af0bc4 IE |
8316 | vec_rhs1 = vect_get_vec_def_for_operand (rhs1, stmt, vectype); |
8317 | vec_rhs2 = vect_get_vec_def_for_operand (rhs2, stmt, vectype); | |
42fd8198 IE |
8318 | } |
8319 | } | |
8320 | else | |
8321 | { | |
8322 | vec_rhs1 = vect_get_vec_def_for_stmt_copy (dts[0], | |
8323 | vec_oprnds0.pop ()); | |
8324 | vec_rhs2 = vect_get_vec_def_for_stmt_copy (dts[1], | |
8325 | vec_oprnds1.pop ()); | |
8326 | } | |
8327 | ||
8328 | if (!slp_node) | |
8329 | { | |
8330 | vec_oprnds0.quick_push (vec_rhs1); | |
8331 | vec_oprnds1.quick_push (vec_rhs2); | |
8332 | } | |
8333 | ||
8334 | /* Arguments are ready. Create the new vector stmt. */ | |
8335 | FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_rhs1) | |
8336 | { | |
8337 | vec_rhs2 = vec_oprnds1[i]; | |
8338 | ||
8339 | new_temp = make_ssa_name (mask); | |
49e76ff1 IE |
8340 | if (bitop1 == NOP_EXPR) |
8341 | { | |
8342 | new_stmt = gimple_build_assign (new_temp, code, | |
8343 | vec_rhs1, vec_rhs2); | |
8344 | vect_finish_stmt_generation (stmt, new_stmt, gsi); | |
8345 | } | |
8346 | else | |
8347 | { | |
8348 | if (bitop1 == BIT_NOT_EXPR) | |
8349 | new_stmt = gimple_build_assign (new_temp, bitop1, vec_rhs2); | |
8350 | else | |
8351 | new_stmt = gimple_build_assign (new_temp, bitop1, vec_rhs1, | |
8352 | vec_rhs2); | |
8353 | vect_finish_stmt_generation (stmt, new_stmt, gsi); | |
8354 | if (bitop2 != NOP_EXPR) | |
8355 | { | |
8356 | tree res = make_ssa_name (mask); | |
8357 | if (bitop2 == BIT_NOT_EXPR) | |
8358 | new_stmt = gimple_build_assign (res, bitop2, new_temp); | |
8359 | else | |
8360 | new_stmt = gimple_build_assign (res, bitop2, vec_rhs1, | |
8361 | new_temp); | |
8362 | vect_finish_stmt_generation (stmt, new_stmt, gsi); | |
8363 | } | |
8364 | } | |
42fd8198 IE |
8365 | if (slp_node) |
8366 | SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt); | |
8367 | } | |
8368 | ||
8369 | if (slp_node) | |
8370 | continue; | |
8371 | ||
8372 | if (j == 0) | |
8373 | STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt; | |
8374 | else | |
8375 | STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt; | |
8376 | ||
8377 | prev_stmt_info = vinfo_for_stmt (new_stmt); | |
8378 | } | |
8379 | ||
8380 | vec_oprnds0.release (); | |
8381 | vec_oprnds1.release (); | |
8382 | ||
8383 | return true; | |
8384 | } | |
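/* Editor's sketch (hypothetical SSA names, not from the original source):
   for a boolean-vector EQ the transform loop above emits the unary bitop2
   arm, while GT emits the binary arm:

     mask_1 = vec_rhs1 ^ vec_rhs2;     bitop1 = BIT_XOR_EXPR
     mask_2 = ~mask_1;                 bitop2 = BIT_NOT_EXPR   (EQ)

     mask_3 = ~vec_rhs2;               bitop1 = BIT_NOT_EXPR
     mask_4 = mask_3 & vec_rhs1;       bitop2 = BIT_AND_EXPR   (GT)  */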
ebfd146a | 8385 | |
8644a673 | 8386 | /* Make sure the statement is vectorizable. */ |
ebfd146a IR |
8387 | |
8388 | bool | |
355fe088 | 8389 | vect_analyze_stmt (gimple *stmt, bool *need_to_vectorize, slp_tree node) |
ebfd146a | 8390 | { |
8644a673 | 8391 | stmt_vec_info stmt_info = vinfo_for_stmt (stmt); |
a70d6342 | 8392 | bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info); |
b8698a0f | 8393 | enum vect_relevant relevance = STMT_VINFO_RELEVANT (stmt_info); |
ebfd146a | 8394 | bool ok; |
355fe088 | 8395 | gimple *pattern_stmt; |
363477c0 | 8396 | gimple_seq pattern_def_seq; |
ebfd146a | 8397 | |
73fbfcad | 8398 | if (dump_enabled_p ()) |
ebfd146a | 8399 | { |
78c60e3d SS |
8400 | dump_printf_loc (MSG_NOTE, vect_location, "==> examining statement: "); |
8401 | dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0); | |
8644a673 | 8402 | } |
ebfd146a | 8403 | |
1825a1f3 | 8404 | if (gimple_has_volatile_ops (stmt)) |
b8698a0f | 8405 | { |
73fbfcad | 8406 | if (dump_enabled_p ()) |
78c60e3d | 8407 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 8408 | "not vectorized: stmt has volatile operands\n"); |
1825a1f3 IR |
8409 | |
8410 | return false; | |
8411 | } | |
b8698a0f L |
8412 | |
8413 | /* Skip stmts that do not need to be vectorized. In loops this is expected | |
8644a673 IR |
8414 | to include: |
8415 | - the COND_EXPR which is the loop exit condition | |
8416 | - any LABEL_EXPRs in the loop | |
b8698a0f | 8417 | - computations that are used only for array indexing or loop control. |
8644a673 | 8418 | In basic blocks we only analyze statements that are a part of some SLP |
83197f37 | 8419 | instance; therefore, all the statements are relevant. |
ebfd146a | 8420 | |
d092494c | 8421 | A pattern statement needs to be analyzed instead of the original statement |
83197f37 | 8422 | if the original statement is not relevant. Otherwise, we analyze both |
079c527f JJ |
8423 | statements. In basic blocks we are called from some SLP instance |
8424 | traversal; in that case don't analyze pattern stmts instead, since |
8425 | the pattern stmts will already be part of an SLP instance. */ |
83197f37 IR |
8426 | |
8427 | pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info); | |
b8698a0f | 8428 | if (!STMT_VINFO_RELEVANT_P (stmt_info) |
8644a673 | 8429 | && !STMT_VINFO_LIVE_P (stmt_info)) |
ebfd146a | 8430 | { |
9d5e7640 | 8431 | if (STMT_VINFO_IN_PATTERN_P (stmt_info) |
83197f37 | 8432 | && pattern_stmt |
9d5e7640 IR |
8433 | && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt)) |
8434 | || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt)))) | |
8435 | { | |
83197f37 | 8436 | /* Analyze PATTERN_STMT instead of the original stmt. */ |
9d5e7640 IR |
8437 | stmt = pattern_stmt; |
8438 | stmt_info = vinfo_for_stmt (pattern_stmt); | |
73fbfcad | 8439 | if (dump_enabled_p ()) |
9d5e7640 | 8440 | { |
78c60e3d SS |
8441 | dump_printf_loc (MSG_NOTE, vect_location, |
8442 | "==> examining pattern statement: "); | |
8443 | dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0); | |
9d5e7640 IR |
8444 | } |
8445 | } | |
8446 | else | |
8447 | { | |
73fbfcad | 8448 | if (dump_enabled_p ()) |
e645e942 | 8449 | dump_printf_loc (MSG_NOTE, vect_location, "irrelevant.\n"); |
ebfd146a | 8450 | |
9d5e7640 IR |
8451 | return true; |
8452 | } | |
8644a673 | 8453 | } |
83197f37 | 8454 | else if (STMT_VINFO_IN_PATTERN_P (stmt_info) |
079c527f | 8455 | && node == NULL |
83197f37 IR |
8456 | && pattern_stmt |
8457 | && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt)) | |
8458 | || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt)))) | |
8459 | { | |
8460 | /* Analyze PATTERN_STMT too. */ | |
73fbfcad | 8461 | if (dump_enabled_p ()) |
83197f37 | 8462 | { |
78c60e3d SS |
8463 | dump_printf_loc (MSG_NOTE, vect_location, |
8464 | "==> examining pattern statement: "); | |
8465 | dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0); | |
83197f37 IR |
8466 | } |
8467 | ||
8468 | if (!vect_analyze_stmt (pattern_stmt, need_to_vectorize, node)) | |
8469 | return false; | |
8470 | } | |
ebfd146a | 8471 | |
1107f3ae | 8472 | if (is_pattern_stmt_p (stmt_info) |
079c527f | 8473 | && node == NULL |
363477c0 | 8474 | && (pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info))) |
1107f3ae | 8475 | { |
363477c0 | 8476 | gimple_stmt_iterator si; |
1107f3ae | 8477 | |
363477c0 JJ |
8478 | for (si = gsi_start (pattern_def_seq); !gsi_end_p (si); gsi_next (&si)) |
8479 | { | |
355fe088 | 8480 | gimple *pattern_def_stmt = gsi_stmt (si); |
363477c0 JJ |
8481 | if (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_def_stmt)) |
8482 | || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_def_stmt))) | |
8483 | { | |
8484 | /* Analyze def stmt of STMT if it's a pattern stmt. */ | |
73fbfcad | 8485 | if (dump_enabled_p ()) |
363477c0 | 8486 | { |
78c60e3d SS |
8487 | dump_printf_loc (MSG_NOTE, vect_location, |
8488 | "==> examining pattern def statement: "); | |
8489 | dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_def_stmt, 0); | |
363477c0 | 8490 | } |
1107f3ae | 8491 | |
363477c0 JJ |
8492 | if (!vect_analyze_stmt (pattern_def_stmt, |
8493 | need_to_vectorize, node)) | |
8494 | return false; | |
8495 | } | |
8496 | } | |
8497 | } | |
1107f3ae | 8498 | |
8644a673 IR |
8499 | switch (STMT_VINFO_DEF_TYPE (stmt_info)) |
8500 | { | |
8501 | case vect_internal_def: | |
8502 | break; | |
ebfd146a | 8503 | |
8644a673 | 8504 | case vect_reduction_def: |
7c5222ff | 8505 | case vect_nested_cycle: |
14a61437 RB |
8506 | gcc_assert (!bb_vinfo |
8507 | && (relevance == vect_used_in_outer | |
8508 | || relevance == vect_used_in_outer_by_reduction | |
8509 | || relevance == vect_used_by_reduction | |
b28ead45 AH |
8510 | || relevance == vect_unused_in_scope |
8511 | || relevance == vect_used_only_live)); | |
8644a673 IR |
8512 | break; |
8513 | ||
8514 | case vect_induction_def: | |
e7baeb39 RB |
8515 | gcc_assert (!bb_vinfo); |
8516 | break; | |
8517 | ||
8644a673 IR |
8518 | case vect_constant_def: |
8519 | case vect_external_def: | |
8520 | case vect_unknown_def_type: | |
8521 | default: | |
8522 | gcc_unreachable (); | |
8523 | } | |
ebfd146a | 8524 | |
8644a673 | 8525 | if (STMT_VINFO_RELEVANT_P (stmt_info)) |
ebfd146a | 8526 | { |
8644a673 | 8527 | gcc_assert (!VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt)))); |
0136f8f0 AH |
8528 | gcc_assert (STMT_VINFO_VECTYPE (stmt_info) |
8529 | || (is_gimple_call (stmt) | |
8530 | && gimple_call_lhs (stmt) == NULL_TREE)); | |
8644a673 | 8531 | *need_to_vectorize = true; |
ebfd146a IR |
8532 | } |
8533 | ||
b1af7da6 RB |
8534 | if (PURE_SLP_STMT (stmt_info) && !node) |
8535 | { | |
8536 | dump_printf_loc (MSG_NOTE, vect_location, | |
8537 | "handled only by SLP analysis\n"); | |
8538 | return true; | |
8539 | } | |
8540 | ||
8541 | ok = true; | |
8542 | if (!bb_vinfo | |
8543 | && (STMT_VINFO_RELEVANT_P (stmt_info) | |
8544 | || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def)) | |
8545 | ok = (vectorizable_simd_clone_call (stmt, NULL, NULL, node) | |
8546 | || vectorizable_conversion (stmt, NULL, NULL, node) | |
8547 | || vectorizable_shift (stmt, NULL, NULL, node) | |
8548 | || vectorizable_operation (stmt, NULL, NULL, node) | |
8549 | || vectorizable_assignment (stmt, NULL, NULL, node) | |
8550 | || vectorizable_load (stmt, NULL, NULL, node, NULL) | |
8551 | || vectorizable_call (stmt, NULL, NULL, node) | |
8552 | || vectorizable_store (stmt, NULL, NULL, node) | |
8553 | || vectorizable_reduction (stmt, NULL, NULL, node) | |
e7baeb39 | 8554 | || vectorizable_induction (stmt, NULL, NULL, node) |
42fd8198 IE |
8555 | || vectorizable_condition (stmt, NULL, NULL, NULL, 0, node) |
8556 | || vectorizable_comparison (stmt, NULL, NULL, NULL, node)); | |
b1af7da6 RB |
8557 | else |
8558 | { | |
8559 | if (bb_vinfo) | |
8560 | ok = (vectorizable_simd_clone_call (stmt, NULL, NULL, node) | |
8561 | || vectorizable_conversion (stmt, NULL, NULL, node) | |
8562 | || vectorizable_shift (stmt, NULL, NULL, node) | |
8563 | || vectorizable_operation (stmt, NULL, NULL, node) | |
8564 | || vectorizable_assignment (stmt, NULL, NULL, node) | |
8565 | || vectorizable_load (stmt, NULL, NULL, node, NULL) | |
8566 | || vectorizable_call (stmt, NULL, NULL, node) | |
8567 | || vectorizable_store (stmt, NULL, NULL, node) | |
42fd8198 IE |
8568 | || vectorizable_condition (stmt, NULL, NULL, NULL, 0, node) |
8569 | || vectorizable_comparison (stmt, NULL, NULL, NULL, node)); | |
b1af7da6 | 8570 | } |
8644a673 IR |
8571 | |
8572 | if (!ok) | |
ebfd146a | 8573 | { |
73fbfcad | 8574 | if (dump_enabled_p ()) |
8644a673 | 8575 | { |
78c60e3d SS |
8576 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
8577 | "not vectorized: relevant stmt not "); | |
8578 | dump_printf (MSG_MISSED_OPTIMIZATION, "supported: "); | |
8579 | dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0); | |
8644a673 | 8580 | } |
b8698a0f | 8581 | |
ebfd146a IR |
8582 | return false; |
8583 | } | |
8584 | ||
a70d6342 IR |
8585 | if (bb_vinfo) |
8586 | return true; | |
8587 | ||
8644a673 IR |
8588 | /* Stmts that are (also) "live" (i.e., used outside of the loop) |
8589 | need extra handling, except for vectorizable reductions. */ | |
8590 | if (STMT_VINFO_LIVE_P (stmt_info) | |
8591 | && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type) | |
b28ead45 | 8592 | ok = vectorizable_live_operation (stmt, NULL, NULL, -1, NULL); |
ebfd146a | 8593 | |
8644a673 | 8594 | if (!ok) |
ebfd146a | 8595 | { |
73fbfcad | 8596 | if (dump_enabled_p ()) |
8644a673 | 8597 | { |
78c60e3d SS |
8598 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
8599 | "not vectorized: live stmt not "); | |
8600 | dump_printf (MSG_MISSED_OPTIMIZATION, "supported: "); | |
8601 | dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0); | |
8644a673 | 8602 | } |
b8698a0f | 8603 | |
8644a673 | 8604 | return false; |
ebfd146a IR |
8605 | } |
8606 | ||
ebfd146a IR |
8607 | return true; |
8608 | } | |
8609 | ||
8610 | ||
8611 | /* Function vect_transform_stmt. | |
8612 | ||
8613 | Create a vectorized stmt to replace STMT, and insert it at BSI. */ | |
8614 | ||
8615 | bool | |
355fe088 | 8616 | vect_transform_stmt (gimple *stmt, gimple_stmt_iterator *gsi, |
0d0293ac | 8617 | bool *grouped_store, slp_tree slp_node, |
ebfd146a IR |
8618 | slp_instance slp_node_instance) |
8619 | { | |
8620 | bool is_store = false; | |
355fe088 | 8621 | gimple *vec_stmt = NULL; |
ebfd146a | 8622 | stmt_vec_info stmt_info = vinfo_for_stmt (stmt); |
ebfd146a | 8623 | bool done; |
ebfd146a | 8624 | |
fce57248 | 8625 | gcc_assert (slp_node || !PURE_SLP_STMT (stmt_info)); |
355fe088 | 8626 | gimple *old_vec_stmt = STMT_VINFO_VEC_STMT (stmt_info); |
225ce44b | 8627 | |
ebfd146a IR |
8628 | switch (STMT_VINFO_TYPE (stmt_info)) |
8629 | { | |
8630 | case type_demotion_vec_info_type: | |
ebfd146a | 8631 | case type_promotion_vec_info_type: |
ebfd146a IR |
8632 | case type_conversion_vec_info_type: |
8633 | done = vectorizable_conversion (stmt, gsi, &vec_stmt, slp_node); | |
8634 | gcc_assert (done); | |
8635 | break; | |
8636 | ||
8637 | case induc_vec_info_type: | |
e7baeb39 | 8638 | done = vectorizable_induction (stmt, gsi, &vec_stmt, slp_node); |
ebfd146a IR |
8639 | gcc_assert (done); |
8640 | break; | |
8641 | ||
9dc3f7de IR |
8642 | case shift_vec_info_type: |
8643 | done = vectorizable_shift (stmt, gsi, &vec_stmt, slp_node); | |
8644 | gcc_assert (done); | |
8645 | break; | |
8646 | ||
ebfd146a IR |
8647 | case op_vec_info_type: |
8648 | done = vectorizable_operation (stmt, gsi, &vec_stmt, slp_node); | |
8649 | gcc_assert (done); | |
8650 | break; | |
8651 | ||
8652 | case assignment_vec_info_type: | |
8653 | done = vectorizable_assignment (stmt, gsi, &vec_stmt, slp_node); | |
8654 | gcc_assert (done); | |
8655 | break; | |
8656 | ||
8657 | case load_vec_info_type: | |
b8698a0f | 8658 | done = vectorizable_load (stmt, gsi, &vec_stmt, slp_node, |
ebfd146a IR |
8659 | slp_node_instance); |
8660 | gcc_assert (done); | |
8661 | break; | |
8662 | ||
8663 | case store_vec_info_type: | |
8664 | done = vectorizable_store (stmt, gsi, &vec_stmt, slp_node); | |
8665 | gcc_assert (done); | |
0d0293ac | 8666 | if (STMT_VINFO_GROUPED_ACCESS (stmt_info) && !slp_node) |
ebfd146a IR |
8667 | { |
8668 | /* In case of interleaving, the whole chain is vectorized when the | |
ff802fa1 | 8669 | last store in the chain is reached. Store stmts before the last |
ebfd146a IR |
8670 | one are skipped, and their vec_stmt_info shouldn't be freed |
8671 | meanwhile. */ | |
0d0293ac | 8672 | *grouped_store = true; |
ebfd146a IR |
8673 | if (STMT_VINFO_VEC_STMT (stmt_info)) |
8674 | is_store = true; | |
8675 | } | |
8676 | else | |
8677 | is_store = true; | |
8678 | break; | |
8679 | ||
8680 | case condition_vec_info_type: | |
f7e531cf | 8681 | done = vectorizable_condition (stmt, gsi, &vec_stmt, NULL, 0, slp_node); |
ebfd146a IR |
8682 | gcc_assert (done); |
8683 | break; | |
8684 | ||
42fd8198 IE |
8685 | case comparison_vec_info_type: |
8686 | done = vectorizable_comparison (stmt, gsi, &vec_stmt, NULL, slp_node); | |
8687 | gcc_assert (done); | |
8688 | break; | |
8689 | ||
ebfd146a | 8690 | case call_vec_info_type: |
190c2236 | 8691 | done = vectorizable_call (stmt, gsi, &vec_stmt, slp_node); |
039d9ea1 | 8692 | stmt = gsi_stmt (*gsi); |
8e4284d0 | 8693 | if (gimple_call_internal_p (stmt, IFN_MASK_STORE)) |
5ce9450f | 8694 | is_store = true; |
ebfd146a IR |
8695 | break; |
8696 | ||
0136f8f0 AH |
8697 | case call_simd_clone_vec_info_type: |
8698 | done = vectorizable_simd_clone_call (stmt, gsi, &vec_stmt, slp_node); | |
8699 | stmt = gsi_stmt (*gsi); | |
8700 | break; | |
8701 | ||
ebfd146a | 8702 | case reduc_vec_info_type: |
b5aeb3bb | 8703 | done = vectorizable_reduction (stmt, gsi, &vec_stmt, slp_node); |
ebfd146a IR |
8704 | gcc_assert (done); |
8705 | break; | |
8706 | ||
8707 | default: | |
8708 | if (!STMT_VINFO_LIVE_P (stmt_info)) | |
8709 | { | |
73fbfcad | 8710 | if (dump_enabled_p ()) |
78c60e3d | 8711 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 8712 | "stmt not supported.\n"); |
ebfd146a IR |
8713 | gcc_unreachable (); |
8714 | } | |
8715 | } | |
8716 | ||
225ce44b RB |
8717 | /* Verify SLP vectorization doesn't mess with STMT_VINFO_VEC_STMT. |
8718 | This would break hybrid SLP vectorization. */ | |
8719 | if (slp_node) | |
d90f8440 RB |
8720 | gcc_assert (!vec_stmt |
8721 | && STMT_VINFO_VEC_STMT (stmt_info) == old_vec_stmt); | |
225ce44b | 8722 | |
ebfd146a IR |
8723 | /* Handle inner-loop stmts whose DEF is used in the loop-nest that |
8724 | is being vectorized, but outside the immediately enclosing loop. */ | |
8725 | if (vec_stmt | |
a70d6342 IR |
8726 | && STMT_VINFO_LOOP_VINFO (stmt_info) |
8727 | && nested_in_vect_loop_p (LOOP_VINFO_LOOP ( | |
8728 | STMT_VINFO_LOOP_VINFO (stmt_info)), stmt) | |
ebfd146a IR |
8729 | && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type |
8730 | && (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_outer | |
b8698a0f | 8731 | || STMT_VINFO_RELEVANT (stmt_info) == |
a70d6342 | 8732 | vect_used_in_outer_by_reduction)) |
ebfd146a | 8733 | { |
a70d6342 IR |
8734 | struct loop *innerloop = LOOP_VINFO_LOOP ( |
8735 | STMT_VINFO_LOOP_VINFO (stmt_info))->inner; | |
ebfd146a IR |
8736 | imm_use_iterator imm_iter; |
8737 | use_operand_p use_p; | |
8738 | tree scalar_dest; | |
355fe088 | 8739 | gimple *exit_phi; |
ebfd146a | 8740 | |
73fbfcad | 8741 | if (dump_enabled_p ()) |
78c60e3d | 8742 | dump_printf_loc (MSG_NOTE, vect_location, |
e645e942 | 8743 | "Record the vdef for outer-loop vectorization.\n"); |
ebfd146a IR |
8744 | |
8745 | /* Find the relevant loop-exit phi-node, and record the vec_stmt there |
8746 | (to be used when vectorizing outer-loop stmts that use the DEF of | |
8747 | STMT). */ | |
8748 | if (gimple_code (stmt) == GIMPLE_PHI) | |
8749 | scalar_dest = PHI_RESULT (stmt); | |
8750 | else | |
8751 | scalar_dest = gimple_assign_lhs (stmt); | |
8752 | ||
8753 | FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest) | |
8754 | { | |
8755 | if (!flow_bb_inside_loop_p (innerloop, gimple_bb (USE_STMT (use_p)))) | |
8756 | { | |
8757 | exit_phi = USE_STMT (use_p); | |
8758 | STMT_VINFO_VEC_STMT (vinfo_for_stmt (exit_phi)) = vec_stmt; | |
8759 | } | |
8760 | } | |
8761 | } | |
8762 | ||
8763 | /* Handle stmts whose DEF is used outside the loop-nest that is | |
8764 | being vectorized. */ | |
b28ead45 AH |
8765 | if (slp_node) |
8766 | { | |
8767 | gimple *slp_stmt; | |
8768 | int i; | |
bd2f172f RB |
8769 | if (STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type) |
8770 | FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (slp_node), i, slp_stmt) | |
8771 | { | |
8772 | stmt_vec_info slp_stmt_info = vinfo_for_stmt (slp_stmt); | |
8773 | if (STMT_VINFO_LIVE_P (slp_stmt_info)) | |
8774 | { | |
8775 | done = vectorizable_live_operation (slp_stmt, gsi, slp_node, i, | |
8776 | &vec_stmt); | |
8777 | gcc_assert (done); | |
8778 | } | |
8779 | } | |
b28ead45 AH |
8780 | } |
8781 | else if (STMT_VINFO_LIVE_P (stmt_info) | |
bd2f172f | 8782 | && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type) |
ebfd146a | 8783 | { |
b28ead45 | 8784 | done = vectorizable_live_operation (stmt, gsi, slp_node, -1, &vec_stmt); |
ebfd146a IR |
8785 | gcc_assert (done); |
8786 | } | |
8787 | ||
8788 | if (vec_stmt) | |
83197f37 | 8789 | STMT_VINFO_VEC_STMT (stmt_info) = vec_stmt; |
ebfd146a | 8790 | |
b8698a0f | 8791 | return is_store; |
ebfd146a IR |
8792 | } |
8793 | ||
8794 | ||
b8698a0f | 8795 | /* Remove a group of stores (for SLP or interleaving) and free their |
ebfd146a IR |
8796 | stmt_vec_info. */ |
8797 | ||
8798 | void | |
355fe088 | 8799 | vect_remove_stores (gimple *first_stmt) |
ebfd146a | 8800 | { |
355fe088 TS |
8801 | gimple *next = first_stmt; |
8802 | gimple *tmp; | |
ebfd146a IR |
8803 | gimple_stmt_iterator next_si; |
8804 | ||
8805 | while (next) | |
8806 | { | |
78048b1c JJ |
8807 | stmt_vec_info stmt_info = vinfo_for_stmt (next); |
8808 | ||
8809 | tmp = GROUP_NEXT_ELEMENT (stmt_info); | |
8810 | if (is_pattern_stmt_p (stmt_info)) | |
8811 | next = STMT_VINFO_RELATED_STMT (stmt_info); | |
ebfd146a IR |
8812 | /* Free the attached stmt_vec_info and remove the stmt. */ |
8813 | next_si = gsi_for_stmt (next); | |
3d3f2249 | 8814 | unlink_stmt_vdef (next); |
ebfd146a | 8815 | gsi_remove (&next_si, true); |
3d3f2249 | 8816 | release_defs (next); |
ebfd146a IR |
8817 | free_stmt_vec_info (next); |
8818 | next = tmp; | |
8819 | } | |
8820 | } | |
8821 | ||
8822 | ||
8823 | /* Function new_stmt_vec_info. | |
8824 | ||
8825 | Create and initialize a new stmt_vec_info struct for STMT. */ | |
8826 | ||
8827 | stmt_vec_info | |
310213d4 | 8828 | new_stmt_vec_info (gimple *stmt, vec_info *vinfo) |
ebfd146a IR |
8829 | { |
8830 | stmt_vec_info res; | |
8831 | res = (stmt_vec_info) xcalloc (1, sizeof (struct _stmt_vec_info)); | |
8832 | ||
8833 | STMT_VINFO_TYPE (res) = undef_vec_info_type; | |
8834 | STMT_VINFO_STMT (res) = stmt; | |
310213d4 | 8835 | res->vinfo = vinfo; |
8644a673 | 8836 | STMT_VINFO_RELEVANT (res) = vect_unused_in_scope; |
ebfd146a IR |
8837 | STMT_VINFO_LIVE_P (res) = false; |
8838 | STMT_VINFO_VECTYPE (res) = NULL; | |
8839 | STMT_VINFO_VEC_STMT (res) = NULL; | |
4b5caab7 | 8840 | STMT_VINFO_VECTORIZABLE (res) = true; |
ebfd146a IR |
8841 | STMT_VINFO_IN_PATTERN_P (res) = false; |
8842 | STMT_VINFO_RELATED_STMT (res) = NULL; | |
363477c0 | 8843 | STMT_VINFO_PATTERN_DEF_SEQ (res) = NULL; |
ebfd146a | 8844 | STMT_VINFO_DATA_REF (res) = NULL; |
af29617a | 8845 | STMT_VINFO_VEC_REDUCTION_TYPE (res) = TREE_CODE_REDUCTION; |
7e16ce79 | 8846 | STMT_VINFO_VEC_CONST_COND_REDUC_CODE (res) = ERROR_MARK; |
ebfd146a IR |
8847 | |
8848 | STMT_VINFO_DR_BASE_ADDRESS (res) = NULL; | |
8849 | STMT_VINFO_DR_OFFSET (res) = NULL; | |
8850 | STMT_VINFO_DR_INIT (res) = NULL; | |
8851 | STMT_VINFO_DR_STEP (res) = NULL; | |
8852 | STMT_VINFO_DR_ALIGNED_TO (res) = NULL; | |
8853 | ||
8854 | if (gimple_code (stmt) == GIMPLE_PHI | |
8855 | && is_loop_header_bb_p (gimple_bb (stmt))) | |
8856 | STMT_VINFO_DEF_TYPE (res) = vect_unknown_def_type; | |
8857 | else | |
8644a673 IR |
8858 | STMT_VINFO_DEF_TYPE (res) = vect_internal_def; |
8859 | ||
9771b263 | 8860 | STMT_VINFO_SAME_ALIGN_REFS (res).create (0); |
32e8bb8e | 8861 | STMT_SLP_TYPE (res) = loop_vect; |
78810bd3 RB |
8862 | STMT_VINFO_NUM_SLP_USES (res) = 0; |
8863 | ||
e14c1050 IR |
8864 | GROUP_FIRST_ELEMENT (res) = NULL; |
8865 | GROUP_NEXT_ELEMENT (res) = NULL; | |
8866 | GROUP_SIZE (res) = 0; | |
8867 | GROUP_STORE_COUNT (res) = 0; | |
8868 | GROUP_GAP (res) = 0; | |
8869 | GROUP_SAME_DR_STMT (res) = NULL; | |
ebfd146a IR |
8870 | |
8871 | return res; | |
8872 | } | |
8873 | ||
8874 | ||
8875 | /* Create a hash table for stmt_vec_info. */ | |
8876 | ||
8877 | void | |
8878 | init_stmt_vec_info_vec (void) | |
8879 | { | |
9771b263 DN |
8880 | gcc_assert (!stmt_vec_info_vec.exists ()); |
8881 | stmt_vec_info_vec.create (50); | |
ebfd146a IR |
8882 | } |
8883 | ||
8884 | ||
8885 | /* Free hash table for stmt_vec_info. */ | |
8886 | ||
8887 | void | |
8888 | free_stmt_vec_info_vec (void) | |
8889 | { | |
93675444 | 8890 | unsigned int i; |
3161455c | 8891 | stmt_vec_info info; |
93675444 JJ |
8892 | FOR_EACH_VEC_ELT (stmt_vec_info_vec, i, info) |
8893 | if (info != NULL) | |
3161455c | 8894 | free_stmt_vec_info (STMT_VINFO_STMT (info)); |
9771b263 DN |
8895 | gcc_assert (stmt_vec_info_vec.exists ()); |
8896 | stmt_vec_info_vec.release (); | |
ebfd146a IR |
8897 | } |
8898 | ||
8899 | ||
8900 | /* Free stmt vectorization related info. */ | |
8901 | ||
8902 | void | |
355fe088 | 8903 | free_stmt_vec_info (gimple *stmt) |
ebfd146a IR |
8904 | { |
8905 | stmt_vec_info stmt_info = vinfo_for_stmt (stmt); | |
8906 | ||
8907 | if (!stmt_info) | |
8908 | return; | |
8909 | ||
78048b1c JJ |
8910 | /* Check if this statement has a related "pattern stmt" |
8911 | (introduced by the vectorizer during the pattern recognition | |
8912 | pass). Free the pattern's stmt_vec_info and def stmt's stmt_vec_info |
8913 | too. */ | |
8914 | if (STMT_VINFO_IN_PATTERN_P (stmt_info)) | |
8915 | { | |
8916 | stmt_vec_info patt_info | |
8917 | = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info)); | |
8918 | if (patt_info) | |
8919 | { | |
363477c0 | 8920 | gimple_seq seq = STMT_VINFO_PATTERN_DEF_SEQ (patt_info); |
355fe088 | 8921 | gimple *patt_stmt = STMT_VINFO_STMT (patt_info); |
f0281fde RB |
8922 | gimple_set_bb (patt_stmt, NULL); |
8923 | tree lhs = gimple_get_lhs (patt_stmt); | |
e6f5c25d | 8924 | if (lhs && TREE_CODE (lhs) == SSA_NAME) |
f0281fde | 8925 | release_ssa_name (lhs); |
363477c0 JJ |
8926 | if (seq) |
8927 | { | |
8928 | gimple_stmt_iterator si; | |
8929 | for (si = gsi_start (seq); !gsi_end_p (si); gsi_next (&si)) | |
f0281fde | 8930 | { |
355fe088 | 8931 | gimple *seq_stmt = gsi_stmt (si); |
f0281fde | 8932 | gimple_set_bb (seq_stmt, NULL); |
7532abf2 | 8933 | lhs = gimple_get_lhs (seq_stmt); |
e6f5c25d | 8934 | if (lhs && TREE_CODE (lhs) == SSA_NAME) |
f0281fde RB |
8935 | release_ssa_name (lhs); |
8936 | free_stmt_vec_info (seq_stmt); | |
8937 | } | |
363477c0 | 8938 | } |
f0281fde | 8939 | free_stmt_vec_info (patt_stmt); |
78048b1c JJ |
8940 | } |
8941 | } | |
8942 | ||
9771b263 | 8943 | STMT_VINFO_SAME_ALIGN_REFS (stmt_info).release (); |
6c9e85fb | 8944 | STMT_VINFO_SIMD_CLONE_INFO (stmt_info).release (); |
ebfd146a IR |
8945 | set_vinfo_for_stmt (stmt, NULL); |
8946 | free (stmt_info); | |
8947 | } | |
8948 | ||
8949 | ||
bb67d9c7 | 8950 | /* Function get_vectype_for_scalar_type_and_size. |
ebfd146a | 8951 | |
bb67d9c7 | 8952 | Returns the vector type corresponding to SCALAR_TYPE and SIZE as supported |
ebfd146a IR |
8953 | by the target. */ |
8954 | ||
bb67d9c7 RG |
8955 | static tree |
8956 | get_vectype_for_scalar_type_and_size (tree scalar_type, unsigned size) | |
ebfd146a | 8957 | { |
c7d97b28 | 8958 | tree orig_scalar_type = scalar_type; |
ef4bddc2 RS |
8959 | machine_mode inner_mode = TYPE_MODE (scalar_type); |
8960 | machine_mode simd_mode; | |
2f816591 | 8961 | unsigned int nbytes = GET_MODE_SIZE (inner_mode); |
ebfd146a IR |
8962 | int nunits; |
8963 | tree vectype; | |
8964 | ||
cc4b5170 | 8965 | if (nbytes == 0) |
ebfd146a IR |
8966 | return NULL_TREE; |
8967 | ||
48f2e373 RB |
8968 | if (GET_MODE_CLASS (inner_mode) != MODE_INT |
8969 | && GET_MODE_CLASS (inner_mode) != MODE_FLOAT) | |
8970 | return NULL_TREE; | |
8971 | ||
7b7b1813 RG |
8972 | /* For vector types of elements whose mode precision doesn't |
8973 | match their types precision we use a element type of mode | |
8974 | precision. The vectorization routines will have to make sure | |
48f2e373 RB |
8975 | they support the proper result truncation/extension. |
8976 | We also make sure to build vector types with INTEGER_TYPE | |
8977 | component type only. */ | |
6d7971b8 | 8978 | if (INTEGRAL_TYPE_P (scalar_type) |
48f2e373 RB |
8979 | && (GET_MODE_BITSIZE (inner_mode) != TYPE_PRECISION (scalar_type) |
8980 | || TREE_CODE (scalar_type) != INTEGER_TYPE)) | |
7b7b1813 RG |
8981 | scalar_type = build_nonstandard_integer_type (GET_MODE_BITSIZE (inner_mode), |
8982 | TYPE_UNSIGNED (scalar_type)); | |
6d7971b8 | 8983 | |
ccbf5bb4 RG |
8984 | /* We shouldn't end up building VECTOR_TYPEs of non-scalar components. |
8985 | When the component mode passes the above test, simply use a type |
8986 | corresponding to that mode. The theory is that any use that | |
8987 | would cause problems with this will disable vectorization anyway. */ | |
dfc2e2ac | 8988 | else if (!SCALAR_FLOAT_TYPE_P (scalar_type) |
e67f39f7 | 8989 | && !INTEGRAL_TYPE_P (scalar_type)) |
60b95d28 RB |
8990 | scalar_type = lang_hooks.types.type_for_mode (inner_mode, 1); |
8991 | ||
8992 | /* We can't build a vector type of elements with alignment bigger than | |
8993 | their size. */ | |
dfc2e2ac | 8994 | else if (nbytes < TYPE_ALIGN_UNIT (scalar_type)) |
aca43c6c JJ |
8995 | scalar_type = lang_hooks.types.type_for_mode (inner_mode, |
8996 | TYPE_UNSIGNED (scalar_type)); | |
ccbf5bb4 | 8997 | |
dfc2e2ac RB |
8998 | /* If we fell back to using the mode, fail if there was |
8999 | no scalar type for it. */ | |
9000 | if (scalar_type == NULL_TREE) | |
9001 | return NULL_TREE; | |
9002 | ||
bb67d9c7 RG |
9003 | /* If no size was supplied use the mode the target prefers. Otherwise |
9004 | lookup a vector mode of the specified size. */ | |
9005 | if (size == 0) | |
9006 | simd_mode = targetm.vectorize.preferred_simd_mode (inner_mode); | |
9007 | else | |
9008 | simd_mode = mode_for_vector (inner_mode, size / nbytes); | |
cc4b5170 RG |
9009 | nunits = GET_MODE_SIZE (simd_mode) / nbytes; |
9010 | if (nunits <= 1) | |
9011 | return NULL_TREE; | |
ebfd146a IR |
9012 | |
9013 | vectype = build_vector_type (scalar_type, nunits); | |
ebfd146a IR |
9014 | |
9015 | if (!VECTOR_MODE_P (TYPE_MODE (vectype)) | |
9016 | && !INTEGRAL_MODE_P (TYPE_MODE (vectype))) | |
451dabda | 9017 | return NULL_TREE; |
ebfd146a | 9018 | |
c7d97b28 RB |
9019 | /* Re-attach the address-space qualifier if we canonicalized the scalar |
9020 | type. */ | |
9021 | if (TYPE_ADDR_SPACE (orig_scalar_type) != TYPE_ADDR_SPACE (vectype)) | |
9022 | return build_qualified_type | |
9023 | (vectype, KEEP_QUAL_ADDR_SPACE (TYPE_QUALS (orig_scalar_type))); | |
9024 | ||
ebfd146a IR |
9025 | return vectype; |
9026 | } | |
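/* Editor's illustration (values hypothetical, not from the original
   source): the unit computation above in isolation:

     unsigned nbytes = 4;              // GET_MODE_SIZE of a 4-byte inner mode
     unsigned vec_size = 16;           // GET_MODE_SIZE (simd_mode), e.g. SSE
     int nunits = vec_size / nbytes;   // == 4 -> build_vector_type (.., 4)

   a 32-byte vector mode would instead give nunits == 8.  */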
9027 | ||
bb67d9c7 RG |
9028 | unsigned int current_vector_size; |
9029 | ||
9030 | /* Function get_vectype_for_scalar_type. | |
9031 | ||
9032 | Returns the vector type corresponding to SCALAR_TYPE as supported | |
9033 | by the target. */ | |
9034 | ||
9035 | tree | |
9036 | get_vectype_for_scalar_type (tree scalar_type) | |
9037 | { | |
9038 | tree vectype; | |
9039 | vectype = get_vectype_for_scalar_type_and_size (scalar_type, | |
9040 | current_vector_size); | |
9041 | if (vectype | |
9042 | && current_vector_size == 0) | |
9043 | current_vector_size = GET_MODE_SIZE (TYPE_MODE (vectype)); | |
9044 | return vectype; | |
9045 | } | |
9046 | ||
42fd8198 IE |
9047 | /* Function get_mask_type_for_scalar_type. |
9048 | ||
9049 | Returns the mask type corresponding to a result of comparison | |
9050 | of vectors of specified SCALAR_TYPE as supported by target. */ | |
9051 | ||
9052 | tree | |
9053 | get_mask_type_for_scalar_type (tree scalar_type) | |
9054 | { | |
9055 | tree vectype = get_vectype_for_scalar_type (scalar_type); | |
9056 | ||
9057 | if (!vectype) | |
9058 | return NULL; | |
9059 | ||
9060 | return build_truth_vector_type (TYPE_VECTOR_SUBPARTS (vectype), | |
9061 | current_vector_size); | |
9062 | } |
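/* Editor's illustration (numbers hypothetical): with 16-byte vectors and
   a 4-byte scalar type, TYPE_VECTOR_SUBPARTS (vectype) == 4, so the
   return above amounts to build_truth_vector_type (4, 16), i.e. a
   4-element boolean vector sized to match the 16-byte data vectors.  */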
9063 | ||
b690cc0f RG |
9064 | /* Function get_same_sized_vectype |
9065 | ||
9066 | Returns a vector type corresponding to SCALAR_TYPE of size | |
9067 | VECTOR_TYPE if supported by the target. */ | |
9068 | ||
9069 | tree | |
bb67d9c7 | 9070 | get_same_sized_vectype (tree scalar_type, tree vector_type) |
b690cc0f | 9071 | { |
2568d8a1 | 9072 | if (VECT_SCALAR_BOOLEAN_TYPE_P (scalar_type)) |
9f47c7e5 IE |
9073 | return build_same_sized_truth_vector_type (vector_type); |
9074 | ||
bb67d9c7 RG |
9075 | return get_vectype_for_scalar_type_and_size |
9076 | (scalar_type, GET_MODE_SIZE (TYPE_MODE (vector_type))); | |
b690cc0f RG |
9077 | } |
9078 | ||
ebfd146a IR |
9079 | /* Function vect_is_simple_use. |
9080 | ||
9081 | Input: | |
81c40241 RB |
9082 | VINFO - the vect info of the loop or basic block that is being vectorized. |
9083 | OPERAND - operand in the loop or bb. | |
9084 | Output: | |
9085 | DEF_STMT - the defining stmt in case OPERAND is an SSA_NAME. | |
9086 | DT - the type of definition | |
ebfd146a IR |
9087 | |
9088 | Returns whether a stmt with OPERAND can be vectorized. | |
b8698a0f | 9089 | For loops, supportable operands are constants, loop invariants, and operands |
ff802fa1 | 9090 | that are defined by the current iteration of the loop. Unsupportable |
b8698a0f | 9091 | operands are those that are defined by a previous iteration of the loop (as |
a70d6342 IR |
9092 | is the case in reduction/induction computations). |
9093 | For basic blocks, supportable operands are constants and bb invariants. | |
9094 | For now, operands defined outside the basic block are not supported. */ | |
ebfd146a IR |
9095 | |
9096 | bool | |
81c40241 RB |
9097 | vect_is_simple_use (tree operand, vec_info *vinfo, |
9098 | gimple **def_stmt, enum vect_def_type *dt) | |
b8698a0f | 9099 | { |
ebfd146a | 9100 | *def_stmt = NULL; |
3fc356dc | 9101 | *dt = vect_unknown_def_type; |
b8698a0f | 9102 | |
73fbfcad | 9103 | if (dump_enabled_p ()) |
ebfd146a | 9104 | { |
78c60e3d SS |
9105 | dump_printf_loc (MSG_NOTE, vect_location, |
9106 | "vect_is_simple_use: operand "); | |
9107 | dump_generic_expr (MSG_NOTE, TDF_SLIM, operand); | |
e645e942 | 9108 | dump_printf (MSG_NOTE, "\n"); |
ebfd146a | 9109 | } |
b8698a0f | 9110 | |
b758f602 | 9111 | if (CONSTANT_CLASS_P (operand)) |
ebfd146a IR |
9112 | { |
9113 | *dt = vect_constant_def; | |
9114 | return true; | |
9115 | } | |
b8698a0f | 9116 | |
ebfd146a IR |
9117 | if (is_gimple_min_invariant (operand)) |
9118 | { | |
8644a673 | 9119 | *dt = vect_external_def; |
ebfd146a IR |
9120 | return true; |
9121 | } | |
9122 | ||
ebfd146a IR |
9123 | if (TREE_CODE (operand) != SSA_NAME) |
9124 | { | |
73fbfcad | 9125 | if (dump_enabled_p ()) |
af29617a AH |
9126 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
9127 | "not ssa-name.\n"); | |
ebfd146a IR |
9128 | return false; |
9129 | } | |
b8698a0f | 9130 | |
3fc356dc | 9131 | if (SSA_NAME_IS_DEFAULT_DEF (operand)) |
ebfd146a | 9132 | { |
3fc356dc RB |
9133 | *dt = vect_external_def; |
9134 | return true; | |
ebfd146a IR |
9135 | } |
9136 | ||
3fc356dc | 9137 | *def_stmt = SSA_NAME_DEF_STMT (operand); |
73fbfcad | 9138 | if (dump_enabled_p ()) |
ebfd146a | 9139 | { |
78c60e3d SS |
9140 | dump_printf_loc (MSG_NOTE, vect_location, "def_stmt: "); |
9141 | dump_gimple_stmt (MSG_NOTE, TDF_SLIM, *def_stmt, 0); | |
ebfd146a IR |
9142 | } |
9143 | ||
61d371eb | 9144 | if (! vect_stmt_in_region_p (vinfo, *def_stmt)) |
8644a673 | 9145 | *dt = vect_external_def; |
ebfd146a IR |
9146 | else |
9147 | { | |
3fc356dc | 9148 | stmt_vec_info stmt_vinfo = vinfo_for_stmt (*def_stmt); |
603cca93 | 9149 | *dt = STMT_VINFO_DEF_TYPE (stmt_vinfo); |
ebfd146a IR |
9150 | } |
9151 | ||
2e8ab70c RB |
9152 | if (dump_enabled_p ()) |
9153 | { | |
9154 | dump_printf_loc (MSG_NOTE, vect_location, "type of def: "); | |
9155 | switch (*dt) | |
9156 | { | |
9157 | case vect_uninitialized_def: | |
9158 | dump_printf (MSG_NOTE, "uninitialized\n"); | |
9159 | break; | |
9160 | case vect_constant_def: | |
9161 | dump_printf (MSG_NOTE, "constant\n"); | |
9162 | break; | |
9163 | case vect_external_def: | |
9164 | dump_printf (MSG_NOTE, "external\n"); | |
9165 | break; | |
9166 | case vect_internal_def: | |
9167 | dump_printf (MSG_NOTE, "internal\n"); | |
9168 | break; | |
9169 | case vect_induction_def: | |
9170 | dump_printf (MSG_NOTE, "induction\n"); | |
9171 | break; | |
9172 | case vect_reduction_def: | |
9173 | dump_printf (MSG_NOTE, "reduction\n"); | |
9174 | break; | |
9175 | case vect_double_reduction_def: | |
9176 | dump_printf (MSG_NOTE, "double reduction\n"); | |
9177 | break; | |
9178 | case vect_nested_cycle: | |
9179 | dump_printf (MSG_NOTE, "nested cycle\n"); | |
9180 | break; | |
9181 | case vect_unknown_def_type: | |
9182 | dump_printf (MSG_NOTE, "unknown\n"); | |
9183 | break; | |
9184 | } | |
9185 | } | |
9186 | ||
81c40241 | 9187 | if (*dt == vect_unknown_def_type) |
ebfd146a | 9188 | { |
73fbfcad | 9189 | if (dump_enabled_p ()) |
78c60e3d | 9190 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 9191 | "Unsupported pattern.\n"); |
ebfd146a IR |
9192 | return false; |
9193 | } | |
9194 | ||
ebfd146a IR |
9195 | switch (gimple_code (*def_stmt)) |
9196 | { | |
9197 | case GIMPLE_PHI: | |
ebfd146a | 9198 | case GIMPLE_ASSIGN: |
ebfd146a | 9199 | case GIMPLE_CALL: |
81c40241 | 9200 | break; |
ebfd146a | 9201 | default: |
73fbfcad | 9202 | if (dump_enabled_p ()) |
78c60e3d | 9203 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 9204 | "unsupported defining stmt:\n"); |
ebfd146a IR |
9205 | return false; |
9206 | } | |
9207 | ||
9208 | return true; | |
9209 | } | |
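/* Editor's sketch of a typical call, mirroring the uses earlier in this
   file (the operand name is a placeholder):

     gimple *def_stmt;
     enum vect_def_type dt;
     if (!vect_is_simple_use (rhs1, stmt_info->vinfo, &def_stmt, &dt))
       return false;   // operand cannot be handled, give up on the stmt

   on success DT tells the caller whether the operand is a constant, an
   external/invariant value, or defined inside the vectorized region.  */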
9210 | ||
81c40241 | 9211 | /* Function vect_is_simple_use. |
b690cc0f | 9212 | |
81c40241 | 9213 | Same as vect_is_simple_use but also determines the vector operand |
b690cc0f RG |
9214 | type of OPERAND and stores it to *VECTYPE. If the definition of |
9215 | OPERAND is vect_uninitialized_def, vect_constant_def or | |
9216 | vect_external_def, *VECTYPE will be set to NULL_TREE and the caller |
9217 | is responsible for computing the best suited vector type for the |
9218 | scalar operand. */ | |
9219 | ||
9220 | bool | |
81c40241 RB |
9221 | vect_is_simple_use (tree operand, vec_info *vinfo, |
9222 | gimple **def_stmt, enum vect_def_type *dt, tree *vectype) | |
b690cc0f | 9223 | { |
81c40241 | 9224 | if (!vect_is_simple_use (operand, vinfo, def_stmt, dt)) |
b690cc0f RG |
9225 | return false; |
9226 | ||
9227 | /* Now get a vector type if the def is internal, otherwise supply | |
9228 | NULL_TREE and leave it up to the caller to figure out a proper | |
9229 | type for the use stmt. */ | |
9230 | if (*dt == vect_internal_def | |
9231 | || *dt == vect_induction_def | |
9232 | || *dt == vect_reduction_def | |
9233 | || *dt == vect_double_reduction_def | |
9234 | || *dt == vect_nested_cycle) | |
9235 | { | |
9236 | stmt_vec_info stmt_info = vinfo_for_stmt (*def_stmt); | |
83197f37 IR |
9237 | |
9238 | if (STMT_VINFO_IN_PATTERN_P (stmt_info) | |
9239 | && !STMT_VINFO_RELEVANT (stmt_info) | |
9240 | && !STMT_VINFO_LIVE_P (stmt_info)) | |
b690cc0f | 9241 | stmt_info = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info)); |
83197f37 | 9242 | |
b690cc0f RG |
9243 | *vectype = STMT_VINFO_VECTYPE (stmt_info); |
9244 | gcc_assert (*vectype != NULL_TREE); | |
9245 | } | |
9246 | else if (*dt == vect_uninitialized_def | |
9247 | || *dt == vect_constant_def | |
9248 | || *dt == vect_external_def) | |
9249 | *vectype = NULL_TREE; | |
9250 | else | |
9251 | gcc_unreachable (); | |
9252 | ||
9253 | return true; | |
9254 | } | |
9255 | ||
ebfd146a IR |
9256 | |
9257 | /* Function supportable_widening_operation | |
9258 | ||
b8698a0f L |
9259 | Check whether an operation represented by the code CODE is a |
9260 | widening operation that is supported by the target platform in | |
b690cc0f RG |
9261 | vector form (i.e., when operating on arguments of type VECTYPE_IN |
9262 | producing a result of type VECTYPE_OUT). | |
b8698a0f | 9263 | |
ebfd146a IR |
9264 | Widening operations we currently support are NOP (CONVERT), FLOAT, |
9265 | WIDEN_MULT, WIDEN_LSHIFT, DOT_PROD and SAD. This function checks if |
9266 | these operations are supported by the target platform either directly |
9267 | (via vector tree-codes), or via target builtins. |
9268 | ||
9269 | Output: | |
b8698a0f L |
9270 | - CODE1 and CODE2 are codes of vector operations to be used when |
9271 | vectorizing the operation, if available. | |
ebfd146a IR |
9272 | - MULTI_STEP_CVT determines the number of required intermediate steps in |
9273 | case of multi-step conversion (like char->short->int - in that case | |
9274 | MULTI_STEP_CVT will be 1). | |
b8698a0f L |
9275 | - INTERM_TYPES contains the intermediate type required to perform the |
9276 | widening operation (short in the above example). */ | |
ebfd146a IR |
9277 | |
9278 | bool | |
355fe088 | 9279 | supportable_widening_operation (enum tree_code code, gimple *stmt, |
b690cc0f | 9280 | tree vectype_out, tree vectype_in, |
ebfd146a IR |
9281 | enum tree_code *code1, enum tree_code *code2, |
9282 | int *multi_step_cvt, | |
9771b263 | 9283 | vec<tree> *interm_types) |
ebfd146a IR |
9284 | { |
9285 | stmt_vec_info stmt_info = vinfo_for_stmt (stmt); | |
9286 | loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info); | |
4ef69dfc | 9287 | struct loop *vect_loop = NULL; |
ef4bddc2 | 9288 | machine_mode vec_mode; |
81f40b79 | 9289 | enum insn_code icode1, icode2; |
ebfd146a | 9290 | optab optab1, optab2; |
b690cc0f RG |
9291 | tree vectype = vectype_in; |
9292 | tree wide_vectype = vectype_out; | |
ebfd146a | 9293 | enum tree_code c1, c2; |
4a00c761 JJ |
9294 | int i; |
9295 | tree prev_type, intermediate_type; | |
ef4bddc2 | 9296 | machine_mode intermediate_mode, prev_mode; |
4a00c761 | 9297 | optab optab3, optab4; |
ebfd146a | 9298 | |
4a00c761 | 9299 | *multi_step_cvt = 0; |
4ef69dfc IR |
9300 | if (loop_info) |
9301 | vect_loop = LOOP_VINFO_LOOP (loop_info); | |
9302 | ||
ebfd146a IR |
9303 | switch (code) |
9304 | { | |
9305 | case WIDEN_MULT_EXPR: | |
6ae6116f RH |
9306 | /* The result of a vectorized widening operation usually requires |
9307 | two vectors (because the widened results do not fit into one vector). | |
9308 | The generated vector results would normally be expected to be | |
9309 | generated in the same order as in the original scalar computation, | |
9310 | i.e. if 8 results are generated in each vector iteration, they are | |
9311 | to be organized as follows: | |
9312 | vect1: [res1,res2,res3,res4], | |
9313 | vect2: [res5,res6,res7,res8]. | |
9314 | ||
9315 | However, in the special case that the result of the widening | |
9316 | operation is used in a reduction computation only, the order doesn't | |
9317 | matter (because when vectorizing a reduction we change the order of | |
9318 | the computation). Some targets can take advantage of this and | |
9319 | generate more efficient code. For example, targets like Altivec, | |
9320 | that support widen_mult using a sequence of {mult_even,mult_odd} | |
9321 | generate the following vectors: | |
9322 | vect1: [res1,res3,res5,res7], | |
9323 | vect2: [res2,res4,res6,res8]. | |
9324 | ||
9325 | When vectorizing outer-loops, we execute the inner-loop sequentially | |
9326 | (each vectorized inner-loop iteration contributes to VF outer-loop | |
9327 | iterations in parallel). We therefore don't allow changing the |
9328 | order of the computation in the inner-loop during outer-loop | |
9329 | vectorization. */ | |
9330 | /* TODO: Another case in which order doesn't *really* matter is when we | |
9331 | widen and then contract again, e.g. (short)((int)x * y >> 8). | |
9332 | Normally, pack_trunc performs an even/odd permute, whereas the | |
9333 | repack from an even/odd expansion would be an interleave, which | |
9334 | would be significantly simpler for e.g. AVX2. */ | |
9335 | /* In any case, in order to avoid duplicating the code below, recurse | |
9336 | on VEC_WIDEN_MULT_EVEN_EXPR. If it succeeds, all the return values | |
9337 | are properly set up for the caller. If we fail, we'll continue with | |
9338 | a VEC_WIDEN_MULT_LO/HI_EXPR check. */ | |
9339 | if (vect_loop | |
9340 | && STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction | |
9341 | && !nested_in_vect_loop_p (vect_loop, stmt) | |
9342 | && supportable_widening_operation (VEC_WIDEN_MULT_EVEN_EXPR, | |
9343 | stmt, vectype_out, vectype_in, | |
a86ec597 RH |
9344 | code1, code2, multi_step_cvt, |
9345 | interm_types)) | |
ebc047a2 CH |
9346 | { |
9347 | /* Elements in a vector with vect_used_by_reduction property cannot | |
9348 | be reordered if the use chain with this property does not have the | |
9349 | same operation. One such example is s += a * b, where elements |
9350 | in a and b cannot be reordered. Here we check if the vector defined | |
9351 | by STMT is only directly used in the reduction statement. */ | |
9352 | tree lhs = gimple_assign_lhs (stmt); | |
9353 | use_operand_p dummy; | |
355fe088 | 9354 | gimple *use_stmt; |
ebc047a2 CH |
9355 | stmt_vec_info use_stmt_info = NULL; |
9356 | if (single_imm_use (lhs, &dummy, &use_stmt) | |
9357 | && (use_stmt_info = vinfo_for_stmt (use_stmt)) | |
9358 | && STMT_VINFO_DEF_TYPE (use_stmt_info) == vect_reduction_def) | |
9359 | return true; | |
9360 | } | |
4a00c761 JJ |
9361 | c1 = VEC_WIDEN_MULT_LO_EXPR; |
9362 | c2 = VEC_WIDEN_MULT_HI_EXPR; | |
ebfd146a IR |
9363 | break; |
9364 | ||
81c40241 RB |
9365 | case DOT_PROD_EXPR: |
9366 | c1 = DOT_PROD_EXPR; | |
9367 | c2 = DOT_PROD_EXPR; | |
9368 | break; | |
9369 | ||
9370 | case SAD_EXPR: | |
9371 | c1 = SAD_EXPR; | |
9372 | c2 = SAD_EXPR; | |
9373 | break; | |
9374 | ||
6ae6116f RH |
9375 | case VEC_WIDEN_MULT_EVEN_EXPR: |
9376 | /* Support the recursion induced just above. */ | |
9377 | c1 = VEC_WIDEN_MULT_EVEN_EXPR; | |
9378 | c2 = VEC_WIDEN_MULT_ODD_EXPR; | |
9379 | break; | |
9380 | ||
36ba4aae | 9381 | case WIDEN_LSHIFT_EXPR: |
4a00c761 JJ |
9382 | c1 = VEC_WIDEN_LSHIFT_LO_EXPR; |
9383 | c2 = VEC_WIDEN_LSHIFT_HI_EXPR; | |
36ba4aae IR |
9384 | break; |
9385 | ||
ebfd146a | 9386 | CASE_CONVERT: |
4a00c761 JJ |
9387 | c1 = VEC_UNPACK_LO_EXPR; |
9388 | c2 = VEC_UNPACK_HI_EXPR; | |
ebfd146a IR |
9389 | break; |
9390 | ||
9391 | case FLOAT_EXPR: | |
4a00c761 JJ |
9392 | c1 = VEC_UNPACK_FLOAT_LO_EXPR; |
9393 | c2 = VEC_UNPACK_FLOAT_HI_EXPR; | |
ebfd146a IR |
9394 | break; |
9395 | ||
9396 | case FIX_TRUNC_EXPR: | |
9397 | /* ??? Not yet implemented due to missing VEC_UNPACK_FIX_TRUNC_HI_EXPR/ | |
9398 | VEC_UNPACK_FIX_TRUNC_LO_EXPR tree codes and optabs used for | |
9399 | computing the operation. */ | |
9400 | return false; | |
9401 | ||
9402 | default: | |
9403 | gcc_unreachable (); | |
9404 | } | |
9405 | ||
6ae6116f | 9406 | if (BYTES_BIG_ENDIAN && c1 != VEC_WIDEN_MULT_EVEN_EXPR) |
6b4db501 | 9407 | std::swap (c1, c2); |
4a00c761 | 9408 | |
ebfd146a IR |
9409 | if (code == FIX_TRUNC_EXPR) |
9410 | { | |
9411 | /* The signedness is determined from output operand. */ | |
b690cc0f RG |
9412 | optab1 = optab_for_tree_code (c1, vectype_out, optab_default); |
9413 | optab2 = optab_for_tree_code (c2, vectype_out, optab_default); | |
ebfd146a IR |
9414 | } |
9415 | else | |
9416 | { | |
9417 | optab1 = optab_for_tree_code (c1, vectype, optab_default); | |
9418 | optab2 = optab_for_tree_code (c2, vectype, optab_default); | |
9419 | } | |
9420 | ||
9421 | if (!optab1 || !optab2) | |
9422 | return false; | |
9423 | ||
9424 | vec_mode = TYPE_MODE (vectype); | |
947131ba RS |
9425 | if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing |
9426 | || (icode2 = optab_handler (optab2, vec_mode)) == CODE_FOR_nothing) | |
ebfd146a IR |
9427 | return false; |
9428 | ||
4a00c761 JJ |
9429 | *code1 = c1; |
9430 | *code2 = c2; | |
9431 | ||
9432 | if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype) | |
9433 | && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype)) | |
5e8d6dff IE |
9434 | /* For scalar masks we may have different boolean |
9435 | vector types having the same QImode. Thus we | |
9436 | add an additional check on the number of elements. */ |
9437 | return (!VECTOR_BOOLEAN_TYPE_P (vectype) | |
9438 | || (TYPE_VECTOR_SUBPARTS (vectype) / 2 | |
9439 | == TYPE_VECTOR_SUBPARTS (wide_vectype))); | |
4a00c761 | 9440 | |
b8698a0f | 9441 | /* Check if it's a multi-step conversion that can be done using intermediate |
ebfd146a | 9442 | types. */ |
ebfd146a | 9443 | |
4a00c761 JJ |
9444 | prev_type = vectype; |
9445 | prev_mode = vec_mode; | |
b8698a0f | 9446 | |
4a00c761 JJ |
9447 | if (!CONVERT_EXPR_CODE_P (code)) |
9448 | return false; | |
b8698a0f | 9449 | |
4a00c761 JJ |
9450 | /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS |
9451 | intermediate steps in promotion sequence. We try | |
9452 | MAX_INTERM_CVT_STEPS to get to NARROW_VECTYPE, and fail if we do | |
9453 | not. */ | |
9771b263 | 9454 | interm_types->create (MAX_INTERM_CVT_STEPS); |
4a00c761 JJ |
9455 | for (i = 0; i < MAX_INTERM_CVT_STEPS; i++) |
9456 | { | |
9457 | intermediate_mode = insn_data[icode1].operand[0].mode; | |
3ae0661a IE |
9458 | if (VECTOR_BOOLEAN_TYPE_P (prev_type)) |
9459 | { | |
9460 | intermediate_type | |
9461 | = build_truth_vector_type (TYPE_VECTOR_SUBPARTS (prev_type) / 2, | |
9462 | current_vector_size); | |
9463 | if (intermediate_mode != TYPE_MODE (intermediate_type)) | |
9464 | return false; | |
9465 | } | |
9466 | else | |
9467 | intermediate_type | |
9468 | = lang_hooks.types.type_for_mode (intermediate_mode, | |
9469 | TYPE_UNSIGNED (prev_type)); | |
9470 | ||
4a00c761 JJ |
9471 | optab3 = optab_for_tree_code (c1, intermediate_type, optab_default); |
9472 | optab4 = optab_for_tree_code (c2, intermediate_type, optab_default); | |
9473 | ||
9474 | if (!optab3 || !optab4 | |
9475 | || (icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing | |
9476 | || insn_data[icode1].operand[0].mode != intermediate_mode | |
9477 | || (icode2 = optab_handler (optab2, prev_mode)) == CODE_FOR_nothing | |
9478 | || insn_data[icode2].operand[0].mode != intermediate_mode | |
9479 | || ((icode1 = optab_handler (optab3, intermediate_mode)) | |
9480 | == CODE_FOR_nothing) | |
9481 | || ((icode2 = optab_handler (optab4, intermediate_mode)) | |
9482 | == CODE_FOR_nothing)) | |
9483 | break; | |
ebfd146a | 9484 | |
9771b263 | 9485 | interm_types->quick_push (intermediate_type); |
4a00c761 JJ |
9486 | (*multi_step_cvt)++; |
9487 | ||
9488 | if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype) | |
9489 | && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype)) | |
5e8d6dff IE |
9490 | return (!VECTOR_BOOLEAN_TYPE_P (vectype) |
9491 | || (TYPE_VECTOR_SUBPARTS (intermediate_type) / 2 | |
9492 | == TYPE_VECTOR_SUBPARTS (wide_vectype))); | |
4a00c761 JJ |
9493 | |
9494 | prev_type = intermediate_type; | |
9495 | prev_mode = intermediate_mode; | |
ebfd146a IR |
9496 | } |
9497 | ||
9771b263 | 9498 | interm_types->release (); |
4a00c761 | 9499 | return false; |
ebfd146a IR |
9500 | } |
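/* Editor's sketch (hypothetical vector types, little-endian target, not
   from the original source): a char -> int conversion on a machine that
   only unpacks to the adjacent wider mode succeeds via one intermediate
   step:

     enum tree_code code1, code2;
     int steps;
     vec<tree> interm_types = vNULL;
     if (supportable_widening_operation (NOP_EXPR, stmt, int_vectype,
					 char_vectype, &code1, &code2,
					 &steps, &interm_types))
       // code1 == VEC_UNPACK_LO_EXPR, code2 == VEC_UNPACK_HI_EXPR,
       // steps == 1 and interm_types holds the intermediate short vector
       // type, i.e. the char -> short -> int chain described above.

   BYTES_BIG_ENDIAN targets get the LO/HI pair swapped, as done above.  */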
9501 | ||
9502 | ||
9503 | /* Function supportable_narrowing_operation | |
9504 | ||
b8698a0f L |
9505 | Check whether an operation represented by the code CODE is a |
9506 | narrowing operation that is supported by the target platform in | |
b690cc0f RG |
9507 | vector form (i.e., when operating on arguments of type VECTYPE_IN |
9508 | and producing a result of type VECTYPE_OUT). | |
b8698a0f | 9509 | |
ebfd146a | 9510 | Narrowing operations we currently support are NOP (CONVERT) and |
ff802fa1 | 9511 | FIX_TRUNC. This function checks if these operations are supported by |
ebfd146a IR |
9512 | the target platform directly via vector tree-codes. |
9513 | ||
9514 | Output: | |
b8698a0f L |
9515 | - CODE1 is the code of a vector operation to be used when |
9516 | vectorizing the operation, if available. | |
ebfd146a IR |
9517 | - MULTI_STEP_CVT determines the number of required intermediate steps in |
9518 | case of multi-step conversion (like int->short->char - in that case | |
9519 | MULTI_STEP_CVT will be 1). | |
9520 | - INTERM_TYPES contains the intermediate type required to perform the | |
b8698a0f | 9521 | narrowing operation (short in the above example). */ |
ebfd146a IR |
9522 | |
9523 | bool | |
9524 | supportable_narrowing_operation (enum tree_code code, | |
b690cc0f | 9525 | tree vectype_out, tree vectype_in, |
ebfd146a | 9526 | enum tree_code *code1, int *multi_step_cvt, |
9771b263 | 9527 | vec<tree> *interm_types) |
ebfd146a | 9528 | { |
ef4bddc2 | 9529 | machine_mode vec_mode; |
ebfd146a IR |
9530 | enum insn_code icode1; |
9531 | optab optab1, interm_optab; | |
b690cc0f RG |
9532 | tree vectype = vectype_in; |
9533 | tree narrow_vectype = vectype_out; | |
ebfd146a | 9534 | enum tree_code c1; |
3ae0661a | 9535 | tree intermediate_type, prev_type; |
ef4bddc2 | 9536 | machine_mode intermediate_mode, prev_mode; |
ebfd146a | 9537 | int i; |
4a00c761 | 9538 | bool uns; |
ebfd146a | 9539 | |
4a00c761 | 9540 | *multi_step_cvt = 0; |
ebfd146a IR |
9541 | switch (code) |
9542 | { | |
9543 | CASE_CONVERT: | |
9544 | c1 = VEC_PACK_TRUNC_EXPR; | |
9545 | break; | |
9546 | ||
9547 | case FIX_TRUNC_EXPR: | |
9548 | c1 = VEC_PACK_FIX_TRUNC_EXPR; | |
9549 | break; | |
9550 | ||
9551 | case FLOAT_EXPR: | |
9552 | /* ??? Not yet implemented due to missing VEC_PACK_FLOAT_EXPR | |
9553 | tree code and optabs used for computing the operation. */ | |
9554 | return false; | |
9555 | ||
9556 | default: | |
9557 | gcc_unreachable (); | |
9558 | } | |
9559 | ||
9560 | if (code == FIX_TRUNC_EXPR) | |
9561 | /* The signedness is determined from output operand. */ | |
b690cc0f | 9562 | optab1 = optab_for_tree_code (c1, vectype_out, optab_default); |
ebfd146a IR |
9563 | else |
9564 | optab1 = optab_for_tree_code (c1, vectype, optab_default); | |
9565 | ||
9566 | if (!optab1) | |
9567 | return false; | |
9568 | ||
9569 | vec_mode = TYPE_MODE (vectype); | |
947131ba | 9570 | if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing) |
ebfd146a IR |
9571 | return false; |
9572 | ||
4a00c761 JJ |
9573 | *code1 = c1; |
9574 | ||
9575 | if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype)) | |
5e8d6dff IE |
9576 | /* For scalar masks we may have different boolean |
9577 | vector types having the same QImode. Thus we | |
9578 | add an additional check on the element count. */ |
9579 | return (!VECTOR_BOOLEAN_TYPE_P (vectype) | |
9580 | || (TYPE_VECTOR_SUBPARTS (vectype) * 2 | |
9581 | == TYPE_VECTOR_SUBPARTS (narrow_vectype))); | |
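/* A concrete case for the element-count check above: with AVX-512-style
   mask registers both a 4-element and an 8-element boolean vector can
   have QImode, so comparing modes alone would conflate them.  */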
4a00c761 | 9582 | |
ebfd146a IR |
9583 | /* Check if it's a multi-step conversion that can be done using intermediate |
9584 | types. */ | |
4a00c761 | 9585 | prev_mode = vec_mode; |
3ae0661a | 9586 | prev_type = vectype; |
4a00c761 JJ |
9587 | if (code == FIX_TRUNC_EXPR) |
9588 | uns = TYPE_UNSIGNED (vectype_out); | |
9589 | else | |
9590 | uns = TYPE_UNSIGNED (vectype); | |
9591 | ||
9592 | /* For multi-step FIX_TRUNC_EXPR prefer signed floating to integer | |
9593 | conversion over unsigned, as unsigned FIX_TRUNC_EXPR is often more | |
9594 | costly than signed. */ | |
9595 | if (code == FIX_TRUNC_EXPR && uns) | |
9596 | { | |
9597 | enum insn_code icode2; | |
9598 | ||
9599 | intermediate_type | |
9600 | = lang_hooks.types.type_for_mode (TYPE_MODE (vectype_out), 0); | |
9601 | interm_optab | |
9602 | = optab_for_tree_code (c1, intermediate_type, optab_default); | |
2225b9f2 | 9603 | if (interm_optab != unknown_optab |
4a00c761 JJ |
9604 | && (icode2 = optab_handler (interm_optab, vec_mode)) != CODE_FOR_nothing | |
9605 | && insn_data[icode1].operand[0].mode | |
9606 | == insn_data[icode2].operand[0].mode) | |
9607 | { | |
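/* The signed variant handles the same source mode and produces its
   result in the same mode as the unsigned one, so do the last step
   signed and build the remaining chain with signed intermediates.  */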
9608 | uns = false; | |
9609 | optab1 = interm_optab; | |
9610 | icode1 = icode2; | |
9611 | } | |
9612 | } | |
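/* E.g. (a sketch): for float -> unsigned short the intermediate type is
   the signed type with VECTYPE_OUT's mode; when the signed
   VEC_PACK_FIX_TRUNC_EXPR handler exists and agrees on the result mode,
   the last step uses the usually cheaper signed instruction and the
   result is simply viewed as unsigned.  */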
ebfd146a | 9613 | |
4a00c761 JJ |
9614 | /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS |
9615 | intermediate steps in the narrowing sequence. We try |
9616 | MAX_INTERM_CVT_STEPS to get to NARROW_VECTYPE, and fail if we do not. */ | |
9771b263 | 9617 | interm_types->create (MAX_INTERM_CVT_STEPS); |
4a00c761 JJ |
9618 | for (i = 0; i < MAX_INTERM_CVT_STEPS; i++) |
9619 | { | |
9620 | intermediate_mode = insn_data[icode1].operand[0].mode; | |
3ae0661a IE |
9621 | if (VECTOR_BOOLEAN_TYPE_P (prev_type)) |
9622 | { | |
9623 | intermediate_type | |
9624 | = build_truth_vector_type (TYPE_VECTOR_SUBPARTS (prev_type) * 2, | |
9625 | current_vector_size); | |
9626 | if (intermediate_mode != TYPE_MODE (intermediate_type)) | |
9627 | return false; | |
9628 | } | |
9629 | else | |
9630 | intermediate_type | |
9631 | = lang_hooks.types.type_for_mode (intermediate_mode, uns); | |
4a00c761 JJ |
9632 | interm_optab |
9633 | = optab_for_tree_code (VEC_PACK_TRUNC_EXPR, intermediate_type, | |
9634 | optab_default); | |
9635 | if (!interm_optab | |
9636 | || ((icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing) | |
9637 | || insn_data[icode1].operand[0].mode != intermediate_mode | |
9638 | || ((icode1 = optab_handler (interm_optab, intermediate_mode)) | |
9639 | == CODE_FOR_nothing)) | |
9640 | break; | |
9641 | ||
9771b263 | 9642 | interm_types->quick_push (intermediate_type); |
4a00c761 JJ |
9643 | (*multi_step_cvt)++; |
9644 | ||
9645 | if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype)) | |
5e8d6dff IE |
9646 | return (!VECTOR_BOOLEAN_TYPE_P (vectype) |
9647 | || (TYPE_VECTOR_SUBPARTS (intermediate_type) * 2 | |
9648 | == TYPE_VECTOR_SUBPARTS (narrow_vectype))); | |
4a00c761 JJ |
9649 | |
9650 | prev_mode = intermediate_mode; | |
3ae0661a | 9651 | prev_type = intermediate_type; |
4a00c761 | 9652 | optab1 = interm_optab; |
ebfd146a IR |
9653 | } |
9654 | ||
9771b263 | 9655 | interm_types->release (); |
4a00c761 | 9656 | return false; |
ebfd146a | 9657 | } |
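/* For illustration: the elementwise effect of the multi-step narrowing
   checked above for int -> char (MULTI_STEP_CVT == 1, INTERM_TYPES
   == { short }).  A minimal sketch; the helper name is ours, and the
   vector form additionally packs two source vectors per step.  */

static inline signed char
narrow_int_to_char (int x)
{
  short mid = (short) x;	   /* first VEC_PACK_TRUNC_EXPR step  */
  return (signed char) mid;	   /* second step gives the narrow value  */
}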