]>
Commit | Line | Data |
---|---|---|
ebfd146a | 1 | /* Statement Analysis and Transformation for Vectorization |
cbe34bb5 | 2 | Copyright (C) 2003-2017 Free Software Foundation, Inc. |
b8698a0f | 3 | Contributed by Dorit Naishlos <dorit@il.ibm.com> |
ebfd146a IR |
4 | and Ira Rosen <irar@il.ibm.com> |
5 | ||
6 | This file is part of GCC. | |
7 | ||
8 | GCC is free software; you can redistribute it and/or modify it under | |
9 | the terms of the GNU General Public License as published by the Free | |
10 | Software Foundation; either version 3, or (at your option) any later | |
11 | version. | |
12 | ||
13 | GCC is distributed in the hope that it will be useful, but WITHOUT ANY | |
14 | WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
15 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
16 | for more details. | |
17 | ||
18 | You should have received a copy of the GNU General Public License | |
19 | along with GCC; see the file COPYING3. If not see | |
20 | <http://www.gnu.org/licenses/>. */ | |
21 | ||
22 | #include "config.h" | |
23 | #include "system.h" | |
24 | #include "coretypes.h" | |
c7131fb2 | 25 | #include "backend.h" |
957060b5 AM |
26 | #include "target.h" |
27 | #include "rtl.h" | |
ebfd146a | 28 | #include "tree.h" |
c7131fb2 | 29 | #include "gimple.h" |
c7131fb2 | 30 | #include "ssa.h" |
957060b5 AM |
31 | #include "optabs-tree.h" |
32 | #include "insn-config.h" | |
33 | #include "recog.h" /* FIXME: for insn_data */ | |
34 | #include "cgraph.h" | |
957060b5 | 35 | #include "dumpfile.h" |
c7131fb2 | 36 | #include "alias.h" |
40e23961 | 37 | #include "fold-const.h" |
d8a2d370 | 38 | #include "stor-layout.h" |
2fb9a547 | 39 | #include "tree-eh.h" |
45b0be94 | 40 | #include "gimplify.h" |
5be5c238 | 41 | #include "gimple-iterator.h" |
18f429e2 | 42 | #include "gimplify-me.h" |
442b4905 | 43 | #include "tree-cfg.h" |
e28030cf | 44 | #include "tree-ssa-loop-manip.h" |
ebfd146a | 45 | #include "cfgloop.h" |
0136f8f0 AH |
46 | #include "tree-ssa-loop.h" |
47 | #include "tree-scalar-evolution.h" | |
ebfd146a | 48 | #include "tree-vectorizer.h" |
9b2b7279 | 49 | #include "builtins.h" |
70439f0d | 50 | #include "internal-fn.h" |
ebfd146a | 51 | |
7ee2468b SB |
52 | /* For lang_hooks.types.type_for_mode. */ |
53 | #include "langhooks.h" | |
ebfd146a | 54 | |
2de001ee RS |
55 | /* Says whether a statement is a load, a store of a vectorized statement |
56 | result, or a store of an invariant value. */ | |
57 | enum vec_load_store_type { | |
58 | VLS_LOAD, | |
59 | VLS_STORE, | |
60 | VLS_STORE_INVARIANT | |
61 | }; | |
62 | ||
c3e7ee41 BS |
63 | /* Return the vectorized type for the given statement. */ |
64 | ||
65 | tree | |
66 | stmt_vectype (struct _stmt_vec_info *stmt_info) | |
67 | { | |
68 | return STMT_VINFO_VECTYPE (stmt_info); | |
69 | } | |
70 | ||
71 | /* Return TRUE iff the given statement is in an inner loop relative to | |
72 | the loop being vectorized. */ | |
73 | bool | |
74 | stmt_in_inner_loop_p (struct _stmt_vec_info *stmt_info) | |
75 | { | |
355fe088 | 76 | gimple *stmt = STMT_VINFO_STMT (stmt_info); |
c3e7ee41 BS |
77 | basic_block bb = gimple_bb (stmt); |
78 | loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); | |
79 | struct loop* loop; | |
80 | ||
81 | if (!loop_vinfo) | |
82 | return false; | |
83 | ||
84 | loop = LOOP_VINFO_LOOP (loop_vinfo); | |
85 | ||
86 | return (bb->loop_father == loop->inner); | |
87 | } | |
88 | ||
89 | /* Record the cost of a statement, either by directly informing the | |
90 | target model or by saving it in a vector for later processing. | |
91 | Return a preliminary estimate of the statement's cost. */ | |
92 | ||
93 | unsigned | |
92345349 | 94 | record_stmt_cost (stmt_vector_for_cost *body_cost_vec, int count, |
c3e7ee41 | 95 | enum vect_cost_for_stmt kind, stmt_vec_info stmt_info, |
92345349 | 96 | int misalign, enum vect_cost_model_location where) |
c3e7ee41 | 97 | { |
92345349 | 98 | if (body_cost_vec) |
c3e7ee41 | 99 | { |
92345349 | 100 | tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE; |
ddf56386 RB |
101 | stmt_info_for_cost si = { count, kind, |
102 | stmt_info ? STMT_VINFO_STMT (stmt_info) : NULL, | |
103 | misalign }; | |
104 | body_cost_vec->safe_push (si); | |
c3e7ee41 | 105 | return (unsigned) |
92345349 | 106 | (builtin_vectorization_cost (kind, vectype, misalign) * count); |
c3e7ee41 BS |
107 | } |
108 | else | |
310213d4 RB |
109 | return add_stmt_cost (stmt_info->vinfo->target_cost_data, |
110 | count, kind, stmt_info, misalign, where); | |
c3e7ee41 BS |
111 | } |
112 | ||
272c6793 RS |
113 | /* Return a variable of type ELEM_TYPE[NELEMS]. */ |
114 | ||
115 | static tree | |
116 | create_vector_array (tree elem_type, unsigned HOST_WIDE_INT nelems) | |
117 | { | |
118 | return create_tmp_var (build_array_type_nelts (elem_type, nelems), | |
119 | "vect_array"); | |
120 | } | |
121 | ||
122 | /* ARRAY is an array of vectors created by create_vector_array. | |
123 | Return an SSA_NAME for the vector in index N. The reference | |
124 | is part of the vectorization of STMT and the vector is associated | |
125 | with scalar destination SCALAR_DEST. */ | |
126 | ||
127 | static tree | |
355fe088 | 128 | read_vector_array (gimple *stmt, gimple_stmt_iterator *gsi, tree scalar_dest, |
272c6793 RS |
129 | tree array, unsigned HOST_WIDE_INT n) |
130 | { | |
131 | tree vect_type, vect, vect_name, array_ref; | |
355fe088 | 132 | gimple *new_stmt; |
272c6793 RS |
133 | |
134 | gcc_assert (TREE_CODE (TREE_TYPE (array)) == ARRAY_TYPE); | |
135 | vect_type = TREE_TYPE (TREE_TYPE (array)); | |
136 | vect = vect_create_destination_var (scalar_dest, vect_type); | |
137 | array_ref = build4 (ARRAY_REF, vect_type, array, | |
138 | build_int_cst (size_type_node, n), | |
139 | NULL_TREE, NULL_TREE); | |
140 | ||
141 | new_stmt = gimple_build_assign (vect, array_ref); | |
142 | vect_name = make_ssa_name (vect, new_stmt); | |
143 | gimple_assign_set_lhs (new_stmt, vect_name); | |
144 | vect_finish_stmt_generation (stmt, new_stmt, gsi); | |
272c6793 RS |
145 | |
146 | return vect_name; | |
147 | } | |
148 | ||
149 | /* ARRAY is an array of vectors created by create_vector_array. | |
150 | Emit code to store SSA_NAME VECT in index N of the array. | |
151 | The store is part of the vectorization of STMT. */ | |
152 | ||
153 | static void | |
355fe088 | 154 | write_vector_array (gimple *stmt, gimple_stmt_iterator *gsi, tree vect, |
272c6793 RS |
155 | tree array, unsigned HOST_WIDE_INT n) |
156 | { | |
157 | tree array_ref; | |
355fe088 | 158 | gimple *new_stmt; |
272c6793 RS |
159 | |
160 | array_ref = build4 (ARRAY_REF, TREE_TYPE (vect), array, | |
161 | build_int_cst (size_type_node, n), | |
162 | NULL_TREE, NULL_TREE); | |
163 | ||
164 | new_stmt = gimple_build_assign (array_ref, vect); | |
165 | vect_finish_stmt_generation (stmt, new_stmt, gsi); | |
272c6793 RS |
166 | } |
167 | ||
168 | /* PTR is a pointer to an array of type TYPE. Return a representation | |
169 | of *PTR. The memory reference replaces those in FIRST_DR | |
170 | (and its group). */ | |
171 | ||
172 | static tree | |
44fc7854 | 173 | create_array_ref (tree type, tree ptr, tree alias_ptr_type) |
272c6793 | 174 | { |
44fc7854 | 175 | tree mem_ref; |
272c6793 | 176 | |
272c6793 RS |
177 | mem_ref = build2 (MEM_REF, type, ptr, build_int_cst (alias_ptr_type, 0)); |
178 | /* Arrays have the same alignment as their type. */ | |
644ffefd | 179 | set_ptr_info_alignment (get_ptr_info (ptr), TYPE_ALIGN_UNIT (type), 0); |
272c6793 RS |
180 | return mem_ref; |
181 | } | |
182 | ||
ebfd146a IR |
183 | /* Utility functions used by vect_mark_stmts_to_be_vectorized. */ |
184 | ||
185 | /* Function vect_mark_relevant. | |
186 | ||
187 | Mark STMT as "relevant for vectorization" and add it to WORKLIST. */ | |
188 | ||
189 | static void | |
355fe088 | 190 | vect_mark_relevant (vec<gimple *> *worklist, gimple *stmt, |
97ecdb46 | 191 | enum vect_relevant relevant, bool live_p) |
ebfd146a IR |
192 | { |
193 | stmt_vec_info stmt_info = vinfo_for_stmt (stmt); | |
194 | enum vect_relevant save_relevant = STMT_VINFO_RELEVANT (stmt_info); | |
195 | bool save_live_p = STMT_VINFO_LIVE_P (stmt_info); | |
355fe088 | 196 | gimple *pattern_stmt; |
ebfd146a | 197 | |
73fbfcad | 198 | if (dump_enabled_p ()) |
66c16fd9 RB |
199 | { |
200 | dump_printf_loc (MSG_NOTE, vect_location, | |
201 | "mark relevant %d, live %d: ", relevant, live_p); | |
202 | dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0); | |
203 | } | |
ebfd146a | 204 | |
83197f37 IR |
205 | /* If this stmt is an original stmt in a pattern, we might need to mark its |
206 | related pattern stmt instead of the original stmt. However, such stmts | |
207 | may have their own uses that are not in any pattern, in such cases the | |
208 | stmt itself should be marked. */ | |
ebfd146a IR |
209 | if (STMT_VINFO_IN_PATTERN_P (stmt_info)) |
210 | { | |
97ecdb46 JJ |
211 | /* This is the last stmt in a sequence that was detected as a |
212 | pattern that can potentially be vectorized. Don't mark the stmt | |
213 | as relevant/live because it's not going to be vectorized. | |
214 | Instead mark the pattern-stmt that replaces it. */ | |
83197f37 | 215 | |
97ecdb46 JJ |
216 | pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info); |
217 | ||
218 | if (dump_enabled_p ()) | |
219 | dump_printf_loc (MSG_NOTE, vect_location, | |
220 | "last stmt in pattern. don't mark" | |
221 | " relevant/live.\n"); | |
222 | stmt_info = vinfo_for_stmt (pattern_stmt); | |
223 | gcc_assert (STMT_VINFO_RELATED_STMT (stmt_info) == stmt); | |
224 | save_relevant = STMT_VINFO_RELEVANT (stmt_info); | |
225 | save_live_p = STMT_VINFO_LIVE_P (stmt_info); | |
226 | stmt = pattern_stmt; | |
ebfd146a IR |
227 | } |
228 | ||
229 | STMT_VINFO_LIVE_P (stmt_info) |= live_p; | |
230 | if (relevant > STMT_VINFO_RELEVANT (stmt_info)) | |
231 | STMT_VINFO_RELEVANT (stmt_info) = relevant; | |
232 | ||
233 | if (STMT_VINFO_RELEVANT (stmt_info) == save_relevant | |
234 | && STMT_VINFO_LIVE_P (stmt_info) == save_live_p) | |
235 | { | |
73fbfcad | 236 | if (dump_enabled_p ()) |
78c60e3d | 237 | dump_printf_loc (MSG_NOTE, vect_location, |
e645e942 | 238 | "already marked relevant/live.\n"); |
ebfd146a IR |
239 | return; |
240 | } | |
241 | ||
9771b263 | 242 | worklist->safe_push (stmt); |
ebfd146a IR |
243 | } |
244 | ||
245 | ||
b28ead45 AH |
246 | /* Function is_simple_and_all_uses_invariant |
247 | ||
248 | Return true if STMT is simple and all uses of it are invariant. */ | |
249 | ||
250 | bool | |
251 | is_simple_and_all_uses_invariant (gimple *stmt, loop_vec_info loop_vinfo) | |
252 | { | |
253 | tree op; | |
254 | gimple *def_stmt; | |
255 | ssa_op_iter iter; | |
256 | ||
257 | if (!is_gimple_assign (stmt)) | |
258 | return false; | |
259 | ||
260 | FOR_EACH_SSA_TREE_OPERAND (op, stmt, iter, SSA_OP_USE) | |
261 | { | |
262 | enum vect_def_type dt = vect_uninitialized_def; | |
263 | ||
264 | if (!vect_is_simple_use (op, loop_vinfo, &def_stmt, &dt)) | |
265 | { | |
266 | if (dump_enabled_p ()) | |
267 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
268 | "use not simple.\n"); | |
269 | return false; | |
270 | } | |
271 | ||
272 | if (dt != vect_external_def && dt != vect_constant_def) | |
273 | return false; | |
274 | } | |
275 | return true; | |
276 | } | |
277 | ||
ebfd146a IR |
278 | /* Function vect_stmt_relevant_p. |
279 | ||
280 | Return true if STMT in loop that is represented by LOOP_VINFO is | |
281 | "relevant for vectorization". | |
282 | ||
283 | A stmt is considered "relevant for vectorization" if: | |
284 | - it has uses outside the loop. | |
285 | - it has vdefs (it alters memory). | |
286 | - control stmts in the loop (except for the exit condition). | |
287 | ||
288 | CHECKME: what other side effects would the vectorizer allow? */ | |
289 | ||
290 | static bool | |
355fe088 | 291 | vect_stmt_relevant_p (gimple *stmt, loop_vec_info loop_vinfo, |
ebfd146a IR |
292 | enum vect_relevant *relevant, bool *live_p) |
293 | { | |
294 | struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); | |
295 | ssa_op_iter op_iter; | |
296 | imm_use_iterator imm_iter; | |
297 | use_operand_p use_p; | |
298 | def_operand_p def_p; | |
299 | ||
8644a673 | 300 | *relevant = vect_unused_in_scope; |
ebfd146a IR |
301 | *live_p = false; |
302 | ||
303 | /* cond stmt other than loop exit cond. */ | |
b8698a0f L |
304 | if (is_ctrl_stmt (stmt) |
305 | && STMT_VINFO_TYPE (vinfo_for_stmt (stmt)) | |
306 | != loop_exit_ctrl_vec_info_type) | |
8644a673 | 307 | *relevant = vect_used_in_scope; |
ebfd146a IR |
308 | |
309 | /* changing memory. */ | |
310 | if (gimple_code (stmt) != GIMPLE_PHI) | |
ac6aeab4 RB |
311 | if (gimple_vdef (stmt) |
312 | && !gimple_clobber_p (stmt)) | |
ebfd146a | 313 | { |
73fbfcad | 314 | if (dump_enabled_p ()) |
78c60e3d | 315 | dump_printf_loc (MSG_NOTE, vect_location, |
e645e942 | 316 | "vec_stmt_relevant_p: stmt has vdefs.\n"); |
8644a673 | 317 | *relevant = vect_used_in_scope; |
ebfd146a IR |
318 | } |
319 | ||
320 | /* uses outside the loop. */ | |
321 | FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF) | |
322 | { | |
323 | FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p)) | |
324 | { | |
325 | basic_block bb = gimple_bb (USE_STMT (use_p)); | |
326 | if (!flow_bb_inside_loop_p (loop, bb)) | |
327 | { | |
73fbfcad | 328 | if (dump_enabled_p ()) |
78c60e3d | 329 | dump_printf_loc (MSG_NOTE, vect_location, |
e645e942 | 330 | "vec_stmt_relevant_p: used out of loop.\n"); |
ebfd146a | 331 | |
3157b0c2 AO |
332 | if (is_gimple_debug (USE_STMT (use_p))) |
333 | continue; | |
334 | ||
ebfd146a IR |
335 | /* We expect all such uses to be in the loop exit phis |
336 | (because of loop closed form) */ | |
337 | gcc_assert (gimple_code (USE_STMT (use_p)) == GIMPLE_PHI); | |
338 | gcc_assert (bb == single_exit (loop)->dest); | |
339 | ||
340 | *live_p = true; | |
341 | } | |
342 | } | |
343 | } | |
344 | ||
3a2edf4c AH |
345 | if (*live_p && *relevant == vect_unused_in_scope |
346 | && !is_simple_and_all_uses_invariant (stmt, loop_vinfo)) | |
b28ead45 AH |
347 | { |
348 | if (dump_enabled_p ()) | |
349 | dump_printf_loc (MSG_NOTE, vect_location, | |
350 | "vec_stmt_relevant_p: stmt live but not relevant.\n"); | |
351 | *relevant = vect_used_only_live; | |
352 | } | |
353 | ||
ebfd146a IR |
354 | return (*live_p || *relevant); |
355 | } | |
356 | ||
357 | ||
b8698a0f | 358 | /* Function exist_non_indexing_operands_for_use_p |
ebfd146a | 359 | |
ff802fa1 | 360 | USE is one of the uses attached to STMT. Check if USE is |
ebfd146a IR |
361 | used in STMT for anything other than indexing an array. */ |
362 | ||
363 | static bool | |
355fe088 | 364 | exist_non_indexing_operands_for_use_p (tree use, gimple *stmt) |
ebfd146a IR |
365 | { |
366 | tree operand; | |
367 | stmt_vec_info stmt_info = vinfo_for_stmt (stmt); | |
59a05b0c | 368 | |
ff802fa1 | 369 | /* USE corresponds to some operand in STMT. If there is no data |
ebfd146a IR |
370 | reference in STMT, then any operand that corresponds to USE |
371 | is not indexing an array. */ | |
372 | if (!STMT_VINFO_DATA_REF (stmt_info)) | |
373 | return true; | |
59a05b0c | 374 | |
ebfd146a IR |
375 | /* STMT has a data_ref. FORNOW this means that its of one of |
376 | the following forms: | |
377 | -1- ARRAY_REF = var | |
378 | -2- var = ARRAY_REF | |
379 | (This should have been verified in analyze_data_refs). | |
380 | ||
381 | 'var' in the second case corresponds to a def, not a use, | |
b8698a0f | 382 | so USE cannot correspond to any operands that are not used |
ebfd146a IR |
383 | for array indexing. |
384 | ||
385 | Therefore, all we need to check is if STMT falls into the | |
386 | first case, and whether var corresponds to USE. */ | |
ebfd146a IR |
387 | |
388 | if (!gimple_assign_copy_p (stmt)) | |
5ce9450f JJ |
389 | { |
390 | if (is_gimple_call (stmt) | |
391 | && gimple_call_internal_p (stmt)) | |
392 | switch (gimple_call_internal_fn (stmt)) | |
393 | { | |
394 | case IFN_MASK_STORE: | |
395 | operand = gimple_call_arg (stmt, 3); | |
396 | if (operand == use) | |
397 | return true; | |
398 | /* FALLTHRU */ | |
399 | case IFN_MASK_LOAD: | |
400 | operand = gimple_call_arg (stmt, 2); | |
401 | if (operand == use) | |
402 | return true; | |
403 | break; | |
404 | default: | |
405 | break; | |
406 | } | |
407 | return false; | |
408 | } | |
409 | ||
59a05b0c EB |
410 | if (TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME) |
411 | return false; | |
ebfd146a | 412 | operand = gimple_assign_rhs1 (stmt); |
ebfd146a IR |
413 | if (TREE_CODE (operand) != SSA_NAME) |
414 | return false; | |
415 | ||
416 | if (operand == use) | |
417 | return true; | |
418 | ||
419 | return false; | |
420 | } | |
421 | ||
422 | ||
b8698a0f | 423 | /* |
ebfd146a IR |
424 | Function process_use. |
425 | ||
426 | Inputs: | |
427 | - a USE in STMT in a loop represented by LOOP_VINFO | |
b28ead45 | 428 | - RELEVANT - enum value to be set in the STMT_VINFO of the stmt |
ff802fa1 | 429 | that defined USE. This is done by calling mark_relevant and passing it |
ebfd146a | 430 | the WORKLIST (to add DEF_STMT to the WORKLIST in case it is relevant). |
aec7ae7d JJ |
431 | - FORCE is true if exist_non_indexing_operands_for_use_p check shouldn't |
432 | be performed. | |
ebfd146a IR |
433 | |
434 | Outputs: | |
435 | Generally, LIVE_P and RELEVANT are used to define the liveness and | |
436 | relevance info of the DEF_STMT of this USE: | |
437 | STMT_VINFO_LIVE_P (DEF_STMT_info) <-- live_p | |
438 | STMT_VINFO_RELEVANT (DEF_STMT_info) <-- relevant | |
439 | Exceptions: | |
440 | - case 1: If USE is used only for address computations (e.g. array indexing), | |
b8698a0f | 441 | which does not need to be directly vectorized, then the liveness/relevance |
ebfd146a | 442 | of the respective DEF_STMT is left unchanged. |
b8698a0f L |
443 | - case 2: If STMT is a reduction phi and DEF_STMT is a reduction stmt, we |
444 | skip DEF_STMT cause it had already been processed. | |
ebfd146a IR |
445 | - case 3: If DEF_STMT and STMT are in different nests, then "relevant" will |
446 | be modified accordingly. | |
447 | ||
448 | Return true if everything is as expected. Return false otherwise. */ | |
449 | ||
450 | static bool | |
b28ead45 | 451 | process_use (gimple *stmt, tree use, loop_vec_info loop_vinfo, |
355fe088 | 452 | enum vect_relevant relevant, vec<gimple *> *worklist, |
aec7ae7d | 453 | bool force) |
ebfd146a IR |
454 | { |
455 | struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); | |
456 | stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt); | |
457 | stmt_vec_info dstmt_vinfo; | |
458 | basic_block bb, def_bb; | |
355fe088 | 459 | gimple *def_stmt; |
ebfd146a IR |
460 | enum vect_def_type dt; |
461 | ||
b8698a0f | 462 | /* case 1: we are only interested in uses that need to be vectorized. Uses |
ebfd146a | 463 | that are used for address computation are not considered relevant. */ |
aec7ae7d | 464 | if (!force && !exist_non_indexing_operands_for_use_p (use, stmt)) |
ebfd146a IR |
465 | return true; |
466 | ||
81c40241 | 467 | if (!vect_is_simple_use (use, loop_vinfo, &def_stmt, &dt)) |
b8698a0f | 468 | { |
73fbfcad | 469 | if (dump_enabled_p ()) |
78c60e3d | 470 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 471 | "not vectorized: unsupported use in stmt.\n"); |
ebfd146a IR |
472 | return false; |
473 | } | |
474 | ||
475 | if (!def_stmt || gimple_nop_p (def_stmt)) | |
476 | return true; | |
477 | ||
478 | def_bb = gimple_bb (def_stmt); | |
479 | if (!flow_bb_inside_loop_p (loop, def_bb)) | |
480 | { | |
73fbfcad | 481 | if (dump_enabled_p ()) |
e645e942 | 482 | dump_printf_loc (MSG_NOTE, vect_location, "def_stmt is out of loop.\n"); |
ebfd146a IR |
483 | return true; |
484 | } | |
485 | ||
b8698a0f L |
486 | /* case 2: A reduction phi (STMT) defined by a reduction stmt (DEF_STMT). |
487 | DEF_STMT must have already been processed, because this should be the | |
488 | only way that STMT, which is a reduction-phi, was put in the worklist, | |
489 | as there should be no other uses for DEF_STMT in the loop. So we just | |
ebfd146a IR |
490 | check that everything is as expected, and we are done. */ |
491 | dstmt_vinfo = vinfo_for_stmt (def_stmt); | |
492 | bb = gimple_bb (stmt); | |
493 | if (gimple_code (stmt) == GIMPLE_PHI | |
494 | && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def | |
495 | && gimple_code (def_stmt) != GIMPLE_PHI | |
496 | && STMT_VINFO_DEF_TYPE (dstmt_vinfo) == vect_reduction_def | |
497 | && bb->loop_father == def_bb->loop_father) | |
498 | { | |
73fbfcad | 499 | if (dump_enabled_p ()) |
78c60e3d | 500 | dump_printf_loc (MSG_NOTE, vect_location, |
e645e942 | 501 | "reduc-stmt defining reduc-phi in the same nest.\n"); |
ebfd146a IR |
502 | if (STMT_VINFO_IN_PATTERN_P (dstmt_vinfo)) |
503 | dstmt_vinfo = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (dstmt_vinfo)); | |
504 | gcc_assert (STMT_VINFO_RELEVANT (dstmt_vinfo) < vect_used_by_reduction); | |
b8698a0f | 505 | gcc_assert (STMT_VINFO_LIVE_P (dstmt_vinfo) |
8644a673 | 506 | || STMT_VINFO_RELEVANT (dstmt_vinfo) > vect_unused_in_scope); |
ebfd146a IR |
507 | return true; |
508 | } | |
509 | ||
510 | /* case 3a: outer-loop stmt defining an inner-loop stmt: | |
511 | outer-loop-header-bb: | |
512 | d = def_stmt | |
513 | inner-loop: | |
514 | stmt # use (d) | |
515 | outer-loop-tail-bb: | |
516 | ... */ | |
517 | if (flow_loop_nested_p (def_bb->loop_father, bb->loop_father)) | |
518 | { | |
73fbfcad | 519 | if (dump_enabled_p ()) |
78c60e3d | 520 | dump_printf_loc (MSG_NOTE, vect_location, |
e645e942 | 521 | "outer-loop def-stmt defining inner-loop stmt.\n"); |
7c5222ff | 522 | |
ebfd146a IR |
523 | switch (relevant) |
524 | { | |
8644a673 | 525 | case vect_unused_in_scope: |
7c5222ff IR |
526 | relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_nested_cycle) ? |
527 | vect_used_in_scope : vect_unused_in_scope; | |
ebfd146a | 528 | break; |
7c5222ff | 529 | |
ebfd146a | 530 | case vect_used_in_outer_by_reduction: |
7c5222ff | 531 | gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def); |
ebfd146a IR |
532 | relevant = vect_used_by_reduction; |
533 | break; | |
7c5222ff | 534 | |
ebfd146a | 535 | case vect_used_in_outer: |
7c5222ff | 536 | gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def); |
8644a673 | 537 | relevant = vect_used_in_scope; |
ebfd146a | 538 | break; |
7c5222ff | 539 | |
8644a673 | 540 | case vect_used_in_scope: |
ebfd146a IR |
541 | break; |
542 | ||
543 | default: | |
544 | gcc_unreachable (); | |
b8698a0f | 545 | } |
ebfd146a IR |
546 | } |
547 | ||
548 | /* case 3b: inner-loop stmt defining an outer-loop stmt: | |
549 | outer-loop-header-bb: | |
550 | ... | |
551 | inner-loop: | |
552 | d = def_stmt | |
06066f92 | 553 | outer-loop-tail-bb (or outer-loop-exit-bb in double reduction): |
ebfd146a IR |
554 | stmt # use (d) */ |
555 | else if (flow_loop_nested_p (bb->loop_father, def_bb->loop_father)) | |
556 | { | |
73fbfcad | 557 | if (dump_enabled_p ()) |
78c60e3d | 558 | dump_printf_loc (MSG_NOTE, vect_location, |
e645e942 | 559 | "inner-loop def-stmt defining outer-loop stmt.\n"); |
7c5222ff | 560 | |
ebfd146a IR |
561 | switch (relevant) |
562 | { | |
8644a673 | 563 | case vect_unused_in_scope: |
b8698a0f | 564 | relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def |
06066f92 | 565 | || STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_double_reduction_def) ? |
a70d6342 | 566 | vect_used_in_outer_by_reduction : vect_unused_in_scope; |
ebfd146a IR |
567 | break; |
568 | ||
ebfd146a | 569 | case vect_used_by_reduction: |
b28ead45 | 570 | case vect_used_only_live: |
ebfd146a IR |
571 | relevant = vect_used_in_outer_by_reduction; |
572 | break; | |
573 | ||
8644a673 | 574 | case vect_used_in_scope: |
ebfd146a IR |
575 | relevant = vect_used_in_outer; |
576 | break; | |
577 | ||
578 | default: | |
579 | gcc_unreachable (); | |
580 | } | |
581 | } | |
643a9684 RB |
582 | /* We are also not interested in uses on loop PHI backedges that are |
583 | inductions. Otherwise we'll needlessly vectorize the IV increment | |
e294f495 RB |
584 | and cause hybrid SLP for SLP inductions. Unless the PHI is live |
585 | of course. */ | |
643a9684 RB |
586 | else if (gimple_code (stmt) == GIMPLE_PHI |
587 | && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_induction_def | |
e294f495 | 588 | && ! STMT_VINFO_LIVE_P (stmt_vinfo) |
643a9684 RB |
589 | && (PHI_ARG_DEF_FROM_EDGE (stmt, loop_latch_edge (bb->loop_father)) |
590 | == use)) | |
591 | { | |
592 | if (dump_enabled_p ()) | |
593 | dump_printf_loc (MSG_NOTE, vect_location, | |
594 | "induction value on backedge.\n"); | |
595 | return true; | |
596 | } | |
597 | ||
ebfd146a | 598 | |
b28ead45 | 599 | vect_mark_relevant (worklist, def_stmt, relevant, false); |
ebfd146a IR |
600 | return true; |
601 | } | |
602 | ||
603 | ||
604 | /* Function vect_mark_stmts_to_be_vectorized. | |
605 | ||
606 | Not all stmts in the loop need to be vectorized. For example: | |
607 | ||
608 | for i... | |
609 | for j... | |
610 | 1. T0 = i + j | |
611 | 2. T1 = a[T0] | |
612 | ||
613 | 3. j = j + 1 | |
614 | ||
615 | Stmt 1 and 3 do not need to be vectorized, because loop control and | |
616 | addressing of vectorized data-refs are handled differently. | |
617 | ||
618 | This pass detects such stmts. */ | |
619 | ||
620 | bool | |
621 | vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo) | |
622 | { | |
ebfd146a IR |
623 | struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); |
624 | basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo); | |
625 | unsigned int nbbs = loop->num_nodes; | |
626 | gimple_stmt_iterator si; | |
355fe088 | 627 | gimple *stmt; |
ebfd146a IR |
628 | unsigned int i; |
629 | stmt_vec_info stmt_vinfo; | |
630 | basic_block bb; | |
355fe088 | 631 | gimple *phi; |
ebfd146a | 632 | bool live_p; |
b28ead45 | 633 | enum vect_relevant relevant; |
ebfd146a | 634 | |
73fbfcad | 635 | if (dump_enabled_p ()) |
78c60e3d | 636 | dump_printf_loc (MSG_NOTE, vect_location, |
e645e942 | 637 | "=== vect_mark_stmts_to_be_vectorized ===\n"); |
ebfd146a | 638 | |
355fe088 | 639 | auto_vec<gimple *, 64> worklist; |
ebfd146a IR |
640 | |
641 | /* 1. Init worklist. */ | |
642 | for (i = 0; i < nbbs; i++) | |
643 | { | |
644 | bb = bbs[i]; | |
645 | for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si)) | |
b8698a0f | 646 | { |
ebfd146a | 647 | phi = gsi_stmt (si); |
73fbfcad | 648 | if (dump_enabled_p ()) |
ebfd146a | 649 | { |
78c60e3d SS |
650 | dump_printf_loc (MSG_NOTE, vect_location, "init: phi relevant? "); |
651 | dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0); | |
ebfd146a IR |
652 | } |
653 | ||
654 | if (vect_stmt_relevant_p (phi, loop_vinfo, &relevant, &live_p)) | |
97ecdb46 | 655 | vect_mark_relevant (&worklist, phi, relevant, live_p); |
ebfd146a IR |
656 | } |
657 | for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si)) | |
658 | { | |
659 | stmt = gsi_stmt (si); | |
73fbfcad | 660 | if (dump_enabled_p ()) |
ebfd146a | 661 | { |
78c60e3d SS |
662 | dump_printf_loc (MSG_NOTE, vect_location, "init: stmt relevant? "); |
663 | dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0); | |
b8698a0f | 664 | } |
ebfd146a IR |
665 | |
666 | if (vect_stmt_relevant_p (stmt, loop_vinfo, &relevant, &live_p)) | |
97ecdb46 | 667 | vect_mark_relevant (&worklist, stmt, relevant, live_p); |
ebfd146a IR |
668 | } |
669 | } | |
670 | ||
671 | /* 2. Process_worklist */ | |
9771b263 | 672 | while (worklist.length () > 0) |
ebfd146a IR |
673 | { |
674 | use_operand_p use_p; | |
675 | ssa_op_iter iter; | |
676 | ||
9771b263 | 677 | stmt = worklist.pop (); |
73fbfcad | 678 | if (dump_enabled_p ()) |
ebfd146a | 679 | { |
78c60e3d SS |
680 | dump_printf_loc (MSG_NOTE, vect_location, "worklist: examine stmt: "); |
681 | dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0); | |
ebfd146a IR |
682 | } |
683 | ||
b8698a0f | 684 | /* Examine the USEs of STMT. For each USE, mark the stmt that defines it |
b28ead45 AH |
685 | (DEF_STMT) as relevant/irrelevant according to the relevance property |
686 | of STMT. */ | |
ebfd146a IR |
687 | stmt_vinfo = vinfo_for_stmt (stmt); |
688 | relevant = STMT_VINFO_RELEVANT (stmt_vinfo); | |
ebfd146a | 689 | |
b28ead45 AH |
690 | /* Generally, the relevance property of STMT (in STMT_VINFO_RELEVANT) is |
691 | propagated as is to the DEF_STMTs of its USEs. | |
ebfd146a IR |
692 | |
693 | One exception is when STMT has been identified as defining a reduction | |
b28ead45 | 694 | variable; in this case we set the relevance to vect_used_by_reduction. |
ebfd146a | 695 | This is because we distinguish between two kinds of relevant stmts - |
b8698a0f | 696 | those that are used by a reduction computation, and those that are |
ff802fa1 | 697 | (also) used by a regular computation. This allows us later on to |
b8698a0f | 698 | identify stmts that are used solely by a reduction, and therefore the |
7c5222ff | 699 | order of the results that they produce does not have to be kept. */ |
ebfd146a | 700 | |
b28ead45 | 701 | switch (STMT_VINFO_DEF_TYPE (stmt_vinfo)) |
ebfd146a | 702 | { |
06066f92 | 703 | case vect_reduction_def: |
b28ead45 AH |
704 | gcc_assert (relevant != vect_unused_in_scope); |
705 | if (relevant != vect_unused_in_scope | |
706 | && relevant != vect_used_in_scope | |
707 | && relevant != vect_used_by_reduction | |
708 | && relevant != vect_used_only_live) | |
06066f92 | 709 | { |
b28ead45 AH |
710 | if (dump_enabled_p ()) |
711 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
712 | "unsupported use of reduction.\n"); | |
713 | return false; | |
06066f92 | 714 | } |
06066f92 | 715 | break; |
b8698a0f | 716 | |
06066f92 | 717 | case vect_nested_cycle: |
b28ead45 AH |
718 | if (relevant != vect_unused_in_scope |
719 | && relevant != vect_used_in_outer_by_reduction | |
720 | && relevant != vect_used_in_outer) | |
06066f92 | 721 | { |
73fbfcad | 722 | if (dump_enabled_p ()) |
78c60e3d | 723 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 724 | "unsupported use of nested cycle.\n"); |
7c5222ff | 725 | |
06066f92 IR |
726 | return false; |
727 | } | |
b8698a0f L |
728 | break; |
729 | ||
06066f92 | 730 | case vect_double_reduction_def: |
b28ead45 AH |
731 | if (relevant != vect_unused_in_scope |
732 | && relevant != vect_used_by_reduction | |
733 | && relevant != vect_used_only_live) | |
06066f92 | 734 | { |
73fbfcad | 735 | if (dump_enabled_p ()) |
78c60e3d | 736 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 737 | "unsupported use of double reduction.\n"); |
7c5222ff | 738 | |
7c5222ff | 739 | return false; |
06066f92 | 740 | } |
b8698a0f | 741 | break; |
7c5222ff | 742 | |
06066f92 IR |
743 | default: |
744 | break; | |
7c5222ff | 745 | } |
b8698a0f | 746 | |
aec7ae7d | 747 | if (is_pattern_stmt_p (stmt_vinfo)) |
9d5e7640 IR |
748 | { |
749 | /* Pattern statements are not inserted into the code, so | |
750 | FOR_EACH_PHI_OR_STMT_USE optimizes their operands out, and we | |
751 | have to scan the RHS or function arguments instead. */ | |
752 | if (is_gimple_assign (stmt)) | |
753 | { | |
69d2aade JJ |
754 | enum tree_code rhs_code = gimple_assign_rhs_code (stmt); |
755 | tree op = gimple_assign_rhs1 (stmt); | |
756 | ||
757 | i = 1; | |
758 | if (rhs_code == COND_EXPR && COMPARISON_CLASS_P (op)) | |
759 | { | |
760 | if (!process_use (stmt, TREE_OPERAND (op, 0), loop_vinfo, | |
b28ead45 | 761 | relevant, &worklist, false) |
69d2aade | 762 | || !process_use (stmt, TREE_OPERAND (op, 1), loop_vinfo, |
b28ead45 | 763 | relevant, &worklist, false)) |
566d377a | 764 | return false; |
69d2aade JJ |
765 | i = 2; |
766 | } | |
767 | for (; i < gimple_num_ops (stmt); i++) | |
9d5e7640 | 768 | { |
69d2aade | 769 | op = gimple_op (stmt, i); |
afbe6325 | 770 | if (TREE_CODE (op) == SSA_NAME |
b28ead45 | 771 | && !process_use (stmt, op, loop_vinfo, relevant, |
afbe6325 | 772 | &worklist, false)) |
07687835 | 773 | return false; |
9d5e7640 IR |
774 | } |
775 | } | |
776 | else if (is_gimple_call (stmt)) | |
777 | { | |
778 | for (i = 0; i < gimple_call_num_args (stmt); i++) | |
779 | { | |
780 | tree arg = gimple_call_arg (stmt, i); | |
b28ead45 | 781 | if (!process_use (stmt, arg, loop_vinfo, relevant, |
aec7ae7d | 782 | &worklist, false)) |
07687835 | 783 | return false; |
9d5e7640 IR |
784 | } |
785 | } | |
786 | } | |
787 | else | |
788 | FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE) | |
789 | { | |
790 | tree op = USE_FROM_PTR (use_p); | |
b28ead45 | 791 | if (!process_use (stmt, op, loop_vinfo, relevant, |
aec7ae7d | 792 | &worklist, false)) |
07687835 | 793 | return false; |
9d5e7640 | 794 | } |
aec7ae7d | 795 | |
3bab6342 | 796 | if (STMT_VINFO_GATHER_SCATTER_P (stmt_vinfo)) |
aec7ae7d | 797 | { |
134c85ca RS |
798 | gather_scatter_info gs_info; |
799 | if (!vect_check_gather_scatter (stmt, loop_vinfo, &gs_info)) | |
800 | gcc_unreachable (); | |
801 | if (!process_use (stmt, gs_info.offset, loop_vinfo, relevant, | |
802 | &worklist, true)) | |
566d377a | 803 | return false; |
aec7ae7d | 804 | } |
ebfd146a IR |
805 | } /* while worklist */ |
806 | ||
ebfd146a IR |
807 | return true; |
808 | } | |
809 | ||
810 | ||
b8698a0f | 811 | /* Function vect_model_simple_cost. |
ebfd146a | 812 | |
b8698a0f | 813 | Models cost for simple operations, i.e. those that only emit ncopies of a |
ebfd146a IR |
814 | single op. Right now, this does not account for multiple insns that could |
815 | be generated for the single vector op. We will handle that shortly. */ | |
816 | ||
817 | void | |
b8698a0f | 818 | vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies, |
92345349 | 819 | enum vect_def_type *dt, |
4fc5ebf1 | 820 | int ndts, |
92345349 BS |
821 | stmt_vector_for_cost *prologue_cost_vec, |
822 | stmt_vector_for_cost *body_cost_vec) | |
ebfd146a IR |
823 | { |
824 | int i; | |
92345349 | 825 | int inside_cost = 0, prologue_cost = 0; |
ebfd146a IR |
826 | |
827 | /* The SLP costs were already calculated during SLP tree build. */ | |
828 | if (PURE_SLP_STMT (stmt_info)) | |
829 | return; | |
830 | ||
4fc5ebf1 JG |
831 | /* Cost the "broadcast" of a scalar operand in to a vector operand. |
832 | Use scalar_to_vec to cost the broadcast, as elsewhere in the vector | |
833 | cost model. */ | |
834 | for (i = 0; i < ndts; i++) | |
92345349 | 835 | if (dt[i] == vect_constant_def || dt[i] == vect_external_def) |
4fc5ebf1 | 836 | prologue_cost += record_stmt_cost (prologue_cost_vec, 1, scalar_to_vec, |
92345349 | 837 | stmt_info, 0, vect_prologue); |
c3e7ee41 BS |
838 | |
839 | /* Pass the inside-of-loop statements to the target-specific cost model. */ | |
92345349 BS |
840 | inside_cost = record_stmt_cost (body_cost_vec, ncopies, vector_stmt, |
841 | stmt_info, 0, vect_body); | |
c3e7ee41 | 842 | |
73fbfcad | 843 | if (dump_enabled_p ()) |
78c60e3d SS |
844 | dump_printf_loc (MSG_NOTE, vect_location, |
845 | "vect_model_simple_cost: inside_cost = %d, " | |
e645e942 | 846 | "prologue_cost = %d .\n", inside_cost, prologue_cost); |
ebfd146a IR |
847 | } |
848 | ||
849 | ||
8bd37302 BS |
850 | /* Model cost for type demotion and promotion operations. PWR is normally |
851 | zero for single-step promotions and demotions. It will be one if | |
852 | two-step promotion/demotion is required, and so on. Each additional | |
853 | step doubles the number of instructions required. */ | |
854 | ||
855 | static void | |
856 | vect_model_promotion_demotion_cost (stmt_vec_info stmt_info, | |
857 | enum vect_def_type *dt, int pwr) | |
858 | { | |
859 | int i, tmp; | |
92345349 | 860 | int inside_cost = 0, prologue_cost = 0; |
c3e7ee41 BS |
861 | loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); |
862 | bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info); | |
863 | void *target_cost_data; | |
8bd37302 BS |
864 | |
865 | /* The SLP costs were already calculated during SLP tree build. */ | |
866 | if (PURE_SLP_STMT (stmt_info)) | |
867 | return; | |
868 | ||
c3e7ee41 BS |
869 | if (loop_vinfo) |
870 | target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo); | |
871 | else | |
872 | target_cost_data = BB_VINFO_TARGET_COST_DATA (bb_vinfo); | |
873 | ||
8bd37302 BS |
874 | for (i = 0; i < pwr + 1; i++) |
875 | { | |
876 | tmp = (STMT_VINFO_TYPE (stmt_info) == type_promotion_vec_info_type) ? | |
877 | (i + 1) : i; | |
c3e7ee41 | 878 | inside_cost += add_stmt_cost (target_cost_data, vect_pow2 (tmp), |
92345349 BS |
879 | vec_promote_demote, stmt_info, 0, |
880 | vect_body); | |
8bd37302 BS |
881 | } |
882 | ||
883 | /* FORNOW: Assuming maximum 2 args per stmts. */ | |
884 | for (i = 0; i < 2; i++) | |
92345349 BS |
885 | if (dt[i] == vect_constant_def || dt[i] == vect_external_def) |
886 | prologue_cost += add_stmt_cost (target_cost_data, 1, vector_stmt, | |
887 | stmt_info, 0, vect_prologue); | |
8bd37302 | 888 | |
73fbfcad | 889 | if (dump_enabled_p ()) |
78c60e3d SS |
890 | dump_printf_loc (MSG_NOTE, vect_location, |
891 | "vect_model_promotion_demotion_cost: inside_cost = %d, " | |
e645e942 | 892 | "prologue_cost = %d .\n", inside_cost, prologue_cost); |
8bd37302 BS |
893 | } |
894 | ||
ebfd146a IR |
895 | /* Function vect_model_store_cost |
896 | ||
0d0293ac MM |
897 | Models cost for stores. In the case of grouped accesses, one access |
898 | has the overhead of the grouped access attributed to it. */ | |
ebfd146a IR |
899 | |
900 | void | |
b8698a0f | 901 | vect_model_store_cost (stmt_vec_info stmt_info, int ncopies, |
2de001ee RS |
902 | vect_memory_access_type memory_access_type, |
903 | enum vect_def_type dt, slp_tree slp_node, | |
92345349 BS |
904 | stmt_vector_for_cost *prologue_cost_vec, |
905 | stmt_vector_for_cost *body_cost_vec) | |
ebfd146a | 906 | { |
92345349 | 907 | unsigned int inside_cost = 0, prologue_cost = 0; |
892a981f RS |
908 | struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info); |
909 | gimple *first_stmt = STMT_VINFO_STMT (stmt_info); | |
910 | bool grouped_access_p = STMT_VINFO_GROUPED_ACCESS (stmt_info); | |
ebfd146a | 911 | |
8644a673 | 912 | if (dt == vect_constant_def || dt == vect_external_def) |
92345349 BS |
913 | prologue_cost += record_stmt_cost (prologue_cost_vec, 1, scalar_to_vec, |
914 | stmt_info, 0, vect_prologue); | |
ebfd146a | 915 | |
892a981f RS |
916 | /* Grouped stores update all elements in the group at once, |
917 | so we want the DR for the first statement. */ | |
918 | if (!slp_node && grouped_access_p) | |
720f5239 | 919 | { |
892a981f RS |
920 | first_stmt = GROUP_FIRST_ELEMENT (stmt_info); |
921 | dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt)); | |
720f5239 | 922 | } |
ebfd146a | 923 | |
892a981f RS |
924 | /* True if we should include any once-per-group costs as well as |
925 | the cost of the statement itself. For SLP we only get called | |
926 | once per group anyhow. */ | |
927 | bool first_stmt_p = (first_stmt == STMT_VINFO_STMT (stmt_info)); | |
928 | ||
272c6793 | 929 | /* We assume that the cost of a single store-lanes instruction is |
0d0293ac | 930 | equivalent to the cost of GROUP_SIZE separate stores. If a grouped |
272c6793 | 931 | access is instead being provided by a permute-and-store operation, |
2de001ee RS |
932 | include the cost of the permutes. */ |
933 | if (first_stmt_p | |
934 | && memory_access_type == VMAT_CONTIGUOUS_PERMUTE) | |
ebfd146a | 935 | { |
e1377713 ES |
936 | /* Uses a high and low interleave or shuffle operations for each |
937 | needed permute. */ | |
892a981f | 938 | int group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt)); |
e1377713 | 939 | int nstmts = ncopies * ceil_log2 (group_size) * group_size; |
92345349 BS |
940 | inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm, |
941 | stmt_info, 0, vect_body); | |
ebfd146a | 942 | |
73fbfcad | 943 | if (dump_enabled_p ()) |
78c60e3d | 944 | dump_printf_loc (MSG_NOTE, vect_location, |
e645e942 | 945 | "vect_model_store_cost: strided group_size = %d .\n", |
78c60e3d | 946 | group_size); |
ebfd146a IR |
947 | } |
948 | ||
cee62fee | 949 | tree vectype = STMT_VINFO_VECTYPE (stmt_info); |
ebfd146a | 950 | /* Costs of the stores. */ |
067bc855 RB |
951 | if (memory_access_type == VMAT_ELEMENTWISE |
952 | || memory_access_type == VMAT_GATHER_SCATTER) | |
2de001ee RS |
953 | /* N scalar stores plus extracting the elements. */ |
954 | inside_cost += record_stmt_cost (body_cost_vec, | |
955 | ncopies * TYPE_VECTOR_SUBPARTS (vectype), | |
956 | scalar_store, stmt_info, 0, vect_body); | |
f2e2a985 | 957 | else |
892a981f | 958 | vect_get_store_cost (dr, ncopies, &inside_cost, body_cost_vec); |
ebfd146a | 959 | |
2de001ee RS |
960 | if (memory_access_type == VMAT_ELEMENTWISE |
961 | || memory_access_type == VMAT_STRIDED_SLP) | |
cee62fee MM |
962 | inside_cost += record_stmt_cost (body_cost_vec, |
963 | ncopies * TYPE_VECTOR_SUBPARTS (vectype), | |
964 | vec_to_scalar, stmt_info, 0, vect_body); | |
965 | ||
73fbfcad | 966 | if (dump_enabled_p ()) |
78c60e3d SS |
967 | dump_printf_loc (MSG_NOTE, vect_location, |
968 | "vect_model_store_cost: inside_cost = %d, " | |
e645e942 | 969 | "prologue_cost = %d .\n", inside_cost, prologue_cost); |
ebfd146a IR |
970 | } |
971 | ||
972 | ||
720f5239 IR |
973 | /* Calculate cost of DR's memory access. */ |
974 | void | |
975 | vect_get_store_cost (struct data_reference *dr, int ncopies, | |
c3e7ee41 | 976 | unsigned int *inside_cost, |
92345349 | 977 | stmt_vector_for_cost *body_cost_vec) |
720f5239 IR |
978 | { |
979 | int alignment_support_scheme = vect_supportable_dr_alignment (dr, false); | |
355fe088 | 980 | gimple *stmt = DR_STMT (dr); |
c3e7ee41 | 981 | stmt_vec_info stmt_info = vinfo_for_stmt (stmt); |
720f5239 IR |
982 | |
983 | switch (alignment_support_scheme) | |
984 | { | |
985 | case dr_aligned: | |
986 | { | |
92345349 BS |
987 | *inside_cost += record_stmt_cost (body_cost_vec, ncopies, |
988 | vector_store, stmt_info, 0, | |
989 | vect_body); | |
720f5239 | 990 | |
73fbfcad | 991 | if (dump_enabled_p ()) |
78c60e3d | 992 | dump_printf_loc (MSG_NOTE, vect_location, |
e645e942 | 993 | "vect_model_store_cost: aligned.\n"); |
720f5239 IR |
994 | break; |
995 | } | |
996 | ||
997 | case dr_unaligned_supported: | |
998 | { | |
720f5239 | 999 | /* Here, we assign an additional cost for the unaligned store. */ |
92345349 | 1000 | *inside_cost += record_stmt_cost (body_cost_vec, ncopies, |
c3e7ee41 | 1001 | unaligned_store, stmt_info, |
92345349 | 1002 | DR_MISALIGNMENT (dr), vect_body); |
73fbfcad | 1003 | if (dump_enabled_p ()) |
78c60e3d SS |
1004 | dump_printf_loc (MSG_NOTE, vect_location, |
1005 | "vect_model_store_cost: unaligned supported by " | |
e645e942 | 1006 | "hardware.\n"); |
720f5239 IR |
1007 | break; |
1008 | } | |
1009 | ||
38eec4c6 UW |
1010 | case dr_unaligned_unsupported: |
1011 | { | |
1012 | *inside_cost = VECT_MAX_COST; | |
1013 | ||
73fbfcad | 1014 | if (dump_enabled_p ()) |
78c60e3d | 1015 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 1016 | "vect_model_store_cost: unsupported access.\n"); |
38eec4c6 UW |
1017 | break; |
1018 | } | |
1019 | ||
720f5239 IR |
1020 | default: |
1021 | gcc_unreachable (); | |
1022 | } | |
1023 | } | |
1024 | ||
1025 | ||
ebfd146a IR |
1026 | /* Function vect_model_load_cost |
1027 | ||
892a981f RS |
1028 | Models cost for loads. In the case of grouped accesses, one access has |
1029 | the overhead of the grouped access attributed to it. Since unaligned | |
b8698a0f | 1030 | accesses are supported for loads, we also account for the costs of the |
ebfd146a IR |
1031 | access scheme chosen. */ |
1032 | ||
1033 | void | |
92345349 | 1034 | vect_model_load_cost (stmt_vec_info stmt_info, int ncopies, |
2de001ee RS |
1035 | vect_memory_access_type memory_access_type, |
1036 | slp_tree slp_node, | |
92345349 BS |
1037 | stmt_vector_for_cost *prologue_cost_vec, |
1038 | stmt_vector_for_cost *body_cost_vec) | |
ebfd146a | 1039 | { |
892a981f RS |
1040 | gimple *first_stmt = STMT_VINFO_STMT (stmt_info); |
1041 | struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info); | |
92345349 | 1042 | unsigned int inside_cost = 0, prologue_cost = 0; |
892a981f | 1043 | bool grouped_access_p = STMT_VINFO_GROUPED_ACCESS (stmt_info); |
ebfd146a | 1044 | |
892a981f RS |
1045 | /* Grouped loads read all elements in the group at once, |
1046 | so we want the DR for the first statement. */ | |
1047 | if (!slp_node && grouped_access_p) | |
ebfd146a | 1048 | { |
892a981f RS |
1049 | first_stmt = GROUP_FIRST_ELEMENT (stmt_info); |
1050 | dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt)); | |
ebfd146a IR |
1051 | } |
1052 | ||
892a981f RS |
1053 | /* True if we should include any once-per-group costs as well as |
1054 | the cost of the statement itself. For SLP we only get called | |
1055 | once per group anyhow. */ | |
1056 | bool first_stmt_p = (first_stmt == STMT_VINFO_STMT (stmt_info)); | |
1057 | ||
272c6793 | 1058 | /* We assume that the cost of a single load-lanes instruction is |
0d0293ac | 1059 | equivalent to the cost of GROUP_SIZE separate loads. If a grouped |
272c6793 | 1060 | access is instead being provided by a load-and-permute operation, |
2de001ee RS |
1061 | include the cost of the permutes. */ |
1062 | if (first_stmt_p | |
1063 | && memory_access_type == VMAT_CONTIGUOUS_PERMUTE) | |
ebfd146a | 1064 | { |
2c23db6d ES |
1065 | /* Uses an even and odd extract operations or shuffle operations |
1066 | for each needed permute. */ | |
892a981f | 1067 | int group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt)); |
2c23db6d ES |
1068 | int nstmts = ncopies * ceil_log2 (group_size) * group_size; |
1069 | inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm, | |
1070 | stmt_info, 0, vect_body); | |
ebfd146a | 1071 | |
73fbfcad | 1072 | if (dump_enabled_p ()) |
e645e942 TJ |
1073 | dump_printf_loc (MSG_NOTE, vect_location, |
1074 | "vect_model_load_cost: strided group_size = %d .\n", | |
78c60e3d | 1075 | group_size); |
ebfd146a IR |
1076 | } |
1077 | ||
1078 | /* The loads themselves. */ | |
067bc855 RB |
1079 | if (memory_access_type == VMAT_ELEMENTWISE |
1080 | || memory_access_type == VMAT_GATHER_SCATTER) | |
a82960aa | 1081 | { |
a21892ad BS |
1082 | /* N scalar loads plus gathering them into a vector. */ |
1083 | tree vectype = STMT_VINFO_VECTYPE (stmt_info); | |
92345349 | 1084 | inside_cost += record_stmt_cost (body_cost_vec, |
c3e7ee41 | 1085 | ncopies * TYPE_VECTOR_SUBPARTS (vectype), |
92345349 | 1086 | scalar_load, stmt_info, 0, vect_body); |
a82960aa RG |
1087 | } |
1088 | else | |
892a981f | 1089 | vect_get_load_cost (dr, ncopies, first_stmt_p, |
92345349 BS |
1090 | &inside_cost, &prologue_cost, |
1091 | prologue_cost_vec, body_cost_vec, true); | |
2de001ee RS |
1092 | if (memory_access_type == VMAT_ELEMENTWISE |
1093 | || memory_access_type == VMAT_STRIDED_SLP) | |
892a981f RS |
1094 | inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_construct, |
1095 | stmt_info, 0, vect_body); | |
720f5239 | 1096 | |
73fbfcad | 1097 | if (dump_enabled_p ()) |
78c60e3d SS |
1098 | dump_printf_loc (MSG_NOTE, vect_location, |
1099 | "vect_model_load_cost: inside_cost = %d, " | |
e645e942 | 1100 | "prologue_cost = %d .\n", inside_cost, prologue_cost); |
720f5239 IR |
1101 | } |
1102 | ||
1103 | ||
1104 | /* Calculate cost of DR's memory access. */ | |
1105 | void | |
1106 | vect_get_load_cost (struct data_reference *dr, int ncopies, | |
c3e7ee41 | 1107 | bool add_realign_cost, unsigned int *inside_cost, |
92345349 BS |
1108 | unsigned int *prologue_cost, |
1109 | stmt_vector_for_cost *prologue_cost_vec, | |
1110 | stmt_vector_for_cost *body_cost_vec, | |
1111 | bool record_prologue_costs) | |
720f5239 IR |
1112 | { |
1113 | int alignment_support_scheme = vect_supportable_dr_alignment (dr, false); | |
355fe088 | 1114 | gimple *stmt = DR_STMT (dr); |
c3e7ee41 | 1115 | stmt_vec_info stmt_info = vinfo_for_stmt (stmt); |
720f5239 IR |
1116 | |
1117 | switch (alignment_support_scheme) | |
ebfd146a IR |
1118 | { |
1119 | case dr_aligned: | |
1120 | { | |
92345349 BS |
1121 | *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load, |
1122 | stmt_info, 0, vect_body); | |
ebfd146a | 1123 | |
73fbfcad | 1124 | if (dump_enabled_p ()) |
78c60e3d | 1125 | dump_printf_loc (MSG_NOTE, vect_location, |
e645e942 | 1126 | "vect_model_load_cost: aligned.\n"); |
ebfd146a IR |
1127 | |
1128 | break; | |
1129 | } | |
1130 | case dr_unaligned_supported: | |
1131 | { | |
720f5239 | 1132 | /* Here, we assign an additional cost for the unaligned load. */ |
92345349 | 1133 | *inside_cost += record_stmt_cost (body_cost_vec, ncopies, |
c3e7ee41 | 1134 | unaligned_load, stmt_info, |
92345349 | 1135 | DR_MISALIGNMENT (dr), vect_body); |
c3e7ee41 | 1136 | |
73fbfcad | 1137 | if (dump_enabled_p ()) |
78c60e3d SS |
1138 | dump_printf_loc (MSG_NOTE, vect_location, |
1139 | "vect_model_load_cost: unaligned supported by " | |
e645e942 | 1140 | "hardware.\n"); |
ebfd146a IR |
1141 | |
1142 | break; | |
1143 | } | |
1144 | case dr_explicit_realign: | |
1145 | { | |
92345349 BS |
1146 | *inside_cost += record_stmt_cost (body_cost_vec, ncopies * 2, |
1147 | vector_load, stmt_info, 0, vect_body); | |
1148 | *inside_cost += record_stmt_cost (body_cost_vec, ncopies, | |
1149 | vec_perm, stmt_info, 0, vect_body); | |
ebfd146a IR |
1150 | |
1151 | /* FIXME: If the misalignment remains fixed across the iterations of | |
1152 | the containing loop, the following cost should be added to the | |
92345349 | 1153 | prologue costs. */ |
ebfd146a | 1154 | if (targetm.vectorize.builtin_mask_for_load) |
92345349 BS |
1155 | *inside_cost += record_stmt_cost (body_cost_vec, 1, vector_stmt, |
1156 | stmt_info, 0, vect_body); | |
ebfd146a | 1157 | |
73fbfcad | 1158 | if (dump_enabled_p ()) |
e645e942 TJ |
1159 | dump_printf_loc (MSG_NOTE, vect_location, |
1160 | "vect_model_load_cost: explicit realign\n"); | |
8bd37302 | 1161 | |
ebfd146a IR |
1162 | break; |
1163 | } | |
1164 | case dr_explicit_realign_optimized: | |
1165 | { | |
73fbfcad | 1166 | if (dump_enabled_p ()) |
e645e942 | 1167 | dump_printf_loc (MSG_NOTE, vect_location, |
78c60e3d | 1168 | "vect_model_load_cost: unaligned software " |
e645e942 | 1169 | "pipelined.\n"); |
ebfd146a IR |
1170 | |
1171 | /* Unaligned software pipeline has a load of an address, an initial | |
ff802fa1 | 1172 | load, and possibly a mask operation to "prime" the loop. However, |
0d0293ac | 1173 | if this is an access in a group of loads, which provide grouped |
ebfd146a | 1174 | access, then the above cost should only be considered for one |
ff802fa1 | 1175 | access in the group. Inside the loop, there is a load op |
ebfd146a IR |
1176 | and a realignment op. */ |
1177 | ||
92345349 | 1178 | if (add_realign_cost && record_prologue_costs) |
ebfd146a | 1179 | { |
92345349 BS |
1180 | *prologue_cost += record_stmt_cost (prologue_cost_vec, 2, |
1181 | vector_stmt, stmt_info, | |
1182 | 0, vect_prologue); | |
ebfd146a | 1183 | if (targetm.vectorize.builtin_mask_for_load) |
92345349 BS |
1184 | *prologue_cost += record_stmt_cost (prologue_cost_vec, 1, |
1185 | vector_stmt, stmt_info, | |
1186 | 0, vect_prologue); | |
ebfd146a IR |
1187 | } |
1188 | ||
92345349 BS |
1189 | *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load, |
1190 | stmt_info, 0, vect_body); | |
1191 | *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_perm, | |
1192 | stmt_info, 0, vect_body); | |
8bd37302 | 1193 | |
73fbfcad | 1194 | if (dump_enabled_p ()) |
78c60e3d | 1195 | dump_printf_loc (MSG_NOTE, vect_location, |
e645e942 TJ |
1196 | "vect_model_load_cost: explicit realign optimized" |
1197 | "\n"); | |
8bd37302 | 1198 | |
ebfd146a IR |
1199 | break; |
1200 | } | |
1201 | ||
38eec4c6 UW |
1202 | case dr_unaligned_unsupported: |
1203 | { | |
1204 | *inside_cost = VECT_MAX_COST; | |
1205 | ||
73fbfcad | 1206 | if (dump_enabled_p ()) |
78c60e3d | 1207 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 1208 | "vect_model_load_cost: unsupported access.\n"); |
38eec4c6 UW |
1209 | break; |
1210 | } | |
1211 | ||
ebfd146a IR |
1212 | default: |
1213 | gcc_unreachable (); | |
1214 | } | |
ebfd146a IR |
1215 | } |
1216 | ||
418b7df3 RG |
1217 | /* Insert the new stmt NEW_STMT at *GSI or at the appropriate place in |
1218 | the loop preheader for the vectorized stmt STMT. */ | |
ebfd146a | 1219 | |
418b7df3 | 1220 | static void |
355fe088 | 1221 | vect_init_vector_1 (gimple *stmt, gimple *new_stmt, gimple_stmt_iterator *gsi) |
ebfd146a | 1222 | { |
ebfd146a | 1223 | if (gsi) |
418b7df3 | 1224 | vect_finish_stmt_generation (stmt, new_stmt, gsi); |
ebfd146a IR |
1225 | else |
1226 | { | |
418b7df3 | 1227 | stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt); |
ebfd146a | 1228 | loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo); |
b8698a0f | 1229 | |
a70d6342 IR |
1230 | if (loop_vinfo) |
1231 | { | |
1232 | struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); | |
418b7df3 RG |
1233 | basic_block new_bb; |
1234 | edge pe; | |
a70d6342 IR |
1235 | |
1236 | if (nested_in_vect_loop_p (loop, stmt)) | |
1237 | loop = loop->inner; | |
b8698a0f | 1238 | |
a70d6342 | 1239 | pe = loop_preheader_edge (loop); |
418b7df3 | 1240 | new_bb = gsi_insert_on_edge_immediate (pe, new_stmt); |
a70d6342 IR |
1241 | gcc_assert (!new_bb); |
1242 | } | |
1243 | else | |
1244 | { | |
1245 | bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo); | |
1246 | basic_block bb; | |
1247 | gimple_stmt_iterator gsi_bb_start; | |
1248 | ||
1249 | gcc_assert (bb_vinfo); | |
1250 | bb = BB_VINFO_BB (bb_vinfo); | |
12aaf609 | 1251 | gsi_bb_start = gsi_after_labels (bb); |
418b7df3 | 1252 | gsi_insert_before (&gsi_bb_start, new_stmt, GSI_SAME_STMT); |
a70d6342 | 1253 | } |
ebfd146a IR |
1254 | } |
1255 | ||
73fbfcad | 1256 | if (dump_enabled_p ()) |
ebfd146a | 1257 | { |
78c60e3d SS |
1258 | dump_printf_loc (MSG_NOTE, vect_location, |
1259 | "created new init_stmt: "); | |
1260 | dump_gimple_stmt (MSG_NOTE, TDF_SLIM, new_stmt, 0); | |
ebfd146a | 1261 | } |
418b7df3 RG |
1262 | } |
1263 | ||
1264 | /* Function vect_init_vector. | |
ebfd146a | 1265 | |
5467ee52 RG |
1266 | Insert a new stmt (INIT_STMT) that initializes a new variable of type |
1267 | TYPE with the value VAL. If TYPE is a vector type and VAL does not have | |
1268 | vector type a vector with all elements equal to VAL is created first. | |
1269 | Place the initialization at BSI if it is not NULL. Otherwise, place the | |
1270 | initialization at the loop preheader. | |
418b7df3 RG |
1271 | Return the DEF of INIT_STMT. |
1272 | It will be used in the vectorization of STMT. */ | |
1273 | ||
1274 | tree | |
355fe088 | 1275 | vect_init_vector (gimple *stmt, tree val, tree type, gimple_stmt_iterator *gsi) |
418b7df3 | 1276 | { |
355fe088 | 1277 | gimple *init_stmt; |
418b7df3 RG |
1278 | tree new_temp; |
1279 | ||
e412ece4 RB |
1280 | /* We abuse this function to push sth to a SSA name with initial 'val'. */ |
1281 | if (! useless_type_conversion_p (type, TREE_TYPE (val))) | |
418b7df3 | 1282 | { |
e412ece4 RB |
1283 | gcc_assert (TREE_CODE (type) == VECTOR_TYPE); |
1284 | if (! types_compatible_p (TREE_TYPE (type), TREE_TYPE (val))) | |
418b7df3 | 1285 | { |
5a308cf1 IE |
1286 | /* Scalar boolean value should be transformed into |
1287 | all zeros or all ones value before building a vector. */ | |
1288 | if (VECTOR_BOOLEAN_TYPE_P (type)) | |
1289 | { | |
b3d51f23 IE |
1290 | tree true_val = build_all_ones_cst (TREE_TYPE (type)); |
1291 | tree false_val = build_zero_cst (TREE_TYPE (type)); | |
5a308cf1 IE |
1292 | |
1293 | if (CONSTANT_CLASS_P (val)) | |
1294 | val = integer_zerop (val) ? false_val : true_val; | |
1295 | else | |
1296 | { | |
1297 | new_temp = make_ssa_name (TREE_TYPE (type)); | |
1298 | init_stmt = gimple_build_assign (new_temp, COND_EXPR, | |
1299 | val, true_val, false_val); | |
1300 | vect_init_vector_1 (stmt, init_stmt, gsi); | |
1301 | val = new_temp; | |
1302 | } | |
1303 | } | |
1304 | else if (CONSTANT_CLASS_P (val)) | |
42fd8198 | 1305 | val = fold_convert (TREE_TYPE (type), val); |
418b7df3 RG |
1306 | else |
1307 | { | |
b731b390 | 1308 | new_temp = make_ssa_name (TREE_TYPE (type)); |
e412ece4 RB |
1309 | if (! INTEGRAL_TYPE_P (TREE_TYPE (val))) |
1310 | init_stmt = gimple_build_assign (new_temp, | |
1311 | fold_build1 (VIEW_CONVERT_EXPR, | |
1312 | TREE_TYPE (type), | |
1313 | val)); | |
1314 | else | |
1315 | init_stmt = gimple_build_assign (new_temp, NOP_EXPR, val); | |
418b7df3 | 1316 | vect_init_vector_1 (stmt, init_stmt, gsi); |
5467ee52 | 1317 | val = new_temp; |
418b7df3 RG |
1318 | } |
1319 | } | |
5467ee52 | 1320 | val = build_vector_from_val (type, val); |
418b7df3 RG |
1321 | } |
1322 | ||
0e22bb5a RB |
1323 | new_temp = vect_get_new_ssa_name (type, vect_simple_var, "cst_"); |
1324 | init_stmt = gimple_build_assign (new_temp, val); | |
418b7df3 | 1325 | vect_init_vector_1 (stmt, init_stmt, gsi); |
0e22bb5a | 1326 | return new_temp; |
ebfd146a IR |
1327 | } |
1328 | ||
c83a894c | 1329 | /* Function vect_get_vec_def_for_operand_1. |
a70d6342 | 1330 | |
c83a894c AH |
1331 | For a defining stmt DEF_STMT of a scalar stmt, return a vector def with type |
1332 | DT that will be used in the vectorized stmt. */ | |
ebfd146a IR |
1333 | |
1334 | tree | |
c83a894c | 1335 | vect_get_vec_def_for_operand_1 (gimple *def_stmt, enum vect_def_type dt) |
ebfd146a IR |
1336 | { |
1337 | tree vec_oprnd; | |
355fe088 | 1338 | gimple *vec_stmt; |
ebfd146a | 1339 | stmt_vec_info def_stmt_info = NULL; |
ebfd146a IR |
1340 | |
1341 | switch (dt) | |
1342 | { | |
81c40241 | 1343 | /* operand is a constant or a loop invariant. */ |
ebfd146a | 1344 | case vect_constant_def: |
81c40241 | 1345 | case vect_external_def: |
c83a894c AH |
1346 | /* Code should use vect_get_vec_def_for_operand. */ |
1347 | gcc_unreachable (); | |
ebfd146a | 1348 | |
81c40241 | 1349 | /* operand is defined inside the loop. */ |
8644a673 | 1350 | case vect_internal_def: |
ebfd146a | 1351 | { |
ebfd146a IR |
1352 | /* Get the def from the vectorized stmt. */ |
1353 | def_stmt_info = vinfo_for_stmt (def_stmt); | |
83197f37 | 1354 | |
ebfd146a | 1355 | vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info); |
83197f37 IR |
1356 | /* Get vectorized pattern statement. */ |
1357 | if (!vec_stmt | |
1358 | && STMT_VINFO_IN_PATTERN_P (def_stmt_info) | |
1359 | && !STMT_VINFO_RELEVANT (def_stmt_info)) | |
1360 | vec_stmt = STMT_VINFO_VEC_STMT (vinfo_for_stmt ( | |
1361 | STMT_VINFO_RELATED_STMT (def_stmt_info))); | |
ebfd146a IR |
1362 | gcc_assert (vec_stmt); |
1363 | if (gimple_code (vec_stmt) == GIMPLE_PHI) | |
1364 | vec_oprnd = PHI_RESULT (vec_stmt); | |
1365 | else if (is_gimple_call (vec_stmt)) | |
1366 | vec_oprnd = gimple_call_lhs (vec_stmt); | |
1367 | else | |
1368 | vec_oprnd = gimple_assign_lhs (vec_stmt); | |
1369 | return vec_oprnd; | |
1370 | } | |
1371 | ||
c78e3652 | 1372 | /* operand is defined by a loop header phi. */ |
ebfd146a | 1373 | case vect_reduction_def: |
06066f92 | 1374 | case vect_double_reduction_def: |
7c5222ff | 1375 | case vect_nested_cycle: |
ebfd146a IR |
1376 | case vect_induction_def: |
1377 | { | |
1378 | gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI); | |
1379 | ||
1380 | /* Get the def from the vectorized stmt. */ | |
1381 | def_stmt_info = vinfo_for_stmt (def_stmt); | |
1382 | vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info); | |
6dbbece6 RG |
1383 | if (gimple_code (vec_stmt) == GIMPLE_PHI) |
1384 | vec_oprnd = PHI_RESULT (vec_stmt); | |
1385 | else | |
1386 | vec_oprnd = gimple_get_lhs (vec_stmt); | |
ebfd146a IR |
1387 | return vec_oprnd; |
1388 | } | |
1389 | ||
1390 | default: | |
1391 | gcc_unreachable (); | |
1392 | } | |
1393 | } | |
1394 | ||
1395 | ||
c83a894c AH |
1396 | /* Function vect_get_vec_def_for_operand. |
1397 | ||
1398 | OP is an operand in STMT. This function returns a (vector) def that will be | |
1399 | used in the vectorized stmt for STMT. | |
1400 | ||
1401 | In the case that OP is an SSA_NAME which is defined in the loop, then | |
1402 | STMT_VINFO_VEC_STMT of the defining stmt holds the relevant def. | |
1403 | ||
1404 | In case OP is an invariant or constant, a new stmt that creates a vector def | |
1405 | needs to be introduced. VECTYPE may be used to specify a required type for | |
1406 | vector invariant. */ | |
1407 | ||
1408 | tree | |
1409 | vect_get_vec_def_for_operand (tree op, gimple *stmt, tree vectype) | |
1410 | { | |
1411 | gimple *def_stmt; | |
1412 | enum vect_def_type dt; | |
1413 | bool is_simple_use; | |
1414 | stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt); | |
1415 | loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo); | |
1416 | ||
1417 | if (dump_enabled_p ()) | |
1418 | { | |
1419 | dump_printf_loc (MSG_NOTE, vect_location, | |
1420 | "vect_get_vec_def_for_operand: "); | |
1421 | dump_generic_expr (MSG_NOTE, TDF_SLIM, op); | |
1422 | dump_printf (MSG_NOTE, "\n"); | |
1423 | } | |
1424 | ||
1425 | is_simple_use = vect_is_simple_use (op, loop_vinfo, &def_stmt, &dt); | |
1426 | gcc_assert (is_simple_use); | |
1427 | if (def_stmt && dump_enabled_p ()) | |
1428 | { | |
1429 | dump_printf_loc (MSG_NOTE, vect_location, " def_stmt = "); | |
1430 | dump_gimple_stmt (MSG_NOTE, TDF_SLIM, def_stmt, 0); | |
1431 | } | |
1432 | ||
1433 | if (dt == vect_constant_def || dt == vect_external_def) | |
1434 | { | |
1435 | tree stmt_vectype = STMT_VINFO_VECTYPE (stmt_vinfo); | |
1436 | tree vector_type; | |
1437 | ||
1438 | if (vectype) | |
1439 | vector_type = vectype; | |
2568d8a1 | 1440 | else if (VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (op)) |
c83a894c AH |
1441 | && VECTOR_BOOLEAN_TYPE_P (stmt_vectype)) |
1442 | vector_type = build_same_sized_truth_vector_type (stmt_vectype); | |
1443 | else | |
1444 | vector_type = get_vectype_for_scalar_type (TREE_TYPE (op)); | |
1445 | ||
1446 | gcc_assert (vector_type); | |
1447 | return vect_init_vector (stmt, op, vector_type, NULL); | |
1448 | } | |
1449 | else | |
1450 | return vect_get_vec_def_for_operand_1 (def_stmt, dt); | |
1451 | } | |
1452 | ||
1453 | ||
ebfd146a IR |
1454 | /* Function vect_get_vec_def_for_stmt_copy |
1455 | ||
ff802fa1 | 1456 | Return a vector-def for an operand. This function is used when the |
b8698a0f L |
1457 | vectorized stmt to be created (by the caller to this function) is a "copy" |
1458 | created in case the vectorized result cannot fit in one vector, and several | |
ff802fa1 | 1459 | copies of the vector-stmt are required. In this case the vector-def is |
ebfd146a | 1460 | retrieved from the vector stmt recorded in the STMT_VINFO_RELATED_STMT field |
b8698a0f | 1461 | of the stmt that defines VEC_OPRND. |
ebfd146a IR |
1462 | DT is the type of the vector def VEC_OPRND. |
1463 | ||
1464 | Context: | |
1465 | In case the vectorization factor (VF) is bigger than the number | |
1466 | of elements that can fit in a vectype (nunits), we have to generate | |
ff802fa1 | 1467 | more than one vector stmt to vectorize the scalar stmt. This situation |
b8698a0f | 1468 | arises when there are multiple data-types operated upon in the loop; the |
ebfd146a IR |
1469 | smallest data-type determines the VF, and as a result, when vectorizing |
1470 | stmts operating on wider types we need to create 'VF/nunits' "copies" of the | |
1471 | vector stmt (each computing a vector of 'nunits' results, and together | |
b8698a0f | 1472 | computing 'VF' results in each iteration). This function is called when |
ebfd146a IR |
1473 | vectorizing such a stmt (e.g. vectorizing S2 in the illustration below, in |
1474 | which VF=16 and nunits=4, so the number of copies required is 4): | |
1475 | ||
1476 | scalar stmt: vectorized into: STMT_VINFO_RELATED_STMT | |
b8698a0f | 1477 | |
ebfd146a IR |
1478 | S1: x = load VS1.0: vx.0 = memref0 VS1.1 |
1479 | VS1.1: vx.1 = memref1 VS1.2 | |
1480 | VS1.2: vx.2 = memref2 VS1.3 | |
b8698a0f | 1481 | VS1.3: vx.3 = memref3 |
ebfd146a IR |
1482 | |
1483 | S2: z = x + ... VSnew.0: vz0 = vx.0 + ... VSnew.1 | |
1484 | VSnew.1: vz1 = vx.1 + ... VSnew.2 | |
1485 | VSnew.2: vz2 = vx.2 + ... VSnew.3 | |
1486 | VSnew.3: vz3 = vx.3 + ... | |
1487 | ||
1488 | The vectorization of S1 is explained in vectorizable_load. | |
1489 | The vectorization of S2: | |
b8698a0f L |
1490 | To create the first vector-stmt out of the 4 copies - VSnew.0 - |
1491 | the function 'vect_get_vec_def_for_operand' is called to | |
ff802fa1 | 1492 | get the relevant vector-def for each operand of S2. For operand x it |
ebfd146a IR |
1493 | returns the vector-def 'vx.0'. |
1494 | ||
b8698a0f L |
1495 | To create the remaining copies of the vector-stmt (VSnew.j), this |
1496 | function is called to get the relevant vector-def for each operand. It is | |
1497 | obtained from the respective VS1.j stmt, which is recorded in the | |
ebfd146a IR |
1498 | STMT_VINFO_RELATED_STMT field of the stmt that defines VEC_OPRND. |
1499 | ||
b8698a0f L |
1500 | For example, to obtain the vector-def 'vx.1' in order to create the |
1501 | vector stmt 'VSnew.1', this function is called with VEC_OPRND='vx.0'. | |
1502 | Given 'vx0' we obtain the stmt that defines it ('VS1.0'); from the | |
ebfd146a IR |
1503 | STMT_VINFO_RELATED_STMT field of 'VS1.0' we obtain the next copy - 'VS1.1', |
1504 | and return its def ('vx.1'). | |
1505 | Overall, to create the above sequence this function will be called 3 times: | |
1506 | vx.1 = vect_get_vec_def_for_stmt_copy (dt, vx.0); | |
1507 | vx.2 = vect_get_vec_def_for_stmt_copy (dt, vx.1); | |
1508 | vx.3 = vect_get_vec_def_for_stmt_copy (dt, vx.2); */ | |
1509 | ||
1510 | tree | |
1511 | vect_get_vec_def_for_stmt_copy (enum vect_def_type dt, tree vec_oprnd) | |
1512 | { | |
355fe088 | 1513 | gimple *vec_stmt_for_operand; |
ebfd146a IR |
1514 | stmt_vec_info def_stmt_info; |
1515 | ||
1516 | /* Do nothing; can reuse same def. */ | |
8644a673 | 1517 | if (dt == vect_external_def || dt == vect_constant_def ) |
ebfd146a IR |
1518 | return vec_oprnd; |
1519 | ||
1520 | vec_stmt_for_operand = SSA_NAME_DEF_STMT (vec_oprnd); | |
1521 | def_stmt_info = vinfo_for_stmt (vec_stmt_for_operand); | |
1522 | gcc_assert (def_stmt_info); | |
1523 | vec_stmt_for_operand = STMT_VINFO_RELATED_STMT (def_stmt_info); | |
1524 | gcc_assert (vec_stmt_for_operand); | |
ebfd146a IR |
1525 | if (gimple_code (vec_stmt_for_operand) == GIMPLE_PHI) |
1526 | vec_oprnd = PHI_RESULT (vec_stmt_for_operand); | |
1527 | else | |
1528 | vec_oprnd = gimple_get_lhs (vec_stmt_for_operand); | |
1529 | return vec_oprnd; | |
1530 | } | |
1531 | ||
1532 | ||
1533 | /* Get vectorized definitions for the operands to create a copy of an original | |
ff802fa1 | 1534 | stmt. See vect_get_vec_def_for_stmt_copy () for details. */ |
ebfd146a | 1535 | |
c78e3652 | 1536 | void |
b8698a0f | 1537 | vect_get_vec_defs_for_stmt_copy (enum vect_def_type *dt, |
9771b263 DN |
1538 | vec<tree> *vec_oprnds0, |
1539 | vec<tree> *vec_oprnds1) | |
ebfd146a | 1540 | { |
9771b263 | 1541 | tree vec_oprnd = vec_oprnds0->pop (); |
ebfd146a IR |
1542 | |
1543 | vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd); | |
9771b263 | 1544 | vec_oprnds0->quick_push (vec_oprnd); |
ebfd146a | 1545 | |
9771b263 | 1546 | if (vec_oprnds1 && vec_oprnds1->length ()) |
ebfd146a | 1547 | { |
9771b263 | 1548 | vec_oprnd = vec_oprnds1->pop (); |
ebfd146a | 1549 | vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd); |
9771b263 | 1550 | vec_oprnds1->quick_push (vec_oprnd); |
ebfd146a IR |
1551 | } |
1552 | } | |
1553 | ||
1554 | ||
c78e3652 | 1555 | /* Get vectorized definitions for OP0 and OP1. */ |
ebfd146a | 1556 | |
c78e3652 | 1557 | void |
355fe088 | 1558 | vect_get_vec_defs (tree op0, tree op1, gimple *stmt, |
9771b263 DN |
1559 | vec<tree> *vec_oprnds0, |
1560 | vec<tree> *vec_oprnds1, | |
306b0c92 | 1561 | slp_tree slp_node) |
ebfd146a IR |
1562 | { |
1563 | if (slp_node) | |
d092494c IR |
1564 | { |
1565 | int nops = (op1 == NULL_TREE) ? 1 : 2; | |
ef062b13 TS |
1566 | auto_vec<tree> ops (nops); |
1567 | auto_vec<vec<tree> > vec_defs (nops); | |
d092494c | 1568 | |
9771b263 | 1569 | ops.quick_push (op0); |
d092494c | 1570 | if (op1) |
9771b263 | 1571 | ops.quick_push (op1); |
d092494c | 1572 | |
306b0c92 | 1573 | vect_get_slp_defs (ops, slp_node, &vec_defs); |
d092494c | 1574 | |
37b5ec8f | 1575 | *vec_oprnds0 = vec_defs[0]; |
d092494c | 1576 | if (op1) |
37b5ec8f | 1577 | *vec_oprnds1 = vec_defs[1]; |
d092494c | 1578 | } |
ebfd146a IR |
1579 | else |
1580 | { | |
1581 | tree vec_oprnd; | |
1582 | ||
9771b263 | 1583 | vec_oprnds0->create (1); |
81c40241 | 1584 | vec_oprnd = vect_get_vec_def_for_operand (op0, stmt); |
9771b263 | 1585 | vec_oprnds0->quick_push (vec_oprnd); |
ebfd146a IR |
1586 | |
1587 | if (op1) | |
1588 | { | |
9771b263 | 1589 | vec_oprnds1->create (1); |
81c40241 | 1590 | vec_oprnd = vect_get_vec_def_for_operand (op1, stmt); |
9771b263 | 1591 | vec_oprnds1->quick_push (vec_oprnd); |
ebfd146a IR |
1592 | } |
1593 | } | |
1594 | } | |
1595 | ||
1596 | ||
1597 | /* Function vect_finish_stmt_generation. | |
1598 | ||
1599 | Insert a new stmt. */ | |
1600 | ||
1601 | void | |
355fe088 | 1602 | vect_finish_stmt_generation (gimple *stmt, gimple *vec_stmt, |
ebfd146a IR |
1603 | gimple_stmt_iterator *gsi) |
1604 | { | |
1605 | stmt_vec_info stmt_info = vinfo_for_stmt (stmt); | |
310213d4 | 1606 | vec_info *vinfo = stmt_info->vinfo; |
ebfd146a IR |
1607 | |
1608 | gcc_assert (gimple_code (stmt) != GIMPLE_LABEL); | |
1609 | ||
54e8e2c3 RG |
1610 | if (!gsi_end_p (*gsi) |
1611 | && gimple_has_mem_ops (vec_stmt)) | |
1612 | { | |
355fe088 | 1613 | gimple *at_stmt = gsi_stmt (*gsi); |
54e8e2c3 RG |
1614 | tree vuse = gimple_vuse (at_stmt); |
1615 | if (vuse && TREE_CODE (vuse) == SSA_NAME) | |
1616 | { | |
1617 | tree vdef = gimple_vdef (at_stmt); | |
1618 | gimple_set_vuse (vec_stmt, gimple_vuse (at_stmt)); | |
1619 | /* If we have an SSA vuse and insert a store, update virtual | |
1620 | SSA form to avoid triggering the renamer. Do so only | |
1621 | if we can easily see all uses - which is what almost always | |
1622 | happens with the way vectorized stmts are inserted. */ | |
1623 | if ((vdef && TREE_CODE (vdef) == SSA_NAME) | |
1624 | && ((is_gimple_assign (vec_stmt) | |
1625 | && !is_gimple_reg (gimple_assign_lhs (vec_stmt))) | |
1626 | || (is_gimple_call (vec_stmt) | |
1627 | && !(gimple_call_flags (vec_stmt) | |
1628 | & (ECF_CONST|ECF_PURE|ECF_NOVOPS))))) | |
1629 | { | |
1630 | tree new_vdef = copy_ssa_name (vuse, vec_stmt); | |
1631 | gimple_set_vdef (vec_stmt, new_vdef); | |
1632 | SET_USE (gimple_vuse_op (at_stmt), new_vdef); | |
1633 | } | |
1634 | } | |
1635 | } | |
ebfd146a IR |
1636 | gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT); |
1637 | ||
310213d4 | 1638 | set_vinfo_for_stmt (vec_stmt, new_stmt_vec_info (vec_stmt, vinfo)); |
ebfd146a | 1639 | |
73fbfcad | 1640 | if (dump_enabled_p ()) |
ebfd146a | 1641 | { |
78c60e3d SS |
1642 | dump_printf_loc (MSG_NOTE, vect_location, "add new stmt: "); |
1643 | dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vec_stmt, 0); | |
ebfd146a IR |
1644 | } |
1645 | ||
ad885386 | 1646 | gimple_set_location (vec_stmt, gimple_location (stmt)); |
8e91d222 JJ |
1647 | |
1648 | /* While EH edges will generally prevent vectorization, stmt might | |
1649 | e.g. be in a must-not-throw region. Ensure newly created stmts | |
1650 | that could throw are part of the same region. */ | |
1651 | int lp_nr = lookup_stmt_eh_lp (stmt); | |
1652 | if (lp_nr != 0 && stmt_could_throw_p (vec_stmt)) | |
1653 | add_stmt_to_eh_lp (vec_stmt, lp_nr); | |
ebfd146a IR |
1654 | } |
1655 | ||
70439f0d RS |
1656 | /* We want to vectorize a call to combined function CFN with function |
1657 | decl FNDECL, using VECTYPE_OUT as the type of the output and VECTYPE_IN | |
1658 | as the types of all inputs. Check whether this is possible using | |
1659 | an internal function, returning its code if so or IFN_LAST if not. */ | |
ebfd146a | 1660 | |
70439f0d RS |
1661 | static internal_fn |
1662 | vectorizable_internal_function (combined_fn cfn, tree fndecl, | |
1663 | tree vectype_out, tree vectype_in) | |
ebfd146a | 1664 | { |
70439f0d RS |
1665 | internal_fn ifn; |
1666 | if (internal_fn_p (cfn)) | |
1667 | ifn = as_internal_fn (cfn); | |
1668 | else | |
1669 | ifn = associated_internal_fn (fndecl); | |
1670 | if (ifn != IFN_LAST && direct_internal_fn_p (ifn)) | |
1671 | { | |
1672 | const direct_internal_fn_info &info = direct_internal_fn (ifn); | |
1673 | if (info.vectorizable) | |
1674 | { | |
1675 | tree type0 = (info.type0 < 0 ? vectype_out : vectype_in); | |
1676 | tree type1 = (info.type1 < 0 ? vectype_out : vectype_in); | |
d95ab70a RS |
1677 | if (direct_internal_fn_supported_p (ifn, tree_pair (type0, type1), |
1678 | OPTIMIZE_FOR_SPEED)) | |
70439f0d RS |
1679 | return ifn; |
1680 | } | |
1681 | } | |
1682 | return IFN_LAST; | |
ebfd146a IR |
1683 | } |
1684 | ||
5ce9450f | 1685 | |
355fe088 | 1686 | static tree permute_vec_elements (tree, tree, tree, gimple *, |
5ce9450f JJ |
1687 | gimple_stmt_iterator *); |
1688 | ||
62da9e14 RS |
1689 | /* STMT is a non-strided load or store, meaning that it accesses |
1690 | elements with a known constant step. Return -1 if that step | |
1691 | is negative, 0 if it is zero, and 1 if it is greater than zero. */ | |
1692 | ||
1693 | static int | |
1694 | compare_step_with_zero (gimple *stmt) | |
1695 | { | |
1696 | stmt_vec_info stmt_info = vinfo_for_stmt (stmt); | |
3f5e8a76 RS |
1697 | data_reference *dr = STMT_VINFO_DATA_REF (stmt_info); |
1698 | return tree_int_cst_compare (vect_dr_behavior (dr)->step, | |
1699 | size_zero_node); | |
62da9e14 RS |
1700 | } |
1701 | ||
1702 | /* If the target supports a permute mask that reverses the elements in | |
1703 | a vector of type VECTYPE, return that mask, otherwise return null. */ | |
1704 | ||
1705 | static tree | |
1706 | perm_mask_for_reverse (tree vectype) | |
1707 | { | |
1708 | int i, nunits; | |
1709 | unsigned char *sel; | |
1710 | ||
1711 | nunits = TYPE_VECTOR_SUBPARTS (vectype); | |
1712 | sel = XALLOCAVEC (unsigned char, nunits); | |
1713 | ||
1714 | for (i = 0; i < nunits; ++i) | |
1715 | sel[i] = nunits - 1 - i; | |
1716 | ||
1717 | if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel)) | |
1718 | return NULL_TREE; | |
1719 | return vect_gen_perm_mask_checked (vectype, sel); | |
1720 | } | |
5ce9450f | 1721 | |
2de001ee RS |
1722 | /* A subroutine of get_load_store_type, with a subset of the same |
1723 | arguments. Handle the case where STMT is part of a grouped load | |
1724 | or store. | |
1725 | ||
1726 | For stores, the statements in the group are all consecutive | |
1727 | and there is no gap at the end. For loads, the statements in the | |
1728 | group might not be consecutive; there can be gaps between statements | |
1729 | as well as at the end. */ | |
1730 | ||
1731 | static bool | |
1732 | get_group_load_store_type (gimple *stmt, tree vectype, bool slp, | |
1733 | vec_load_store_type vls_type, | |
1734 | vect_memory_access_type *memory_access_type) | |
1735 | { | |
1736 | stmt_vec_info stmt_info = vinfo_for_stmt (stmt); | |
1737 | vec_info *vinfo = stmt_info->vinfo; | |
1738 | loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); | |
1739 | struct loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL; | |
1740 | gimple *first_stmt = GROUP_FIRST_ELEMENT (stmt_info); | |
1741 | unsigned int group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt)); | |
1742 | bool single_element_p = (stmt == first_stmt | |
1743 | && !GROUP_NEXT_ELEMENT (stmt_info)); | |
1744 | unsigned HOST_WIDE_INT gap = GROUP_GAP (vinfo_for_stmt (first_stmt)); | |
522fcdd7 | 1745 | unsigned nunits = TYPE_VECTOR_SUBPARTS (vectype); |
2de001ee RS |
1746 | |
1747 | /* True if the vectorized statements would access beyond the last | |
1748 | statement in the group. */ | |
1749 | bool overrun_p = false; | |
1750 | ||
1751 | /* True if we can cope with such overrun by peeling for gaps, so that | |
1752 | there is at least one final scalar iteration after the vector loop. */ | |
1753 | bool can_overrun_p = (vls_type == VLS_LOAD && loop_vinfo && !loop->inner); | |
1754 | ||
1755 | /* There can only be a gap at the end of the group if the stride is | |
1756 | known at compile time. */ | |
1757 | gcc_assert (!STMT_VINFO_STRIDED_P (stmt_info) || gap == 0); | |
1758 | ||
1759 | /* Stores can't yet have gaps. */ | |
1760 | gcc_assert (slp || vls_type == VLS_LOAD || gap == 0); | |
1761 | ||
1762 | if (slp) | |
1763 | { | |
1764 | if (STMT_VINFO_STRIDED_P (stmt_info)) | |
1765 | { | |
1766 | /* Try to use consecutive accesses of GROUP_SIZE elements, | |
1767 | separated by the stride, until we have a complete vector. | |
1768 | Fall back to scalar accesses if that isn't possible. */ | |
1769 | if (nunits % group_size == 0) | |
1770 | *memory_access_type = VMAT_STRIDED_SLP; | |
1771 | else | |
1772 | *memory_access_type = VMAT_ELEMENTWISE; | |
1773 | } | |
1774 | else | |
1775 | { | |
1776 | overrun_p = loop_vinfo && gap != 0; | |
1777 | if (overrun_p && vls_type != VLS_LOAD) | |
1778 | { | |
1779 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
1780 | "Grouped store with gaps requires" | |
1781 | " non-consecutive accesses\n"); | |
1782 | return false; | |
1783 | } | |
f9ef2c76 RB |
1784 | /* If the access is aligned an overrun is fine. */ |
1785 | if (overrun_p | |
1786 | && aligned_access_p | |
1787 | (STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt)))) | |
1788 | overrun_p = false; | |
2de001ee RS |
1789 | if (overrun_p && !can_overrun_p) |
1790 | { | |
1791 | if (dump_enabled_p ()) | |
1792 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
1793 | "Peeling for outer loop is not supported\n"); | |
1794 | return false; | |
1795 | } | |
1796 | *memory_access_type = VMAT_CONTIGUOUS; | |
1797 | } | |
1798 | } | |
1799 | else | |
1800 | { | |
1801 | /* We can always handle this case using elementwise accesses, | |
1802 | but see if something more efficient is available. */ | |
1803 | *memory_access_type = VMAT_ELEMENTWISE; | |
1804 | ||
1805 | /* If there is a gap at the end of the group then these optimizations | |
1806 | would access excess elements in the last iteration. */ | |
1807 | bool would_overrun_p = (gap != 0); | |
522fcdd7 RB |
1808 | /* If the access is aligned an overrun is fine, but only if the |
1809 | overrun is not inside an unused vector (if the gap is as large | |
1810 | or larger than a vector). */ | |
f9ef2c76 | 1811 | if (would_overrun_p |
522fcdd7 RB |
1812 | && gap < nunits |
1813 | && aligned_access_p | |
1814 | (STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt)))) | |
f9ef2c76 | 1815 | would_overrun_p = false; |
2de001ee | 1816 | if (!STMT_VINFO_STRIDED_P (stmt_info) |
62da9e14 RS |
1817 | && (can_overrun_p || !would_overrun_p) |
1818 | && compare_step_with_zero (stmt) > 0) | |
2de001ee RS |
1819 | { |
1820 | /* First try using LOAD/STORE_LANES. */ | |
1821 | if (vls_type == VLS_LOAD | |
1822 | ? vect_load_lanes_supported (vectype, group_size) | |
1823 | : vect_store_lanes_supported (vectype, group_size)) | |
1824 | { | |
1825 | *memory_access_type = VMAT_LOAD_STORE_LANES; | |
1826 | overrun_p = would_overrun_p; | |
1827 | } | |
1828 | ||
1829 | /* If that fails, try using permuting loads. */ | |
1830 | if (*memory_access_type == VMAT_ELEMENTWISE | |
1831 | && (vls_type == VLS_LOAD | |
1832 | ? vect_grouped_load_supported (vectype, single_element_p, | |
1833 | group_size) | |
1834 | : vect_grouped_store_supported (vectype, group_size))) | |
1835 | { | |
1836 | *memory_access_type = VMAT_CONTIGUOUS_PERMUTE; | |
1837 | overrun_p = would_overrun_p; | |
1838 | } | |
1839 | } | |
1840 | } | |
1841 | ||
1842 | if (vls_type != VLS_LOAD && first_stmt == stmt) | |
1843 | { | |
1844 | /* STMT is the leader of the group. Check the operands of all the | |
1845 | stmts of the group. */ | |
1846 | gimple *next_stmt = GROUP_NEXT_ELEMENT (stmt_info); | |
1847 | while (next_stmt) | |
1848 | { | |
1849 | gcc_assert (gimple_assign_single_p (next_stmt)); | |
1850 | tree op = gimple_assign_rhs1 (next_stmt); | |
1851 | gimple *def_stmt; | |
1852 | enum vect_def_type dt; | |
1853 | if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt)) | |
1854 | { | |
1855 | if (dump_enabled_p ()) | |
1856 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
1857 | "use not simple.\n"); | |
1858 | return false; | |
1859 | } | |
1860 | next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt)); | |
1861 | } | |
1862 | } | |
1863 | ||
1864 | if (overrun_p) | |
1865 | { | |
1866 | gcc_assert (can_overrun_p); | |
1867 | if (dump_enabled_p ()) | |
1868 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
1869 | "Data access with gaps requires scalar " | |
1870 | "epilogue loop\n"); | |
1871 | LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = true; | |
1872 | } | |
1873 | ||
1874 | return true; | |
1875 | } | |
1876 | ||
62da9e14 RS |
1877 | /* A subroutine of get_load_store_type, with a subset of the same |
1878 | arguments. Handle the case where STMT is a load or store that | |
1879 | accesses consecutive elements with a negative step. */ | |
1880 | ||
1881 | static vect_memory_access_type | |
1882 | get_negative_load_store_type (gimple *stmt, tree vectype, | |
1883 | vec_load_store_type vls_type, | |
1884 | unsigned int ncopies) | |
1885 | { | |
1886 | stmt_vec_info stmt_info = vinfo_for_stmt (stmt); | |
1887 | struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info); | |
1888 | dr_alignment_support alignment_support_scheme; | |
1889 | ||
1890 | if (ncopies > 1) | |
1891 | { | |
1892 | if (dump_enabled_p ()) | |
1893 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
1894 | "multiple types with negative step.\n"); | |
1895 | return VMAT_ELEMENTWISE; | |
1896 | } | |
1897 | ||
1898 | alignment_support_scheme = vect_supportable_dr_alignment (dr, false); | |
1899 | if (alignment_support_scheme != dr_aligned | |
1900 | && alignment_support_scheme != dr_unaligned_supported) | |
1901 | { | |
1902 | if (dump_enabled_p ()) | |
1903 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
1904 | "negative step but alignment required.\n"); | |
1905 | return VMAT_ELEMENTWISE; | |
1906 | } | |
1907 | ||
1908 | if (vls_type == VLS_STORE_INVARIANT) | |
1909 | { | |
1910 | if (dump_enabled_p ()) | |
1911 | dump_printf_loc (MSG_NOTE, vect_location, | |
1912 | "negative step with invariant source;" | |
1913 | " no permute needed.\n"); | |
1914 | return VMAT_CONTIGUOUS_DOWN; | |
1915 | } | |
1916 | ||
1917 | if (!perm_mask_for_reverse (vectype)) | |
1918 | { | |
1919 | if (dump_enabled_p ()) | |
1920 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
1921 | "negative step and reversing not supported.\n"); | |
1922 | return VMAT_ELEMENTWISE; | |
1923 | } | |
1924 | ||
1925 | return VMAT_CONTIGUOUS_REVERSE; | |
1926 | } | |
1927 | ||
2de001ee RS |
1928 | /* Analyze load or store statement STMT of type VLS_TYPE. Return true |
1929 | if there is a memory access type that the vectorized form can use, | |
1930 | storing it in *MEMORY_ACCESS_TYPE if so. If we decide to use gathers | |
1931 | or scatters, fill in GS_INFO accordingly. | |
1932 | ||
1933 | SLP says whether we're performing SLP rather than loop vectorization. | |
62da9e14 RS |
1934 | VECTYPE is the vector type that the vectorized statements will use. |
1935 | NCOPIES is the number of vector statements that will be needed. */ | |
2de001ee RS |
1936 | |
1937 | static bool | |
1938 | get_load_store_type (gimple *stmt, tree vectype, bool slp, | |
62da9e14 | 1939 | vec_load_store_type vls_type, unsigned int ncopies, |
2de001ee RS |
1940 | vect_memory_access_type *memory_access_type, |
1941 | gather_scatter_info *gs_info) | |
1942 | { | |
1943 | stmt_vec_info stmt_info = vinfo_for_stmt (stmt); | |
1944 | vec_info *vinfo = stmt_info->vinfo; | |
1945 | loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); | |
1946 | if (STMT_VINFO_GATHER_SCATTER_P (stmt_info)) | |
1947 | { | |
1948 | *memory_access_type = VMAT_GATHER_SCATTER; | |
1949 | gimple *def_stmt; | |
1950 | if (!vect_check_gather_scatter (stmt, loop_vinfo, gs_info)) | |
1951 | gcc_unreachable (); | |
1952 | else if (!vect_is_simple_use (gs_info->offset, vinfo, &def_stmt, | |
1953 | &gs_info->offset_dt, | |
1954 | &gs_info->offset_vectype)) | |
1955 | { | |
1956 | if (dump_enabled_p ()) | |
1957 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
1958 | "%s index use not simple.\n", | |
1959 | vls_type == VLS_LOAD ? "gather" : "scatter"); | |
1960 | return false; | |
1961 | } | |
1962 | } | |
1963 | else if (STMT_VINFO_GROUPED_ACCESS (stmt_info)) | |
1964 | { | |
1965 | if (!get_group_load_store_type (stmt, vectype, slp, vls_type, | |
1966 | memory_access_type)) | |
1967 | return false; | |
1968 | } | |
1969 | else if (STMT_VINFO_STRIDED_P (stmt_info)) | |
1970 | { | |
1971 | gcc_assert (!slp); | |
1972 | *memory_access_type = VMAT_ELEMENTWISE; | |
1973 | } | |
1974 | else | |
62da9e14 RS |
1975 | { |
1976 | int cmp = compare_step_with_zero (stmt); | |
1977 | if (cmp < 0) | |
1978 | *memory_access_type = get_negative_load_store_type | |
1979 | (stmt, vectype, vls_type, ncopies); | |
1980 | else if (cmp == 0) | |
1981 | { | |
1982 | gcc_assert (vls_type == VLS_LOAD); | |
1983 | *memory_access_type = VMAT_INVARIANT; | |
1984 | } | |
1985 | else | |
1986 | *memory_access_type = VMAT_CONTIGUOUS; | |
1987 | } | |
2de001ee RS |
1988 | |
1989 | /* FIXME: At the moment the cost model seems to underestimate the | |
1990 | cost of using elementwise accesses. This check preserves the | |
1991 | traditional behavior until that can be fixed. */ | |
1992 | if (*memory_access_type == VMAT_ELEMENTWISE | |
1993 | && !STMT_VINFO_STRIDED_P (stmt_info)) | |
1994 | { | |
1995 | if (dump_enabled_p ()) | |
1996 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
1997 | "not falling back to elementwise accesses\n"); | |
1998 | return false; | |
1999 | } | |
2000 | return true; | |
2001 | } | |
2002 | ||
5ce9450f JJ |
2003 | /* Function vectorizable_mask_load_store. |
2004 | ||
2005 | Check if STMT performs a conditional load or store that can be vectorized. | |
2006 | If VEC_STMT is also passed, vectorize the STMT: create a vectorized | |
2007 | stmt to replace it, put it in VEC_STMT, and insert it at GSI. | |
2008 | Return FALSE if not a vectorizable STMT, TRUE otherwise. */ | |
2009 | ||
2010 | static bool | |
355fe088 TS |
2011 | vectorizable_mask_load_store (gimple *stmt, gimple_stmt_iterator *gsi, |
2012 | gimple **vec_stmt, slp_tree slp_node) | |
5ce9450f JJ |
2013 | { |
2014 | tree vec_dest = NULL; | |
2015 | stmt_vec_info stmt_info = vinfo_for_stmt (stmt); | |
2016 | stmt_vec_info prev_stmt_info; | |
2017 | loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); | |
2018 | struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); | |
2019 | bool nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt); | |
2020 | struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info); | |
2021 | tree vectype = STMT_VINFO_VECTYPE (stmt_info); | |
57e2f6ad | 2022 | tree rhs_vectype = NULL_TREE; |
045c1278 | 2023 | tree mask_vectype; |
5ce9450f | 2024 | tree elem_type; |
355fe088 | 2025 | gimple *new_stmt; |
5ce9450f JJ |
2026 | tree dummy; |
2027 | tree dataref_ptr = NULL_TREE; | |
355fe088 | 2028 | gimple *ptr_incr; |
5ce9450f JJ |
2029 | int nunits = TYPE_VECTOR_SUBPARTS (vectype); |
2030 | int ncopies; | |
2031 | int i, j; | |
2032 | bool inv_p; | |
134c85ca | 2033 | gather_scatter_info gs_info; |
2de001ee | 2034 | vec_load_store_type vls_type; |
5ce9450f | 2035 | tree mask; |
355fe088 | 2036 | gimple *def_stmt; |
5ce9450f JJ |
2037 | enum vect_def_type dt; |
2038 | ||
2039 | if (slp_node != NULL) | |
2040 | return false; | |
2041 | ||
2042 | ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits; | |
2043 | gcc_assert (ncopies >= 1); | |
2044 | ||
5ce9450f | 2045 | mask = gimple_call_arg (stmt, 2); |
045c1278 | 2046 | |
2568d8a1 | 2047 | if (!VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (mask))) |
5ce9450f JJ |
2048 | return false; |
2049 | ||
2050 | /* FORNOW. This restriction should be relaxed. */ | |
2051 | if (nested_in_vect_loop && ncopies > 1) | |
2052 | { | |
2053 | if (dump_enabled_p ()) | |
2054 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
2055 | "multiple types in nested loop."); | |
2056 | return false; | |
2057 | } | |
2058 | ||
2059 | if (!STMT_VINFO_RELEVANT_P (stmt_info)) | |
2060 | return false; | |
2061 | ||
66c16fd9 RB |
2062 | if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def |
2063 | && ! vec_stmt) | |
5ce9450f JJ |
2064 | return false; |
2065 | ||
2066 | if (!STMT_VINFO_DATA_REF (stmt_info)) | |
2067 | return false; | |
2068 | ||
2069 | elem_type = TREE_TYPE (vectype); | |
2070 | ||
045c1278 IE |
2071 | if (TREE_CODE (mask) != SSA_NAME) |
2072 | return false; | |
2073 | ||
2074 | if (!vect_is_simple_use (mask, loop_vinfo, &def_stmt, &dt, &mask_vectype)) | |
2075 | return false; | |
2076 | ||
2077 | if (!mask_vectype) | |
2078 | mask_vectype = get_mask_type_for_scalar_type (TREE_TYPE (vectype)); | |
2079 | ||
dc6a3147 IE |
2080 | if (!mask_vectype || !VECTOR_BOOLEAN_TYPE_P (mask_vectype) |
2081 | || TYPE_VECTOR_SUBPARTS (mask_vectype) != TYPE_VECTOR_SUBPARTS (vectype)) | |
045c1278 IE |
2082 | return false; |
2083 | ||
2de001ee | 2084 | if (gimple_call_internal_fn (stmt) == IFN_MASK_STORE) |
57e2f6ad IE |
2085 | { |
2086 | tree rhs = gimple_call_arg (stmt, 3); | |
2087 | if (!vect_is_simple_use (rhs, loop_vinfo, &def_stmt, &dt, &rhs_vectype)) | |
2088 | return false; | |
2de001ee RS |
2089 | if (dt == vect_constant_def || dt == vect_external_def) |
2090 | vls_type = VLS_STORE_INVARIANT; | |
2091 | else | |
2092 | vls_type = VLS_STORE; | |
57e2f6ad | 2093 | } |
2de001ee RS |
2094 | else |
2095 | vls_type = VLS_LOAD; | |
57e2f6ad | 2096 | |
2de001ee | 2097 | vect_memory_access_type memory_access_type; |
62da9e14 | 2098 | if (!get_load_store_type (stmt, vectype, false, vls_type, ncopies, |
2de001ee RS |
2099 | &memory_access_type, &gs_info)) |
2100 | return false; | |
03b9e8e4 | 2101 | |
2de001ee RS |
2102 | if (memory_access_type == VMAT_GATHER_SCATTER) |
2103 | { | |
134c85ca | 2104 | tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gs_info.decl)); |
03b9e8e4 JJ |
2105 | tree masktype |
2106 | = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (TREE_CHAIN (arglist)))); | |
2107 | if (TREE_CODE (masktype) == INTEGER_TYPE) | |
2108 | { | |
2109 | if (dump_enabled_p ()) | |
2110 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
2111 | "masked gather with integer mask not supported."); | |
2112 | return false; | |
2113 | } | |
5ce9450f | 2114 | } |
2de001ee RS |
2115 | else if (memory_access_type != VMAT_CONTIGUOUS) |
2116 | { | |
2117 | if (dump_enabled_p ()) | |
2118 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
2119 | "unsupported access type for masked %s.\n", | |
2120 | vls_type == VLS_LOAD ? "load" : "store"); | |
2121 | return false; | |
2122 | } | |
5ce9450f | 2123 | else if (!VECTOR_MODE_P (TYPE_MODE (vectype)) |
045c1278 IE |
2124 | || !can_vec_mask_load_store_p (TYPE_MODE (vectype), |
2125 | TYPE_MODE (mask_vectype), | |
2de001ee | 2126 | vls_type == VLS_LOAD) |
57e2f6ad IE |
2127 | || (rhs_vectype |
2128 | && !useless_type_conversion_p (vectype, rhs_vectype))) | |
5ce9450f JJ |
2129 | return false; |
2130 | ||
5ce9450f JJ |
2131 | if (!vec_stmt) /* transformation not required. */ |
2132 | { | |
2de001ee | 2133 | STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info) = memory_access_type; |
5ce9450f | 2134 | STMT_VINFO_TYPE (stmt_info) = call_vec_info_type; |
2de001ee RS |
2135 | if (vls_type == VLS_LOAD) |
2136 | vect_model_load_cost (stmt_info, ncopies, memory_access_type, | |
2137 | NULL, NULL, NULL); | |
5ce9450f | 2138 | else |
2de001ee RS |
2139 | vect_model_store_cost (stmt_info, ncopies, memory_access_type, |
2140 | dt, NULL, NULL, NULL); | |
5ce9450f JJ |
2141 | return true; |
2142 | } | |
2de001ee | 2143 | gcc_assert (memory_access_type == STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info)); |
5ce9450f | 2144 | |
67b8dbac | 2145 | /* Transform. */ |
5ce9450f | 2146 | |
2de001ee | 2147 | if (memory_access_type == VMAT_GATHER_SCATTER) |
5ce9450f JJ |
2148 | { |
2149 | tree vec_oprnd0 = NULL_TREE, op; | |
134c85ca | 2150 | tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gs_info.decl)); |
5ce9450f | 2151 | tree rettype, srctype, ptrtype, idxtype, masktype, scaletype; |
acdcd61b | 2152 | tree ptr, vec_mask = NULL_TREE, mask_op = NULL_TREE, var, scale; |
5ce9450f | 2153 | tree perm_mask = NULL_TREE, prev_res = NULL_TREE; |
acdcd61b | 2154 | tree mask_perm_mask = NULL_TREE; |
5ce9450f JJ |
2155 | edge pe = loop_preheader_edge (loop); |
2156 | gimple_seq seq; | |
2157 | basic_block new_bb; | |
2158 | enum { NARROW, NONE, WIDEN } modifier; | |
134c85ca | 2159 | int gather_off_nunits = TYPE_VECTOR_SUBPARTS (gs_info.offset_vectype); |
5ce9450f | 2160 | |
134c85ca | 2161 | rettype = TREE_TYPE (TREE_TYPE (gs_info.decl)); |
acdcd61b JJ |
2162 | srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist); |
2163 | ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist); | |
2164 | idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist); | |
2165 | masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist); | |
2166 | scaletype = TREE_VALUE (arglist); | |
2167 | gcc_checking_assert (types_compatible_p (srctype, rettype) | |
2168 | && types_compatible_p (srctype, masktype)); | |
2169 | ||
5ce9450f JJ |
2170 | if (nunits == gather_off_nunits) |
2171 | modifier = NONE; | |
2172 | else if (nunits == gather_off_nunits / 2) | |
2173 | { | |
2174 | unsigned char *sel = XALLOCAVEC (unsigned char, gather_off_nunits); | |
2175 | modifier = WIDEN; | |
2176 | ||
2177 | for (i = 0; i < gather_off_nunits; ++i) | |
2178 | sel[i] = i | nunits; | |
2179 | ||
134c85ca | 2180 | perm_mask = vect_gen_perm_mask_checked (gs_info.offset_vectype, sel); |
5ce9450f JJ |
2181 | } |
2182 | else if (nunits == gather_off_nunits * 2) | |
2183 | { | |
2184 | unsigned char *sel = XALLOCAVEC (unsigned char, nunits); | |
2185 | modifier = NARROW; | |
2186 | ||
2187 | for (i = 0; i < nunits; ++i) | |
2188 | sel[i] = i < gather_off_nunits | |
2189 | ? i : i + nunits - gather_off_nunits; | |
2190 | ||
557be5a8 | 2191 | perm_mask = vect_gen_perm_mask_checked (vectype, sel); |
5ce9450f | 2192 | ncopies *= 2; |
acdcd61b JJ |
2193 | for (i = 0; i < nunits; ++i) |
2194 | sel[i] = i | gather_off_nunits; | |
557be5a8 | 2195 | mask_perm_mask = vect_gen_perm_mask_checked (masktype, sel); |
5ce9450f JJ |
2196 | } |
2197 | else | |
2198 | gcc_unreachable (); | |
2199 | ||
5ce9450f JJ |
2200 | vec_dest = vect_create_destination_var (gimple_call_lhs (stmt), vectype); |
2201 | ||
134c85ca | 2202 | ptr = fold_convert (ptrtype, gs_info.base); |
5ce9450f JJ |
2203 | if (!is_gimple_min_invariant (ptr)) |
2204 | { | |
2205 | ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE); | |
2206 | new_bb = gsi_insert_seq_on_edge_immediate (pe, seq); | |
2207 | gcc_assert (!new_bb); | |
2208 | } | |
2209 | ||
134c85ca | 2210 | scale = build_int_cst (scaletype, gs_info.scale); |
5ce9450f JJ |
2211 | |
2212 | prev_stmt_info = NULL; | |
2213 | for (j = 0; j < ncopies; ++j) | |
2214 | { | |
2215 | if (modifier == WIDEN && (j & 1)) | |
2216 | op = permute_vec_elements (vec_oprnd0, vec_oprnd0, | |
2217 | perm_mask, stmt, gsi); | |
2218 | else if (j == 0) | |
2219 | op = vec_oprnd0 | |
134c85ca | 2220 | = vect_get_vec_def_for_operand (gs_info.offset, stmt); |
5ce9450f JJ |
2221 | else |
2222 | op = vec_oprnd0 | |
134c85ca | 2223 | = vect_get_vec_def_for_stmt_copy (gs_info.offset_dt, vec_oprnd0); |
5ce9450f JJ |
2224 | |
2225 | if (!useless_type_conversion_p (idxtype, TREE_TYPE (op))) | |
2226 | { | |
2227 | gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op)) | |
2228 | == TYPE_VECTOR_SUBPARTS (idxtype)); | |
0e22bb5a | 2229 | var = vect_get_new_ssa_name (idxtype, vect_simple_var); |
5ce9450f JJ |
2230 | op = build1 (VIEW_CONVERT_EXPR, idxtype, op); |
2231 | new_stmt | |
0d0e4a03 | 2232 | = gimple_build_assign (var, VIEW_CONVERT_EXPR, op); |
5ce9450f JJ |
2233 | vect_finish_stmt_generation (stmt, new_stmt, gsi); |
2234 | op = var; | |
2235 | } | |
2236 | ||
acdcd61b JJ |
2237 | if (mask_perm_mask && (j & 1)) |
2238 | mask_op = permute_vec_elements (mask_op, mask_op, | |
2239 | mask_perm_mask, stmt, gsi); | |
5ce9450f JJ |
2240 | else |
2241 | { | |
acdcd61b | 2242 | if (j == 0) |
81c40241 | 2243 | vec_mask = vect_get_vec_def_for_operand (mask, stmt); |
acdcd61b JJ |
2244 | else |
2245 | { | |
81c40241 | 2246 | vect_is_simple_use (vec_mask, loop_vinfo, &def_stmt, &dt); |
acdcd61b JJ |
2247 | vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask); |
2248 | } | |
5ce9450f | 2249 | |
acdcd61b JJ |
2250 | mask_op = vec_mask; |
2251 | if (!useless_type_conversion_p (masktype, TREE_TYPE (vec_mask))) | |
2252 | { | |
2253 | gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (mask_op)) | |
2254 | == TYPE_VECTOR_SUBPARTS (masktype)); | |
0e22bb5a | 2255 | var = vect_get_new_ssa_name (masktype, vect_simple_var); |
acdcd61b JJ |
2256 | mask_op = build1 (VIEW_CONVERT_EXPR, masktype, mask_op); |
2257 | new_stmt | |
0d0e4a03 | 2258 | = gimple_build_assign (var, VIEW_CONVERT_EXPR, mask_op); |
acdcd61b JJ |
2259 | vect_finish_stmt_generation (stmt, new_stmt, gsi); |
2260 | mask_op = var; | |
2261 | } | |
5ce9450f JJ |
2262 | } |
2263 | ||
2264 | new_stmt | |
134c85ca | 2265 | = gimple_build_call (gs_info.decl, 5, mask_op, ptr, op, mask_op, |
5ce9450f JJ |
2266 | scale); |
2267 | ||
2268 | if (!useless_type_conversion_p (vectype, rettype)) | |
2269 | { | |
2270 | gcc_assert (TYPE_VECTOR_SUBPARTS (vectype) | |
2271 | == TYPE_VECTOR_SUBPARTS (rettype)); | |
0e22bb5a | 2272 | op = vect_get_new_ssa_name (rettype, vect_simple_var); |
5ce9450f JJ |
2273 | gimple_call_set_lhs (new_stmt, op); |
2274 | vect_finish_stmt_generation (stmt, new_stmt, gsi); | |
b731b390 | 2275 | var = make_ssa_name (vec_dest); |
5ce9450f | 2276 | op = build1 (VIEW_CONVERT_EXPR, vectype, op); |
0d0e4a03 | 2277 | new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op); |
5ce9450f JJ |
2278 | } |
2279 | else | |
2280 | { | |
2281 | var = make_ssa_name (vec_dest, new_stmt); | |
2282 | gimple_call_set_lhs (new_stmt, var); | |
2283 | } | |
2284 | ||
2285 | vect_finish_stmt_generation (stmt, new_stmt, gsi); | |
2286 | ||
2287 | if (modifier == NARROW) | |
2288 | { | |
2289 | if ((j & 1) == 0) | |
2290 | { | |
2291 | prev_res = var; | |
2292 | continue; | |
2293 | } | |
2294 | var = permute_vec_elements (prev_res, var, | |
2295 | perm_mask, stmt, gsi); | |
2296 | new_stmt = SSA_NAME_DEF_STMT (var); | |
2297 | } | |
2298 | ||
2299 | if (prev_stmt_info == NULL) | |
2300 | STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt; | |
2301 | else | |
2302 | STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt; | |
2303 | prev_stmt_info = vinfo_for_stmt (new_stmt); | |
2304 | } | |
3efe2e2c JJ |
2305 | |
2306 | /* Ensure that even with -fno-tree-dce the scalar MASK_LOAD is removed | |
2307 | from the IL. */ | |
e6f5c25d IE |
2308 | if (STMT_VINFO_RELATED_STMT (stmt_info)) |
2309 | { | |
2310 | stmt = STMT_VINFO_RELATED_STMT (stmt_info); | |
2311 | stmt_info = vinfo_for_stmt (stmt); | |
2312 | } | |
3efe2e2c JJ |
2313 | tree lhs = gimple_call_lhs (stmt); |
2314 | new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs))); | |
2315 | set_vinfo_for_stmt (new_stmt, stmt_info); | |
2316 | set_vinfo_for_stmt (stmt, NULL); | |
2317 | STMT_VINFO_STMT (stmt_info) = new_stmt; | |
2318 | gsi_replace (gsi, new_stmt, true); | |
5ce9450f JJ |
2319 | return true; |
2320 | } | |
2de001ee | 2321 | else if (vls_type != VLS_LOAD) |
5ce9450f JJ |
2322 | { |
2323 | tree vec_rhs = NULL_TREE, vec_mask = NULL_TREE; | |
2324 | prev_stmt_info = NULL; | |
2d4dc223 | 2325 | LOOP_VINFO_HAS_MASK_STORE (loop_vinfo) = true; |
5ce9450f JJ |
2326 | for (i = 0; i < ncopies; i++) |
2327 | { | |
2328 | unsigned align, misalign; | |
2329 | ||
2330 | if (i == 0) | |
2331 | { | |
2332 | tree rhs = gimple_call_arg (stmt, 3); | |
81c40241 RB |
2333 | vec_rhs = vect_get_vec_def_for_operand (rhs, stmt); |
2334 | vec_mask = vect_get_vec_def_for_operand (mask, stmt); | |
5ce9450f JJ |
2335 | /* We should have catched mismatched types earlier. */ |
2336 | gcc_assert (useless_type_conversion_p (vectype, | |
2337 | TREE_TYPE (vec_rhs))); | |
2338 | dataref_ptr = vect_create_data_ref_ptr (stmt, vectype, NULL, | |
2339 | NULL_TREE, &dummy, gsi, | |
2340 | &ptr_incr, false, &inv_p); | |
2341 | gcc_assert (!inv_p); | |
2342 | } | |
2343 | else | |
2344 | { | |
81c40241 | 2345 | vect_is_simple_use (vec_rhs, loop_vinfo, &def_stmt, &dt); |
5ce9450f | 2346 | vec_rhs = vect_get_vec_def_for_stmt_copy (dt, vec_rhs); |
81c40241 | 2347 | vect_is_simple_use (vec_mask, loop_vinfo, &def_stmt, &dt); |
5ce9450f JJ |
2348 | vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask); |
2349 | dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt, | |
2350 | TYPE_SIZE_UNIT (vectype)); | |
2351 | } | |
2352 | ||
2353 | align = TYPE_ALIGN_UNIT (vectype); | |
2354 | if (aligned_access_p (dr)) | |
2355 | misalign = 0; | |
2356 | else if (DR_MISALIGNMENT (dr) == -1) | |
2357 | { | |
2358 | align = TYPE_ALIGN_UNIT (elem_type); | |
2359 | misalign = 0; | |
2360 | } | |
2361 | else | |
2362 | misalign = DR_MISALIGNMENT (dr); | |
2363 | set_ptr_info_alignment (get_ptr_info (dataref_ptr), align, | |
2364 | misalign); | |
08554c26 | 2365 | tree ptr = build_int_cst (TREE_TYPE (gimple_call_arg (stmt, 1)), |
146ec50f | 2366 | misalign ? least_bit_hwi (misalign) : align); |
a844293d | 2367 | gcall *call |
5ce9450f | 2368 | = gimple_build_call_internal (IFN_MASK_STORE, 4, dataref_ptr, |
08554c26 | 2369 | ptr, vec_mask, vec_rhs); |
a844293d RS |
2370 | gimple_call_set_nothrow (call, true); |
2371 | new_stmt = call; | |
5ce9450f JJ |
2372 | vect_finish_stmt_generation (stmt, new_stmt, gsi); |
2373 | if (i == 0) | |
2374 | STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt; | |
2375 | else | |
2376 | STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt; | |
2377 | prev_stmt_info = vinfo_for_stmt (new_stmt); | |
2378 | } | |
2379 | } | |
2380 | else | |
2381 | { | |
2382 | tree vec_mask = NULL_TREE; | |
2383 | prev_stmt_info = NULL; | |
2384 | vec_dest = vect_create_destination_var (gimple_call_lhs (stmt), vectype); | |
2385 | for (i = 0; i < ncopies; i++) | |
2386 | { | |
2387 | unsigned align, misalign; | |
2388 | ||
2389 | if (i == 0) | |
2390 | { | |
81c40241 | 2391 | vec_mask = vect_get_vec_def_for_operand (mask, stmt); |
5ce9450f JJ |
2392 | dataref_ptr = vect_create_data_ref_ptr (stmt, vectype, NULL, |
2393 | NULL_TREE, &dummy, gsi, | |
2394 | &ptr_incr, false, &inv_p); | |
2395 | gcc_assert (!inv_p); | |
2396 | } | |
2397 | else | |
2398 | { | |
81c40241 | 2399 | vect_is_simple_use (vec_mask, loop_vinfo, &def_stmt, &dt); |
5ce9450f JJ |
2400 | vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask); |
2401 | dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt, | |
2402 | TYPE_SIZE_UNIT (vectype)); | |
2403 | } | |
2404 | ||
2405 | align = TYPE_ALIGN_UNIT (vectype); | |
2406 | if (aligned_access_p (dr)) | |
2407 | misalign = 0; | |
2408 | else if (DR_MISALIGNMENT (dr) == -1) | |
2409 | { | |
2410 | align = TYPE_ALIGN_UNIT (elem_type); | |
2411 | misalign = 0; | |
2412 | } | |
2413 | else | |
2414 | misalign = DR_MISALIGNMENT (dr); | |
2415 | set_ptr_info_alignment (get_ptr_info (dataref_ptr), align, | |
2416 | misalign); | |
08554c26 | 2417 | tree ptr = build_int_cst (TREE_TYPE (gimple_call_arg (stmt, 1)), |
146ec50f | 2418 | misalign ? least_bit_hwi (misalign) : align); |
a844293d | 2419 | gcall *call |
5ce9450f | 2420 | = gimple_build_call_internal (IFN_MASK_LOAD, 3, dataref_ptr, |
08554c26 | 2421 | ptr, vec_mask); |
a844293d RS |
2422 | gimple_call_set_lhs (call, make_ssa_name (vec_dest)); |
2423 | gimple_call_set_nothrow (call, true); | |
2424 | vect_finish_stmt_generation (stmt, call, gsi); | |
5ce9450f | 2425 | if (i == 0) |
a844293d | 2426 | STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = call; |
5ce9450f | 2427 | else |
a844293d RS |
2428 | STMT_VINFO_RELATED_STMT (prev_stmt_info) = call; |
2429 | prev_stmt_info = vinfo_for_stmt (call); | |
5ce9450f JJ |
2430 | } |
2431 | } | |
2432 | ||
2de001ee | 2433 | if (vls_type == VLS_LOAD) |
3efe2e2c JJ |
2434 | { |
2435 | /* Ensure that even with -fno-tree-dce the scalar MASK_LOAD is removed | |
2436 | from the IL. */ | |
e6f5c25d IE |
2437 | if (STMT_VINFO_RELATED_STMT (stmt_info)) |
2438 | { | |
2439 | stmt = STMT_VINFO_RELATED_STMT (stmt_info); | |
2440 | stmt_info = vinfo_for_stmt (stmt); | |
2441 | } | |
3efe2e2c JJ |
2442 | tree lhs = gimple_call_lhs (stmt); |
2443 | new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs))); | |
2444 | set_vinfo_for_stmt (new_stmt, stmt_info); | |
2445 | set_vinfo_for_stmt (stmt, NULL); | |
2446 | STMT_VINFO_STMT (stmt_info) = new_stmt; | |
2447 | gsi_replace (gsi, new_stmt, true); | |
2448 | } | |
2449 | ||
5ce9450f JJ |
2450 | return true; |
2451 | } | |
2452 | ||
37b14185 RB |
2453 | /* Check and perform vectorization of BUILT_IN_BSWAP{16,32,64}. */ |
2454 | ||
2455 | static bool | |
2456 | vectorizable_bswap (gimple *stmt, gimple_stmt_iterator *gsi, | |
2457 | gimple **vec_stmt, slp_tree slp_node, | |
2458 | tree vectype_in, enum vect_def_type *dt) | |
2459 | { | |
2460 | tree op, vectype; | |
2461 | stmt_vec_info stmt_info = vinfo_for_stmt (stmt); | |
2462 | loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); | |
2463 | unsigned ncopies, nunits; | |
2464 | ||
2465 | op = gimple_call_arg (stmt, 0); | |
2466 | vectype = STMT_VINFO_VECTYPE (stmt_info); | |
2467 | nunits = TYPE_VECTOR_SUBPARTS (vectype); | |
2468 | ||
2469 | /* Multiple types in SLP are handled by creating the appropriate number of | |
2470 | vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in | |
2471 | case of SLP. */ | |
2472 | if (slp_node) | |
2473 | ncopies = 1; | |
2474 | else | |
2475 | ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits; | |
2476 | ||
2477 | gcc_assert (ncopies >= 1); | |
2478 | ||
2479 | tree char_vectype = get_same_sized_vectype (char_type_node, vectype_in); | |
2480 | if (! char_vectype) | |
2481 | return false; | |
2482 | ||
2483 | unsigned char *elts | |
2484 | = XALLOCAVEC (unsigned char, TYPE_VECTOR_SUBPARTS (char_vectype)); | |
2485 | unsigned char *elt = elts; | |
2486 | unsigned word_bytes = TYPE_VECTOR_SUBPARTS (char_vectype) / nunits; | |
2487 | for (unsigned i = 0; i < nunits; ++i) | |
2488 | for (unsigned j = 0; j < word_bytes; ++j) | |
2489 | *elt++ = (i + 1) * word_bytes - j - 1; | |
2490 | ||
2491 | if (! can_vec_perm_p (TYPE_MODE (char_vectype), false, elts)) | |
2492 | return false; | |
2493 | ||
2494 | if (! vec_stmt) | |
2495 | { | |
2496 | STMT_VINFO_TYPE (stmt_info) = call_vec_info_type; | |
2497 | if (dump_enabled_p ()) | |
2498 | dump_printf_loc (MSG_NOTE, vect_location, "=== vectorizable_bswap ===" | |
2499 | "\n"); | |
2500 | if (! PURE_SLP_STMT (stmt_info)) | |
2501 | { | |
2502 | add_stmt_cost (stmt_info->vinfo->target_cost_data, | |
2503 | 1, vector_stmt, stmt_info, 0, vect_prologue); | |
2504 | add_stmt_cost (stmt_info->vinfo->target_cost_data, | |
2505 | ncopies, vec_perm, stmt_info, 0, vect_body); | |
2506 | } | |
2507 | return true; | |
2508 | } | |
2509 | ||
2510 | tree *telts = XALLOCAVEC (tree, TYPE_VECTOR_SUBPARTS (char_vectype)); | |
2511 | for (unsigned i = 0; i < TYPE_VECTOR_SUBPARTS (char_vectype); ++i) | |
2512 | telts[i] = build_int_cst (char_type_node, elts[i]); | |
2513 | tree bswap_vconst = build_vector (char_vectype, telts); | |
2514 | ||
2515 | /* Transform. */ | |
2516 | vec<tree> vec_oprnds = vNULL; | |
2517 | gimple *new_stmt = NULL; | |
2518 | stmt_vec_info prev_stmt_info = NULL; | |
2519 | for (unsigned j = 0; j < ncopies; j++) | |
2520 | { | |
2521 | /* Handle uses. */ | |
2522 | if (j == 0) | |
306b0c92 | 2523 | vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node); |
37b14185 RB |
2524 | else |
2525 | vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds, NULL); | |
2526 | ||
2527 | /* Arguments are ready. create the new vector stmt. */ | |
2528 | unsigned i; | |
2529 | tree vop; | |
2530 | FOR_EACH_VEC_ELT (vec_oprnds, i, vop) | |
2531 | { | |
2532 | tree tem = make_ssa_name (char_vectype); | |
2533 | new_stmt = gimple_build_assign (tem, build1 (VIEW_CONVERT_EXPR, | |
2534 | char_vectype, vop)); | |
2535 | vect_finish_stmt_generation (stmt, new_stmt, gsi); | |
2536 | tree tem2 = make_ssa_name (char_vectype); | |
2537 | new_stmt = gimple_build_assign (tem2, VEC_PERM_EXPR, | |
2538 | tem, tem, bswap_vconst); | |
2539 | vect_finish_stmt_generation (stmt, new_stmt, gsi); | |
2540 | tem = make_ssa_name (vectype); | |
2541 | new_stmt = gimple_build_assign (tem, build1 (VIEW_CONVERT_EXPR, | |
2542 | vectype, tem2)); | |
2543 | vect_finish_stmt_generation (stmt, new_stmt, gsi); | |
2544 | if (slp_node) | |
2545 | SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt); | |
2546 | } | |
2547 | ||
2548 | if (slp_node) | |
2549 | continue; | |
2550 | ||
2551 | if (j == 0) | |
2552 | STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt; | |
2553 | else | |
2554 | STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt; | |
2555 | ||
2556 | prev_stmt_info = vinfo_for_stmt (new_stmt); | |
2557 | } | |
2558 | ||
2559 | vec_oprnds.release (); | |
2560 | return true; | |
2561 | } | |
2562 | ||
b1b6836e RS |
2563 | /* Return true if vector types VECTYPE_IN and VECTYPE_OUT have |
2564 | integer elements and if we can narrow VECTYPE_IN to VECTYPE_OUT | |
2565 | in a single step. On success, store the binary pack code in | |
2566 | *CONVERT_CODE. */ | |
2567 | ||
2568 | static bool | |
2569 | simple_integer_narrowing (tree vectype_out, tree vectype_in, | |
2570 | tree_code *convert_code) | |
2571 | { | |
2572 | if (!INTEGRAL_TYPE_P (TREE_TYPE (vectype_out)) | |
2573 | || !INTEGRAL_TYPE_P (TREE_TYPE (vectype_in))) | |
2574 | return false; | |
2575 | ||
2576 | tree_code code; | |
2577 | int multi_step_cvt = 0; | |
2578 | auto_vec <tree, 8> interm_types; | |
2579 | if (!supportable_narrowing_operation (NOP_EXPR, vectype_out, vectype_in, | |
2580 | &code, &multi_step_cvt, | |
2581 | &interm_types) | |
2582 | || multi_step_cvt) | |
2583 | return false; | |
2584 | ||
2585 | *convert_code = code; | |
2586 | return true; | |
2587 | } | |
5ce9450f | 2588 | |
ebfd146a IR |
2589 | /* Function vectorizable_call. |
2590 | ||
538dd0b7 | 2591 | Check if GS performs a function call that can be vectorized. |
b8698a0f | 2592 | If VEC_STMT is also passed, vectorize the STMT: create a vectorized |
ebfd146a IR |
2593 | stmt to replace it, put it in VEC_STMT, and insert it at BSI. |
2594 | Return FALSE if not a vectorizable STMT, TRUE otherwise. */ | |
2595 | ||
2596 | static bool | |
355fe088 | 2597 | vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi, gimple **vec_stmt, |
190c2236 | 2598 | slp_tree slp_node) |
ebfd146a | 2599 | { |
538dd0b7 | 2600 | gcall *stmt; |
ebfd146a IR |
2601 | tree vec_dest; |
2602 | tree scalar_dest; | |
2603 | tree op, type; | |
2604 | tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE; | |
538dd0b7 | 2605 | stmt_vec_info stmt_info = vinfo_for_stmt (gs), prev_stmt_info; |
ebfd146a IR |
2606 | tree vectype_out, vectype_in; |
2607 | int nunits_in; | |
2608 | int nunits_out; | |
2609 | loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); | |
190c2236 | 2610 | bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info); |
310213d4 | 2611 | vec_info *vinfo = stmt_info->vinfo; |
81c40241 | 2612 | tree fndecl, new_temp, rhs_type; |
355fe088 | 2613 | gimple *def_stmt; |
0502fb85 UB |
2614 | enum vect_def_type dt[3] |
2615 | = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type}; | |
4fc5ebf1 | 2616 | int ndts = 3; |
355fe088 | 2617 | gimple *new_stmt = NULL; |
ebfd146a | 2618 | int ncopies, j; |
6e1aa848 | 2619 | vec<tree> vargs = vNULL; |
ebfd146a IR |
2620 | enum { NARROW, NONE, WIDEN } modifier; |
2621 | size_t i, nargs; | |
9d5e7640 | 2622 | tree lhs; |
ebfd146a | 2623 | |
190c2236 | 2624 | if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo) |
ebfd146a IR |
2625 | return false; |
2626 | ||
66c16fd9 RB |
2627 | if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def |
2628 | && ! vec_stmt) | |
ebfd146a IR |
2629 | return false; |
2630 | ||
538dd0b7 DM |
2631 | /* Is GS a vectorizable call? */ |
2632 | stmt = dyn_cast <gcall *> (gs); | |
2633 | if (!stmt) | |
ebfd146a IR |
2634 | return false; |
2635 | ||
5ce9450f JJ |
2636 | if (gimple_call_internal_p (stmt) |
2637 | && (gimple_call_internal_fn (stmt) == IFN_MASK_LOAD | |
2638 | || gimple_call_internal_fn (stmt) == IFN_MASK_STORE)) | |
2639 | return vectorizable_mask_load_store (stmt, gsi, vec_stmt, | |
2640 | slp_node); | |
2641 | ||
0136f8f0 AH |
2642 | if (gimple_call_lhs (stmt) == NULL_TREE |
2643 | || TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME) | |
ebfd146a IR |
2644 | return false; |
2645 | ||
0136f8f0 | 2646 | gcc_checking_assert (!stmt_can_throw_internal (stmt)); |
5a2c1986 | 2647 | |
b690cc0f RG |
2648 | vectype_out = STMT_VINFO_VECTYPE (stmt_info); |
2649 | ||
ebfd146a IR |
2650 | /* Process function arguments. */ |
2651 | rhs_type = NULL_TREE; | |
b690cc0f | 2652 | vectype_in = NULL_TREE; |
ebfd146a IR |
2653 | nargs = gimple_call_num_args (stmt); |
2654 | ||
1b1562a5 MM |
2655 | /* Bail out if the function has more than three arguments, we do not have |
2656 | interesting builtin functions to vectorize with more than two arguments | |
2657 | except for fma. No arguments is also not good. */ | |
2658 | if (nargs == 0 || nargs > 3) | |
ebfd146a IR |
2659 | return false; |
2660 | ||
74bf76ed JJ |
2661 | /* Ignore the argument of IFN_GOMP_SIMD_LANE, it is magic. */ |
2662 | if (gimple_call_internal_p (stmt) | |
2663 | && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE) | |
2664 | { | |
2665 | nargs = 0; | |
2666 | rhs_type = unsigned_type_node; | |
2667 | } | |
2668 | ||
ebfd146a IR |
2669 | for (i = 0; i < nargs; i++) |
2670 | { | |
b690cc0f RG |
2671 | tree opvectype; |
2672 | ||
ebfd146a IR |
2673 | op = gimple_call_arg (stmt, i); |
2674 | ||
2675 | /* We can only handle calls with arguments of the same type. */ | |
2676 | if (rhs_type | |
8533c9d8 | 2677 | && !types_compatible_p (rhs_type, TREE_TYPE (op))) |
ebfd146a | 2678 | { |
73fbfcad | 2679 | if (dump_enabled_p ()) |
78c60e3d | 2680 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 2681 | "argument types differ.\n"); |
ebfd146a IR |
2682 | return false; |
2683 | } | |
b690cc0f RG |
2684 | if (!rhs_type) |
2685 | rhs_type = TREE_TYPE (op); | |
ebfd146a | 2686 | |
81c40241 | 2687 | if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt[i], &opvectype)) |
ebfd146a | 2688 | { |
73fbfcad | 2689 | if (dump_enabled_p ()) |
78c60e3d | 2690 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 2691 | "use not simple.\n"); |
ebfd146a IR |
2692 | return false; |
2693 | } | |
ebfd146a | 2694 | |
b690cc0f RG |
2695 | if (!vectype_in) |
2696 | vectype_in = opvectype; | |
2697 | else if (opvectype | |
2698 | && opvectype != vectype_in) | |
2699 | { | |
73fbfcad | 2700 | if (dump_enabled_p ()) |
78c60e3d | 2701 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 2702 | "argument vector types differ.\n"); |
b690cc0f RG |
2703 | return false; |
2704 | } | |
2705 | } | |
2706 | /* If all arguments are external or constant defs use a vector type with | |
2707 | the same size as the output vector type. */ | |
ebfd146a | 2708 | if (!vectype_in) |
b690cc0f | 2709 | vectype_in = get_same_sized_vectype (rhs_type, vectype_out); |
7d8930a0 IR |
2710 | if (vec_stmt) |
2711 | gcc_assert (vectype_in); | |
2712 | if (!vectype_in) | |
2713 | { | |
73fbfcad | 2714 | if (dump_enabled_p ()) |
7d8930a0 | 2715 | { |
78c60e3d SS |
2716 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
2717 | "no vectype for scalar type "); | |
2718 | dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type); | |
e645e942 | 2719 | dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); |
7d8930a0 IR |
2720 | } |
2721 | ||
2722 | return false; | |
2723 | } | |
ebfd146a IR |
2724 | |
2725 | /* FORNOW */ | |
b690cc0f RG |
2726 | nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in); |
2727 | nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out); | |
ebfd146a IR |
2728 | if (nunits_in == nunits_out / 2) |
2729 | modifier = NARROW; | |
2730 | else if (nunits_out == nunits_in) | |
2731 | modifier = NONE; | |
2732 | else if (nunits_out == nunits_in / 2) | |
2733 | modifier = WIDEN; | |
2734 | else | |
2735 | return false; | |
2736 | ||
70439f0d RS |
2737 | /* We only handle functions that do not read or clobber memory. */ |
2738 | if (gimple_vuse (stmt)) | |
2739 | { | |
2740 | if (dump_enabled_p ()) | |
2741 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
2742 | "function reads from or writes to memory.\n"); | |
2743 | return false; | |
2744 | } | |
2745 | ||
ebfd146a IR |
2746 | /* For now, we only vectorize functions if a target specific builtin |
2747 | is available. TODO -- in some cases, it might be profitable to | |
2748 | insert the calls for pieces of the vector, in order to be able | |
2749 | to vectorize other operations in the loop. */ | |
70439f0d RS |
2750 | fndecl = NULL_TREE; |
2751 | internal_fn ifn = IFN_LAST; | |
2752 | combined_fn cfn = gimple_call_combined_fn (stmt); | |
2753 | tree callee = gimple_call_fndecl (stmt); | |
2754 | ||
2755 | /* First try using an internal function. */ | |
b1b6836e RS |
2756 | tree_code convert_code = ERROR_MARK; |
2757 | if (cfn != CFN_LAST | |
2758 | && (modifier == NONE | |
2759 | || (modifier == NARROW | |
2760 | && simple_integer_narrowing (vectype_out, vectype_in, | |
2761 | &convert_code)))) | |
70439f0d RS |
2762 | ifn = vectorizable_internal_function (cfn, callee, vectype_out, |
2763 | vectype_in); | |
2764 | ||
2765 | /* If that fails, try asking for a target-specific built-in function. */ | |
2766 | if (ifn == IFN_LAST) | |
2767 | { | |
2768 | if (cfn != CFN_LAST) | |
2769 | fndecl = targetm.vectorize.builtin_vectorized_function | |
2770 | (cfn, vectype_out, vectype_in); | |
2771 | else | |
2772 | fndecl = targetm.vectorize.builtin_md_vectorized_function | |
2773 | (callee, vectype_out, vectype_in); | |
2774 | } | |
2775 | ||
2776 | if (ifn == IFN_LAST && !fndecl) | |
ebfd146a | 2777 | { |
70439f0d | 2778 | if (cfn == CFN_GOMP_SIMD_LANE |
74bf76ed JJ |
2779 | && !slp_node |
2780 | && loop_vinfo | |
2781 | && LOOP_VINFO_LOOP (loop_vinfo)->simduid | |
2782 | && TREE_CODE (gimple_call_arg (stmt, 0)) == SSA_NAME | |
2783 | && LOOP_VINFO_LOOP (loop_vinfo)->simduid | |
2784 | == SSA_NAME_VAR (gimple_call_arg (stmt, 0))) | |
2785 | { | |
2786 | /* We can handle IFN_GOMP_SIMD_LANE by returning a | |
2787 | { 0, 1, 2, ... vf - 1 } vector. */ | |
2788 | gcc_assert (nargs == 0); | |
2789 | } | |
37b14185 RB |
2790 | else if (modifier == NONE |
2791 | && (gimple_call_builtin_p (stmt, BUILT_IN_BSWAP16) | |
2792 | || gimple_call_builtin_p (stmt, BUILT_IN_BSWAP32) | |
2793 | || gimple_call_builtin_p (stmt, BUILT_IN_BSWAP64))) | |
2794 | return vectorizable_bswap (stmt, gsi, vec_stmt, slp_node, | |
2795 | vectype_in, dt); | |
74bf76ed JJ |
2796 | else |
2797 | { | |
2798 | if (dump_enabled_p ()) | |
2799 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
e645e942 | 2800 | "function is not vectorizable.\n"); |
74bf76ed JJ |
2801 | return false; |
2802 | } | |
ebfd146a IR |
2803 | } |
2804 | ||
fce57248 | 2805 | if (slp_node) |
190c2236 | 2806 | ncopies = 1; |
b1b6836e | 2807 | else if (modifier == NARROW && ifn == IFN_LAST) |
ebfd146a IR |
2808 | ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out; |
2809 | else | |
2810 | ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in; | |
2811 | ||
2812 | /* Sanity check: make sure that at least one copy of the vectorized stmt | |
2813 | needs to be generated. */ | |
2814 | gcc_assert (ncopies >= 1); | |
2815 | ||
2816 | if (!vec_stmt) /* transformation not required. */ | |
2817 | { | |
2818 | STMT_VINFO_TYPE (stmt_info) = call_vec_info_type; | |
73fbfcad | 2819 | if (dump_enabled_p ()) |
e645e942 TJ |
2820 | dump_printf_loc (MSG_NOTE, vect_location, "=== vectorizable_call ===" |
2821 | "\n"); | |
4fc5ebf1 | 2822 | vect_model_simple_cost (stmt_info, ncopies, dt, ndts, NULL, NULL); |
b1b6836e RS |
2823 | if (ifn != IFN_LAST && modifier == NARROW && !slp_node) |
2824 | add_stmt_cost (stmt_info->vinfo->target_cost_data, ncopies / 2, | |
2825 | vec_promote_demote, stmt_info, 0, vect_body); | |
2826 | ||
ebfd146a IR |
2827 | return true; |
2828 | } | |
2829 | ||
67b8dbac | 2830 | /* Transform. */ |
ebfd146a | 2831 | |
73fbfcad | 2832 | if (dump_enabled_p ()) |
e645e942 | 2833 | dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n"); |
ebfd146a IR |
2834 | |
2835 | /* Handle def. */ | |
2836 | scalar_dest = gimple_call_lhs (stmt); | |
2837 | vec_dest = vect_create_destination_var (scalar_dest, vectype_out); | |
2838 | ||
2839 | prev_stmt_info = NULL; | |
b1b6836e | 2840 | if (modifier == NONE || ifn != IFN_LAST) |
ebfd146a | 2841 | { |
b1b6836e | 2842 | tree prev_res = NULL_TREE; |
ebfd146a IR |
2843 | for (j = 0; j < ncopies; ++j) |
2844 | { | |
2845 | /* Build argument list for the vectorized call. */ | |
2846 | if (j == 0) | |
9771b263 | 2847 | vargs.create (nargs); |
ebfd146a | 2848 | else |
9771b263 | 2849 | vargs.truncate (0); |
ebfd146a | 2850 | |
190c2236 JJ |
2851 | if (slp_node) |
2852 | { | |
ef062b13 | 2853 | auto_vec<vec<tree> > vec_defs (nargs); |
9771b263 | 2854 | vec<tree> vec_oprnds0; |
190c2236 JJ |
2855 | |
2856 | for (i = 0; i < nargs; i++) | |
9771b263 | 2857 | vargs.quick_push (gimple_call_arg (stmt, i)); |
306b0c92 | 2858 | vect_get_slp_defs (vargs, slp_node, &vec_defs); |
37b5ec8f | 2859 | vec_oprnds0 = vec_defs[0]; |
190c2236 JJ |
2860 | |
2861 | /* Arguments are ready. Create the new vector stmt. */ | |
9771b263 | 2862 | FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_oprnd0) |
190c2236 JJ |
2863 | { |
2864 | size_t k; | |
2865 | for (k = 0; k < nargs; k++) | |
2866 | { | |
37b5ec8f | 2867 | vec<tree> vec_oprndsk = vec_defs[k]; |
9771b263 | 2868 | vargs[k] = vec_oprndsk[i]; |
190c2236 | 2869 | } |
b1b6836e RS |
2870 | if (modifier == NARROW) |
2871 | { | |
2872 | tree half_res = make_ssa_name (vectype_in); | |
a844293d RS |
2873 | gcall *call |
2874 | = gimple_build_call_internal_vec (ifn, vargs); | |
2875 | gimple_call_set_lhs (call, half_res); | |
2876 | gimple_call_set_nothrow (call, true); | |
2877 | new_stmt = call; | |
b1b6836e RS |
2878 | vect_finish_stmt_generation (stmt, new_stmt, gsi); |
2879 | if ((i & 1) == 0) | |
2880 | { | |
2881 | prev_res = half_res; | |
2882 | continue; | |
2883 | } | |
2884 | new_temp = make_ssa_name (vec_dest); | |
2885 | new_stmt = gimple_build_assign (new_temp, convert_code, | |
2886 | prev_res, half_res); | |
2887 | } | |
70439f0d | 2888 | else |
b1b6836e | 2889 | { |
a844293d | 2890 | gcall *call; |
b1b6836e | 2891 | if (ifn != IFN_LAST) |
a844293d | 2892 | call = gimple_build_call_internal_vec (ifn, vargs); |
b1b6836e | 2893 | else |
a844293d RS |
2894 | call = gimple_build_call_vec (fndecl, vargs); |
2895 | new_temp = make_ssa_name (vec_dest, call); | |
2896 | gimple_call_set_lhs (call, new_temp); | |
2897 | gimple_call_set_nothrow (call, true); | |
2898 | new_stmt = call; | |
b1b6836e | 2899 | } |
190c2236 | 2900 | vect_finish_stmt_generation (stmt, new_stmt, gsi); |
9771b263 | 2901 | SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt); |
190c2236 JJ |
2902 | } |
2903 | ||
2904 | for (i = 0; i < nargs; i++) | |
2905 | { | |
37b5ec8f | 2906 | vec<tree> vec_oprndsi = vec_defs[i]; |
9771b263 | 2907 | vec_oprndsi.release (); |
190c2236 | 2908 | } |
190c2236 JJ |
2909 | continue; |
2910 | } | |
2911 | ||
ebfd146a IR |
2912 | for (i = 0; i < nargs; i++) |
2913 | { | |
2914 | op = gimple_call_arg (stmt, i); | |
2915 | if (j == 0) | |
2916 | vec_oprnd0 | |
81c40241 | 2917 | = vect_get_vec_def_for_operand (op, stmt); |
ebfd146a | 2918 | else |
63827fb8 IR |
2919 | { |
2920 | vec_oprnd0 = gimple_call_arg (new_stmt, i); | |
2921 | vec_oprnd0 | |
2922 | = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0); | |
2923 | } | |
ebfd146a | 2924 | |
9771b263 | 2925 | vargs.quick_push (vec_oprnd0); |
ebfd146a IR |
2926 | } |
2927 | ||
74bf76ed JJ |
2928 | if (gimple_call_internal_p (stmt) |
2929 | && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE) | |
2930 | { | |
2931 | tree *v = XALLOCAVEC (tree, nunits_out); | |
2932 | int k; | |
2933 | for (k = 0; k < nunits_out; ++k) | |
2934 | v[k] = build_int_cst (unsigned_type_node, j * nunits_out + k); | |
2935 | tree cst = build_vector (vectype_out, v); | |
2936 | tree new_var | |
0e22bb5a | 2937 | = vect_get_new_ssa_name (vectype_out, vect_simple_var, "cst_"); |
355fe088 | 2938 | gimple *init_stmt = gimple_build_assign (new_var, cst); |
74bf76ed | 2939 | vect_init_vector_1 (stmt, init_stmt, NULL); |
b731b390 | 2940 | new_temp = make_ssa_name (vec_dest); |
0e22bb5a | 2941 | new_stmt = gimple_build_assign (new_temp, new_var); |
74bf76ed | 2942 | } |
b1b6836e RS |
2943 | else if (modifier == NARROW) |
2944 | { | |
2945 | tree half_res = make_ssa_name (vectype_in); | |
a844293d RS |
2946 | gcall *call = gimple_build_call_internal_vec (ifn, vargs); |
2947 | gimple_call_set_lhs (call, half_res); | |
2948 | gimple_call_set_nothrow (call, true); | |
2949 | new_stmt = call; | |
b1b6836e RS |
2950 | vect_finish_stmt_generation (stmt, new_stmt, gsi); |
2951 | if ((j & 1) == 0) | |
2952 | { | |
2953 | prev_res = half_res; | |
2954 | continue; | |
2955 | } | |
2956 | new_temp = make_ssa_name (vec_dest); | |
2957 | new_stmt = gimple_build_assign (new_temp, convert_code, | |
2958 | prev_res, half_res); | |
2959 | } | |
74bf76ed JJ |
2960 | else |
2961 | { | |
a844293d | 2962 | gcall *call; |
70439f0d | 2963 | if (ifn != IFN_LAST) |
a844293d | 2964 | call = gimple_build_call_internal_vec (ifn, vargs); |
70439f0d | 2965 | else |
a844293d | 2966 | call = gimple_build_call_vec (fndecl, vargs); |
74bf76ed | 2967 | new_temp = make_ssa_name (vec_dest, new_stmt); |
a844293d RS |
2968 | gimple_call_set_lhs (call, new_temp); |
2969 | gimple_call_set_nothrow (call, true); | |
2970 | new_stmt = call; | |
74bf76ed | 2971 | } |
ebfd146a IR |
2972 | vect_finish_stmt_generation (stmt, new_stmt, gsi); |
2973 | ||
b1b6836e | 2974 | if (j == (modifier == NARROW ? 1 : 0)) |
ebfd146a IR |
2975 | STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt; |
2976 | else | |
2977 | STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt; | |
2978 | ||
2979 | prev_stmt_info = vinfo_for_stmt (new_stmt); | |
2980 | } | |
b1b6836e RS |
2981 | } |
2982 | else if (modifier == NARROW) | |
2983 | { | |
ebfd146a IR |
2984 | for (j = 0; j < ncopies; ++j) |
2985 | { | |
2986 | /* Build argument list for the vectorized call. */ | |
2987 | if (j == 0) | |
9771b263 | 2988 | vargs.create (nargs * 2); |
ebfd146a | 2989 | else |
9771b263 | 2990 | vargs.truncate (0); |
ebfd146a | 2991 | |
190c2236 JJ |
2992 | if (slp_node) |
2993 | { | |
ef062b13 | 2994 | auto_vec<vec<tree> > vec_defs (nargs); |
9771b263 | 2995 | vec<tree> vec_oprnds0; |
190c2236 JJ |
2996 | |
2997 | for (i = 0; i < nargs; i++) | |
9771b263 | 2998 | vargs.quick_push (gimple_call_arg (stmt, i)); |
306b0c92 | 2999 | vect_get_slp_defs (vargs, slp_node, &vec_defs); |
37b5ec8f | 3000 | vec_oprnds0 = vec_defs[0]; |
190c2236 JJ |
3001 | |
3002 | /* Arguments are ready. Create the new vector stmt. */ | |
9771b263 | 3003 | for (i = 0; vec_oprnds0.iterate (i, &vec_oprnd0); i += 2) |
190c2236 JJ |
3004 | { |
3005 | size_t k; | |
9771b263 | 3006 | vargs.truncate (0); |
190c2236 JJ |
3007 | for (k = 0; k < nargs; k++) |
3008 | { | |
37b5ec8f | 3009 | vec<tree> vec_oprndsk = vec_defs[k]; |
9771b263 DN |
3010 | vargs.quick_push (vec_oprndsk[i]); |
3011 | vargs.quick_push (vec_oprndsk[i + 1]); | |
190c2236 | 3012 | } |
a844293d | 3013 | gcall *call; |
70439f0d | 3014 | if (ifn != IFN_LAST) |
a844293d | 3015 | call = gimple_build_call_internal_vec (ifn, vargs); |
70439f0d | 3016 | else |
a844293d RS |
3017 | call = gimple_build_call_vec (fndecl, vargs); |
3018 | new_temp = make_ssa_name (vec_dest, call); | |
3019 | gimple_call_set_lhs (call, new_temp); | |
3020 | gimple_call_set_nothrow (call, true); | |
3021 | new_stmt = call; | |
190c2236 | 3022 | vect_finish_stmt_generation (stmt, new_stmt, gsi); |
9771b263 | 3023 | SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt); |
190c2236 JJ |
3024 | } |
3025 | ||
3026 | for (i = 0; i < nargs; i++) | |
3027 | { | |
37b5ec8f | 3028 | vec<tree> vec_oprndsi = vec_defs[i]; |
9771b263 | 3029 | vec_oprndsi.release (); |
190c2236 | 3030 | } |
190c2236 JJ |
3031 | continue; |
3032 | } | |
3033 | ||
ebfd146a IR |
3034 | for (i = 0; i < nargs; i++) |
3035 | { | |
3036 | op = gimple_call_arg (stmt, i); | |
3037 | if (j == 0) | |
3038 | { | |
3039 | vec_oprnd0 | |
81c40241 | 3040 | = vect_get_vec_def_for_operand (op, stmt); |
ebfd146a | 3041 | vec_oprnd1 |
63827fb8 | 3042 | = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0); |
ebfd146a IR |
3043 | } |
3044 | else | |
3045 | { | |
336ecb65 | 3046 | vec_oprnd1 = gimple_call_arg (new_stmt, 2*i + 1); |
ebfd146a | 3047 | vec_oprnd0 |
63827fb8 | 3048 | = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd1); |
ebfd146a | 3049 | vec_oprnd1 |
63827fb8 | 3050 | = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0); |
ebfd146a IR |
3051 | } |
3052 | ||
9771b263 DN |
3053 | vargs.quick_push (vec_oprnd0); |
3054 | vargs.quick_push (vec_oprnd1); | |
ebfd146a IR |
3055 | } |
3056 | ||
b1b6836e | 3057 | new_stmt = gimple_build_call_vec (fndecl, vargs); |
ebfd146a IR |
3058 | new_temp = make_ssa_name (vec_dest, new_stmt); |
3059 | gimple_call_set_lhs (new_stmt, new_temp); | |
ebfd146a IR |
3060 | vect_finish_stmt_generation (stmt, new_stmt, gsi); |
3061 | ||
3062 | if (j == 0) | |
3063 | STMT_VINFO_VEC_STMT (stmt_info) = new_stmt; | |
3064 | else | |
3065 | STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt; | |
3066 | ||
3067 | prev_stmt_info = vinfo_for_stmt (new_stmt); | |
3068 | } | |
3069 | ||
3070 | *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info); | |
ebfd146a | 3071 | } |
b1b6836e RS |
3072 | else |
3073 | /* No current target implements this case. */ | |
3074 | return false; | |
ebfd146a | 3075 | |
9771b263 | 3076 | vargs.release (); |
ebfd146a | 3077 | |
ebfd146a IR |
3078 | /* The call in STMT might prevent it from being removed in dce. |
3079 | We however cannot remove it here, due to the way the ssa name | |
3080 | it defines is mapped to the new definition. So just replace | |
3081 | rhs of the statement with something harmless. */ | |
3082 | ||
dd34c087 JJ |
3083 | if (slp_node) |
3084 | return true; | |
3085 | ||
ebfd146a | 3086 | type = TREE_TYPE (scalar_dest); |
9d5e7640 IR |
3087 | if (is_pattern_stmt_p (stmt_info)) |
3088 | lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info)); | |
3089 | else | |
3090 | lhs = gimple_call_lhs (stmt); | |
3cc2fa2a | 3091 | |
9d5e7640 | 3092 | new_stmt = gimple_build_assign (lhs, build_zero_cst (type)); |
ebfd146a | 3093 | set_vinfo_for_stmt (new_stmt, stmt_info); |
dd34c087 | 3094 | set_vinfo_for_stmt (stmt, NULL); |
ebfd146a IR |
3095 | STMT_VINFO_STMT (stmt_info) = new_stmt; |
3096 | gsi_replace (gsi, new_stmt, false); | |
ebfd146a IR |
3097 | |
3098 | return true; | |
3099 | } | |
3100 | ||
3101 | ||
0136f8f0 AH |
3102 | struct simd_call_arg_info |
3103 | { | |
3104 | tree vectype; | |
3105 | tree op; | |
0136f8f0 | 3106 | HOST_WIDE_INT linear_step; |
34e82342 | 3107 | enum vect_def_type dt; |
0136f8f0 | 3108 | unsigned int align; |
17b658af | 3109 | bool simd_lane_linear; |
0136f8f0 AH |
3110 | }; |
3111 | ||
17b658af JJ |
3112 | /* Helper function of vectorizable_simd_clone_call. If OP, an SSA_NAME, |
3113 | is linear within simd lane (but not within whole loop), note it in | |
3114 | *ARGINFO. */ | |
3115 | ||
3116 | static void | |
3117 | vect_simd_lane_linear (tree op, struct loop *loop, | |
3118 | struct simd_call_arg_info *arginfo) | |
3119 | { | |
355fe088 | 3120 | gimple *def_stmt = SSA_NAME_DEF_STMT (op); |
17b658af JJ |
3121 | |
3122 | if (!is_gimple_assign (def_stmt) | |
3123 | || gimple_assign_rhs_code (def_stmt) != POINTER_PLUS_EXPR | |
3124 | || !is_gimple_min_invariant (gimple_assign_rhs1 (def_stmt))) | |
3125 | return; | |
3126 | ||
3127 | tree base = gimple_assign_rhs1 (def_stmt); | |
3128 | HOST_WIDE_INT linear_step = 0; | |
3129 | tree v = gimple_assign_rhs2 (def_stmt); | |
3130 | while (TREE_CODE (v) == SSA_NAME) | |
3131 | { | |
3132 | tree t; | |
3133 | def_stmt = SSA_NAME_DEF_STMT (v); | |
3134 | if (is_gimple_assign (def_stmt)) | |
3135 | switch (gimple_assign_rhs_code (def_stmt)) | |
3136 | { | |
3137 | case PLUS_EXPR: | |
3138 | t = gimple_assign_rhs2 (def_stmt); | |
3139 | if (linear_step || TREE_CODE (t) != INTEGER_CST) | |
3140 | return; | |
3141 | base = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (base), base, t); | |
3142 | v = gimple_assign_rhs1 (def_stmt); | |
3143 | continue; | |
3144 | case MULT_EXPR: | |
3145 | t = gimple_assign_rhs2 (def_stmt); | |
3146 | if (linear_step || !tree_fits_shwi_p (t) || integer_zerop (t)) | |
3147 | return; | |
3148 | linear_step = tree_to_shwi (t); | |
3149 | v = gimple_assign_rhs1 (def_stmt); | |
3150 | continue; | |
3151 | CASE_CONVERT: | |
3152 | t = gimple_assign_rhs1 (def_stmt); | |
3153 | if (TREE_CODE (TREE_TYPE (t)) != INTEGER_TYPE | |
3154 | || (TYPE_PRECISION (TREE_TYPE (v)) | |
3155 | < TYPE_PRECISION (TREE_TYPE (t)))) | |
3156 | return; | |
3157 | if (!linear_step) | |
3158 | linear_step = 1; | |
3159 | v = t; | |
3160 | continue; | |
3161 | default: | |
3162 | return; | |
3163 | } | |
8e4284d0 | 3164 | else if (gimple_call_internal_p (def_stmt, IFN_GOMP_SIMD_LANE) |
17b658af JJ |
3165 | && loop->simduid |
3166 | && TREE_CODE (gimple_call_arg (def_stmt, 0)) == SSA_NAME | |
3167 | && (SSA_NAME_VAR (gimple_call_arg (def_stmt, 0)) | |
3168 | == loop->simduid)) | |
3169 | { | |
3170 | if (!linear_step) | |
3171 | linear_step = 1; | |
3172 | arginfo->linear_step = linear_step; | |
3173 | arginfo->op = base; | |
3174 | arginfo->simd_lane_linear = true; | |
3175 | return; | |
3176 | } | |
3177 | } | |
3178 | } | |
3179 | ||
0136f8f0 AH |
3180 | /* Function vectorizable_simd_clone_call. |
3181 | ||
3182 | Check if STMT performs a function call that can be vectorized | |
3183 | by calling a simd clone of the function. | |
3184 | If VEC_STMT is also passed, vectorize the STMT: create a vectorized | |
3185 | stmt to replace it, put it in VEC_STMT, and insert it at BSI. | |
3186 | Return FALSE if not a vectorizable STMT, TRUE otherwise. */ | |
3187 | ||
3188 | static bool | |
355fe088 TS |
3189 | vectorizable_simd_clone_call (gimple *stmt, gimple_stmt_iterator *gsi, |
3190 | gimple **vec_stmt, slp_tree slp_node) | |
0136f8f0 AH |
3191 | { |
3192 | tree vec_dest; | |
3193 | tree scalar_dest; | |
3194 | tree op, type; | |
3195 | tree vec_oprnd0 = NULL_TREE; | |
3196 | stmt_vec_info stmt_info = vinfo_for_stmt (stmt), prev_stmt_info; | |
3197 | tree vectype; | |
3198 | unsigned int nunits; | |
3199 | loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); | |
3200 | bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info); | |
310213d4 | 3201 | vec_info *vinfo = stmt_info->vinfo; |
0136f8f0 | 3202 | struct loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL; |
81c40241 | 3203 | tree fndecl, new_temp; |
355fe088 TS |
3204 | gimple *def_stmt; |
3205 | gimple *new_stmt = NULL; | |
0136f8f0 | 3206 | int ncopies, j; |
00426f9a | 3207 | auto_vec<simd_call_arg_info> arginfo; |
0136f8f0 AH |
3208 | vec<tree> vargs = vNULL; |
3209 | size_t i, nargs; | |
3210 | tree lhs, rtype, ratype; | |
3211 | vec<constructor_elt, va_gc> *ret_ctor_elts; | |
3212 | ||
3213 | /* Is STMT a vectorizable call? */ | |
3214 | if (!is_gimple_call (stmt)) | |
3215 | return false; | |
3216 | ||
3217 | fndecl = gimple_call_fndecl (stmt); | |
3218 | if (fndecl == NULL_TREE) | |
3219 | return false; | |
3220 | ||
d52f5295 | 3221 | struct cgraph_node *node = cgraph_node::get (fndecl); |
0136f8f0 AH |
3222 | if (node == NULL || node->simd_clones == NULL) |
3223 | return false; | |
3224 | ||
3225 | if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo) | |
3226 | return false; | |
3227 | ||
66c16fd9 RB |
3228 | if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def |
3229 | && ! vec_stmt) | |
0136f8f0 AH |
3230 | return false; |
3231 | ||
3232 | if (gimple_call_lhs (stmt) | |
3233 | && TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME) | |
3234 | return false; | |
3235 | ||
3236 | gcc_checking_assert (!stmt_can_throw_internal (stmt)); | |
3237 | ||
3238 | vectype = STMT_VINFO_VECTYPE (stmt_info); | |
3239 | ||
3240 | if (loop_vinfo && nested_in_vect_loop_p (loop, stmt)) | |
3241 | return false; | |
3242 | ||
3243 | /* FORNOW */ | |
fce57248 | 3244 | if (slp_node) |
0136f8f0 AH |
3245 | return false; |
3246 | ||
3247 | /* Process function arguments. */ | |
3248 | nargs = gimple_call_num_args (stmt); | |
3249 | ||
3250 | /* Bail out if the function has zero arguments. */ | |
3251 | if (nargs == 0) | |
3252 | return false; | |
3253 | ||
00426f9a | 3254 | arginfo.reserve (nargs, true); |
0136f8f0 AH |
3255 | |
3256 | for (i = 0; i < nargs; i++) | |
3257 | { | |
3258 | simd_call_arg_info thisarginfo; | |
3259 | affine_iv iv; | |
3260 | ||
3261 | thisarginfo.linear_step = 0; | |
3262 | thisarginfo.align = 0; | |
3263 | thisarginfo.op = NULL_TREE; | |
17b658af | 3264 | thisarginfo.simd_lane_linear = false; |
0136f8f0 AH |
3265 | |
3266 | op = gimple_call_arg (stmt, i); | |
81c40241 RB |
3267 | if (!vect_is_simple_use (op, vinfo, &def_stmt, &thisarginfo.dt, |
3268 | &thisarginfo.vectype) | |
0136f8f0 AH |
3269 | || thisarginfo.dt == vect_uninitialized_def) |
3270 | { | |
3271 | if (dump_enabled_p ()) | |
3272 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
3273 | "use not simple.\n"); | |
0136f8f0 AH |
3274 | return false; |
3275 | } | |
3276 | ||
3277 | if (thisarginfo.dt == vect_constant_def | |
3278 | || thisarginfo.dt == vect_external_def) | |
3279 | gcc_assert (thisarginfo.vectype == NULL_TREE); | |
3280 | else | |
3281 | gcc_assert (thisarginfo.vectype != NULL_TREE); | |
3282 | ||
6c9e85fb JJ |
3283 | /* For linear arguments, the analyze phase should have saved |
3284 | the base and step in STMT_VINFO_SIMD_CLONE_INFO. */ | |
17b658af JJ |
3285 | if (i * 3 + 4 <= STMT_VINFO_SIMD_CLONE_INFO (stmt_info).length () |
3286 | && STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2]) | |
6c9e85fb JJ |
3287 | { |
3288 | gcc_assert (vec_stmt); | |
3289 | thisarginfo.linear_step | |
17b658af | 3290 | = tree_to_shwi (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2]); |
6c9e85fb | 3291 | thisarginfo.op |
17b658af JJ |
3292 | = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 1]; |
3293 | thisarginfo.simd_lane_linear | |
3294 | = (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 3] | |
3295 | == boolean_true_node); | |
6c9e85fb JJ |
3296 | /* If loop has been peeled for alignment, we need to adjust it. */ |
3297 | tree n1 = LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo); | |
3298 | tree n2 = LOOP_VINFO_NITERS (loop_vinfo); | |
17b658af | 3299 | if (n1 != n2 && !thisarginfo.simd_lane_linear) |
6c9e85fb JJ |
3300 | { |
3301 | tree bias = fold_build2 (MINUS_EXPR, TREE_TYPE (n1), n1, n2); | |
17b658af | 3302 | tree step = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2]; |
6c9e85fb JJ |
3303 | tree opt = TREE_TYPE (thisarginfo.op); |
3304 | bias = fold_convert (TREE_TYPE (step), bias); | |
3305 | bias = fold_build2 (MULT_EXPR, TREE_TYPE (step), bias, step); | |
3306 | thisarginfo.op | |
3307 | = fold_build2 (POINTER_TYPE_P (opt) | |
3308 | ? POINTER_PLUS_EXPR : PLUS_EXPR, opt, | |
3309 | thisarginfo.op, bias); | |
3310 | } | |
3311 | } | |
3312 | else if (!vec_stmt | |
3313 | && thisarginfo.dt != vect_constant_def | |
3314 | && thisarginfo.dt != vect_external_def | |
3315 | && loop_vinfo | |
3316 | && TREE_CODE (op) == SSA_NAME | |
3317 | && simple_iv (loop, loop_containing_stmt (stmt), op, | |
3318 | &iv, false) | |
3319 | && tree_fits_shwi_p (iv.step)) | |
0136f8f0 AH |
3320 | { |
3321 | thisarginfo.linear_step = tree_to_shwi (iv.step); | |
3322 | thisarginfo.op = iv.base; | |
3323 | } | |
3324 | else if ((thisarginfo.dt == vect_constant_def | |
3325 | || thisarginfo.dt == vect_external_def) | |
3326 | && POINTER_TYPE_P (TREE_TYPE (op))) | |
3327 | thisarginfo.align = get_pointer_alignment (op) / BITS_PER_UNIT; | |
17b658af JJ |
3328 | /* Addresses of array elements indexed by GOMP_SIMD_LANE are |
3329 | linear too. */ | |
3330 | if (POINTER_TYPE_P (TREE_TYPE (op)) | |
3331 | && !thisarginfo.linear_step | |
3332 | && !vec_stmt | |
3333 | && thisarginfo.dt != vect_constant_def | |
3334 | && thisarginfo.dt != vect_external_def | |
3335 | && loop_vinfo | |
3336 | && !slp_node | |
3337 | && TREE_CODE (op) == SSA_NAME) | |
3338 | vect_simd_lane_linear (op, loop, &thisarginfo); | |
0136f8f0 AH |
3339 | |
3340 | arginfo.quick_push (thisarginfo); | |
3341 | } | |
3342 | ||
3343 | unsigned int badness = 0; | |
3344 | struct cgraph_node *bestn = NULL; | |
6c9e85fb JJ |
3345 | if (STMT_VINFO_SIMD_CLONE_INFO (stmt_info).exists ()) |
3346 | bestn = cgraph_node::get (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[0]); | |
0136f8f0 AH |
3347 | else |
3348 | for (struct cgraph_node *n = node->simd_clones; n != NULL; | |
3349 | n = n->simdclone->next_clone) | |
3350 | { | |
3351 | unsigned int this_badness = 0; | |
3352 | if (n->simdclone->simdlen | |
3353 | > (unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo) | |
3354 | || n->simdclone->nargs != nargs) | |
3355 | continue; | |
3356 | if (n->simdclone->simdlen | |
3357 | < (unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo)) | |
3358 | this_badness += (exact_log2 (LOOP_VINFO_VECT_FACTOR (loop_vinfo)) | |
3359 | - exact_log2 (n->simdclone->simdlen)) * 1024; | |
3360 | if (n->simdclone->inbranch) | |
3361 | this_badness += 2048; | |
3362 | int target_badness = targetm.simd_clone.usable (n); | |
3363 | if (target_badness < 0) | |
3364 | continue; | |
3365 | this_badness += target_badness * 512; | |
3366 | /* FORNOW: Have to add code to add the mask argument. */ | |
3367 | if (n->simdclone->inbranch) | |
3368 | continue; | |
3369 | for (i = 0; i < nargs; i++) | |
3370 | { | |
3371 | switch (n->simdclone->args[i].arg_type) | |
3372 | { | |
3373 | case SIMD_CLONE_ARG_TYPE_VECTOR: | |
3374 | if (!useless_type_conversion_p | |
3375 | (n->simdclone->args[i].orig_type, | |
3376 | TREE_TYPE (gimple_call_arg (stmt, i)))) | |
3377 | i = -1; | |
3378 | else if (arginfo[i].dt == vect_constant_def | |
3379 | || arginfo[i].dt == vect_external_def | |
3380 | || arginfo[i].linear_step) | |
3381 | this_badness += 64; | |
3382 | break; | |
3383 | case SIMD_CLONE_ARG_TYPE_UNIFORM: | |
3384 | if (arginfo[i].dt != vect_constant_def | |
3385 | && arginfo[i].dt != vect_external_def) | |
3386 | i = -1; | |
3387 | break; | |
3388 | case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP: | |
d9a6bd32 | 3389 | case SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP: |
0136f8f0 AH |
3390 | if (arginfo[i].dt == vect_constant_def |
3391 | || arginfo[i].dt == vect_external_def | |
3392 | || (arginfo[i].linear_step | |
3393 | != n->simdclone->args[i].linear_step)) | |
3394 | i = -1; | |
3395 | break; | |
3396 | case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP: | |
d9a6bd32 JJ |
3397 | case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP: |
3398 | case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP: | |
e01d41e5 JJ |
3399 | case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP: |
3400 | case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP: | |
3401 | case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP: | |
0136f8f0 AH |
3402 | /* FORNOW */ |
3403 | i = -1; | |
3404 | break; | |
3405 | case SIMD_CLONE_ARG_TYPE_MASK: | |
3406 | gcc_unreachable (); | |
3407 | } | |
3408 | if (i == (size_t) -1) | |
3409 | break; | |
3410 | if (n->simdclone->args[i].alignment > arginfo[i].align) | |
3411 | { | |
3412 | i = -1; | |
3413 | break; | |
3414 | } | |
3415 | if (arginfo[i].align) | |
3416 | this_badness += (exact_log2 (arginfo[i].align) | |
3417 | - exact_log2 (n->simdclone->args[i].alignment)); | |
3418 | } | |
3419 | if (i == (size_t) -1) | |
3420 | continue; | |
3421 | if (bestn == NULL || this_badness < badness) | |
3422 | { | |
3423 | bestn = n; | |
3424 | badness = this_badness; | |
3425 | } | |
3426 | } | |
3427 | ||
3428 | if (bestn == NULL) | |
00426f9a | 3429 | return false; |
0136f8f0 AH |
3430 | |
3431 | for (i = 0; i < nargs; i++) | |
3432 | if ((arginfo[i].dt == vect_constant_def | |
3433 | || arginfo[i].dt == vect_external_def) | |
3434 | && bestn->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_VECTOR) | |
3435 | { | |
3436 | arginfo[i].vectype | |
3437 | = get_vectype_for_scalar_type (TREE_TYPE (gimple_call_arg (stmt, | |
3438 | i))); | |
3439 | if (arginfo[i].vectype == NULL | |
3440 | || (TYPE_VECTOR_SUBPARTS (arginfo[i].vectype) | |
3441 | > bestn->simdclone->simdlen)) | |
00426f9a | 3442 | return false; |
0136f8f0 AH |
3443 | } |
3444 | ||
3445 | fndecl = bestn->decl; | |
3446 | nunits = bestn->simdclone->simdlen; | |
3447 | ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits; | |
3448 | ||
3449 | /* If the function isn't const, only allow it in simd loops where user | |
3450 | has asserted that at least nunits consecutive iterations can be | |
3451 | performed using SIMD instructions. */ | |
3452 | if ((loop == NULL || (unsigned) loop->safelen < nunits) | |
3453 | && gimple_vuse (stmt)) | |
00426f9a | 3454 | return false; |
0136f8f0 AH |
3455 | |
3456 | /* Sanity check: make sure that at least one copy of the vectorized stmt | |
3457 | needs to be generated. */ | |
3458 | gcc_assert (ncopies >= 1); | |
3459 | ||
3460 | if (!vec_stmt) /* transformation not required. */ | |
3461 | { | |
6c9e85fb JJ |
3462 | STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (bestn->decl); |
3463 | for (i = 0; i < nargs; i++) | |
7adb26f2 JJ |
3464 | if ((bestn->simdclone->args[i].arg_type |
3465 | == SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP) | |
3466 | || (bestn->simdclone->args[i].arg_type | |
3467 | == SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP)) | |
6c9e85fb | 3468 | { |
17b658af | 3469 | STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_grow_cleared (i * 3 |
6c9e85fb JJ |
3470 | + 1); |
3471 | STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (arginfo[i].op); | |
3472 | tree lst = POINTER_TYPE_P (TREE_TYPE (arginfo[i].op)) | |
3473 | ? size_type_node : TREE_TYPE (arginfo[i].op); | |
3474 | tree ls = build_int_cst (lst, arginfo[i].linear_step); | |
3475 | STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (ls); | |
17b658af JJ |
3476 | tree sll = arginfo[i].simd_lane_linear |
3477 | ? boolean_true_node : boolean_false_node; | |
3478 | STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (sll); | |
6c9e85fb | 3479 | } |
0136f8f0 AH |
3480 | STMT_VINFO_TYPE (stmt_info) = call_simd_clone_vec_info_type; |
3481 | if (dump_enabled_p ()) | |
3482 | dump_printf_loc (MSG_NOTE, vect_location, | |
3483 | "=== vectorizable_simd_clone_call ===\n"); | |
3484 | /* vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL); */ | |
0136f8f0 AH |
3485 | return true; |
3486 | } | |
3487 | ||
67b8dbac | 3488 | /* Transform. */ |
0136f8f0 AH |
3489 | |
3490 | if (dump_enabled_p ()) | |
3491 | dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n"); | |
3492 | ||
3493 | /* Handle def. */ | |
3494 | scalar_dest = gimple_call_lhs (stmt); | |
3495 | vec_dest = NULL_TREE; | |
3496 | rtype = NULL_TREE; | |
3497 | ratype = NULL_TREE; | |
3498 | if (scalar_dest) | |
3499 | { | |
3500 | vec_dest = vect_create_destination_var (scalar_dest, vectype); | |
3501 | rtype = TREE_TYPE (TREE_TYPE (fndecl)); | |
3502 | if (TREE_CODE (rtype) == ARRAY_TYPE) | |
3503 | { | |
3504 | ratype = rtype; | |
3505 | rtype = TREE_TYPE (ratype); | |
3506 | } | |
3507 | } | |
3508 | ||
3509 | prev_stmt_info = NULL; | |
3510 | for (j = 0; j < ncopies; ++j) | |
3511 | { | |
3512 | /* Build argument list for the vectorized call. */ | |
3513 | if (j == 0) | |
3514 | vargs.create (nargs); | |
3515 | else | |
3516 | vargs.truncate (0); | |
3517 | ||
3518 | for (i = 0; i < nargs; i++) | |
3519 | { | |
3520 | unsigned int k, l, m, o; | |
3521 | tree atype; | |
3522 | op = gimple_call_arg (stmt, i); | |
3523 | switch (bestn->simdclone->args[i].arg_type) | |
3524 | { | |
3525 | case SIMD_CLONE_ARG_TYPE_VECTOR: | |
3526 | atype = bestn->simdclone->args[i].vector_type; | |
3527 | o = nunits / TYPE_VECTOR_SUBPARTS (atype); | |
3528 | for (m = j * o; m < (j + 1) * o; m++) | |
3529 | { | |
3530 | if (TYPE_VECTOR_SUBPARTS (atype) | |
3531 | < TYPE_VECTOR_SUBPARTS (arginfo[i].vectype)) | |
3532 | { | |
3533 | unsigned int prec = GET_MODE_BITSIZE (TYPE_MODE (atype)); | |
3534 | k = (TYPE_VECTOR_SUBPARTS (arginfo[i].vectype) | |
3535 | / TYPE_VECTOR_SUBPARTS (atype)); | |
3536 | gcc_assert ((k & (k - 1)) == 0); | |
3537 | if (m == 0) | |
3538 | vec_oprnd0 | |
81c40241 | 3539 | = vect_get_vec_def_for_operand (op, stmt); |
0136f8f0 AH |
3540 | else |
3541 | { | |
3542 | vec_oprnd0 = arginfo[i].op; | |
3543 | if ((m & (k - 1)) == 0) | |
3544 | vec_oprnd0 | |
3545 | = vect_get_vec_def_for_stmt_copy (arginfo[i].dt, | |
3546 | vec_oprnd0); | |
3547 | } | |
3548 | arginfo[i].op = vec_oprnd0; | |
3549 | vec_oprnd0 | |
3550 | = build3 (BIT_FIELD_REF, atype, vec_oprnd0, | |
92e29a5e | 3551 | bitsize_int (prec), |
0136f8f0 AH |
3552 | bitsize_int ((m & (k - 1)) * prec)); |
3553 | new_stmt | |
b731b390 | 3554 | = gimple_build_assign (make_ssa_name (atype), |
0136f8f0 AH |
3555 | vec_oprnd0); |
3556 | vect_finish_stmt_generation (stmt, new_stmt, gsi); | |
3557 | vargs.safe_push (gimple_assign_lhs (new_stmt)); | |
3558 | } | |
3559 | else | |
3560 | { | |
3561 | k = (TYPE_VECTOR_SUBPARTS (atype) | |
3562 | / TYPE_VECTOR_SUBPARTS (arginfo[i].vectype)); | |
3563 | gcc_assert ((k & (k - 1)) == 0); | |
3564 | vec<constructor_elt, va_gc> *ctor_elts; | |
3565 | if (k != 1) | |
3566 | vec_alloc (ctor_elts, k); | |
3567 | else | |
3568 | ctor_elts = NULL; | |
3569 | for (l = 0; l < k; l++) | |
3570 | { | |
3571 | if (m == 0 && l == 0) | |
3572 | vec_oprnd0 | |
81c40241 | 3573 | = vect_get_vec_def_for_operand (op, stmt); |
0136f8f0 AH |
3574 | else |
3575 | vec_oprnd0 | |
3576 | = vect_get_vec_def_for_stmt_copy (arginfo[i].dt, | |
3577 | arginfo[i].op); | |
3578 | arginfo[i].op = vec_oprnd0; | |
3579 | if (k == 1) | |
3580 | break; | |
3581 | CONSTRUCTOR_APPEND_ELT (ctor_elts, NULL_TREE, | |
3582 | vec_oprnd0); | |
3583 | } | |
3584 | if (k == 1) | |
3585 | vargs.safe_push (vec_oprnd0); | |
3586 | else | |
3587 | { | |
3588 | vec_oprnd0 = build_constructor (atype, ctor_elts); | |
3589 | new_stmt | |
b731b390 | 3590 | = gimple_build_assign (make_ssa_name (atype), |
0136f8f0 AH |
3591 | vec_oprnd0); |
3592 | vect_finish_stmt_generation (stmt, new_stmt, gsi); | |
3593 | vargs.safe_push (gimple_assign_lhs (new_stmt)); | |
3594 | } | |
3595 | } | |
3596 | } | |
3597 | break; | |
3598 | case SIMD_CLONE_ARG_TYPE_UNIFORM: | |
3599 | vargs.safe_push (op); | |
3600 | break; | |
3601 | case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP: | |
7adb26f2 | 3602 | case SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP: |
0136f8f0 AH |
3603 | if (j == 0) |
3604 | { | |
3605 | gimple_seq stmts; | |
3606 | arginfo[i].op | |
3607 | = force_gimple_operand (arginfo[i].op, &stmts, true, | |
3608 | NULL_TREE); | |
3609 | if (stmts != NULL) | |
3610 | { | |
3611 | basic_block new_bb; | |
3612 | edge pe = loop_preheader_edge (loop); | |
3613 | new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts); | |
3614 | gcc_assert (!new_bb); | |
3615 | } | |
17b658af JJ |
3616 | if (arginfo[i].simd_lane_linear) |
3617 | { | |
3618 | vargs.safe_push (arginfo[i].op); | |
3619 | break; | |
3620 | } | |
b731b390 | 3621 | tree phi_res = copy_ssa_name (op); |
538dd0b7 | 3622 | gphi *new_phi = create_phi_node (phi_res, loop->header); |
0136f8f0 | 3623 | set_vinfo_for_stmt (new_phi, |
310213d4 | 3624 | new_stmt_vec_info (new_phi, loop_vinfo)); |
0136f8f0 AH |
3625 | add_phi_arg (new_phi, arginfo[i].op, |
3626 | loop_preheader_edge (loop), UNKNOWN_LOCATION); | |
3627 | enum tree_code code | |
3628 | = POINTER_TYPE_P (TREE_TYPE (op)) | |
3629 | ? POINTER_PLUS_EXPR : PLUS_EXPR; | |
3630 | tree type = POINTER_TYPE_P (TREE_TYPE (op)) | |
3631 | ? sizetype : TREE_TYPE (op); | |
807e902e KZ |
3632 | widest_int cst |
3633 | = wi::mul (bestn->simdclone->args[i].linear_step, | |
3634 | ncopies * nunits); | |
3635 | tree tcst = wide_int_to_tree (type, cst); | |
b731b390 | 3636 | tree phi_arg = copy_ssa_name (op); |
0d0e4a03 JJ |
3637 | new_stmt |
3638 | = gimple_build_assign (phi_arg, code, phi_res, tcst); | |
0136f8f0 AH |
3639 | gimple_stmt_iterator si = gsi_after_labels (loop->header); |
3640 | gsi_insert_after (&si, new_stmt, GSI_NEW_STMT); | |
3641 | set_vinfo_for_stmt (new_stmt, | |
310213d4 | 3642 | new_stmt_vec_info (new_stmt, loop_vinfo)); |
0136f8f0 AH |
3643 | add_phi_arg (new_phi, phi_arg, loop_latch_edge (loop), |
3644 | UNKNOWN_LOCATION); | |
3645 | arginfo[i].op = phi_res; | |
3646 | vargs.safe_push (phi_res); | |
3647 | } | |
3648 | else | |
3649 | { | |
3650 | enum tree_code code | |
3651 | = POINTER_TYPE_P (TREE_TYPE (op)) | |
3652 | ? POINTER_PLUS_EXPR : PLUS_EXPR; | |
3653 | tree type = POINTER_TYPE_P (TREE_TYPE (op)) | |
3654 | ? sizetype : TREE_TYPE (op); | |
807e902e KZ |
3655 | widest_int cst |
3656 | = wi::mul (bestn->simdclone->args[i].linear_step, | |
3657 | j * nunits); | |
3658 | tree tcst = wide_int_to_tree (type, cst); | |
b731b390 | 3659 | new_temp = make_ssa_name (TREE_TYPE (op)); |
0d0e4a03 JJ |
3660 | new_stmt = gimple_build_assign (new_temp, code, |
3661 | arginfo[i].op, tcst); | |
0136f8f0 AH |
3662 | vect_finish_stmt_generation (stmt, new_stmt, gsi); |
3663 | vargs.safe_push (new_temp); | |
3664 | } | |
3665 | break; | |
7adb26f2 JJ |
3666 | case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP: |
3667 | case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP: | |
0136f8f0 | 3668 | case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP: |
e01d41e5 JJ |
3669 | case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP: |
3670 | case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP: | |
3671 | case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP: | |
0136f8f0 AH |
3672 | default: |
3673 | gcc_unreachable (); | |
3674 | } | |
3675 | } | |
3676 | ||
3677 | new_stmt = gimple_build_call_vec (fndecl, vargs); | |
3678 | if (vec_dest) | |
3679 | { | |
3680 | gcc_assert (ratype || TYPE_VECTOR_SUBPARTS (rtype) == nunits); | |
3681 | if (ratype) | |
b731b390 | 3682 | new_temp = create_tmp_var (ratype); |
0136f8f0 AH |
3683 | else if (TYPE_VECTOR_SUBPARTS (vectype) |
3684 | == TYPE_VECTOR_SUBPARTS (rtype)) | |
3685 | new_temp = make_ssa_name (vec_dest, new_stmt); | |
3686 | else | |
3687 | new_temp = make_ssa_name (rtype, new_stmt); | |
3688 | gimple_call_set_lhs (new_stmt, new_temp); | |
3689 | } | |
3690 | vect_finish_stmt_generation (stmt, new_stmt, gsi); | |
3691 | ||
3692 | if (vec_dest) | |
3693 | { | |
3694 | if (TYPE_VECTOR_SUBPARTS (vectype) < nunits) | |
3695 | { | |
3696 | unsigned int k, l; | |
3697 | unsigned int prec = GET_MODE_BITSIZE (TYPE_MODE (vectype)); | |
3698 | k = nunits / TYPE_VECTOR_SUBPARTS (vectype); | |
3699 | gcc_assert ((k & (k - 1)) == 0); | |
3700 | for (l = 0; l < k; l++) | |
3701 | { | |
3702 | tree t; | |
3703 | if (ratype) | |
3704 | { | |
3705 | t = build_fold_addr_expr (new_temp); | |
3706 | t = build2 (MEM_REF, vectype, t, | |
3707 | build_int_cst (TREE_TYPE (t), | |
3708 | l * prec / BITS_PER_UNIT)); | |
3709 | } | |
3710 | else | |
3711 | t = build3 (BIT_FIELD_REF, vectype, new_temp, | |
92e29a5e | 3712 | bitsize_int (prec), bitsize_int (l * prec)); |
0136f8f0 | 3713 | new_stmt |
b731b390 | 3714 | = gimple_build_assign (make_ssa_name (vectype), t); |
0136f8f0 AH |
3715 | vect_finish_stmt_generation (stmt, new_stmt, gsi); |
3716 | if (j == 0 && l == 0) | |
3717 | STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt; | |
3718 | else | |
3719 | STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt; | |
3720 | ||
3721 | prev_stmt_info = vinfo_for_stmt (new_stmt); | |
3722 | } | |
3723 | ||
3724 | if (ratype) | |
3725 | { | |
3726 | tree clobber = build_constructor (ratype, NULL); | |
3727 | TREE_THIS_VOLATILE (clobber) = 1; | |
3728 | new_stmt = gimple_build_assign (new_temp, clobber); | |
3729 | vect_finish_stmt_generation (stmt, new_stmt, gsi); | |
3730 | } | |
3731 | continue; | |
3732 | } | |
3733 | else if (TYPE_VECTOR_SUBPARTS (vectype) > nunits) | |
3734 | { | |
3735 | unsigned int k = (TYPE_VECTOR_SUBPARTS (vectype) | |
3736 | / TYPE_VECTOR_SUBPARTS (rtype)); | |
3737 | gcc_assert ((k & (k - 1)) == 0); | |
3738 | if ((j & (k - 1)) == 0) | |
3739 | vec_alloc (ret_ctor_elts, k); | |
3740 | if (ratype) | |
3741 | { | |
3742 | unsigned int m, o = nunits / TYPE_VECTOR_SUBPARTS (rtype); | |
3743 | for (m = 0; m < o; m++) | |
3744 | { | |
3745 | tree tem = build4 (ARRAY_REF, rtype, new_temp, | |
3746 | size_int (m), NULL_TREE, NULL_TREE); | |
3747 | new_stmt | |
b731b390 | 3748 | = gimple_build_assign (make_ssa_name (rtype), tem); |
0136f8f0 AH |
3749 | vect_finish_stmt_generation (stmt, new_stmt, gsi); |
3750 | CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE, | |
3751 | gimple_assign_lhs (new_stmt)); | |
3752 | } | |
3753 | tree clobber = build_constructor (ratype, NULL); | |
3754 | TREE_THIS_VOLATILE (clobber) = 1; | |
3755 | new_stmt = gimple_build_assign (new_temp, clobber); | |
3756 | vect_finish_stmt_generation (stmt, new_stmt, gsi); | |
3757 | } | |
3758 | else | |
3759 | CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE, new_temp); | |
3760 | if ((j & (k - 1)) != k - 1) | |
3761 | continue; | |
3762 | vec_oprnd0 = build_constructor (vectype, ret_ctor_elts); | |
3763 | new_stmt | |
b731b390 | 3764 | = gimple_build_assign (make_ssa_name (vec_dest), vec_oprnd0); |
0136f8f0 AH |
3765 | vect_finish_stmt_generation (stmt, new_stmt, gsi); |
3766 | ||
3767 | if ((unsigned) j == k - 1) | |
3768 | STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt; | |
3769 | else | |
3770 | STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt; | |
3771 | ||
3772 | prev_stmt_info = vinfo_for_stmt (new_stmt); | |
3773 | continue; | |
3774 | } | |
3775 | else if (ratype) | |
3776 | { | |
3777 | tree t = build_fold_addr_expr (new_temp); | |
3778 | t = build2 (MEM_REF, vectype, t, | |
3779 | build_int_cst (TREE_TYPE (t), 0)); | |
3780 | new_stmt | |
b731b390 | 3781 | = gimple_build_assign (make_ssa_name (vec_dest), t); |
0136f8f0 AH |
3782 | vect_finish_stmt_generation (stmt, new_stmt, gsi); |
3783 | tree clobber = build_constructor (ratype, NULL); | |
3784 | TREE_THIS_VOLATILE (clobber) = 1; | |
3785 | vect_finish_stmt_generation (stmt, | |
3786 | gimple_build_assign (new_temp, | |
3787 | clobber), gsi); | |
3788 | } | |
3789 | } | |
3790 | ||
3791 | if (j == 0) | |
3792 | STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt; | |
3793 | else | |
3794 | STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt; | |
3795 | ||
3796 | prev_stmt_info = vinfo_for_stmt (new_stmt); | |
3797 | } | |
3798 | ||
3799 | vargs.release (); | |
3800 | ||
3801 | /* The call in STMT might prevent it from being removed in dce. | |
3802 | We however cannot remove it here, due to the way the ssa name | |
3803 | it defines is mapped to the new definition. So just replace | |
3804 | rhs of the statement with something harmless. */ | |
3805 | ||
3806 | if (slp_node) | |
3807 | return true; | |
3808 | ||
3809 | if (scalar_dest) | |
3810 | { | |
3811 | type = TREE_TYPE (scalar_dest); | |
3812 | if (is_pattern_stmt_p (stmt_info)) | |
3813 | lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info)); | |
3814 | else | |
3815 | lhs = gimple_call_lhs (stmt); | |
3816 | new_stmt = gimple_build_assign (lhs, build_zero_cst (type)); | |
3817 | } | |
3818 | else | |
3819 | new_stmt = gimple_build_nop (); | |
3820 | set_vinfo_for_stmt (new_stmt, stmt_info); | |
3821 | set_vinfo_for_stmt (stmt, NULL); | |
3822 | STMT_VINFO_STMT (stmt_info) = new_stmt; | |
2865f32a | 3823 | gsi_replace (gsi, new_stmt, true); |
0136f8f0 AH |
3824 | unlink_stmt_vdef (stmt); |
3825 | ||
3826 | return true; | |
3827 | } | |
3828 | ||
3829 | ||
ebfd146a IR |
3830 | /* Function vect_gen_widened_results_half |
3831 | ||
3832 | Create a vector stmt whose code, type, number of arguments, and result | |
b8698a0f | 3833 | variable are CODE, OP_TYPE, and VEC_DEST, and its arguments are |
ff802fa1 | 3834 | VEC_OPRND0 and VEC_OPRND1. The new vector stmt is to be inserted at BSI. |
ebfd146a IR |
3835 | In the case that CODE is a CALL_EXPR, this means that a call to DECL |
3836 | needs to be created (DECL is a function-decl of a target-builtin). | |
3837 | STMT is the original scalar stmt that we are vectorizing. */ | |
3838 | ||
355fe088 | 3839 | static gimple * |
ebfd146a IR |
3840 | vect_gen_widened_results_half (enum tree_code code, |
3841 | tree decl, | |
3842 | tree vec_oprnd0, tree vec_oprnd1, int op_type, | |
3843 | tree vec_dest, gimple_stmt_iterator *gsi, | |
355fe088 | 3844 | gimple *stmt) |
b8698a0f | 3845 | { |
355fe088 | 3846 | gimple *new_stmt; |
b8698a0f L |
3847 | tree new_temp; |
3848 | ||
3849 | /* Generate half of the widened result: */ | |
3850 | if (code == CALL_EXPR) | |
3851 | { | |
3852 | /* Target specific support */ | |
ebfd146a IR |
3853 | if (op_type == binary_op) |
3854 | new_stmt = gimple_build_call (decl, 2, vec_oprnd0, vec_oprnd1); | |
3855 | else | |
3856 | new_stmt = gimple_build_call (decl, 1, vec_oprnd0); | |
3857 | new_temp = make_ssa_name (vec_dest, new_stmt); | |
3858 | gimple_call_set_lhs (new_stmt, new_temp); | |
b8698a0f L |
3859 | } |
3860 | else | |
ebfd146a | 3861 | { |
b8698a0f L |
3862 | /* Generic support */ |
3863 | gcc_assert (op_type == TREE_CODE_LENGTH (code)); | |
ebfd146a IR |
3864 | if (op_type != binary_op) |
3865 | vec_oprnd1 = NULL; | |
0d0e4a03 | 3866 | new_stmt = gimple_build_assign (vec_dest, code, vec_oprnd0, vec_oprnd1); |
ebfd146a IR |
3867 | new_temp = make_ssa_name (vec_dest, new_stmt); |
3868 | gimple_assign_set_lhs (new_stmt, new_temp); | |
b8698a0f | 3869 | } |
ebfd146a IR |
3870 | vect_finish_stmt_generation (stmt, new_stmt, gsi); |
3871 | ||
ebfd146a IR |
3872 | return new_stmt; |
3873 | } | |
3874 | ||
4a00c761 JJ |
3875 | |
3876 | /* Get vectorized definitions for loop-based vectorization. For the first | |
3877 | operand we call vect_get_vec_def_for_operand() (with OPRND containing | |
3878 | scalar operand), and for the rest we get a copy with | |
3879 | vect_get_vec_def_for_stmt_copy() using the previous vector definition | |
3880 | (stored in OPRND). See vect_get_vec_def_for_stmt_copy() for details. | |
3881 | The vectors are collected into VEC_OPRNDS. */ | |
3882 | ||
3883 | static void | |
355fe088 | 3884 | vect_get_loop_based_defs (tree *oprnd, gimple *stmt, enum vect_def_type dt, |
9771b263 | 3885 | vec<tree> *vec_oprnds, int multi_step_cvt) |
4a00c761 JJ |
3886 | { |
3887 | tree vec_oprnd; | |
3888 | ||
3889 | /* Get first vector operand. */ | |
3890 | /* All the vector operands except the very first one (that is scalar oprnd) | |
3891 | are stmt copies. */ | |
3892 | if (TREE_CODE (TREE_TYPE (*oprnd)) != VECTOR_TYPE) | |
81c40241 | 3893 | vec_oprnd = vect_get_vec_def_for_operand (*oprnd, stmt); |
4a00c761 JJ |
3894 | else |
3895 | vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, *oprnd); | |
3896 | ||
9771b263 | 3897 | vec_oprnds->quick_push (vec_oprnd); |
4a00c761 JJ |
3898 | |
3899 | /* Get second vector operand. */ | |
3900 | vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd); | |
9771b263 | 3901 | vec_oprnds->quick_push (vec_oprnd); |
4a00c761 JJ |
3902 | |
3903 | *oprnd = vec_oprnd; | |
3904 | ||
3905 | /* For conversion in multiple steps, continue to get operands | |
3906 | recursively. */ | |
3907 | if (multi_step_cvt) | |
3908 | vect_get_loop_based_defs (oprnd, stmt, dt, vec_oprnds, multi_step_cvt - 1); | |
3909 | } | |
3910 | ||
3911 | ||
3912 | /* Create vectorized demotion statements for vector operands from VEC_OPRNDS. | |
3913 | For multi-step conversions store the resulting vectors and call the function | |
3914 | recursively. */ | |
3915 | ||
3916 | static void | |
9771b263 | 3917 | vect_create_vectorized_demotion_stmts (vec<tree> *vec_oprnds, |
355fe088 | 3918 | int multi_step_cvt, gimple *stmt, |
9771b263 | 3919 | vec<tree> vec_dsts, |
4a00c761 JJ |
3920 | gimple_stmt_iterator *gsi, |
3921 | slp_tree slp_node, enum tree_code code, | |
3922 | stmt_vec_info *prev_stmt_info) | |
3923 | { | |
3924 | unsigned int i; | |
3925 | tree vop0, vop1, new_tmp, vec_dest; | |
355fe088 | 3926 | gimple *new_stmt; |
4a00c761 JJ |
3927 | stmt_vec_info stmt_info = vinfo_for_stmt (stmt); |
3928 | ||
9771b263 | 3929 | vec_dest = vec_dsts.pop (); |
4a00c761 | 3930 | |
9771b263 | 3931 | for (i = 0; i < vec_oprnds->length (); i += 2) |
4a00c761 JJ |
3932 | { |
3933 | /* Create demotion operation. */ | |
9771b263 DN |
3934 | vop0 = (*vec_oprnds)[i]; |
3935 | vop1 = (*vec_oprnds)[i + 1]; | |
0d0e4a03 | 3936 | new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1); |
4a00c761 JJ |
3937 | new_tmp = make_ssa_name (vec_dest, new_stmt); |
3938 | gimple_assign_set_lhs (new_stmt, new_tmp); | |
3939 | vect_finish_stmt_generation (stmt, new_stmt, gsi); | |
3940 | ||
3941 | if (multi_step_cvt) | |
3942 | /* Store the resulting vector for next recursive call. */ | |
9771b263 | 3943 | (*vec_oprnds)[i/2] = new_tmp; |
4a00c761 JJ |
3944 | else |
3945 | { | |
3946 | /* This is the last step of the conversion sequence. Store the | |
3947 | vectors in SLP_NODE or in vector info of the scalar statement | |
3948 | (or in STMT_VINFO_RELATED_STMT chain). */ | |
3949 | if (slp_node) | |
9771b263 | 3950 | SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt); |
4a00c761 | 3951 | else |
c689ce1e RB |
3952 | { |
3953 | if (!*prev_stmt_info) | |
3954 | STMT_VINFO_VEC_STMT (stmt_info) = new_stmt; | |
3955 | else | |
3956 | STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt; | |
4a00c761 | 3957 | |
c689ce1e RB |
3958 | *prev_stmt_info = vinfo_for_stmt (new_stmt); |
3959 | } | |
4a00c761 JJ |
3960 | } |
3961 | } | |
3962 | ||
3963 | /* For multi-step demotion operations we first generate demotion operations | |
3964 | from the source type to the intermediate types, and then combine the | |
3965 | results (stored in VEC_OPRNDS) in demotion operation to the destination | |
3966 | type. */ | |
3967 | if (multi_step_cvt) | |
3968 | { | |
3969 | /* At each level of recursion we have half of the operands we had at the | |
3970 | previous level. */ | |
9771b263 | 3971 | vec_oprnds->truncate ((i+1)/2); |
4a00c761 JJ |
3972 | vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1, |
3973 | stmt, vec_dsts, gsi, slp_node, | |
3974 | VEC_PACK_TRUNC_EXPR, | |
3975 | prev_stmt_info); | |
3976 | } | |
3977 | ||
9771b263 | 3978 | vec_dsts.quick_push (vec_dest); |
4a00c761 JJ |
3979 | } |
3980 | ||
3981 | ||
3982 | /* Create vectorized promotion statements for vector operands from VEC_OPRNDS0 | |
3983 | and VEC_OPRNDS1 (for binary operations). For multi-step conversions store | |
3984 | the resulting vectors and call the function recursively. */ | |
3985 | ||
3986 | static void | |
9771b263 DN |
3987 | vect_create_vectorized_promotion_stmts (vec<tree> *vec_oprnds0, |
3988 | vec<tree> *vec_oprnds1, | |
355fe088 | 3989 | gimple *stmt, tree vec_dest, |
4a00c761 JJ |
3990 | gimple_stmt_iterator *gsi, |
3991 | enum tree_code code1, | |
3992 | enum tree_code code2, tree decl1, | |
3993 | tree decl2, int op_type) | |
3994 | { | |
3995 | int i; | |
3996 | tree vop0, vop1, new_tmp1, new_tmp2; | |
355fe088 | 3997 | gimple *new_stmt1, *new_stmt2; |
6e1aa848 | 3998 | vec<tree> vec_tmp = vNULL; |
4a00c761 | 3999 | |
9771b263 DN |
4000 | vec_tmp.create (vec_oprnds0->length () * 2); |
4001 | FOR_EACH_VEC_ELT (*vec_oprnds0, i, vop0) | |
4a00c761 JJ |
4002 | { |
4003 | if (op_type == binary_op) | |
9771b263 | 4004 | vop1 = (*vec_oprnds1)[i]; |
4a00c761 JJ |
4005 | else |
4006 | vop1 = NULL_TREE; | |
4007 | ||
4008 | /* Generate the two halves of promotion operation. */ | |
4009 | new_stmt1 = vect_gen_widened_results_half (code1, decl1, vop0, vop1, | |
4010 | op_type, vec_dest, gsi, stmt); | |
4011 | new_stmt2 = vect_gen_widened_results_half (code2, decl2, vop0, vop1, | |
4012 | op_type, vec_dest, gsi, stmt); | |
4013 | if (is_gimple_call (new_stmt1)) | |
4014 | { | |
4015 | new_tmp1 = gimple_call_lhs (new_stmt1); | |
4016 | new_tmp2 = gimple_call_lhs (new_stmt2); | |
4017 | } | |
4018 | else | |
4019 | { | |
4020 | new_tmp1 = gimple_assign_lhs (new_stmt1); | |
4021 | new_tmp2 = gimple_assign_lhs (new_stmt2); | |
4022 | } | |
4023 | ||
4024 | /* Store the results for the next step. */ | |
9771b263 DN |
4025 | vec_tmp.quick_push (new_tmp1); |
4026 | vec_tmp.quick_push (new_tmp2); | |
4a00c761 JJ |
4027 | } |
4028 | ||
689eaba3 | 4029 | vec_oprnds0->release (); |
4a00c761 JJ |
4030 | *vec_oprnds0 = vec_tmp; |
4031 | } | |
4032 | ||
4033 | ||
b8698a0f L |
4034 | /* Check if STMT performs a conversion operation, that can be vectorized. |
4035 | If VEC_STMT is also passed, vectorize the STMT: create a vectorized | |
4a00c761 | 4036 | stmt to replace it, put it in VEC_STMT, and insert it at GSI. |
ebfd146a IR |
4037 | Return FALSE if not a vectorizable STMT, TRUE otherwise. */ |
4038 | ||
4039 | static bool | |
355fe088 TS |
4040 | vectorizable_conversion (gimple *stmt, gimple_stmt_iterator *gsi, |
4041 | gimple **vec_stmt, slp_tree slp_node) | |
ebfd146a IR |
4042 | { |
4043 | tree vec_dest; | |
4044 | tree scalar_dest; | |
4a00c761 | 4045 | tree op0, op1 = NULL_TREE; |
ebfd146a IR |
4046 | tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE; |
4047 | stmt_vec_info stmt_info = vinfo_for_stmt (stmt); | |
4048 | loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); | |
4049 | enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK; | |
4a00c761 | 4050 | enum tree_code codecvt1 = ERROR_MARK, codecvt2 = ERROR_MARK; |
ebfd146a IR |
4051 | tree decl1 = NULL_TREE, decl2 = NULL_TREE; |
4052 | tree new_temp; | |
355fe088 | 4053 | gimple *def_stmt; |
ebfd146a | 4054 | enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type}; |
4fc5ebf1 | 4055 | int ndts = 2; |
355fe088 | 4056 | gimple *new_stmt = NULL; |
ebfd146a IR |
4057 | stmt_vec_info prev_stmt_info; |
4058 | int nunits_in; | |
4059 | int nunits_out; | |
4060 | tree vectype_out, vectype_in; | |
4a00c761 JJ |
4061 | int ncopies, i, j; |
4062 | tree lhs_type, rhs_type; | |
ebfd146a | 4063 | enum { NARROW, NONE, WIDEN } modifier; |
6e1aa848 DN |
4064 | vec<tree> vec_oprnds0 = vNULL; |
4065 | vec<tree> vec_oprnds1 = vNULL; | |
ebfd146a | 4066 | tree vop0; |
4a00c761 | 4067 | bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info); |
310213d4 | 4068 | vec_info *vinfo = stmt_info->vinfo; |
4a00c761 | 4069 | int multi_step_cvt = 0; |
6e1aa848 | 4070 | vec<tree> interm_types = vNULL; |
4a00c761 JJ |
4071 | tree last_oprnd, intermediate_type, cvt_type = NULL_TREE; |
4072 | int op_type; | |
4a00c761 | 4073 | unsigned short fltsz; |
ebfd146a IR |
4074 | |
4075 | /* Is STMT a vectorizable conversion? */ | |
4076 | ||
4a00c761 | 4077 | if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo) |
ebfd146a IR |
4078 | return false; |
4079 | ||
66c16fd9 RB |
4080 | if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def |
4081 | && ! vec_stmt) | |
ebfd146a IR |
4082 | return false; |
4083 | ||
4084 | if (!is_gimple_assign (stmt)) | |
4085 | return false; | |
4086 | ||
4087 | if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME) | |
4088 | return false; | |
4089 | ||
4090 | code = gimple_assign_rhs_code (stmt); | |
4a00c761 JJ |
4091 | if (!CONVERT_EXPR_CODE_P (code) |
4092 | && code != FIX_TRUNC_EXPR | |
4093 | && code != FLOAT_EXPR | |
4094 | && code != WIDEN_MULT_EXPR | |
4095 | && code != WIDEN_LSHIFT_EXPR) | |
ebfd146a IR |
4096 | return false; |
4097 | ||
4a00c761 JJ |
4098 | op_type = TREE_CODE_LENGTH (code); |
4099 | ||
ebfd146a | 4100 | /* Check types of lhs and rhs. */ |
b690cc0f | 4101 | scalar_dest = gimple_assign_lhs (stmt); |
4a00c761 | 4102 | lhs_type = TREE_TYPE (scalar_dest); |
b690cc0f RG |
4103 | vectype_out = STMT_VINFO_VECTYPE (stmt_info); |
4104 | ||
ebfd146a IR |
4105 | op0 = gimple_assign_rhs1 (stmt); |
4106 | rhs_type = TREE_TYPE (op0); | |
4a00c761 JJ |
4107 | |
4108 | if ((code != FIX_TRUNC_EXPR && code != FLOAT_EXPR) | |
4109 | && !((INTEGRAL_TYPE_P (lhs_type) | |
4110 | && INTEGRAL_TYPE_P (rhs_type)) | |
4111 | || (SCALAR_FLOAT_TYPE_P (lhs_type) | |
4112 | && SCALAR_FLOAT_TYPE_P (rhs_type)))) | |
4113 | return false; | |
4114 | ||
e6f5c25d IE |
4115 | if (!VECTOR_BOOLEAN_TYPE_P (vectype_out) |
4116 | && ((INTEGRAL_TYPE_P (lhs_type) | |
2be65d9e | 4117 | && !type_has_mode_precision_p (lhs_type)) |
e6f5c25d | 4118 | || (INTEGRAL_TYPE_P (rhs_type) |
2be65d9e | 4119 | && !type_has_mode_precision_p (rhs_type)))) |
4a00c761 | 4120 | { |
73fbfcad | 4121 | if (dump_enabled_p ()) |
78c60e3d | 4122 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 TJ |
4123 | "type conversion to/from bit-precision unsupported." |
4124 | "\n"); | |
4a00c761 JJ |
4125 | return false; |
4126 | } | |
4127 | ||
b690cc0f | 4128 | /* Check the operands of the operation. */ |
81c40241 | 4129 | if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype_in)) |
b690cc0f | 4130 | { |
73fbfcad | 4131 | if (dump_enabled_p ()) |
78c60e3d | 4132 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 4133 | "use not simple.\n"); |
b690cc0f RG |
4134 | return false; |
4135 | } | |
4a00c761 JJ |
4136 | if (op_type == binary_op) |
4137 | { | |
4138 | bool ok; | |
4139 | ||
4140 | op1 = gimple_assign_rhs2 (stmt); | |
4141 | gcc_assert (code == WIDEN_MULT_EXPR || code == WIDEN_LSHIFT_EXPR); | |
4142 | /* For WIDEN_MULT_EXPR, if OP0 is a constant, use the type of | |
4143 | OP1. */ | |
4144 | if (CONSTANT_CLASS_P (op0)) | |
81c40241 | 4145 | ok = vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1], &vectype_in); |
4a00c761 | 4146 | else |
81c40241 | 4147 | ok = vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1]); |
4a00c761 JJ |
4148 | |
4149 | if (!ok) | |
4150 | { | |
73fbfcad | 4151 | if (dump_enabled_p ()) |
78c60e3d | 4152 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 4153 | "use not simple.\n"); |
4a00c761 JJ |
4154 | return false; |
4155 | } | |
4156 | } | |
4157 | ||
b690cc0f RG |
4158 | /* If op0 is an external or constant defs use a vector type of |
4159 | the same size as the output vector type. */ | |
ebfd146a | 4160 | if (!vectype_in) |
b690cc0f | 4161 | vectype_in = get_same_sized_vectype (rhs_type, vectype_out); |
7d8930a0 IR |
4162 | if (vec_stmt) |
4163 | gcc_assert (vectype_in); | |
4164 | if (!vectype_in) | |
4165 | { | |
73fbfcad | 4166 | if (dump_enabled_p ()) |
4a00c761 | 4167 | { |
78c60e3d SS |
4168 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
4169 | "no vectype for scalar type "); | |
4170 | dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type); | |
e645e942 | 4171 | dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); |
4a00c761 | 4172 | } |
7d8930a0 IR |
4173 | |
4174 | return false; | |
4175 | } | |
ebfd146a | 4176 | |
e6f5c25d IE |
4177 | if (VECTOR_BOOLEAN_TYPE_P (vectype_out) |
4178 | && !VECTOR_BOOLEAN_TYPE_P (vectype_in)) | |
4179 | { | |
4180 | if (dump_enabled_p ()) | |
4181 | { | |
4182 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
4183 | "can't convert between boolean and non " | |
4184 | "boolean vectors"); | |
4185 | dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type); | |
4186 | dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); | |
4187 | } | |
4188 | ||
4189 | return false; | |
4190 | } | |
4191 | ||
b690cc0f RG |
4192 | nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in); |
4193 | nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out); | |
4a00c761 | 4194 | if (nunits_in < nunits_out) |
ebfd146a IR |
4195 | modifier = NARROW; |
4196 | else if (nunits_out == nunits_in) | |
4197 | modifier = NONE; | |
ebfd146a | 4198 | else |
4a00c761 | 4199 | modifier = WIDEN; |
ebfd146a | 4200 | |
ff802fa1 IR |
4201 | /* Multiple types in SLP are handled by creating the appropriate number of |
4202 | vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in | |
4203 | case of SLP. */ | |
fce57248 | 4204 | if (slp_node) |
ebfd146a | 4205 | ncopies = 1; |
4a00c761 JJ |
4206 | else if (modifier == NARROW) |
4207 | ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out; | |
4208 | else | |
4209 | ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in; | |
b8698a0f | 4210 | |
ebfd146a IR |
4211 | /* Sanity check: make sure that at least one copy of the vectorized stmt |
4212 | needs to be generated. */ | |
4213 | gcc_assert (ncopies >= 1); | |
4214 | ||
b397965c RS |
4215 | machine_mode lhs_mode = SCALAR_TYPE_MODE (lhs_type); |
4216 | machine_mode rhs_mode = SCALAR_TYPE_MODE (rhs_type); | |
4217 | ||
ebfd146a | 4218 | /* Supportable by target? */ |
4a00c761 | 4219 | switch (modifier) |
ebfd146a | 4220 | { |
4a00c761 JJ |
4221 | case NONE: |
4222 | if (code != FIX_TRUNC_EXPR && code != FLOAT_EXPR) | |
4223 | return false; | |
4224 | if (supportable_convert_operation (code, vectype_out, vectype_in, | |
4225 | &decl1, &code1)) | |
4226 | break; | |
4227 | /* FALLTHRU */ | |
4228 | unsupported: | |
73fbfcad | 4229 | if (dump_enabled_p ()) |
78c60e3d | 4230 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 4231 | "conversion not supported by target.\n"); |
ebfd146a | 4232 | return false; |
ebfd146a | 4233 | |
4a00c761 JJ |
4234 | case WIDEN: |
4235 | if (supportable_widening_operation (code, stmt, vectype_out, vectype_in, | |
a86ec597 RH |
4236 | &code1, &code2, &multi_step_cvt, |
4237 | &interm_types)) | |
4a00c761 JJ |
4238 | { |
4239 | /* Binary widening operation can only be supported directly by the | |
4240 | architecture. */ | |
4241 | gcc_assert (!(multi_step_cvt && op_type == binary_op)); | |
4242 | break; | |
4243 | } | |
4244 | ||
4245 | if (code != FLOAT_EXPR | |
b397965c | 4246 | || GET_MODE_SIZE (lhs_mode) <= GET_MODE_SIZE (rhs_mode)) |
4a00c761 JJ |
4247 | goto unsupported; |
4248 | ||
b397965c RS |
4249 | fltsz = GET_MODE_SIZE (lhs_mode); |
4250 | FOR_EACH_2XWIDER_MODE (rhs_mode, rhs_mode) | |
4a00c761 | 4251 | { |
c94843d2 RS |
4252 | if (GET_MODE_SIZE (rhs_mode) > fltsz) |
4253 | break; | |
4254 | ||
4a00c761 JJ |
4255 | cvt_type |
4256 | = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0); | |
4257 | cvt_type = get_same_sized_vectype (cvt_type, vectype_in); | |
4258 | if (cvt_type == NULL_TREE) | |
4259 | goto unsupported; | |
4260 | ||
4261 | if (GET_MODE_SIZE (rhs_mode) == fltsz) | |
4262 | { | |
4263 | if (!supportable_convert_operation (code, vectype_out, | |
4264 | cvt_type, &decl1, &codecvt1)) | |
4265 | goto unsupported; | |
4266 | } | |
4267 | else if (!supportable_widening_operation (code, stmt, vectype_out, | |
a86ec597 RH |
4268 | cvt_type, &codecvt1, |
4269 | &codecvt2, &multi_step_cvt, | |
4a00c761 JJ |
4270 | &interm_types)) |
4271 | continue; | |
4272 | else | |
4273 | gcc_assert (multi_step_cvt == 0); | |
4274 | ||
4275 | if (supportable_widening_operation (NOP_EXPR, stmt, cvt_type, | |
a86ec597 RH |
4276 | vectype_in, &code1, &code2, |
4277 | &multi_step_cvt, &interm_types)) | |
4a00c761 JJ |
4278 | break; |
4279 | } | |
4280 | ||
4281 | if (rhs_mode == VOIDmode || GET_MODE_SIZE (rhs_mode) > fltsz) | |
4282 | goto unsupported; | |
4283 | ||
4284 | if (GET_MODE_SIZE (rhs_mode) == fltsz) | |
4285 | codecvt2 = ERROR_MARK; | |
4286 | else | |
4287 | { | |
4288 | multi_step_cvt++; | |
9771b263 | 4289 | interm_types.safe_push (cvt_type); |
4a00c761 JJ |
4290 | cvt_type = NULL_TREE; |
4291 | } | |
4292 | break; | |
4293 | ||
4294 | case NARROW: | |
4295 | gcc_assert (op_type == unary_op); | |
4296 | if (supportable_narrowing_operation (code, vectype_out, vectype_in, | |
4297 | &code1, &multi_step_cvt, | |
4298 | &interm_types)) | |
4299 | break; | |
4300 | ||
4301 | if (code != FIX_TRUNC_EXPR | |
b397965c | 4302 | || GET_MODE_SIZE (lhs_mode) >= GET_MODE_SIZE (rhs_mode)) |
4a00c761 JJ |
4303 | goto unsupported; |
4304 | ||
4a00c761 JJ |
4305 | cvt_type |
4306 | = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0); | |
4307 | cvt_type = get_same_sized_vectype (cvt_type, vectype_in); | |
4308 | if (cvt_type == NULL_TREE) | |
4309 | goto unsupported; | |
4310 | if (!supportable_convert_operation (code, cvt_type, vectype_in, | |
4311 | &decl1, &codecvt1)) | |
4312 | goto unsupported; | |
4313 | if (supportable_narrowing_operation (NOP_EXPR, vectype_out, cvt_type, | |
4314 | &code1, &multi_step_cvt, | |
4315 | &interm_types)) | |
4316 | break; | |
4317 | goto unsupported; | |
4318 | ||
4319 | default: | |
4320 | gcc_unreachable (); | |
ebfd146a IR |
4321 | } |
4322 | ||
4323 | if (!vec_stmt) /* transformation not required. */ | |
4324 | { | |
73fbfcad | 4325 | if (dump_enabled_p ()) |
78c60e3d | 4326 | dump_printf_loc (MSG_NOTE, vect_location, |
e645e942 | 4327 | "=== vectorizable_conversion ===\n"); |
4a00c761 | 4328 | if (code == FIX_TRUNC_EXPR || code == FLOAT_EXPR) |
8bd37302 BS |
4329 | { |
4330 | STMT_VINFO_TYPE (stmt_info) = type_conversion_vec_info_type; | |
4fc5ebf1 | 4331 | vect_model_simple_cost (stmt_info, ncopies, dt, ndts, NULL, NULL); |
8bd37302 | 4332 | } |
4a00c761 JJ |
4333 | else if (modifier == NARROW) |
4334 | { | |
4335 | STMT_VINFO_TYPE (stmt_info) = type_demotion_vec_info_type; | |
8bd37302 | 4336 | vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt); |
4a00c761 JJ |
4337 | } |
4338 | else | |
4339 | { | |
4340 | STMT_VINFO_TYPE (stmt_info) = type_promotion_vec_info_type; | |
8bd37302 | 4341 | vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt); |
4a00c761 | 4342 | } |
9771b263 | 4343 | interm_types.release (); |
ebfd146a IR |
4344 | return true; |
4345 | } | |
4346 | ||
67b8dbac | 4347 | /* Transform. */ |
73fbfcad | 4348 | if (dump_enabled_p ()) |
78c60e3d | 4349 | dump_printf_loc (MSG_NOTE, vect_location, |
e645e942 | 4350 | "transform conversion. ncopies = %d.\n", ncopies); |
ebfd146a | 4351 | |
4a00c761 JJ |
4352 | if (op_type == binary_op) |
4353 | { | |
4354 | if (CONSTANT_CLASS_P (op0)) | |
4355 | op0 = fold_convert (TREE_TYPE (op1), op0); | |
4356 | else if (CONSTANT_CLASS_P (op1)) | |
4357 | op1 = fold_convert (TREE_TYPE (op0), op1); | |
4358 | } | |
4359 | ||
4360 | /* In case of multi-step conversion, we first generate conversion operations | |
4361 | to the intermediate types, and then from that types to the final one. | |
4362 | We create vector destinations for the intermediate type (TYPES) received | |
4363 | from supportable_*_operation, and store them in the correct order | |
4364 | for future use in vect_create_vectorized_*_stmts (). */ | |
8c681247 | 4365 | auto_vec<tree> vec_dsts (multi_step_cvt + 1); |
82294ec1 JJ |
4366 | vec_dest = vect_create_destination_var (scalar_dest, |
4367 | (cvt_type && modifier == WIDEN) | |
4368 | ? cvt_type : vectype_out); | |
9771b263 | 4369 | vec_dsts.quick_push (vec_dest); |
4a00c761 JJ |
4370 | |
4371 | if (multi_step_cvt) | |
4372 | { | |
9771b263 DN |
4373 | for (i = interm_types.length () - 1; |
4374 | interm_types.iterate (i, &intermediate_type); i--) | |
4a00c761 JJ |
4375 | { |
4376 | vec_dest = vect_create_destination_var (scalar_dest, | |
4377 | intermediate_type); | |
9771b263 | 4378 | vec_dsts.quick_push (vec_dest); |
4a00c761 JJ |
4379 | } |
4380 | } | |
ebfd146a | 4381 | |
4a00c761 | 4382 | if (cvt_type) |
82294ec1 JJ |
4383 | vec_dest = vect_create_destination_var (scalar_dest, |
4384 | modifier == WIDEN | |
4385 | ? vectype_out : cvt_type); | |
4a00c761 JJ |
4386 | |
4387 | if (!slp_node) | |
4388 | { | |
30862efc | 4389 | if (modifier == WIDEN) |
4a00c761 | 4390 | { |
c3284718 | 4391 | vec_oprnds0.create (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1); |
4a00c761 | 4392 | if (op_type == binary_op) |
9771b263 | 4393 | vec_oprnds1.create (1); |
4a00c761 | 4394 | } |
30862efc | 4395 | else if (modifier == NARROW) |
9771b263 DN |
4396 | vec_oprnds0.create ( |
4397 | 2 * (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1)); | |
4a00c761 JJ |
4398 | } |
4399 | else if (code == WIDEN_LSHIFT_EXPR) | |
9771b263 | 4400 | vec_oprnds1.create (slp_node->vec_stmts_size); |
ebfd146a | 4401 | |
4a00c761 | 4402 | last_oprnd = op0; |
ebfd146a IR |
4403 | prev_stmt_info = NULL; |
4404 | switch (modifier) | |
4405 | { | |
4406 | case NONE: | |
4407 | for (j = 0; j < ncopies; j++) | |
4408 | { | |
ebfd146a | 4409 | if (j == 0) |
306b0c92 | 4410 | vect_get_vec_defs (op0, NULL, stmt, &vec_oprnds0, NULL, slp_node); |
ebfd146a IR |
4411 | else |
4412 | vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, NULL); | |
4413 | ||
9771b263 | 4414 | FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0) |
4a00c761 JJ |
4415 | { |
4416 | /* Arguments are ready, create the new vector stmt. */ | |
4417 | if (code1 == CALL_EXPR) | |
4418 | { | |
4419 | new_stmt = gimple_build_call (decl1, 1, vop0); | |
4420 | new_temp = make_ssa_name (vec_dest, new_stmt); | |
4421 | gimple_call_set_lhs (new_stmt, new_temp); | |
4422 | } | |
4423 | else | |
4424 | { | |
4425 | gcc_assert (TREE_CODE_LENGTH (code1) == unary_op); | |
0d0e4a03 | 4426 | new_stmt = gimple_build_assign (vec_dest, code1, vop0); |
4a00c761 JJ |
4427 | new_temp = make_ssa_name (vec_dest, new_stmt); |
4428 | gimple_assign_set_lhs (new_stmt, new_temp); | |
4429 | } | |
4430 | ||
4431 | vect_finish_stmt_generation (stmt, new_stmt, gsi); | |
4432 | if (slp_node) | |
9771b263 | 4433 | SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt); |
225ce44b RB |
4434 | else |
4435 | { | |
4436 | if (!prev_stmt_info) | |
4437 | STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt; | |
4438 | else | |
4439 | STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt; | |
4440 | prev_stmt_info = vinfo_for_stmt (new_stmt); | |
4441 | } | |
4a00c761 | 4442 | } |
ebfd146a IR |
4443 | } |
4444 | break; | |
4445 | ||
4446 | case WIDEN: | |
4447 | /* In case the vectorization factor (VF) is bigger than the number | |
4448 | of elements that we can fit in a vectype (nunits), we have to | |
4449 | generate more than one vector stmt - i.e - we need to "unroll" | |
4450 | the vector stmt by a factor VF/nunits. */ | |
4451 | for (j = 0; j < ncopies; j++) | |
4452 | { | |
4a00c761 | 4453 | /* Handle uses. */ |
ebfd146a | 4454 | if (j == 0) |
4a00c761 JJ |
4455 | { |
4456 | if (slp_node) | |
4457 | { | |
4458 | if (code == WIDEN_LSHIFT_EXPR) | |
4459 | { | |
4460 | unsigned int k; | |
ebfd146a | 4461 | |
4a00c761 JJ |
4462 | vec_oprnd1 = op1; |
4463 | /* Store vec_oprnd1 for every vector stmt to be created | |
4464 | for SLP_NODE. We check during the analysis that all | |
4465 | the shift arguments are the same. */ | |
4466 | for (k = 0; k < slp_node->vec_stmts_size - 1; k++) | |
9771b263 | 4467 | vec_oprnds1.quick_push (vec_oprnd1); |
4a00c761 JJ |
4468 | |
4469 | vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL, | |
306b0c92 | 4470 | slp_node); |
4a00c761 JJ |
4471 | } |
4472 | else | |
4473 | vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, | |
306b0c92 | 4474 | &vec_oprnds1, slp_node); |
4a00c761 JJ |
4475 | } |
4476 | else | |
4477 | { | |
81c40241 | 4478 | vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt); |
9771b263 | 4479 | vec_oprnds0.quick_push (vec_oprnd0); |
4a00c761 JJ |
4480 | if (op_type == binary_op) |
4481 | { | |
4482 | if (code == WIDEN_LSHIFT_EXPR) | |
4483 | vec_oprnd1 = op1; | |
4484 | else | |
81c40241 | 4485 | vec_oprnd1 = vect_get_vec_def_for_operand (op1, stmt); |
9771b263 | 4486 | vec_oprnds1.quick_push (vec_oprnd1); |
4a00c761 JJ |
4487 | } |
4488 | } | |
4489 | } | |
ebfd146a | 4490 | else |
4a00c761 JJ |
4491 | { |
4492 | vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0); | |
9771b263 DN |
4493 | vec_oprnds0.truncate (0); |
4494 | vec_oprnds0.quick_push (vec_oprnd0); | |
4a00c761 JJ |
4495 | if (op_type == binary_op) |
4496 | { | |
4497 | if (code == WIDEN_LSHIFT_EXPR) | |
4498 | vec_oprnd1 = op1; | |
4499 | else | |
4500 | vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[1], | |
4501 | vec_oprnd1); | |
9771b263 DN |
4502 | vec_oprnds1.truncate (0); |
4503 | vec_oprnds1.quick_push (vec_oprnd1); | |
4a00c761 JJ |
4504 | } |
4505 | } | |
ebfd146a | 4506 | |
4a00c761 JJ |
4507 | /* Arguments are ready. Create the new vector stmts. */ |
4508 | for (i = multi_step_cvt; i >= 0; i--) | |
4509 | { | |
9771b263 | 4510 | tree this_dest = vec_dsts[i]; |
4a00c761 JJ |
4511 | enum tree_code c1 = code1, c2 = code2; |
4512 | if (i == 0 && codecvt2 != ERROR_MARK) | |
4513 | { | |
4514 | c1 = codecvt1; | |
4515 | c2 = codecvt2; | |
4516 | } | |
4517 | vect_create_vectorized_promotion_stmts (&vec_oprnds0, | |
4518 | &vec_oprnds1, | |
4519 | stmt, this_dest, gsi, | |
4520 | c1, c2, decl1, decl2, | |
4521 | op_type); | |
4522 | } | |
4523 | ||
9771b263 | 4524 | FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0) |
4a00c761 JJ |
4525 | { |
4526 | if (cvt_type) | |
4527 | { | |
4528 | if (codecvt1 == CALL_EXPR) | |
4529 | { | |
4530 | new_stmt = gimple_build_call (decl1, 1, vop0); | |
4531 | new_temp = make_ssa_name (vec_dest, new_stmt); | |
4532 | gimple_call_set_lhs (new_stmt, new_temp); | |
4533 | } | |
4534 | else | |
4535 | { | |
4536 | gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op); | |
b731b390 | 4537 | new_temp = make_ssa_name (vec_dest); |
0d0e4a03 JJ |
4538 | new_stmt = gimple_build_assign (new_temp, codecvt1, |
4539 | vop0); | |
4a00c761 JJ |
4540 | } |
4541 | ||
4542 | vect_finish_stmt_generation (stmt, new_stmt, gsi); | |
4543 | } | |
4544 | else | |
4545 | new_stmt = SSA_NAME_DEF_STMT (vop0); | |
4546 | ||
4547 | if (slp_node) | |
9771b263 | 4548 | SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt); |
4a00c761 | 4549 | else |
c689ce1e RB |
4550 | { |
4551 | if (!prev_stmt_info) | |
4552 | STMT_VINFO_VEC_STMT (stmt_info) = new_stmt; | |
4553 | else | |
4554 | STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt; | |
4555 | prev_stmt_info = vinfo_for_stmt (new_stmt); | |
4556 | } | |
4a00c761 | 4557 | } |
ebfd146a | 4558 | } |
4a00c761 JJ |
4559 | |
4560 | *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info); | |
ebfd146a IR |
4561 | break; |
4562 | ||
4563 | case NARROW: | |
4564 | /* In case the vectorization factor (VF) is bigger than the number | |
4565 | of elements that we can fit in a vectype (nunits), we have to | |
4566 | generate more than one vector stmt - i.e - we need to "unroll" | |
4567 | the vector stmt by a factor VF/nunits. */ | |
4568 | for (j = 0; j < ncopies; j++) | |
4569 | { | |
4570 | /* Handle uses. */ | |
4a00c761 JJ |
4571 | if (slp_node) |
4572 | vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL, | |
306b0c92 | 4573 | slp_node); |
ebfd146a IR |
4574 | else |
4575 | { | |
9771b263 | 4576 | vec_oprnds0.truncate (0); |
4a00c761 JJ |
4577 | vect_get_loop_based_defs (&last_oprnd, stmt, dt[0], &vec_oprnds0, |
4578 | vect_pow2 (multi_step_cvt) - 1); | |
ebfd146a IR |
4579 | } |
4580 | ||
4a00c761 JJ |
4581 | /* Arguments are ready. Create the new vector stmts. */ |
4582 | if (cvt_type) | |
9771b263 | 4583 | FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0) |
4a00c761 JJ |
4584 | { |
4585 | if (codecvt1 == CALL_EXPR) | |
4586 | { | |
4587 | new_stmt = gimple_build_call (decl1, 1, vop0); | |
4588 | new_temp = make_ssa_name (vec_dest, new_stmt); | |
4589 | gimple_call_set_lhs (new_stmt, new_temp); | |
4590 | } | |
4591 | else | |
4592 | { | |
4593 | gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op); | |
b731b390 | 4594 | new_temp = make_ssa_name (vec_dest); |
0d0e4a03 JJ |
4595 | new_stmt = gimple_build_assign (new_temp, codecvt1, |
4596 | vop0); | |
4a00c761 | 4597 | } |
ebfd146a | 4598 | |
4a00c761 | 4599 | vect_finish_stmt_generation (stmt, new_stmt, gsi); |
9771b263 | 4600 | vec_oprnds0[i] = new_temp; |
4a00c761 | 4601 | } |
ebfd146a | 4602 | |
4a00c761 JJ |
4603 | vect_create_vectorized_demotion_stmts (&vec_oprnds0, multi_step_cvt, |
4604 | stmt, vec_dsts, gsi, | |
4605 | slp_node, code1, | |
4606 | &prev_stmt_info); | |
ebfd146a IR |
4607 | } |
4608 | ||
4609 | *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info); | |
4a00c761 | 4610 | break; |
ebfd146a IR |
4611 | } |
4612 | ||
9771b263 DN |
4613 | vec_oprnds0.release (); |
4614 | vec_oprnds1.release (); | |
9771b263 | 4615 | interm_types.release (); |
ebfd146a IR |
4616 | |
4617 | return true; | |
4618 | } | |
ff802fa1 IR |
4619 | |
4620 | ||
ebfd146a IR |
4621 | /* Function vectorizable_assignment. |
4622 | ||
b8698a0f L |
4623 | Check if STMT performs an assignment (copy) that can be vectorized. |
4624 | If VEC_STMT is also passed, vectorize the STMT: create a vectorized | |
ebfd146a IR |
4625 | stmt to replace it, put it in VEC_STMT, and insert it at BSI. |
4626 | Return FALSE if not a vectorizable STMT, TRUE otherwise. */ | |
4627 | ||
4628 | static bool | |
355fe088 TS |
4629 | vectorizable_assignment (gimple *stmt, gimple_stmt_iterator *gsi, |
4630 | gimple **vec_stmt, slp_tree slp_node) | |
ebfd146a IR |
4631 | { |
4632 | tree vec_dest; | |
4633 | tree scalar_dest; | |
4634 | tree op; | |
4635 | stmt_vec_info stmt_info = vinfo_for_stmt (stmt); | |
ebfd146a IR |
4636 | loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); |
4637 | tree new_temp; | |
355fe088 | 4638 | gimple *def_stmt; |
4fc5ebf1 JG |
4639 | enum vect_def_type dt[1] = {vect_unknown_def_type}; |
4640 | int ndts = 1; | |
ebfd146a | 4641 | int ncopies; |
f18b55bd | 4642 | int i, j; |
6e1aa848 | 4643 | vec<tree> vec_oprnds = vNULL; |
ebfd146a | 4644 | tree vop; |
a70d6342 | 4645 | bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info); |
310213d4 | 4646 | vec_info *vinfo = stmt_info->vinfo; |
355fe088 | 4647 | gimple *new_stmt = NULL; |
f18b55bd | 4648 | stmt_vec_info prev_stmt_info = NULL; |
fde9c428 RG |
4649 | enum tree_code code; |
4650 | tree vectype_in; | |
ebfd146a | 4651 | |
a70d6342 | 4652 | if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo) |
ebfd146a IR |
4653 | return false; |
4654 | ||
66c16fd9 RB |
4655 | if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def |
4656 | && ! vec_stmt) | |
ebfd146a IR |
4657 | return false; |
4658 | ||
4659 | /* Is vectorizable assignment? */ | |
4660 | if (!is_gimple_assign (stmt)) | |
4661 | return false; | |
4662 | ||
4663 | scalar_dest = gimple_assign_lhs (stmt); | |
4664 | if (TREE_CODE (scalar_dest) != SSA_NAME) | |
4665 | return false; | |
4666 | ||
fde9c428 | 4667 | code = gimple_assign_rhs_code (stmt); |
ebfd146a | 4668 | if (gimple_assign_single_p (stmt) |
fde9c428 RG |
4669 | || code == PAREN_EXPR |
4670 | || CONVERT_EXPR_CODE_P (code)) | |
ebfd146a IR |
4671 | op = gimple_assign_rhs1 (stmt); |
4672 | else | |
4673 | return false; | |
4674 | ||
7b7ec6c5 RG |
4675 | if (code == VIEW_CONVERT_EXPR) |
4676 | op = TREE_OPERAND (op, 0); | |
4677 | ||
465c8c19 JJ |
4678 | tree vectype = STMT_VINFO_VECTYPE (stmt_info); |
4679 | unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype); | |
4680 | ||
4681 | /* Multiple types in SLP are handled by creating the appropriate number of | |
4682 | vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in | |
4683 | case of SLP. */ | |
fce57248 | 4684 | if (slp_node) |
465c8c19 JJ |
4685 | ncopies = 1; |
4686 | else | |
4687 | ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits; | |
4688 | ||
4689 | gcc_assert (ncopies >= 1); | |
4690 | ||
81c40241 | 4691 | if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt[0], &vectype_in)) |
ebfd146a | 4692 | { |
73fbfcad | 4693 | if (dump_enabled_p ()) |
78c60e3d | 4694 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 4695 | "use not simple.\n"); |
ebfd146a IR |
4696 | return false; |
4697 | } | |
4698 | ||
fde9c428 RG |
4699 | /* We can handle NOP_EXPR conversions that do not change the number |
4700 | of elements or the vector size. */ | |
7b7ec6c5 RG |
4701 | if ((CONVERT_EXPR_CODE_P (code) |
4702 | || code == VIEW_CONVERT_EXPR) | |
fde9c428 RG |
4703 | && (!vectype_in |
4704 | || TYPE_VECTOR_SUBPARTS (vectype_in) != nunits | |
4705 | || (GET_MODE_SIZE (TYPE_MODE (vectype)) | |
4706 | != GET_MODE_SIZE (TYPE_MODE (vectype_in))))) | |
4707 | return false; | |
4708 | ||
7b7b1813 RG |
4709 | /* We do not handle bit-precision changes. */ |
4710 | if ((CONVERT_EXPR_CODE_P (code) | |
4711 | || code == VIEW_CONVERT_EXPR) | |
4712 | && INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest)) | |
2be65d9e RS |
4713 | && (!type_has_mode_precision_p (TREE_TYPE (scalar_dest)) |
4714 | || !type_has_mode_precision_p (TREE_TYPE (op))) | |
7b7b1813 RG |
4715 | /* But a conversion that does not change the bit-pattern is ok. */ |
4716 | && !((TYPE_PRECISION (TREE_TYPE (scalar_dest)) | |
4717 | > TYPE_PRECISION (TREE_TYPE (op))) | |
2dab46d5 IE |
4718 | && TYPE_UNSIGNED (TREE_TYPE (op))) |
4719 | /* Conversion between boolean types of different sizes is | |
4720 | a simple assignment in case their vectypes are same | |
4721 | boolean vectors. */ | |
4722 | && (!VECTOR_BOOLEAN_TYPE_P (vectype) | |
4723 | || !VECTOR_BOOLEAN_TYPE_P (vectype_in))) | |
7b7b1813 | 4724 | { |
73fbfcad | 4725 | if (dump_enabled_p ()) |
78c60e3d SS |
4726 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
4727 | "type conversion to/from bit-precision " | |
e645e942 | 4728 | "unsupported.\n"); |
7b7b1813 RG |
4729 | return false; |
4730 | } | |
4731 | ||
ebfd146a IR |
4732 | if (!vec_stmt) /* transformation not required. */ |
4733 | { | |
4734 | STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type; | |
73fbfcad | 4735 | if (dump_enabled_p ()) |
78c60e3d | 4736 | dump_printf_loc (MSG_NOTE, vect_location, |
e645e942 | 4737 | "=== vectorizable_assignment ===\n"); |
4fc5ebf1 | 4738 | vect_model_simple_cost (stmt_info, ncopies, dt, ndts, NULL, NULL); |
ebfd146a IR |
4739 | return true; |
4740 | } | |
4741 | ||
67b8dbac | 4742 | /* Transform. */ |
73fbfcad | 4743 | if (dump_enabled_p ()) |
e645e942 | 4744 | dump_printf_loc (MSG_NOTE, vect_location, "transform assignment.\n"); |
ebfd146a IR |
4745 | |
4746 | /* Handle def. */ | |
4747 | vec_dest = vect_create_destination_var (scalar_dest, vectype); | |
4748 | ||
4749 | /* Handle use. */ | |
f18b55bd | 4750 | for (j = 0; j < ncopies; j++) |
ebfd146a | 4751 | { |
f18b55bd IR |
4752 | /* Handle uses. */ |
4753 | if (j == 0) | |
306b0c92 | 4754 | vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node); |
f18b55bd IR |
4755 | else |
4756 | vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds, NULL); | |
4757 | ||
4758 | /* Arguments are ready. create the new vector stmt. */ | |
9771b263 | 4759 | FOR_EACH_VEC_ELT (vec_oprnds, i, vop) |
f18b55bd | 4760 | { |
7b7ec6c5 RG |
4761 | if (CONVERT_EXPR_CODE_P (code) |
4762 | || code == VIEW_CONVERT_EXPR) | |
4a73490d | 4763 | vop = build1 (VIEW_CONVERT_EXPR, vectype, vop); |
f18b55bd IR |
4764 | new_stmt = gimple_build_assign (vec_dest, vop); |
4765 | new_temp = make_ssa_name (vec_dest, new_stmt); | |
4766 | gimple_assign_set_lhs (new_stmt, new_temp); | |
4767 | vect_finish_stmt_generation (stmt, new_stmt, gsi); | |
4768 | if (slp_node) | |
9771b263 | 4769 | SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt); |
f18b55bd | 4770 | } |
ebfd146a IR |
4771 | |
4772 | if (slp_node) | |
f18b55bd IR |
4773 | continue; |
4774 | ||
4775 | if (j == 0) | |
4776 | STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt; | |
4777 | else | |
4778 | STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt; | |
4779 | ||
4780 | prev_stmt_info = vinfo_for_stmt (new_stmt); | |
4781 | } | |
b8698a0f | 4782 | |
9771b263 | 4783 | vec_oprnds.release (); |
ebfd146a IR |
4784 | return true; |
4785 | } | |
4786 | ||
9dc3f7de | 4787 | |
1107f3ae IR |
4788 | /* Return TRUE if CODE (a shift operation) is supported for SCALAR_TYPE |
4789 | either as shift by a scalar or by a vector. */ | |
4790 | ||
4791 | bool | |
4792 | vect_supportable_shift (enum tree_code code, tree scalar_type) | |
4793 | { | |
4794 | ||
ef4bddc2 | 4795 | machine_mode vec_mode; |
1107f3ae IR |
4796 | optab optab; |
4797 | int icode; | |
4798 | tree vectype; | |
4799 | ||
4800 | vectype = get_vectype_for_scalar_type (scalar_type); | |
4801 | if (!vectype) | |
4802 | return false; | |
4803 | ||
4804 | optab = optab_for_tree_code (code, vectype, optab_scalar); | |
4805 | if (!optab | |
4806 | || optab_handler (optab, TYPE_MODE (vectype)) == CODE_FOR_nothing) | |
4807 | { | |
4808 | optab = optab_for_tree_code (code, vectype, optab_vector); | |
4809 | if (!optab | |
4810 | || (optab_handler (optab, TYPE_MODE (vectype)) | |
4811 | == CODE_FOR_nothing)) | |
4812 | return false; | |
4813 | } | |
4814 | ||
4815 | vec_mode = TYPE_MODE (vectype); | |
4816 | icode = (int) optab_handler (optab, vec_mode); | |
4817 | if (icode == CODE_FOR_nothing) | |
4818 | return false; | |
4819 | ||
4820 | return true; | |
4821 | } | |
4822 | ||
4823 | ||
9dc3f7de IR |
4824 | /* Function vectorizable_shift. |
4825 | ||
4826 | Check if STMT performs a shift operation that can be vectorized. | |
4827 | If VEC_STMT is also passed, vectorize the STMT: create a vectorized | |
4828 | stmt to replace it, put it in VEC_STMT, and insert it at BSI. | |
4829 | Return FALSE if not a vectorizable STMT, TRUE otherwise. */ | |
4830 | ||
4831 | static bool | |
355fe088 TS |
4832 | vectorizable_shift (gimple *stmt, gimple_stmt_iterator *gsi, |
4833 | gimple **vec_stmt, slp_tree slp_node) | |
9dc3f7de IR |
4834 | { |
4835 | tree vec_dest; | |
4836 | tree scalar_dest; | |
4837 | tree op0, op1 = NULL; | |
4838 | tree vec_oprnd1 = NULL_TREE; | |
4839 | stmt_vec_info stmt_info = vinfo_for_stmt (stmt); | |
4840 | tree vectype; | |
4841 | loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); | |
4842 | enum tree_code code; | |
ef4bddc2 | 4843 | machine_mode vec_mode; |
9dc3f7de IR |
4844 | tree new_temp; |
4845 | optab optab; | |
4846 | int icode; | |
ef4bddc2 | 4847 | machine_mode optab_op2_mode; |
355fe088 | 4848 | gimple *def_stmt; |
9dc3f7de | 4849 | enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type}; |
4fc5ebf1 | 4850 | int ndts = 2; |
355fe088 | 4851 | gimple *new_stmt = NULL; |
9dc3f7de IR |
4852 | stmt_vec_info prev_stmt_info; |
4853 | int nunits_in; | |
4854 | int nunits_out; | |
4855 | tree vectype_out; | |
cede2577 | 4856 | tree op1_vectype; |
9dc3f7de IR |
4857 | int ncopies; |
4858 | int j, i; | |
6e1aa848 DN |
4859 | vec<tree> vec_oprnds0 = vNULL; |
4860 | vec<tree> vec_oprnds1 = vNULL; | |
9dc3f7de IR |
4861 | tree vop0, vop1; |
4862 | unsigned int k; | |
49eab32e | 4863 | bool scalar_shift_arg = true; |
9dc3f7de | 4864 | bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info); |
310213d4 | 4865 | vec_info *vinfo = stmt_info->vinfo; |
9dc3f7de IR |
4866 | int vf; |
4867 | ||
4868 | if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo) | |
4869 | return false; | |
4870 | ||
66c16fd9 RB |
4871 | if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def |
4872 | && ! vec_stmt) | |
9dc3f7de IR |
4873 | return false; |
4874 | ||
4875 | /* Is STMT a vectorizable binary/unary operation? */ | |
4876 | if (!is_gimple_assign (stmt)) | |
4877 | return false; | |
4878 | ||
4879 | if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME) | |
4880 | return false; | |
4881 | ||
4882 | code = gimple_assign_rhs_code (stmt); | |
4883 | ||
4884 | if (!(code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR | |
4885 | || code == RROTATE_EXPR)) | |
4886 | return false; | |
4887 | ||
4888 | scalar_dest = gimple_assign_lhs (stmt); | |
4889 | vectype_out = STMT_VINFO_VECTYPE (stmt_info); | |
2be65d9e | 4890 | if (!type_has_mode_precision_p (TREE_TYPE (scalar_dest))) |
7b7b1813 | 4891 | { |
73fbfcad | 4892 | if (dump_enabled_p ()) |
78c60e3d | 4893 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 4894 | "bit-precision shifts not supported.\n"); |
7b7b1813 RG |
4895 | return false; |
4896 | } | |
9dc3f7de IR |
4897 | |
4898 | op0 = gimple_assign_rhs1 (stmt); | |
81c40241 | 4899 | if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype)) |
9dc3f7de | 4900 | { |
73fbfcad | 4901 | if (dump_enabled_p ()) |
78c60e3d | 4902 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 4903 | "use not simple.\n"); |
9dc3f7de IR |
4904 | return false; |
4905 | } | |
4906 | /* If op0 is an external or constant def use a vector type with | |
4907 | the same size as the output vector type. */ | |
4908 | if (!vectype) | |
4909 | vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out); | |
4910 | if (vec_stmt) | |
4911 | gcc_assert (vectype); | |
4912 | if (!vectype) | |
4913 | { | |
73fbfcad | 4914 | if (dump_enabled_p ()) |
78c60e3d | 4915 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 4916 | "no vectype for scalar type\n"); |
9dc3f7de IR |
4917 | return false; |
4918 | } | |
4919 | ||
4920 | nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out); | |
4921 | nunits_in = TYPE_VECTOR_SUBPARTS (vectype); | |
4922 | if (nunits_out != nunits_in) | |
4923 | return false; | |
4924 | ||
4925 | op1 = gimple_assign_rhs2 (stmt); | |
81c40241 | 4926 | if (!vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1], &op1_vectype)) |
9dc3f7de | 4927 | { |
73fbfcad | 4928 | if (dump_enabled_p ()) |
78c60e3d | 4929 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 4930 | "use not simple.\n"); |
9dc3f7de IR |
4931 | return false; |
4932 | } | |
4933 | ||
4934 | if (loop_vinfo) | |
4935 | vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo); | |
4936 | else | |
4937 | vf = 1; | |
4938 | ||
4939 | /* Multiple types in SLP are handled by creating the appropriate number of | |
4940 | vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in | |
4941 | case of SLP. */ | |
fce57248 | 4942 | if (slp_node) |
9dc3f7de IR |
4943 | ncopies = 1; |
4944 | else | |
4945 | ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in; | |
4946 | ||
4947 | gcc_assert (ncopies >= 1); | |
4948 | ||
4949 | /* Determine whether the shift amount is a vector, or scalar. If the | |
4950 | shift/rotate amount is a vector, use the vector/vector shift optabs. */ | |
4951 | ||
dbfa87aa YR |
4952 | if ((dt[1] == vect_internal_def |
4953 | || dt[1] == vect_induction_def) | |
4954 | && !slp_node) | |
49eab32e JJ |
4955 | scalar_shift_arg = false; |
4956 | else if (dt[1] == vect_constant_def | |
4957 | || dt[1] == vect_external_def | |
4958 | || dt[1] == vect_internal_def) | |
4959 | { | |
4960 | /* In SLP, need to check whether the shift count is the same, | |
4961 | in loops if it is a constant or invariant, it is always | |
4962 | a scalar shift. */ | |
4963 | if (slp_node) | |
4964 | { | |
355fe088 TS |
4965 | vec<gimple *> stmts = SLP_TREE_SCALAR_STMTS (slp_node); |
4966 | gimple *slpstmt; | |
49eab32e | 4967 | |
9771b263 | 4968 | FOR_EACH_VEC_ELT (stmts, k, slpstmt) |
49eab32e JJ |
4969 | if (!operand_equal_p (gimple_assign_rhs2 (slpstmt), op1, 0)) |
4970 | scalar_shift_arg = false; | |
4971 | } | |
60d393e8 RB |
4972 | |
4973 | /* If the shift amount is computed by a pattern stmt we cannot | |
4974 | use the scalar amount directly thus give up and use a vector | |
4975 | shift. */ | |
4976 | if (dt[1] == vect_internal_def) | |
4977 | { | |
4978 | gimple *def = SSA_NAME_DEF_STMT (op1); | |
4979 | if (is_pattern_stmt_p (vinfo_for_stmt (def))) | |
4980 | scalar_shift_arg = false; | |
4981 | } | |
49eab32e JJ |
4982 | } |
4983 | else | |
4984 | { | |
73fbfcad | 4985 | if (dump_enabled_p ()) |
78c60e3d | 4986 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 4987 | "operand mode requires invariant argument.\n"); |
49eab32e JJ |
4988 | return false; |
4989 | } | |
4990 | ||
9dc3f7de | 4991 | /* Vector shifted by vector. */ |
49eab32e | 4992 | if (!scalar_shift_arg) |
9dc3f7de IR |
4993 | { |
4994 | optab = optab_for_tree_code (code, vectype, optab_vector); | |
73fbfcad | 4995 | if (dump_enabled_p ()) |
78c60e3d | 4996 | dump_printf_loc (MSG_NOTE, vect_location, |
e645e942 | 4997 | "vector/vector shift/rotate found.\n"); |
78c60e3d | 4998 | |
aa948027 JJ |
4999 | if (!op1_vectype) |
5000 | op1_vectype = get_same_sized_vectype (TREE_TYPE (op1), vectype_out); | |
5001 | if (op1_vectype == NULL_TREE | |
5002 | || TYPE_MODE (op1_vectype) != TYPE_MODE (vectype)) | |
cede2577 | 5003 | { |
73fbfcad | 5004 | if (dump_enabled_p ()) |
78c60e3d SS |
5005 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
5006 | "unusable type for last operand in" | |
e645e942 | 5007 | " vector/vector shift/rotate.\n"); |
cede2577 JJ |
5008 | return false; |
5009 | } | |
9dc3f7de IR |
5010 | } |
5011 | /* See if the machine has a vector shifted by scalar insn and if not | |
5012 | then see if it has a vector shifted by vector insn. */ | |
49eab32e | 5013 | else |
9dc3f7de IR |
5014 | { |
5015 | optab = optab_for_tree_code (code, vectype, optab_scalar); | |
5016 | if (optab | |
5017 | && optab_handler (optab, TYPE_MODE (vectype)) != CODE_FOR_nothing) | |
5018 | { | |
73fbfcad | 5019 | if (dump_enabled_p ()) |
78c60e3d | 5020 | dump_printf_loc (MSG_NOTE, vect_location, |
e645e942 | 5021 | "vector/scalar shift/rotate found.\n"); |
9dc3f7de IR |
5022 | } |
5023 | else | |
5024 | { | |
5025 | optab = optab_for_tree_code (code, vectype, optab_vector); | |
5026 | if (optab | |
5027 | && (optab_handler (optab, TYPE_MODE (vectype)) | |
5028 | != CODE_FOR_nothing)) | |
5029 | { | |
49eab32e JJ |
5030 | scalar_shift_arg = false; |
5031 | ||
73fbfcad | 5032 | if (dump_enabled_p ()) |
78c60e3d | 5033 | dump_printf_loc (MSG_NOTE, vect_location, |
e645e942 | 5034 | "vector/vector shift/rotate found.\n"); |
9dc3f7de IR |
5035 | |
5036 | /* Unlike the other binary operators, shifts/rotates have | |
5037 | the rhs being int, instead of the same type as the lhs, | |
5038 | so make sure the scalar is the right type if we are | |
aa948027 | 5039 | dealing with vectors of long long/long/short/char. */ |
9dc3f7de IR |
5040 | if (dt[1] == vect_constant_def) |
5041 | op1 = fold_convert (TREE_TYPE (vectype), op1); | |
aa948027 JJ |
5042 | else if (!useless_type_conversion_p (TREE_TYPE (vectype), |
5043 | TREE_TYPE (op1))) | |
5044 | { | |
5045 | if (slp_node | |
5046 | && TYPE_MODE (TREE_TYPE (vectype)) | |
5047 | != TYPE_MODE (TREE_TYPE (op1))) | |
5048 | { | |
73fbfcad | 5049 | if (dump_enabled_p ()) |
78c60e3d SS |
5050 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
5051 | "unusable type for last operand in" | |
e645e942 | 5052 | " vector/vector shift/rotate.\n"); |
21c0a521 | 5053 | return false; |
aa948027 JJ |
5054 | } |
5055 | if (vec_stmt && !slp_node) | |
5056 | { | |
5057 | op1 = fold_convert (TREE_TYPE (vectype), op1); | |
5058 | op1 = vect_init_vector (stmt, op1, | |
5059 | TREE_TYPE (vectype), NULL); | |
5060 | } | |
5061 | } | |
9dc3f7de IR |
5062 | } |
5063 | } | |
5064 | } | |
9dc3f7de IR |
5065 | |
5066 | /* Supportable by target? */ | |
5067 | if (!optab) | |
5068 | { | |
73fbfcad | 5069 | if (dump_enabled_p ()) |
78c60e3d | 5070 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 5071 | "no optab.\n"); |
9dc3f7de IR |
5072 | return false; |
5073 | } | |
5074 | vec_mode = TYPE_MODE (vectype); | |
5075 | icode = (int) optab_handler (optab, vec_mode); | |
5076 | if (icode == CODE_FOR_nothing) | |
5077 | { | |
73fbfcad | 5078 | if (dump_enabled_p ()) |
78c60e3d | 5079 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 5080 | "op not supported by target.\n"); |
9dc3f7de IR |
5081 | /* Check only during analysis. */ |
5082 | if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD | |
5083 | || (vf < vect_min_worthwhile_factor (code) | |
5084 | && !vec_stmt)) | |
5085 | return false; | |
73fbfcad | 5086 | if (dump_enabled_p ()) |
e645e942 TJ |
5087 | dump_printf_loc (MSG_NOTE, vect_location, |
5088 | "proceeding using word mode.\n"); | |
9dc3f7de IR |
5089 | } |
5090 | ||
5091 | /* Worthwhile without SIMD support? Check only during analysis. */ | |
5092 | if (!VECTOR_MODE_P (TYPE_MODE (vectype)) | |
5093 | && vf < vect_min_worthwhile_factor (code) | |
5094 | && !vec_stmt) | |
5095 | { | |
73fbfcad | 5096 | if (dump_enabled_p ()) |
78c60e3d | 5097 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 5098 | "not worthwhile without SIMD support.\n"); |
9dc3f7de IR |
5099 | return false; |
5100 | } | |
5101 | ||
5102 | if (!vec_stmt) /* transformation not required. */ | |
5103 | { | |
5104 | STMT_VINFO_TYPE (stmt_info) = shift_vec_info_type; | |
73fbfcad | 5105 | if (dump_enabled_p ()) |
e645e942 TJ |
5106 | dump_printf_loc (MSG_NOTE, vect_location, |
5107 | "=== vectorizable_shift ===\n"); | |
4fc5ebf1 | 5108 | vect_model_simple_cost (stmt_info, ncopies, dt, ndts, NULL, NULL); |
9dc3f7de IR |
5109 | return true; |
5110 | } | |
5111 | ||
67b8dbac | 5112 | /* Transform. */ |
9dc3f7de | 5113 | |
73fbfcad | 5114 | if (dump_enabled_p ()) |
78c60e3d | 5115 | dump_printf_loc (MSG_NOTE, vect_location, |
e645e942 | 5116 | "transform binary/unary operation.\n"); |
9dc3f7de IR |
5117 | |
5118 | /* Handle def. */ | |
5119 | vec_dest = vect_create_destination_var (scalar_dest, vectype); | |
5120 | ||
9dc3f7de IR |
5121 | prev_stmt_info = NULL; |
5122 | for (j = 0; j < ncopies; j++) | |
5123 | { | |
5124 | /* Handle uses. */ | |
5125 | if (j == 0) | |
5126 | { | |
5127 | if (scalar_shift_arg) | |
5128 | { | |
5129 | /* Vector shl and shr insn patterns can be defined with scalar | |
5130 | operand 2 (shift operand). In this case, use constant or loop | |
5131 | invariant op1 directly, without extending it to vector mode | |
5132 | first. */ | |
5133 | optab_op2_mode = insn_data[icode].operand[2].mode; | |
5134 | if (!VECTOR_MODE_P (optab_op2_mode)) | |
5135 | { | |
73fbfcad | 5136 | if (dump_enabled_p ()) |
78c60e3d | 5137 | dump_printf_loc (MSG_NOTE, vect_location, |
e645e942 | 5138 | "operand 1 using scalar mode.\n"); |
9dc3f7de | 5139 | vec_oprnd1 = op1; |
8930f723 | 5140 | vec_oprnds1.create (slp_node ? slp_node->vec_stmts_size : 1); |
9771b263 | 5141 | vec_oprnds1.quick_push (vec_oprnd1); |
9dc3f7de IR |
5142 | if (slp_node) |
5143 | { | |
5144 | /* Store vec_oprnd1 for every vector stmt to be created | |
5145 | for SLP_NODE. We check during the analysis that all | |
5146 | the shift arguments are the same. | |
5147 | TODO: Allow different constants for different vector | |
5148 | stmts generated for an SLP instance. */ | |
5149 | for (k = 0; k < slp_node->vec_stmts_size - 1; k++) | |
9771b263 | 5150 | vec_oprnds1.quick_push (vec_oprnd1); |
9dc3f7de IR |
5151 | } |
5152 | } | |
5153 | } | |
5154 | ||
5155 | /* vec_oprnd1 is available if operand 1 should be of a scalar-type | |
5156 | (a special case for certain kind of vector shifts); otherwise, | |
5157 | operand 1 should be of a vector type (the usual case). */ | |
5158 | if (vec_oprnd1) | |
5159 | vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL, | |
306b0c92 | 5160 | slp_node); |
9dc3f7de IR |
5161 | else |
5162 | vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1, | |
306b0c92 | 5163 | slp_node); |
9dc3f7de IR |
5164 | } |
5165 | else | |
5166 | vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1); | |
5167 | ||
5168 | /* Arguments are ready. Create the new vector stmt. */ | |
9771b263 | 5169 | FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0) |
9dc3f7de | 5170 | { |
9771b263 | 5171 | vop1 = vec_oprnds1[i]; |
0d0e4a03 | 5172 | new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1); |
9dc3f7de IR |
5173 | new_temp = make_ssa_name (vec_dest, new_stmt); |
5174 | gimple_assign_set_lhs (new_stmt, new_temp); | |
5175 | vect_finish_stmt_generation (stmt, new_stmt, gsi); | |
5176 | if (slp_node) | |
9771b263 | 5177 | SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt); |
9dc3f7de IR |
5178 | } |
5179 | ||
5180 | if (slp_node) | |
5181 | continue; | |
5182 | ||
5183 | if (j == 0) | |
5184 | STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt; | |
5185 | else | |
5186 | STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt; | |
5187 | prev_stmt_info = vinfo_for_stmt (new_stmt); | |
5188 | } | |
5189 | ||
9771b263 DN |
5190 | vec_oprnds0.release (); |
5191 | vec_oprnds1.release (); | |
9dc3f7de IR |
5192 | |
5193 | return true; | |
5194 | } | |
5195 | ||
5196 | ||
ebfd146a IR |
5197 | /* Function vectorizable_operation. |
5198 | ||
16949072 RG |
5199 | Check if STMT performs a binary, unary or ternary operation that can |
5200 | be vectorized. | |
b8698a0f | 5201 | If VEC_STMT is also passed, vectorize the STMT: create a vectorized |
ebfd146a IR |
5202 | stmt to replace it, put it in VEC_STMT, and insert it at BSI. |
5203 | Return FALSE if not a vectorizable STMT, TRUE otherwise. */ | |
5204 | ||
5205 | static bool | |
355fe088 TS |
5206 | vectorizable_operation (gimple *stmt, gimple_stmt_iterator *gsi, |
5207 | gimple **vec_stmt, slp_tree slp_node) | |
ebfd146a | 5208 | { |
00f07b86 | 5209 | tree vec_dest; |
ebfd146a | 5210 | tree scalar_dest; |
16949072 | 5211 | tree op0, op1 = NULL_TREE, op2 = NULL_TREE; |
ebfd146a | 5212 | stmt_vec_info stmt_info = vinfo_for_stmt (stmt); |
00f07b86 | 5213 | tree vectype; |
ebfd146a IR |
5214 | loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); |
5215 | enum tree_code code; | |
ef4bddc2 | 5216 | machine_mode vec_mode; |
ebfd146a IR |
5217 | tree new_temp; |
5218 | int op_type; | |
00f07b86 | 5219 | optab optab; |
523ba738 | 5220 | bool target_support_p; |
355fe088 | 5221 | gimple *def_stmt; |
16949072 RG |
5222 | enum vect_def_type dt[3] |
5223 | = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type}; | |
4fc5ebf1 | 5224 | int ndts = 3; |
355fe088 | 5225 | gimple *new_stmt = NULL; |
ebfd146a | 5226 | stmt_vec_info prev_stmt_info; |
b690cc0f | 5227 | int nunits_in; |
ebfd146a IR |
5228 | int nunits_out; |
5229 | tree vectype_out; | |
5230 | int ncopies; | |
5231 | int j, i; | |
6e1aa848 DN |
5232 | vec<tree> vec_oprnds0 = vNULL; |
5233 | vec<tree> vec_oprnds1 = vNULL; | |
5234 | vec<tree> vec_oprnds2 = vNULL; | |
16949072 | 5235 | tree vop0, vop1, vop2; |
a70d6342 | 5236 | bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info); |
310213d4 | 5237 | vec_info *vinfo = stmt_info->vinfo; |
a70d6342 IR |
5238 | int vf; |
5239 | ||
a70d6342 | 5240 | if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo) |
ebfd146a IR |
5241 | return false; |
5242 | ||
66c16fd9 RB |
5243 | if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def |
5244 | && ! vec_stmt) | |
ebfd146a IR |
5245 | return false; |
5246 | ||
5247 | /* Is STMT a vectorizable binary/unary operation? */ | |
5248 | if (!is_gimple_assign (stmt)) | |
5249 | return false; | |
5250 | ||
5251 | if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME) | |
5252 | return false; | |
5253 | ||
ebfd146a IR |
5254 | code = gimple_assign_rhs_code (stmt); |
5255 | ||
5256 | /* For pointer addition, we should use the normal plus for | |
5257 | the vector addition. */ | |
5258 | if (code == POINTER_PLUS_EXPR) | |
5259 | code = PLUS_EXPR; | |
5260 | ||
5261 | /* Support only unary or binary operations. */ | |
5262 | op_type = TREE_CODE_LENGTH (code); | |
16949072 | 5263 | if (op_type != unary_op && op_type != binary_op && op_type != ternary_op) |
ebfd146a | 5264 | { |
73fbfcad | 5265 | if (dump_enabled_p ()) |
78c60e3d | 5266 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 5267 | "num. args = %d (not unary/binary/ternary op).\n", |
78c60e3d | 5268 | op_type); |
ebfd146a IR |
5269 | return false; |
5270 | } | |
5271 | ||
b690cc0f RG |
5272 | scalar_dest = gimple_assign_lhs (stmt); |
5273 | vectype_out = STMT_VINFO_VECTYPE (stmt_info); | |
5274 | ||
7b7b1813 RG |
5275 | /* Most operations cannot handle bit-precision types without extra |
5276 | truncations. */ | |
045c1278 | 5277 | if (!VECTOR_BOOLEAN_TYPE_P (vectype_out) |
2be65d9e | 5278 | && !type_has_mode_precision_p (TREE_TYPE (scalar_dest)) |
7b7b1813 RG |
5279 | /* Exception are bitwise binary operations. */ |
5280 | && code != BIT_IOR_EXPR | |
5281 | && code != BIT_XOR_EXPR | |
5282 | && code != BIT_AND_EXPR) | |
5283 | { | |
73fbfcad | 5284 | if (dump_enabled_p ()) |
78c60e3d | 5285 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 5286 | "bit-precision arithmetic not supported.\n"); |
7b7b1813 RG |
5287 | return false; |
5288 | } | |
5289 | ||
ebfd146a | 5290 | op0 = gimple_assign_rhs1 (stmt); |
81c40241 | 5291 | if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype)) |
ebfd146a | 5292 | { |
73fbfcad | 5293 | if (dump_enabled_p ()) |
78c60e3d | 5294 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 5295 | "use not simple.\n"); |
ebfd146a IR |
5296 | return false; |
5297 | } | |
b690cc0f RG |
5298 | /* If op0 is an external or constant def use a vector type with |
5299 | the same size as the output vector type. */ | |
5300 | if (!vectype) | |
b036c6c5 IE |
5301 | { |
5302 | /* For boolean type we cannot determine vectype by | |
5303 | invariant value (don't know whether it is a vector | |
5304 | of booleans or vector of integers). We use output | |
5305 | vectype because operations on boolean don't change | |
5306 | type. */ | |
2568d8a1 | 5307 | if (VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (op0))) |
b036c6c5 | 5308 | { |
2568d8a1 | 5309 | if (!VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (scalar_dest))) |
b036c6c5 IE |
5310 | { |
5311 | if (dump_enabled_p ()) | |
5312 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
5313 | "not supported operation on bool value.\n"); | |
5314 | return false; | |
5315 | } | |
5316 | vectype = vectype_out; | |
5317 | } | |
5318 | else | |
5319 | vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out); | |
5320 | } | |
7d8930a0 IR |
5321 | if (vec_stmt) |
5322 | gcc_assert (vectype); | |
5323 | if (!vectype) | |
5324 | { | |
73fbfcad | 5325 | if (dump_enabled_p ()) |
7d8930a0 | 5326 | { |
78c60e3d SS |
5327 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
5328 | "no vectype for scalar type "); | |
5329 | dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, | |
5330 | TREE_TYPE (op0)); | |
e645e942 | 5331 | dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); |
7d8930a0 IR |
5332 | } |
5333 | ||
5334 | return false; | |
5335 | } | |
b690cc0f RG |
5336 | |
5337 | nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out); | |
5338 | nunits_in = TYPE_VECTOR_SUBPARTS (vectype); | |
5339 | if (nunits_out != nunits_in) | |
5340 | return false; | |
ebfd146a | 5341 | |
16949072 | 5342 | if (op_type == binary_op || op_type == ternary_op) |
ebfd146a IR |
5343 | { |
5344 | op1 = gimple_assign_rhs2 (stmt); | |
81c40241 | 5345 | if (!vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1])) |
ebfd146a | 5346 | { |
73fbfcad | 5347 | if (dump_enabled_p ()) |
78c60e3d | 5348 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 5349 | "use not simple.\n"); |
ebfd146a IR |
5350 | return false; |
5351 | } | |
5352 | } | |
16949072 RG |
5353 | if (op_type == ternary_op) |
5354 | { | |
5355 | op2 = gimple_assign_rhs3 (stmt); | |
81c40241 | 5356 | if (!vect_is_simple_use (op2, vinfo, &def_stmt, &dt[2])) |
16949072 | 5357 | { |
73fbfcad | 5358 | if (dump_enabled_p ()) |
78c60e3d | 5359 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 5360 | "use not simple.\n"); |
16949072 RG |
5361 | return false; |
5362 | } | |
5363 | } | |
ebfd146a | 5364 | |
b690cc0f RG |
5365 | if (loop_vinfo) |
5366 | vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo); | |
5367 | else | |
5368 | vf = 1; | |
5369 | ||
5370 | /* Multiple types in SLP are handled by creating the appropriate number of | |
ff802fa1 | 5371 | vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in |
b690cc0f | 5372 | case of SLP. */ |
fce57248 | 5373 | if (slp_node) |
b690cc0f RG |
5374 | ncopies = 1; |
5375 | else | |
5376 | ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in; | |
5377 | ||
5378 | gcc_assert (ncopies >= 1); | |
5379 | ||
9dc3f7de | 5380 | /* Shifts are handled in vectorizable_shift (). */ |
ebfd146a IR |
5381 | if (code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR |
5382 | || code == RROTATE_EXPR) | |
9dc3f7de | 5383 | return false; |
ebfd146a | 5384 | |
ebfd146a | 5385 | /* Supportable by target? */ |
00f07b86 RH |
5386 | |
5387 | vec_mode = TYPE_MODE (vectype); | |
5388 | if (code == MULT_HIGHPART_EXPR) | |
523ba738 | 5389 | target_support_p = can_mult_highpart_p (vec_mode, TYPE_UNSIGNED (vectype)); |
00f07b86 RH |
5390 | else |
5391 | { | |
5392 | optab = optab_for_tree_code (code, vectype, optab_default); | |
5393 | if (!optab) | |
5deb57cb | 5394 | { |
73fbfcad | 5395 | if (dump_enabled_p ()) |
78c60e3d | 5396 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 5397 | "no optab.\n"); |
00f07b86 | 5398 | return false; |
5deb57cb | 5399 | } |
523ba738 RS |
5400 | target_support_p = (optab_handler (optab, vec_mode) |
5401 | != CODE_FOR_nothing); | |
5deb57cb JJ |
5402 | } |
5403 | ||
523ba738 | 5404 | if (!target_support_p) |
ebfd146a | 5405 | { |
73fbfcad | 5406 | if (dump_enabled_p ()) |
78c60e3d | 5407 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 5408 | "op not supported by target.\n"); |
ebfd146a IR |
5409 | /* Check only during analysis. */ |
5410 | if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD | |
5deb57cb | 5411 | || (!vec_stmt && vf < vect_min_worthwhile_factor (code))) |
ebfd146a | 5412 | return false; |
73fbfcad | 5413 | if (dump_enabled_p ()) |
e645e942 TJ |
5414 | dump_printf_loc (MSG_NOTE, vect_location, |
5415 | "proceeding using word mode.\n"); | |
383d9c83 IR |
5416 | } |
5417 | ||
4a00c761 | 5418 | /* Worthwhile without SIMD support? Check only during analysis. */ |
5deb57cb JJ |
5419 | if (!VECTOR_MODE_P (vec_mode) |
5420 | && !vec_stmt | |
5421 | && vf < vect_min_worthwhile_factor (code)) | |
7d8930a0 | 5422 | { |
73fbfcad | 5423 | if (dump_enabled_p ()) |
78c60e3d | 5424 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 5425 | "not worthwhile without SIMD support.\n"); |
e34842c6 | 5426 | return false; |
7d8930a0 | 5427 | } |
ebfd146a | 5428 | |
ebfd146a IR |
5429 | if (!vec_stmt) /* transformation not required. */ |
5430 | { | |
4a00c761 | 5431 | STMT_VINFO_TYPE (stmt_info) = op_vec_info_type; |
73fbfcad | 5432 | if (dump_enabled_p ()) |
78c60e3d | 5433 | dump_printf_loc (MSG_NOTE, vect_location, |
e645e942 | 5434 | "=== vectorizable_operation ===\n"); |
4fc5ebf1 | 5435 | vect_model_simple_cost (stmt_info, ncopies, dt, ndts, NULL, NULL); |
ebfd146a IR |
5436 | return true; |
5437 | } | |
5438 | ||
67b8dbac | 5439 | /* Transform. */ |
ebfd146a | 5440 | |
73fbfcad | 5441 | if (dump_enabled_p ()) |
78c60e3d | 5442 | dump_printf_loc (MSG_NOTE, vect_location, |
e645e942 | 5443 | "transform binary/unary operation.\n"); |
383d9c83 | 5444 | |
ebfd146a | 5445 | /* Handle def. */ |
00f07b86 | 5446 | vec_dest = vect_create_destination_var (scalar_dest, vectype); |
b8698a0f | 5447 | |
ebfd146a IR |
5448 | /* In case the vectorization factor (VF) is bigger than the number |
5449 | of elements that we can fit in a vectype (nunits), we have to generate | |
5450 | more than one vector stmt - i.e - we need to "unroll" the | |
4a00c761 JJ |
5451 | vector stmt by a factor VF/nunits. In doing so, we record a pointer |
5452 | from one copy of the vector stmt to the next, in the field | |
5453 | STMT_VINFO_RELATED_STMT. This is necessary in order to allow following | |
5454 | stages to find the correct vector defs to be used when vectorizing | |
5455 | stmts that use the defs of the current stmt. The example below | |
5456 | illustrates the vectorization process when VF=16 and nunits=4 (i.e., | |
5457 | we need to create 4 vectorized stmts): | |
5458 | ||
5459 | before vectorization: | |
5460 | RELATED_STMT VEC_STMT | |
5461 | S1: x = memref - - | |
5462 | S2: z = x + 1 - - | |
5463 | ||
5464 | step 1: vectorize stmt S1 (done in vectorizable_load. See more details | |
5465 | there): | |
5466 | RELATED_STMT VEC_STMT | |
5467 | VS1_0: vx0 = memref0 VS1_1 - | |
5468 | VS1_1: vx1 = memref1 VS1_2 - | |
5469 | VS1_2: vx2 = memref2 VS1_3 - | |
5470 | VS1_3: vx3 = memref3 - - | |
5471 | S1: x = load - VS1_0 | |
5472 | S2: z = x + 1 - - | |
5473 | ||
5474 | step2: vectorize stmt S2 (done here): | |
5475 | To vectorize stmt S2 we first need to find the relevant vector | |
5476 | def for the first operand 'x'. This is, as usual, obtained from | |
5477 | the vector stmt recorded in the STMT_VINFO_VEC_STMT of the stmt | |
5478 | that defines 'x' (S1). This way we find the stmt VS1_0, and the | |
5479 | relevant vector def 'vx0'. Having found 'vx0' we can generate | |
5480 | the vector stmt VS2_0, and as usual, record it in the | |
5481 | STMT_VINFO_VEC_STMT of stmt S2. | |
5482 | When creating the second copy (VS2_1), we obtain the relevant vector | |
5483 | def from the vector stmt recorded in the STMT_VINFO_RELATED_STMT of | |
5484 | stmt VS1_0. This way we find the stmt VS1_1 and the relevant | |
5485 | vector def 'vx1'. Using 'vx1' we create stmt VS2_1 and record a | |
5486 | pointer to it in the STMT_VINFO_RELATED_STMT of the vector stmt VS2_0. | |
5487 | Similarly when creating stmts VS2_2 and VS2_3. This is the resulting | |
5488 | chain of stmts and pointers: | |
5489 | RELATED_STMT VEC_STMT | |
5490 | VS1_0: vx0 = memref0 VS1_1 - | |
5491 | VS1_1: vx1 = memref1 VS1_2 - | |
5492 | VS1_2: vx2 = memref2 VS1_3 - | |
5493 | VS1_3: vx3 = memref3 - - | |
5494 | S1: x = load - VS1_0 | |
5495 | VS2_0: vz0 = vx0 + v1 VS2_1 - | |
5496 | VS2_1: vz1 = vx1 + v1 VS2_2 - | |
5497 | VS2_2: vz2 = vx2 + v1 VS2_3 - | |
5498 | VS2_3: vz3 = vx3 + v1 - - | |
5499 | S2: z = x + 1 - VS2_0 */ | |
ebfd146a IR |
5500 | |
5501 | prev_stmt_info = NULL; | |
5502 | for (j = 0; j < ncopies; j++) | |
5503 | { | |
5504 | /* Handle uses. */ | |
5505 | if (j == 0) | |
4a00c761 JJ |
5506 | { |
5507 | if (op_type == binary_op || op_type == ternary_op) | |
5508 | vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1, | |
306b0c92 | 5509 | slp_node); |
4a00c761 JJ |
5510 | else |
5511 | vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL, | |
306b0c92 | 5512 | slp_node); |
4a00c761 | 5513 | if (op_type == ternary_op) |
c392943c | 5514 | vect_get_vec_defs (op2, NULL_TREE, stmt, &vec_oprnds2, NULL, |
306b0c92 | 5515 | slp_node); |
4a00c761 | 5516 | } |
ebfd146a | 5517 | else |
4a00c761 JJ |
5518 | { |
5519 | vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1); | |
5520 | if (op_type == ternary_op) | |
5521 | { | |
9771b263 DN |
5522 | tree vec_oprnd = vec_oprnds2.pop (); |
5523 | vec_oprnds2.quick_push (vect_get_vec_def_for_stmt_copy (dt[2], | |
5524 | vec_oprnd)); | |
4a00c761 JJ |
5525 | } |
5526 | } | |
5527 | ||
5528 | /* Arguments are ready. Create the new vector stmt. */ | |
9771b263 | 5529 | FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0) |
ebfd146a | 5530 | { |
4a00c761 | 5531 | vop1 = ((op_type == binary_op || op_type == ternary_op) |
9771b263 | 5532 | ? vec_oprnds1[i] : NULL_TREE); |
4a00c761 | 5533 | vop2 = ((op_type == ternary_op) |
9771b263 | 5534 | ? vec_oprnds2[i] : NULL_TREE); |
0d0e4a03 | 5535 | new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1, vop2); |
4a00c761 JJ |
5536 | new_temp = make_ssa_name (vec_dest, new_stmt); |
5537 | gimple_assign_set_lhs (new_stmt, new_temp); | |
5538 | vect_finish_stmt_generation (stmt, new_stmt, gsi); | |
5539 | if (slp_node) | |
9771b263 | 5540 | SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt); |
ebfd146a IR |
5541 | } |
5542 | ||
4a00c761 JJ |
5543 | if (slp_node) |
5544 | continue; | |
5545 | ||
5546 | if (j == 0) | |
5547 | STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt; | |
5548 | else | |
5549 | STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt; | |
5550 | prev_stmt_info = vinfo_for_stmt (new_stmt); | |
ebfd146a IR |
5551 | } |
5552 | ||
9771b263 DN |
5553 | vec_oprnds0.release (); |
5554 | vec_oprnds1.release (); | |
5555 | vec_oprnds2.release (); | |
ebfd146a | 5556 | |
ebfd146a IR |
5557 | return true; |
5558 | } | |
5559 | ||
c716e67f XDL |
5560 | /* A helper function to ensure data reference DR's base alignment |
5561 | for STMT_INFO. */ | |
5562 | ||
5563 | static void | |
5564 | ensure_base_align (stmt_vec_info stmt_info, struct data_reference *dr) | |
5565 | { | |
5566 | if (!dr->aux) | |
5567 | return; | |
5568 | ||
52639a61 | 5569 | if (DR_VECT_AUX (dr)->base_misaligned) |
c716e67f XDL |
5570 | { |
5571 | tree vectype = STMT_VINFO_VECTYPE (stmt_info); | |
52639a61 | 5572 | tree base_decl = DR_VECT_AUX (dr)->base_decl; |
c716e67f | 5573 | |
428f0c67 JH |
5574 | if (decl_in_symtab_p (base_decl)) |
5575 | symtab_node::get (base_decl)->increase_alignment (TYPE_ALIGN (vectype)); | |
5576 | else | |
5577 | { | |
fe37c7af | 5578 | SET_DECL_ALIGN (base_decl, TYPE_ALIGN (vectype)); |
428f0c67 JH |
5579 | DECL_USER_ALIGN (base_decl) = 1; |
5580 | } | |
52639a61 | 5581 | DR_VECT_AUX (dr)->base_misaligned = false; |
c716e67f XDL |
5582 | } |
5583 | } | |
5584 | ||
ebfd146a | 5585 | |
44fc7854 BE |
5586 | /* Function get_group_alias_ptr_type. |
5587 | ||
5588 | Return the alias type for the group starting at FIRST_STMT. */ | |
5589 | ||
5590 | static tree | |
5591 | get_group_alias_ptr_type (gimple *first_stmt) | |
5592 | { | |
5593 | struct data_reference *first_dr, *next_dr; | |
5594 | gimple *next_stmt; | |
5595 | ||
5596 | first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt)); | |
5597 | next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (first_stmt)); | |
5598 | while (next_stmt) | |
5599 | { | |
5600 | next_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (next_stmt)); | |
5601 | if (get_alias_set (DR_REF (first_dr)) | |
5602 | != get_alias_set (DR_REF (next_dr))) | |
5603 | { | |
5604 | if (dump_enabled_p ()) | |
5605 | dump_printf_loc (MSG_NOTE, vect_location, | |
5606 | "conflicting alias set types.\n"); | |
5607 | return ptr_type_node; | |
5608 | } | |
5609 | next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt)); | |
5610 | } | |
5611 | return reference_alias_ptr_type (DR_REF (first_dr)); | |
5612 | } | |
5613 | ||
5614 | ||
ebfd146a IR |
5615 | /* Function vectorizable_store. |
5616 | ||
b8698a0f L |
5617 | Check if STMT defines a non scalar data-ref (array/pointer/structure) that |
5618 | can be vectorized. | |
5619 | If VEC_STMT is also passed, vectorize the STMT: create a vectorized | |
ebfd146a IR |
5620 | stmt to replace it, put it in VEC_STMT, and insert it at BSI. |
5621 | Return FALSE if not a vectorizable STMT, TRUE otherwise. */ | |
5622 | ||
5623 | static bool | |
355fe088 | 5624 | vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt, |
c716e67f | 5625 | slp_tree slp_node) |
ebfd146a IR |
5626 | { |
5627 | tree scalar_dest; | |
5628 | tree data_ref; | |
5629 | tree op; | |
5630 | tree vec_oprnd = NULL_TREE; | |
5631 | stmt_vec_info stmt_info = vinfo_for_stmt (stmt); | |
5632 | struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL; | |
272c6793 | 5633 | tree elem_type; |
ebfd146a | 5634 | loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); |
a70d6342 | 5635 | struct loop *loop = NULL; |
ef4bddc2 | 5636 | machine_mode vec_mode; |
ebfd146a IR |
5637 | tree dummy; |
5638 | enum dr_alignment_support alignment_support_scheme; | |
355fe088 | 5639 | gimple *def_stmt; |
ebfd146a IR |
5640 | enum vect_def_type dt; |
5641 | stmt_vec_info prev_stmt_info = NULL; | |
5642 | tree dataref_ptr = NULL_TREE; | |
74bf76ed | 5643 | tree dataref_offset = NULL_TREE; |
355fe088 | 5644 | gimple *ptr_incr = NULL; |
ebfd146a IR |
5645 | int ncopies; |
5646 | int j; | |
2de001ee RS |
5647 | gimple *next_stmt, *first_stmt; |
5648 | bool grouped_store; | |
ebfd146a | 5649 | unsigned int group_size, i; |
6e1aa848 DN |
5650 | vec<tree> oprnds = vNULL; |
5651 | vec<tree> result_chain = vNULL; | |
ebfd146a | 5652 | bool inv_p; |
09dfa495 | 5653 | tree offset = NULL_TREE; |
6e1aa848 | 5654 | vec<tree> vec_oprnds = vNULL; |
ebfd146a | 5655 | bool slp = (slp_node != NULL); |
ebfd146a | 5656 | unsigned int vec_num; |
a70d6342 | 5657 | bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info); |
310213d4 | 5658 | vec_info *vinfo = stmt_info->vinfo; |
272c6793 | 5659 | tree aggr_type; |
134c85ca | 5660 | gather_scatter_info gs_info; |
3bab6342 | 5661 | enum vect_def_type scatter_src_dt = vect_unknown_def_type; |
355fe088 | 5662 | gimple *new_stmt; |
b17dc4d4 | 5663 | int vf; |
2de001ee | 5664 | vec_load_store_type vls_type; |
44fc7854 | 5665 | tree ref_type; |
a70d6342 | 5666 | |
a70d6342 | 5667 | if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo) |
ebfd146a IR |
5668 | return false; |
5669 | ||
66c16fd9 RB |
5670 | if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def |
5671 | && ! vec_stmt) | |
ebfd146a IR |
5672 | return false; |
5673 | ||
5674 | /* Is vectorizable store? */ | |
5675 | ||
5676 | if (!is_gimple_assign (stmt)) | |
5677 | return false; | |
5678 | ||
5679 | scalar_dest = gimple_assign_lhs (stmt); | |
ab0ef706 JJ |
5680 | if (TREE_CODE (scalar_dest) == VIEW_CONVERT_EXPR |
5681 | && is_pattern_stmt_p (stmt_info)) | |
5682 | scalar_dest = TREE_OPERAND (scalar_dest, 0); | |
ebfd146a | 5683 | if (TREE_CODE (scalar_dest) != ARRAY_REF |
38000232 | 5684 | && TREE_CODE (scalar_dest) != BIT_FIELD_REF |
ebfd146a | 5685 | && TREE_CODE (scalar_dest) != INDIRECT_REF |
e9dbe7bb IR |
5686 | && TREE_CODE (scalar_dest) != COMPONENT_REF |
5687 | && TREE_CODE (scalar_dest) != IMAGPART_EXPR | |
70f34814 RG |
5688 | && TREE_CODE (scalar_dest) != REALPART_EXPR |
5689 | && TREE_CODE (scalar_dest) != MEM_REF) | |
ebfd146a IR |
5690 | return false; |
5691 | ||
fce57248 RS |
5692 | /* Cannot have hybrid store SLP -- that would mean storing to the |
5693 | same location twice. */ | |
5694 | gcc_assert (slp == PURE_SLP_STMT (stmt_info)); | |
5695 | ||
ebfd146a | 5696 | gcc_assert (gimple_assign_single_p (stmt)); |
465c8c19 | 5697 | |
f4d09712 | 5698 | tree vectype = STMT_VINFO_VECTYPE (stmt_info), rhs_vectype = NULL_TREE; |
465c8c19 JJ |
5699 | unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype); |
5700 | ||
5701 | if (loop_vinfo) | |
b17dc4d4 RB |
5702 | { |
5703 | loop = LOOP_VINFO_LOOP (loop_vinfo); | |
5704 | vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo); | |
5705 | } | |
5706 | else | |
5707 | vf = 1; | |
465c8c19 JJ |
5708 | |
5709 | /* Multiple types in SLP are handled by creating the appropriate number of | |
5710 | vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in | |
5711 | case of SLP. */ | |
fce57248 | 5712 | if (slp) |
465c8c19 JJ |
5713 | ncopies = 1; |
5714 | else | |
5715 | ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits; | |
5716 | ||
5717 | gcc_assert (ncopies >= 1); | |
5718 | ||
5719 | /* FORNOW. This restriction should be relaxed. */ | |
5720 | if (loop && nested_in_vect_loop_p (loop, stmt) && ncopies > 1) | |
5721 | { | |
5722 | if (dump_enabled_p ()) | |
5723 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
5724 | "multiple types in nested loop.\n"); | |
5725 | return false; | |
5726 | } | |
5727 | ||
ebfd146a | 5728 | op = gimple_assign_rhs1 (stmt); |
f4d09712 KY |
5729 | |
5730 | if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt, &rhs_vectype)) | |
ebfd146a | 5731 | { |
73fbfcad | 5732 | if (dump_enabled_p ()) |
78c60e3d | 5733 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 5734 | "use not simple.\n"); |
ebfd146a IR |
5735 | return false; |
5736 | } | |
5737 | ||
2de001ee RS |
5738 | if (dt == vect_constant_def || dt == vect_external_def) |
5739 | vls_type = VLS_STORE_INVARIANT; | |
5740 | else | |
5741 | vls_type = VLS_STORE; | |
5742 | ||
f4d09712 KY |
5743 | if (rhs_vectype && !useless_type_conversion_p (vectype, rhs_vectype)) |
5744 | return false; | |
5745 | ||
272c6793 | 5746 | elem_type = TREE_TYPE (vectype); |
ebfd146a | 5747 | vec_mode = TYPE_MODE (vectype); |
7b7b1813 | 5748 | |
ebfd146a IR |
5749 | /* FORNOW. In some cases can vectorize even if data-type not supported |
5750 | (e.g. - array initialization with 0). */ | |
947131ba | 5751 | if (optab_handler (mov_optab, vec_mode) == CODE_FOR_nothing) |
ebfd146a IR |
5752 | return false; |
5753 | ||
5754 | if (!STMT_VINFO_DATA_REF (stmt_info)) | |
5755 | return false; | |
5756 | ||
2de001ee | 5757 | vect_memory_access_type memory_access_type; |
62da9e14 | 5758 | if (!get_load_store_type (stmt, vectype, slp, vls_type, ncopies, |
2de001ee RS |
5759 | &memory_access_type, &gs_info)) |
5760 | return false; | |
3bab6342 | 5761 | |
ebfd146a IR |
5762 | if (!vec_stmt) /* transformation not required. */ |
5763 | { | |
2de001ee | 5764 | STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info) = memory_access_type; |
ebfd146a | 5765 | STMT_VINFO_TYPE (stmt_info) = store_vec_info_type; |
2e8ab70c RB |
5766 | /* The SLP costs are calculated during SLP analysis. */ |
5767 | if (!PURE_SLP_STMT (stmt_info)) | |
2de001ee | 5768 | vect_model_store_cost (stmt_info, ncopies, memory_access_type, dt, |
2e8ab70c | 5769 | NULL, NULL, NULL); |
ebfd146a IR |
5770 | return true; |
5771 | } | |
2de001ee | 5772 | gcc_assert (memory_access_type == STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info)); |
ebfd146a | 5773 | |
67b8dbac | 5774 | /* Transform. */ |
ebfd146a | 5775 | |
c716e67f XDL |
5776 | ensure_base_align (stmt_info, dr); |
5777 | ||
2de001ee | 5778 | if (memory_access_type == VMAT_GATHER_SCATTER) |
3bab6342 AT |
5779 | { |
5780 | tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE, op, src; | |
134c85ca | 5781 | tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gs_info.decl)); |
3bab6342 AT |
5782 | tree rettype, srctype, ptrtype, idxtype, masktype, scaletype; |
5783 | tree ptr, mask, var, scale, perm_mask = NULL_TREE; | |
5784 | edge pe = loop_preheader_edge (loop); | |
5785 | gimple_seq seq; | |
5786 | basic_block new_bb; | |
5787 | enum { NARROW, NONE, WIDEN } modifier; | |
134c85ca | 5788 | int scatter_off_nunits = TYPE_VECTOR_SUBPARTS (gs_info.offset_vectype); |
3bab6342 AT |
5789 | |
5790 | if (nunits == (unsigned int) scatter_off_nunits) | |
5791 | modifier = NONE; | |
5792 | else if (nunits == (unsigned int) scatter_off_nunits / 2) | |
5793 | { | |
5794 | unsigned char *sel = XALLOCAVEC (unsigned char, scatter_off_nunits); | |
5795 | modifier = WIDEN; | |
5796 | ||
5797 | for (i = 0; i < (unsigned int) scatter_off_nunits; ++i) | |
5798 | sel[i] = i | nunits; | |
5799 | ||
134c85ca | 5800 | perm_mask = vect_gen_perm_mask_checked (gs_info.offset_vectype, sel); |
3bab6342 AT |
5801 | gcc_assert (perm_mask != NULL_TREE); |
5802 | } | |
5803 | else if (nunits == (unsigned int) scatter_off_nunits * 2) | |
5804 | { | |
5805 | unsigned char *sel = XALLOCAVEC (unsigned char, nunits); | |
5806 | modifier = NARROW; | |
5807 | ||
5808 | for (i = 0; i < (unsigned int) nunits; ++i) | |
5809 | sel[i] = i | scatter_off_nunits; | |
5810 | ||
5811 | perm_mask = vect_gen_perm_mask_checked (vectype, sel); | |
5812 | gcc_assert (perm_mask != NULL_TREE); | |
5813 | ncopies *= 2; | |
5814 | } | |
5815 | else | |
5816 | gcc_unreachable (); | |
5817 | ||
134c85ca | 5818 | rettype = TREE_TYPE (TREE_TYPE (gs_info.decl)); |
3bab6342 AT |
5819 | ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist); |
5820 | masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist); | |
5821 | idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist); | |
5822 | srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist); | |
5823 | scaletype = TREE_VALUE (arglist); | |
5824 | ||
5825 | gcc_checking_assert (TREE_CODE (masktype) == INTEGER_TYPE | |
5826 | && TREE_CODE (rettype) == VOID_TYPE); | |
5827 | ||
134c85ca | 5828 | ptr = fold_convert (ptrtype, gs_info.base); |
3bab6342 AT |
5829 | if (!is_gimple_min_invariant (ptr)) |
5830 | { | |
5831 | ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE); | |
5832 | new_bb = gsi_insert_seq_on_edge_immediate (pe, seq); | |
5833 | gcc_assert (!new_bb); | |
5834 | } | |
5835 | ||
5836 | /* Currently we support only unconditional scatter stores, | |
5837 | so mask should be all ones. */ | |
5838 | mask = build_int_cst (masktype, -1); | |
5839 | mask = vect_init_vector (stmt, mask, masktype, NULL); | |
5840 | ||
134c85ca | 5841 | scale = build_int_cst (scaletype, gs_info.scale); |
3bab6342 AT |
5842 | |
5843 | prev_stmt_info = NULL; | |
5844 | for (j = 0; j < ncopies; ++j) | |
5845 | { | |
5846 | if (j == 0) | |
5847 | { | |
5848 | src = vec_oprnd1 | |
81c40241 | 5849 | = vect_get_vec_def_for_operand (gimple_assign_rhs1 (stmt), stmt); |
3bab6342 | 5850 | op = vec_oprnd0 |
134c85ca | 5851 | = vect_get_vec_def_for_operand (gs_info.offset, stmt); |
3bab6342 AT |
5852 | } |
5853 | else if (modifier != NONE && (j & 1)) | |
5854 | { | |
5855 | if (modifier == WIDEN) | |
5856 | { | |
5857 | src = vec_oprnd1 | |
5858 | = vect_get_vec_def_for_stmt_copy (scatter_src_dt, vec_oprnd1); | |
5859 | op = permute_vec_elements (vec_oprnd0, vec_oprnd0, perm_mask, | |
5860 | stmt, gsi); | |
5861 | } | |
5862 | else if (modifier == NARROW) | |
5863 | { | |
5864 | src = permute_vec_elements (vec_oprnd1, vec_oprnd1, perm_mask, | |
5865 | stmt, gsi); | |
5866 | op = vec_oprnd0 | |
134c85ca RS |
5867 | = vect_get_vec_def_for_stmt_copy (gs_info.offset_dt, |
5868 | vec_oprnd0); | |
3bab6342 AT |
5869 | } |
5870 | else | |
5871 | gcc_unreachable (); | |
5872 | } | |
5873 | else | |
5874 | { | |
5875 | src = vec_oprnd1 | |
5876 | = vect_get_vec_def_for_stmt_copy (scatter_src_dt, vec_oprnd1); | |
5877 | op = vec_oprnd0 | |
134c85ca RS |
5878 | = vect_get_vec_def_for_stmt_copy (gs_info.offset_dt, |
5879 | vec_oprnd0); | |
3bab6342 AT |
5880 | } |
5881 | ||
5882 | if (!useless_type_conversion_p (srctype, TREE_TYPE (src))) | |
5883 | { | |
5884 | gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (src)) | |
5885 | == TYPE_VECTOR_SUBPARTS (srctype)); | |
0e22bb5a | 5886 | var = vect_get_new_ssa_name (srctype, vect_simple_var); |
3bab6342 AT |
5887 | src = build1 (VIEW_CONVERT_EXPR, srctype, src); |
5888 | new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, src); | |
5889 | vect_finish_stmt_generation (stmt, new_stmt, gsi); | |
5890 | src = var; | |
5891 | } | |
5892 | ||
5893 | if (!useless_type_conversion_p (idxtype, TREE_TYPE (op))) | |
5894 | { | |
5895 | gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op)) | |
5896 | == TYPE_VECTOR_SUBPARTS (idxtype)); | |
0e22bb5a | 5897 | var = vect_get_new_ssa_name (idxtype, vect_simple_var); |
3bab6342 AT |
5898 | op = build1 (VIEW_CONVERT_EXPR, idxtype, op); |
5899 | new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op); | |
5900 | vect_finish_stmt_generation (stmt, new_stmt, gsi); | |
5901 | op = var; | |
5902 | } | |
5903 | ||
5904 | new_stmt | |
134c85ca | 5905 | = gimple_build_call (gs_info.decl, 5, ptr, mask, op, src, scale); |
3bab6342 AT |
5906 | |
5907 | vect_finish_stmt_generation (stmt, new_stmt, gsi); | |
5908 | ||
5909 | if (prev_stmt_info == NULL) | |
5910 | STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt; | |
5911 | else | |
5912 | STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt; | |
5913 | prev_stmt_info = vinfo_for_stmt (new_stmt); | |
5914 | } | |
5915 | return true; | |
5916 | } | |
5917 | ||
2de001ee | 5918 | grouped_store = STMT_VINFO_GROUPED_ACCESS (stmt_info); |
0d0293ac | 5919 | if (grouped_store) |
ebfd146a | 5920 | { |
2de001ee | 5921 | first_stmt = GROUP_FIRST_ELEMENT (stmt_info); |
ebfd146a | 5922 | first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt)); |
e14c1050 | 5923 | group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt)); |
ebfd146a | 5924 | |
e14c1050 | 5925 | GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))++; |
ebfd146a IR |
5926 | |
5927 | /* FORNOW */ | |
a70d6342 | 5928 | gcc_assert (!loop || !nested_in_vect_loop_p (loop, stmt)); |
ebfd146a IR |
5929 | |
5930 | /* We vectorize all the stmts of the interleaving group when we | |
5931 | reach the last stmt in the group. */ | |
e14c1050 IR |
5932 | if (GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt)) |
5933 | < GROUP_SIZE (vinfo_for_stmt (first_stmt)) | |
ebfd146a IR |
5934 | && !slp) |
5935 | { | |
5936 | *vec_stmt = NULL; | |
5937 | return true; | |
5938 | } | |
5939 | ||
5940 | if (slp) | |
4b5caab7 | 5941 | { |
0d0293ac | 5942 | grouped_store = false; |
4b5caab7 IR |
5943 | /* VEC_NUM is the number of vect stmts to be created for this |
5944 | group. */ | |
5945 | vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node); | |
9771b263 | 5946 | first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0]; |
52eab378 | 5947 | gcc_assert (GROUP_FIRST_ELEMENT (vinfo_for_stmt (first_stmt)) == first_stmt); |
4b5caab7 | 5948 | first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt)); |
d092494c | 5949 | op = gimple_assign_rhs1 (first_stmt); |
4b5caab7 | 5950 | } |
ebfd146a | 5951 | else |
4b5caab7 IR |
5952 | /* VEC_NUM is the number of vect stmts to be created for this |
5953 | group. */ | |
ebfd146a | 5954 | vec_num = group_size; |
44fc7854 BE |
5955 | |
5956 | ref_type = get_group_alias_ptr_type (first_stmt); | |
ebfd146a | 5957 | } |
b8698a0f | 5958 | else |
ebfd146a IR |
5959 | { |
5960 | first_stmt = stmt; | |
5961 | first_dr = dr; | |
5962 | group_size = vec_num = 1; | |
44fc7854 | 5963 | ref_type = reference_alias_ptr_type (DR_REF (first_dr)); |
ebfd146a | 5964 | } |
b8698a0f | 5965 | |
73fbfcad | 5966 | if (dump_enabled_p ()) |
78c60e3d | 5967 | dump_printf_loc (MSG_NOTE, vect_location, |
e645e942 | 5968 | "transform store. ncopies = %d\n", ncopies); |
ebfd146a | 5969 | |
2de001ee RS |
5970 | if (memory_access_type == VMAT_ELEMENTWISE |
5971 | || memory_access_type == VMAT_STRIDED_SLP) | |
f2e2a985 MM |
5972 | { |
5973 | gimple_stmt_iterator incr_gsi; | |
5974 | bool insert_after; | |
355fe088 | 5975 | gimple *incr; |
f2e2a985 MM |
5976 | tree offvar; |
5977 | tree ivstep; | |
5978 | tree running_off; | |
5979 | gimple_seq stmts = NULL; | |
5980 | tree stride_base, stride_step, alias_off; | |
5981 | tree vec_oprnd; | |
f502d50e | 5982 | unsigned int g; |
f2e2a985 MM |
5983 | |
5984 | gcc_assert (!nested_in_vect_loop_p (loop, stmt)); | |
5985 | ||
5986 | stride_base | |
5987 | = fold_build_pointer_plus | |
f502d50e | 5988 | (unshare_expr (DR_BASE_ADDRESS (first_dr)), |
f2e2a985 | 5989 | size_binop (PLUS_EXPR, |
f502d50e | 5990 | convert_to_ptrofftype (unshare_expr (DR_OFFSET (first_dr))), |
44fc7854 | 5991 | convert_to_ptrofftype (DR_INIT (first_dr)))); |
f502d50e | 5992 | stride_step = fold_convert (sizetype, unshare_expr (DR_STEP (first_dr))); |
f2e2a985 MM |
5993 | |
5994 | /* For a store with loop-invariant (but other than power-of-2) | |
5995 | stride (i.e. not a grouped access) like so: | |
5996 | ||
5997 | for (i = 0; i < n; i += stride) | |
5998 | array[i] = ...; | |
5999 | ||
6000 | we generate a new induction variable and new stores from | |
6001 | the components of the (vectorized) rhs: | |
6002 | ||
6003 | for (j = 0; ; j += VF*stride) | |
6004 | vectemp = ...; | |
6005 | tmp1 = vectemp[0]; | |
6006 | array[j] = tmp1; | |
6007 | tmp2 = vectemp[1]; | |
6008 | array[j + stride] = tmp2; | |
6009 | ... | |
6010 | */ | |
6011 | ||
cee62fee | 6012 | unsigned nstores = nunits; |
b17dc4d4 | 6013 | unsigned lnel = 1; |
cee62fee | 6014 | tree ltype = elem_type; |
04199738 | 6015 | tree lvectype = vectype; |
cee62fee MM |
6016 | if (slp) |
6017 | { | |
b17dc4d4 RB |
6018 | if (group_size < nunits |
6019 | && nunits % group_size == 0) | |
6020 | { | |
6021 | nstores = nunits / group_size; | |
6022 | lnel = group_size; | |
6023 | ltype = build_vector_type (elem_type, group_size); | |
04199738 RB |
6024 | lvectype = vectype; |
6025 | ||
6026 | /* First check if vec_extract optab doesn't support extraction | |
6027 | of vector elts directly. */ | |
b397965c | 6028 | scalar_mode elmode = SCALAR_TYPE_MODE (elem_type); |
04199738 RB |
6029 | machine_mode vmode = mode_for_vector (elmode, group_size); |
6030 | if (! VECTOR_MODE_P (vmode) | |
6031 | || (convert_optab_handler (vec_extract_optab, | |
6032 | TYPE_MODE (vectype), vmode) | |
6033 | == CODE_FOR_nothing)) | |
6034 | { | |
6035 | /* Try to avoid emitting an extract of vector elements | |
6036 | by performing the extracts using an integer type of the | |
6037 | same size, extracting from a vector of those and then | |
6038 | re-interpreting it as the original vector type if | |
6039 | supported. */ | |
6040 | unsigned lsize | |
6041 | = group_size * GET_MODE_BITSIZE (elmode); | |
fffbab82 | 6042 | elmode = int_mode_for_size (lsize, 0).require (); |
04199738 RB |
6043 | vmode = mode_for_vector (elmode, nunits / group_size); |
6044 | /* If we can't construct such a vector fall back to | |
6045 | element extracts from the original vector type and | |
6046 | element size stores. */ | |
6047 | if (VECTOR_MODE_P (vmode) | |
6048 | && (convert_optab_handler (vec_extract_optab, | |
6049 | vmode, elmode) | |
6050 | != CODE_FOR_nothing)) | |
6051 | { | |
6052 | nstores = nunits / group_size; | |
6053 | lnel = group_size; | |
6054 | ltype = build_nonstandard_integer_type (lsize, 1); | |
6055 | lvectype = build_vector_type (ltype, nstores); | |
6056 | } | |
6057 | /* Else fall back to vector extraction anyway. | |
6058 | Fewer stores are more important than avoiding spilling | |
6059 | of the vector we extract from. Compared to the | |
6060 | construction case in vectorizable_load no store-forwarding | |
6061 | issue exists here for reasonable archs. */ | |
6062 | } | |
b17dc4d4 RB |
6063 | } |
6064 | else if (group_size >= nunits | |
6065 | && group_size % nunits == 0) | |
6066 | { | |
6067 | nstores = 1; | |
6068 | lnel = nunits; | |
6069 | ltype = vectype; | |
04199738 | 6070 | lvectype = vectype; |
b17dc4d4 | 6071 | } |
cee62fee MM |
6072 | ltype = build_aligned_type (ltype, TYPE_ALIGN (elem_type)); |
6073 | ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node); | |
6074 | } | |
6075 | ||
f2e2a985 MM |
6076 | ivstep = stride_step; |
6077 | ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (ivstep), ivstep, | |
b17dc4d4 | 6078 | build_int_cst (TREE_TYPE (ivstep), vf)); |
f2e2a985 MM |
6079 | |
6080 | standard_iv_increment_position (loop, &incr_gsi, &insert_after); | |
6081 | ||
6082 | create_iv (stride_base, ivstep, NULL, | |
6083 | loop, &incr_gsi, insert_after, | |
6084 | &offvar, NULL); | |
6085 | incr = gsi_stmt (incr_gsi); | |
310213d4 | 6086 | set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo)); |
f2e2a985 MM |
6087 | |
6088 | stride_step = force_gimple_operand (stride_step, &stmts, true, NULL_TREE); | |
6089 | if (stmts) | |
6090 | gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts); | |
6091 | ||
6092 | prev_stmt_info = NULL; | |
44fc7854 | 6093 | alias_off = build_int_cst (ref_type, 0); |
f502d50e MM |
6094 | next_stmt = first_stmt; |
6095 | for (g = 0; g < group_size; g++) | |
f2e2a985 | 6096 | { |
f502d50e MM |
6097 | running_off = offvar; |
6098 | if (g) | |
f2e2a985 | 6099 | { |
f502d50e MM |
6100 | tree size = TYPE_SIZE_UNIT (ltype); |
6101 | tree pos = fold_build2 (MULT_EXPR, sizetype, size_int (g), | |
f2e2a985 | 6102 | size); |
f502d50e | 6103 | tree newoff = copy_ssa_name (running_off, NULL); |
f2e2a985 | 6104 | incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR, |
f502d50e | 6105 | running_off, pos); |
f2e2a985 | 6106 | vect_finish_stmt_generation (stmt, incr, gsi); |
f2e2a985 | 6107 | running_off = newoff; |
f502d50e | 6108 | } |
b17dc4d4 RB |
6109 | unsigned int group_el = 0; |
6110 | unsigned HOST_WIDE_INT | |
6111 | elsz = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (vectype))); | |
f502d50e MM |
6112 | for (j = 0; j < ncopies; j++) |
6113 | { | |
6114 | /* We've set op and dt above, from gimple_assign_rhs1(stmt), | |
6115 | and first_stmt == stmt. */ | |
6116 | if (j == 0) | |
6117 | { | |
6118 | if (slp) | |
6119 | { | |
6120 | vect_get_vec_defs (op, NULL_TREE, stmt, &vec_oprnds, NULL, | |
306b0c92 | 6121 | slp_node); |
f502d50e MM |
6122 | vec_oprnd = vec_oprnds[0]; |
6123 | } | |
6124 | else | |
6125 | { | |
6126 | gcc_assert (gimple_assign_single_p (next_stmt)); | |
6127 | op = gimple_assign_rhs1 (next_stmt); | |
81c40241 | 6128 | vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt); |
f502d50e MM |
6129 | } |
6130 | } | |
f2e2a985 | 6131 | else |
f502d50e MM |
6132 | { |
6133 | if (slp) | |
6134 | vec_oprnd = vec_oprnds[j]; | |
6135 | else | |
c079cbac | 6136 | { |
81c40241 | 6137 | vect_is_simple_use (vec_oprnd, vinfo, &def_stmt, &dt); |
c079cbac RB |
6138 | vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd); |
6139 | } | |
f502d50e | 6140 | } |
04199738 RB |
6141 | /* Pun the vector to extract from if necessary. */ |
6142 | if (lvectype != vectype) | |
6143 | { | |
6144 | tree tem = make_ssa_name (lvectype); | |
6145 | gimple *pun | |
6146 | = gimple_build_assign (tem, build1 (VIEW_CONVERT_EXPR, | |
6147 | lvectype, vec_oprnd)); | |
6148 | vect_finish_stmt_generation (stmt, pun, gsi); | |
6149 | vec_oprnd = tem; | |
6150 | } | |
f502d50e MM |
6151 | for (i = 0; i < nstores; i++) |
6152 | { | |
6153 | tree newref, newoff; | |
355fe088 | 6154 | gimple *incr, *assign; |
f502d50e MM |
6155 | tree size = TYPE_SIZE (ltype); |
6156 | /* Extract the i'th component. */ | |
6157 | tree pos = fold_build2 (MULT_EXPR, bitsizetype, | |
6158 | bitsize_int (i), size); | |
6159 | tree elem = fold_build3 (BIT_FIELD_REF, ltype, vec_oprnd, | |
6160 | size, pos); | |
6161 | ||
6162 | elem = force_gimple_operand_gsi (gsi, elem, true, | |
6163 | NULL_TREE, true, | |
6164 | GSI_SAME_STMT); | |
6165 | ||
b17dc4d4 RB |
6166 | tree this_off = build_int_cst (TREE_TYPE (alias_off), |
6167 | group_el * elsz); | |
f502d50e | 6168 | newref = build2 (MEM_REF, ltype, |
b17dc4d4 | 6169 | running_off, this_off); |
f502d50e MM |
6170 | |
6171 | /* And store it to *running_off. */ | |
6172 | assign = gimple_build_assign (newref, elem); | |
6173 | vect_finish_stmt_generation (stmt, assign, gsi); | |
6174 | ||
b17dc4d4 RB |
6175 | group_el += lnel; |
6176 | if (! slp | |
6177 | || group_el == group_size) | |
6178 | { | |
6179 | newoff = copy_ssa_name (running_off, NULL); | |
6180 | incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR, | |
6181 | running_off, stride_step); | |
6182 | vect_finish_stmt_generation (stmt, incr, gsi); | |
f502d50e | 6183 | |
b17dc4d4 RB |
6184 | running_off = newoff; |
6185 | group_el = 0; | |
6186 | } | |
225ce44b RB |
6187 | if (g == group_size - 1 |
6188 | && !slp) | |
f502d50e MM |
6189 | { |
6190 | if (j == 0 && i == 0) | |
225ce44b RB |
6191 | STMT_VINFO_VEC_STMT (stmt_info) |
6192 | = *vec_stmt = assign; | |
f502d50e MM |
6193 | else |
6194 | STMT_VINFO_RELATED_STMT (prev_stmt_info) = assign; | |
6195 | prev_stmt_info = vinfo_for_stmt (assign); | |
6196 | } | |
6197 | } | |
f2e2a985 | 6198 | } |
f502d50e | 6199 | next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt)); |
b17dc4d4 RB |
6200 | if (slp) |
6201 | break; | |
f2e2a985 | 6202 | } |
778dd3b6 RB |
6203 | |
6204 | vec_oprnds.release (); | |
f2e2a985 MM |
6205 | return true; |
6206 | } | |
6207 | ||
8c681247 | 6208 | auto_vec<tree> dr_chain (group_size); |
9771b263 | 6209 | oprnds.create (group_size); |
ebfd146a | 6210 | |
720f5239 | 6211 | alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false); |
ebfd146a | 6212 | gcc_assert (alignment_support_scheme); |
272c6793 RS |
6213 | /* Targets with store-lane instructions must not require explicit |
6214 | realignment. */ | |
2de001ee | 6215 | gcc_assert (memory_access_type != VMAT_LOAD_STORE_LANES |
272c6793 RS |
6216 | || alignment_support_scheme == dr_aligned |
6217 | || alignment_support_scheme == dr_unaligned_supported); | |
6218 | ||
62da9e14 RS |
6219 | if (memory_access_type == VMAT_CONTIGUOUS_DOWN |
6220 | || memory_access_type == VMAT_CONTIGUOUS_REVERSE) | |
09dfa495 BM |
6221 | offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1); |
6222 | ||
2de001ee | 6223 | if (memory_access_type == VMAT_LOAD_STORE_LANES) |
272c6793 RS |
6224 | aggr_type = build_array_type_nelts (elem_type, vec_num * nunits); |
6225 | else | |
6226 | aggr_type = vectype; | |
ebfd146a IR |
6227 | |
6228 | /* In case the vectorization factor (VF) is bigger than the number | |
6229 | of elements that we can fit in a vectype (nunits), we have to generate | |
6230 | more than one vector stmt - i.e - we need to "unroll" the | |
b8698a0f | 6231 | vector stmt by a factor VF/nunits. For more details see documentation in |
ebfd146a IR |
6232 | vect_get_vec_def_for_copy_stmt. */ |
6233 | ||
0d0293ac | 6234 | /* In case of interleaving (non-unit grouped access): |
ebfd146a IR |
6235 | |
6236 | S1: &base + 2 = x2 | |
6237 | S2: &base = x0 | |
6238 | S3: &base + 1 = x1 | |
6239 | S4: &base + 3 = x3 | |
6240 | ||
6241 | We create vectorized stores starting from base address (the access of the | |
6242 | first stmt in the chain (S2 in the above example), when the last store stmt | |
6243 | of the chain (S4) is reached: | |
6244 | ||
6245 | VS1: &base = vx2 | |
6246 | VS2: &base + vec_size*1 = vx0 | |
6247 | VS3: &base + vec_size*2 = vx1 | |
6248 | VS4: &base + vec_size*3 = vx3 | |
6249 | ||
6250 | Then permutation statements are generated: | |
6251 | ||
3fcc1b55 JJ |
6252 | VS5: vx5 = VEC_PERM_EXPR < vx0, vx3, {0, 8, 1, 9, 2, 10, 3, 11} > |
6253 | VS6: vx6 = VEC_PERM_EXPR < vx0, vx3, {4, 12, 5, 13, 6, 14, 7, 15} > | |
ebfd146a | 6254 | ... |
b8698a0f | 6255 | |
ebfd146a IR |
6256 | And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts |
6257 | (the order of the data-refs in the output of vect_permute_store_chain | |
6258 | corresponds to the order of scalar stmts in the interleaving chain - see | |
6259 | the documentation of vect_permute_store_chain()). | |
6260 | ||
6261 | In case of both multiple types and interleaving, above vector stores and | |
ff802fa1 | 6262 | permutation stmts are created for every copy. The result vector stmts are |
ebfd146a | 6263 | put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding |
b8698a0f | 6264 | STMT_VINFO_RELATED_STMT for the next copies. |
ebfd146a IR |
6265 | */ |
6266 | ||
6267 | prev_stmt_info = NULL; | |
6268 | for (j = 0; j < ncopies; j++) | |
6269 | { | |
ebfd146a IR |
6270 | |
6271 | if (j == 0) | |
6272 | { | |
6273 | if (slp) | |
6274 | { | |
6275 | /* Get vectorized arguments for SLP_NODE. */ | |
d092494c | 6276 | vect_get_vec_defs (op, NULL_TREE, stmt, &vec_oprnds, |
306b0c92 | 6277 | NULL, slp_node); |
ebfd146a | 6278 | |
9771b263 | 6279 | vec_oprnd = vec_oprnds[0]; |
ebfd146a IR |
6280 | } |
6281 | else | |
6282 | { | |
b8698a0f L |
6283 | /* For interleaved stores we collect vectorized defs for all the |
6284 | stores in the group in DR_CHAIN and OPRNDS. DR_CHAIN is then | |
6285 | used as an input to vect_permute_store_chain(), and OPRNDS as | |
ebfd146a IR |
6286 | an input to vect_get_vec_def_for_stmt_copy() for the next copy. |
6287 | ||
0d0293ac | 6288 | If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and |
ebfd146a | 6289 | OPRNDS are of size 1. */ |
b8698a0f | 6290 | next_stmt = first_stmt; |
ebfd146a IR |
6291 | for (i = 0; i < group_size; i++) |
6292 | { | |
b8698a0f L |
6293 | /* Since gaps are not supported for interleaved stores, |
6294 | GROUP_SIZE is the exact number of stmts in the chain. | |
6295 | Therefore, NEXT_STMT can't be NULL_TREE. In case that | |
6296 | there is no interleaving, GROUP_SIZE is 1, and only one | |
ebfd146a IR |
6297 | iteration of the loop will be executed. */ |
6298 | gcc_assert (next_stmt | |
6299 | && gimple_assign_single_p (next_stmt)); | |
6300 | op = gimple_assign_rhs1 (next_stmt); | |
6301 | ||
81c40241 | 6302 | vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt); |
9771b263 DN |
6303 | dr_chain.quick_push (vec_oprnd); |
6304 | oprnds.quick_push (vec_oprnd); | |
e14c1050 | 6305 | next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt)); |
ebfd146a IR |
6306 | } |
6307 | } | |
6308 | ||
6309 | /* We should have catched mismatched types earlier. */ | |
6310 | gcc_assert (useless_type_conversion_p (vectype, | |
6311 | TREE_TYPE (vec_oprnd))); | |
74bf76ed JJ |
6312 | bool simd_lane_access_p |
6313 | = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info); | |
6314 | if (simd_lane_access_p | |
6315 | && TREE_CODE (DR_BASE_ADDRESS (first_dr)) == ADDR_EXPR | |
6316 | && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr), 0)) | |
6317 | && integer_zerop (DR_OFFSET (first_dr)) | |
6318 | && integer_zerop (DR_INIT (first_dr)) | |
6319 | && alias_sets_conflict_p (get_alias_set (aggr_type), | |
44fc7854 | 6320 | get_alias_set (TREE_TYPE (ref_type)))) |
74bf76ed JJ |
6321 | { |
6322 | dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr)); | |
44fc7854 | 6323 | dataref_offset = build_int_cst (ref_type, 0); |
8928eff3 | 6324 | inv_p = false; |
74bf76ed JJ |
6325 | } |
6326 | else | |
6327 | dataref_ptr | |
6328 | = vect_create_data_ref_ptr (first_stmt, aggr_type, | |
6329 | simd_lane_access_p ? loop : NULL, | |
09dfa495 | 6330 | offset, &dummy, gsi, &ptr_incr, |
74bf76ed | 6331 | simd_lane_access_p, &inv_p); |
a70d6342 | 6332 | gcc_assert (bb_vinfo || !inv_p); |
ebfd146a | 6333 | } |
b8698a0f | 6334 | else |
ebfd146a | 6335 | { |
b8698a0f L |
6336 | /* For interleaved stores we created vectorized defs for all the |
6337 | defs stored in OPRNDS in the previous iteration (previous copy). | |
6338 | DR_CHAIN is then used as an input to vect_permute_store_chain(), | |
ebfd146a IR |
6339 | and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the |
6340 | next copy. | |
0d0293ac | 6341 | If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and |
ebfd146a IR |
6342 | OPRNDS are of size 1. */ |
6343 | for (i = 0; i < group_size; i++) | |
6344 | { | |
9771b263 | 6345 | op = oprnds[i]; |
81c40241 | 6346 | vect_is_simple_use (op, vinfo, &def_stmt, &dt); |
b8698a0f | 6347 | vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, op); |
9771b263 DN |
6348 | dr_chain[i] = vec_oprnd; |
6349 | oprnds[i] = vec_oprnd; | |
ebfd146a | 6350 | } |
74bf76ed JJ |
6351 | if (dataref_offset) |
6352 | dataref_offset | |
6353 | = int_const_binop (PLUS_EXPR, dataref_offset, | |
6354 | TYPE_SIZE_UNIT (aggr_type)); | |
6355 | else | |
6356 | dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt, | |
6357 | TYPE_SIZE_UNIT (aggr_type)); | |
ebfd146a IR |
6358 | } |
6359 | ||
2de001ee | 6360 | if (memory_access_type == VMAT_LOAD_STORE_LANES) |
ebfd146a | 6361 | { |
272c6793 | 6362 | tree vec_array; |
267d3070 | 6363 | |
272c6793 RS |
6364 | /* Combine all the vectors into an array. */ |
6365 | vec_array = create_vector_array (vectype, vec_num); | |
6366 | for (i = 0; i < vec_num; i++) | |
c2d7ab2a | 6367 | { |
9771b263 | 6368 | vec_oprnd = dr_chain[i]; |
272c6793 | 6369 | write_vector_array (stmt, gsi, vec_oprnd, vec_array, i); |
267d3070 | 6370 | } |
b8698a0f | 6371 | |
272c6793 RS |
6372 | /* Emit: |
6373 | MEM_REF[...all elements...] = STORE_LANES (VEC_ARRAY). */ | |
44fc7854 | 6374 | data_ref = create_array_ref (aggr_type, dataref_ptr, ref_type); |
a844293d RS |
6375 | gcall *call = gimple_build_call_internal (IFN_STORE_LANES, 1, |
6376 | vec_array); | |
6377 | gimple_call_set_lhs (call, data_ref); | |
6378 | gimple_call_set_nothrow (call, true); | |
6379 | new_stmt = call; | |
267d3070 | 6380 | vect_finish_stmt_generation (stmt, new_stmt, gsi); |
272c6793 RS |
6381 | } |
6382 | else | |
6383 | { | |
6384 | new_stmt = NULL; | |
0d0293ac | 6385 | if (grouped_store) |
272c6793 | 6386 | { |
b6b9227d JJ |
6387 | if (j == 0) |
6388 | result_chain.create (group_size); | |
272c6793 RS |
6389 | /* Permute. */ |
6390 | vect_permute_store_chain (dr_chain, group_size, stmt, gsi, | |
6391 | &result_chain); | |
6392 | } | |
c2d7ab2a | 6393 | |
272c6793 RS |
6394 | next_stmt = first_stmt; |
6395 | for (i = 0; i < vec_num; i++) | |
6396 | { | |
644ffefd | 6397 | unsigned align, misalign; |
272c6793 RS |
6398 | |
6399 | if (i > 0) | |
6400 | /* Bump the vector pointer. */ | |
6401 | dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, | |
6402 | stmt, NULL_TREE); | |
6403 | ||
6404 | if (slp) | |
9771b263 | 6405 | vec_oprnd = vec_oprnds[i]; |
0d0293ac MM |
6406 | else if (grouped_store) |
6407 | /* For grouped stores vectorized defs are interleaved in | |
272c6793 | 6408 | vect_permute_store_chain(). */ |
9771b263 | 6409 | vec_oprnd = result_chain[i]; |
272c6793 | 6410 | |
69a2e8a1 | 6411 | data_ref = fold_build2 (MEM_REF, vectype, |
aed93b23 RB |
6412 | dataref_ptr, |
6413 | dataref_offset | |
6414 | ? dataref_offset | |
44fc7854 | 6415 | : build_int_cst (ref_type, 0)); |
644ffefd | 6416 | align = TYPE_ALIGN_UNIT (vectype); |
272c6793 | 6417 | if (aligned_access_p (first_dr)) |
644ffefd | 6418 | misalign = 0; |
272c6793 RS |
6419 | else if (DR_MISALIGNMENT (first_dr) == -1) |
6420 | { | |
25f68d90 | 6421 | align = dr_alignment (vect_dr_behavior (first_dr)); |
52639a61 | 6422 | misalign = 0; |
272c6793 RS |
6423 | TREE_TYPE (data_ref) |
6424 | = build_aligned_type (TREE_TYPE (data_ref), | |
52639a61 | 6425 | align * BITS_PER_UNIT); |
272c6793 RS |
6426 | } |
6427 | else | |
6428 | { | |
6429 | TREE_TYPE (data_ref) | |
6430 | = build_aligned_type (TREE_TYPE (data_ref), | |
6431 | TYPE_ALIGN (elem_type)); | |
644ffefd | 6432 | misalign = DR_MISALIGNMENT (first_dr); |
272c6793 | 6433 | } |
aed93b23 RB |
6434 | if (dataref_offset == NULL_TREE |
6435 | && TREE_CODE (dataref_ptr) == SSA_NAME) | |
74bf76ed JJ |
6436 | set_ptr_info_alignment (get_ptr_info (dataref_ptr), align, |
6437 | misalign); | |
c2d7ab2a | 6438 | |
62da9e14 | 6439 | if (memory_access_type == VMAT_CONTIGUOUS_REVERSE) |
09dfa495 BM |
6440 | { |
6441 | tree perm_mask = perm_mask_for_reverse (vectype); | |
6442 | tree perm_dest | |
6443 | = vect_create_destination_var (gimple_assign_rhs1 (stmt), | |
6444 | vectype); | |
b731b390 | 6445 | tree new_temp = make_ssa_name (perm_dest); |
09dfa495 BM |
6446 | |
6447 | /* Generate the permute statement. */ | |
355fe088 | 6448 | gimple *perm_stmt |
0d0e4a03 JJ |
6449 | = gimple_build_assign (new_temp, VEC_PERM_EXPR, vec_oprnd, |
6450 | vec_oprnd, perm_mask); | |
09dfa495 BM |
6451 | vect_finish_stmt_generation (stmt, perm_stmt, gsi); |
6452 | ||
6453 | perm_stmt = SSA_NAME_DEF_STMT (new_temp); | |
6454 | vec_oprnd = new_temp; | |
6455 | } | |
6456 | ||
272c6793 RS |
6457 | /* Arguments are ready. Create the new vector stmt. */ |
6458 | new_stmt = gimple_build_assign (data_ref, vec_oprnd); | |
6459 | vect_finish_stmt_generation (stmt, new_stmt, gsi); | |
272c6793 RS |
6460 | |
6461 | if (slp) | |
6462 | continue; | |
6463 | ||
e14c1050 | 6464 | next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt)); |
272c6793 RS |
6465 | if (!next_stmt) |
6466 | break; | |
6467 | } | |
ebfd146a | 6468 | } |
1da0876c RS |
6469 | if (!slp) |
6470 | { | |
6471 | if (j == 0) | |
6472 | STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt; | |
6473 | else | |
6474 | STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt; | |
6475 | prev_stmt_info = vinfo_for_stmt (new_stmt); | |
6476 | } | |
ebfd146a IR |
6477 | } |
6478 | ||
9771b263 DN |
6479 | oprnds.release (); |
6480 | result_chain.release (); | |
6481 | vec_oprnds.release (); | |
ebfd146a IR |
6482 | |
6483 | return true; | |
6484 | } | |
6485 | ||
557be5a8 AL |
6486 | /* Given a vector type VECTYPE, turns permutation SEL into the equivalent |
6487 | VECTOR_CST mask. No checks are made that the target platform supports the | |
6488 | mask, so callers may wish to test can_vec_perm_p separately, or use | |
6489 | vect_gen_perm_mask_checked. */ | |
a1e53f3f | 6490 | |
3fcc1b55 | 6491 | tree |
557be5a8 | 6492 | vect_gen_perm_mask_any (tree vectype, const unsigned char *sel) |
a1e53f3f | 6493 | { |
d2a12ae7 | 6494 | tree mask_elt_type, mask_type, mask_vec, *mask_elts; |
2635892a | 6495 | int i, nunits; |
a1e53f3f | 6496 | |
22e4dee7 | 6497 | nunits = TYPE_VECTOR_SUBPARTS (vectype); |
22e4dee7 | 6498 | |
96f9265a | 6499 | mask_elt_type = lang_hooks.types.type_for_mode |
304b9962 | 6500 | (int_mode_for_mode (TYPE_MODE (TREE_TYPE (vectype))).require (), 1); |
22e4dee7 | 6501 | mask_type = get_vectype_for_scalar_type (mask_elt_type); |
a1e53f3f | 6502 | |
d2a12ae7 | 6503 | mask_elts = XALLOCAVEC (tree, nunits); |
aec7ae7d | 6504 | for (i = nunits - 1; i >= 0; i--) |
d2a12ae7 RG |
6505 | mask_elts[i] = build_int_cst (mask_elt_type, sel[i]); |
6506 | mask_vec = build_vector (mask_type, mask_elts); | |
a1e53f3f | 6507 | |
2635892a | 6508 | return mask_vec; |
a1e53f3f L |
6509 | } |
6510 | ||
cf7aa6a3 AL |
6511 | /* Checked version of vect_gen_perm_mask_any. Asserts can_vec_perm_p, |
6512 | i.e. that the target supports the pattern _for arbitrary input vectors_. */ | |
557be5a8 AL |
6513 | |
6514 | tree | |
6515 | vect_gen_perm_mask_checked (tree vectype, const unsigned char *sel) | |
6516 | { | |
6517 | gcc_assert (can_vec_perm_p (TYPE_MODE (vectype), false, sel)); | |
6518 | return vect_gen_perm_mask_any (vectype, sel); | |
6519 | } | |
6520 | ||
aec7ae7d JJ |
6521 | /* Given a vector variable X and Y, that was generated for the scalar |
6522 | STMT, generate instructions to permute the vector elements of X and Y | |
6523 | using permutation mask MASK_VEC, insert them at *GSI and return the | |
6524 | permuted vector variable. */ | |
a1e53f3f L |
6525 | |
6526 | static tree | |
355fe088 | 6527 | permute_vec_elements (tree x, tree y, tree mask_vec, gimple *stmt, |
aec7ae7d | 6528 | gimple_stmt_iterator *gsi) |
a1e53f3f L |
6529 | { |
6530 | tree vectype = TREE_TYPE (x); | |
aec7ae7d | 6531 | tree perm_dest, data_ref; |
355fe088 | 6532 | gimple *perm_stmt; |
a1e53f3f | 6533 | |
acdcd61b | 6534 | perm_dest = vect_create_destination_var (gimple_get_lhs (stmt), vectype); |
b731b390 | 6535 | data_ref = make_ssa_name (perm_dest); |
a1e53f3f L |
6536 | |
6537 | /* Generate the permute statement. */ | |
0d0e4a03 | 6538 | perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, x, y, mask_vec); |
a1e53f3f L |
6539 | vect_finish_stmt_generation (stmt, perm_stmt, gsi); |
6540 | ||
6541 | return data_ref; | |
6542 | } | |
6543 | ||
6b916b36 RB |
6544 | /* Hoist the definitions of all SSA uses on STMT out of the loop LOOP, |
6545 | inserting them on the loops preheader edge. Returns true if we | |
6546 | were successful in doing so (and thus STMT can be moved then), | |
6547 | otherwise returns false. */ | |
6548 | ||
6549 | static bool | |
355fe088 | 6550 | hoist_defs_of_uses (gimple *stmt, struct loop *loop) |
6b916b36 RB |
6551 | { |
6552 | ssa_op_iter i; | |
6553 | tree op; | |
6554 | bool any = false; | |
6555 | ||
6556 | FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE) | |
6557 | { | |
355fe088 | 6558 | gimple *def_stmt = SSA_NAME_DEF_STMT (op); |
6b916b36 RB |
6559 | if (!gimple_nop_p (def_stmt) |
6560 | && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))) | |
6561 | { | |
6562 | /* Make sure we don't need to recurse. While we could do | |
6563 | so in simple cases when there are more complex use webs | |
6564 | we don't have an easy way to preserve stmt order to fulfil | |
6565 | dependencies within them. */ | |
6566 | tree op2; | |
6567 | ssa_op_iter i2; | |
d1417442 JJ |
6568 | if (gimple_code (def_stmt) == GIMPLE_PHI) |
6569 | return false; | |
6b916b36 RB |
6570 | FOR_EACH_SSA_TREE_OPERAND (op2, def_stmt, i2, SSA_OP_USE) |
6571 | { | |
355fe088 | 6572 | gimple *def_stmt2 = SSA_NAME_DEF_STMT (op2); |
6b916b36 RB |
6573 | if (!gimple_nop_p (def_stmt2) |
6574 | && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt2))) | |
6575 | return false; | |
6576 | } | |
6577 | any = true; | |
6578 | } | |
6579 | } | |
6580 | ||
6581 | if (!any) | |
6582 | return true; | |
6583 | ||
6584 | FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE) | |
6585 | { | |
355fe088 | 6586 | gimple *def_stmt = SSA_NAME_DEF_STMT (op); |
6b916b36 RB |
6587 | if (!gimple_nop_p (def_stmt) |
6588 | && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))) | |
6589 | { | |
6590 | gimple_stmt_iterator gsi = gsi_for_stmt (def_stmt); | |
6591 | gsi_remove (&gsi, false); | |
6592 | gsi_insert_on_edge_immediate (loop_preheader_edge (loop), def_stmt); | |
6593 | } | |
6594 | } | |
6595 | ||
6596 | return true; | |
6597 | } | |
6598 | ||
ebfd146a IR |
6599 | /* vectorizable_load. |
6600 | ||
b8698a0f L |
6601 | Check if STMT reads a non scalar data-ref (array/pointer/structure) that |
6602 | can be vectorized. | |
6603 | If VEC_STMT is also passed, vectorize the STMT: create a vectorized | |
ebfd146a IR |
6604 | stmt to replace it, put it in VEC_STMT, and insert it at BSI. |
6605 | Return FALSE if not a vectorizable STMT, TRUE otherwise. */ | |
6606 | ||
6607 | static bool | |
355fe088 | 6608 | vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt, |
c716e67f | 6609 | slp_tree slp_node, slp_instance slp_node_instance) |
ebfd146a IR |
6610 | { |
6611 | tree scalar_dest; | |
6612 | tree vec_dest = NULL; | |
6613 | tree data_ref = NULL; | |
6614 | stmt_vec_info stmt_info = vinfo_for_stmt (stmt); | |
b8698a0f | 6615 | stmt_vec_info prev_stmt_info; |
ebfd146a | 6616 | loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); |
a70d6342 | 6617 | struct loop *loop = NULL; |
ebfd146a | 6618 | struct loop *containing_loop = (gimple_bb (stmt))->loop_father; |
a70d6342 | 6619 | bool nested_in_vect_loop = false; |
c716e67f | 6620 | struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL; |
272c6793 | 6621 | tree elem_type; |
ebfd146a | 6622 | tree new_temp; |
ef4bddc2 | 6623 | machine_mode mode; |
355fe088 | 6624 | gimple *new_stmt = NULL; |
ebfd146a IR |
6625 | tree dummy; |
6626 | enum dr_alignment_support alignment_support_scheme; | |
6627 | tree dataref_ptr = NULL_TREE; | |
74bf76ed | 6628 | tree dataref_offset = NULL_TREE; |
355fe088 | 6629 | gimple *ptr_incr = NULL; |
ebfd146a | 6630 | int ncopies; |
44fc7854 | 6631 | int i, j, group_size, group_gap_adj; |
ebfd146a IR |
6632 | tree msq = NULL_TREE, lsq; |
6633 | tree offset = NULL_TREE; | |
356bbc4c | 6634 | tree byte_offset = NULL_TREE; |
ebfd146a | 6635 | tree realignment_token = NULL_TREE; |
538dd0b7 | 6636 | gphi *phi = NULL; |
6e1aa848 | 6637 | vec<tree> dr_chain = vNULL; |
0d0293ac | 6638 | bool grouped_load = false; |
355fe088 | 6639 | gimple *first_stmt; |
4f0a0218 | 6640 | gimple *first_stmt_for_drptr = NULL; |
ebfd146a IR |
6641 | bool inv_p; |
6642 | bool compute_in_loop = false; | |
6643 | struct loop *at_loop; | |
6644 | int vec_num; | |
6645 | bool slp = (slp_node != NULL); | |
6646 | bool slp_perm = false; | |
6647 | enum tree_code code; | |
a70d6342 IR |
6648 | bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info); |
6649 | int vf; | |
272c6793 | 6650 | tree aggr_type; |
134c85ca | 6651 | gather_scatter_info gs_info; |
310213d4 | 6652 | vec_info *vinfo = stmt_info->vinfo; |
44fc7854 | 6653 | tree ref_type; |
a70d6342 | 6654 | |
465c8c19 JJ |
6655 | if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo) |
6656 | return false; | |
6657 | ||
66c16fd9 RB |
6658 | if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def |
6659 | && ! vec_stmt) | |
465c8c19 JJ |
6660 | return false; |
6661 | ||
6662 | /* Is vectorizable load? */ | |
6663 | if (!is_gimple_assign (stmt)) | |
6664 | return false; | |
6665 | ||
6666 | scalar_dest = gimple_assign_lhs (stmt); | |
6667 | if (TREE_CODE (scalar_dest) != SSA_NAME) | |
6668 | return false; | |
6669 | ||
6670 | code = gimple_assign_rhs_code (stmt); | |
6671 | if (code != ARRAY_REF | |
6672 | && code != BIT_FIELD_REF | |
6673 | && code != INDIRECT_REF | |
6674 | && code != COMPONENT_REF | |
6675 | && code != IMAGPART_EXPR | |
6676 | && code != REALPART_EXPR | |
6677 | && code != MEM_REF | |
6678 | && TREE_CODE_CLASS (code) != tcc_declaration) | |
6679 | return false; | |
6680 | ||
6681 | if (!STMT_VINFO_DATA_REF (stmt_info)) | |
6682 | return false; | |
6683 | ||
6684 | tree vectype = STMT_VINFO_VECTYPE (stmt_info); | |
6685 | int nunits = TYPE_VECTOR_SUBPARTS (vectype); | |
6686 | ||
a70d6342 IR |
6687 | if (loop_vinfo) |
6688 | { | |
6689 | loop = LOOP_VINFO_LOOP (loop_vinfo); | |
6690 | nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt); | |
6691 | vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo); | |
6692 | } | |
6693 | else | |
3533e503 | 6694 | vf = 1; |
ebfd146a IR |
6695 | |
6696 | /* Multiple types in SLP are handled by creating the appropriate number of | |
ff802fa1 | 6697 | vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in |
ebfd146a | 6698 | case of SLP. */ |
fce57248 | 6699 | if (slp) |
ebfd146a IR |
6700 | ncopies = 1; |
6701 | else | |
6702 | ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits; | |
6703 | ||
6704 | gcc_assert (ncopies >= 1); | |
6705 | ||
6706 | /* FORNOW. This restriction should be relaxed. */ | |
6707 | if (nested_in_vect_loop && ncopies > 1) | |
6708 | { | |
73fbfcad | 6709 | if (dump_enabled_p ()) |
78c60e3d | 6710 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 6711 | "multiple types in nested loop.\n"); |
ebfd146a IR |
6712 | return false; |
6713 | } | |
6714 | ||
f2556b68 RB |
6715 | /* Invalidate assumptions made by dependence analysis when vectorization |
6716 | on the unrolled body effectively re-orders stmts. */ | |
6717 | if (ncopies > 1 | |
6718 | && STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0 | |
6719 | && ((unsigned)LOOP_VINFO_VECT_FACTOR (loop_vinfo) | |
6720 | > STMT_VINFO_MIN_NEG_DIST (stmt_info))) | |
6721 | { | |
6722 | if (dump_enabled_p ()) | |
6723 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
6724 | "cannot perform implicit CSE when unrolling " | |
6725 | "with negative dependence distance\n"); | |
6726 | return false; | |
6727 | } | |
6728 | ||
7b7b1813 | 6729 | elem_type = TREE_TYPE (vectype); |
947131ba | 6730 | mode = TYPE_MODE (vectype); |
ebfd146a IR |
6731 | |
6732 | /* FORNOW. In some cases can vectorize even if data-type not supported | |
6733 | (e.g. - data copies). */ | |
947131ba | 6734 | if (optab_handler (mov_optab, mode) == CODE_FOR_nothing) |
ebfd146a | 6735 | { |
73fbfcad | 6736 | if (dump_enabled_p ()) |
78c60e3d | 6737 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 6738 | "Aligned load, but unsupported type.\n"); |
ebfd146a IR |
6739 | return false; |
6740 | } | |
6741 | ||
ebfd146a | 6742 | /* Check if the load is a part of an interleaving chain. */ |
0d0293ac | 6743 | if (STMT_VINFO_GROUPED_ACCESS (stmt_info)) |
ebfd146a | 6744 | { |
0d0293ac | 6745 | grouped_load = true; |
ebfd146a | 6746 | /* FORNOW */ |
2de001ee RS |
6747 | gcc_assert (!nested_in_vect_loop); |
6748 | gcc_assert (!STMT_VINFO_GATHER_SCATTER_P (stmt_info)); | |
ebfd146a | 6749 | |
e14c1050 | 6750 | first_stmt = GROUP_FIRST_ELEMENT (stmt_info); |
d3465d72 | 6751 | group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt)); |
d5f035ea | 6752 | |
b1af7da6 RB |
6753 | if (slp && SLP_TREE_LOAD_PERMUTATION (slp_node).exists ()) |
6754 | slp_perm = true; | |
6755 | ||
f2556b68 RB |
6756 | /* Invalidate assumptions made by dependence analysis when vectorization |
6757 | on the unrolled body effectively re-orders stmts. */ | |
6758 | if (!PURE_SLP_STMT (stmt_info) | |
6759 | && STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0 | |
6760 | && ((unsigned)LOOP_VINFO_VECT_FACTOR (loop_vinfo) | |
6761 | > STMT_VINFO_MIN_NEG_DIST (stmt_info))) | |
6762 | { | |
6763 | if (dump_enabled_p ()) | |
6764 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
6765 | "cannot perform implicit CSE when performing " | |
6766 | "group loads with negative dependence distance\n"); | |
6767 | return false; | |
6768 | } | |
96bb56b2 RB |
6769 | |
6770 | /* Similarly when the stmt is a load that is both part of a SLP | |
6771 | instance and a loop vectorized stmt via the same-dr mechanism | |
6772 | we have to give up. */ | |
6773 | if (STMT_VINFO_GROUP_SAME_DR_STMT (stmt_info) | |
6774 | && (STMT_SLP_TYPE (stmt_info) | |
6775 | != STMT_SLP_TYPE (vinfo_for_stmt | |
6776 | (STMT_VINFO_GROUP_SAME_DR_STMT (stmt_info))))) | |
6777 | { | |
6778 | if (dump_enabled_p ()) | |
6779 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
6780 | "conflicting SLP types for CSEd load\n"); | |
6781 | return false; | |
6782 | } | |
ebfd146a IR |
6783 | } |
6784 | ||
2de001ee | 6785 | vect_memory_access_type memory_access_type; |
62da9e14 | 6786 | if (!get_load_store_type (stmt, vectype, slp, VLS_LOAD, ncopies, |
2de001ee RS |
6787 | &memory_access_type, &gs_info)) |
6788 | return false; | |
a1e53f3f | 6789 | |
ebfd146a IR |
6790 | if (!vec_stmt) /* transformation not required. */ |
6791 | { | |
2de001ee RS |
6792 | if (!slp) |
6793 | STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info) = memory_access_type; | |
ebfd146a | 6794 | STMT_VINFO_TYPE (stmt_info) = load_vec_info_type; |
2e8ab70c RB |
6795 | /* The SLP costs are calculated during SLP analysis. */ |
6796 | if (!PURE_SLP_STMT (stmt_info)) | |
2de001ee | 6797 | vect_model_load_cost (stmt_info, ncopies, memory_access_type, |
2e8ab70c | 6798 | NULL, NULL, NULL); |
ebfd146a IR |
6799 | return true; |
6800 | } | |
6801 | ||
2de001ee RS |
6802 | if (!slp) |
6803 | gcc_assert (memory_access_type | |
6804 | == STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info)); | |
6805 | ||
73fbfcad | 6806 | if (dump_enabled_p ()) |
78c60e3d | 6807 | dump_printf_loc (MSG_NOTE, vect_location, |
e645e942 | 6808 | "transform load. ncopies = %d\n", ncopies); |
ebfd146a | 6809 | |
67b8dbac | 6810 | /* Transform. */ |
ebfd146a | 6811 | |
c716e67f XDL |
6812 | ensure_base_align (stmt_info, dr); |
6813 | ||
2de001ee | 6814 | if (memory_access_type == VMAT_GATHER_SCATTER) |
aec7ae7d JJ |
6815 | { |
6816 | tree vec_oprnd0 = NULL_TREE, op; | |
134c85ca | 6817 | tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gs_info.decl)); |
aec7ae7d | 6818 | tree rettype, srctype, ptrtype, idxtype, masktype, scaletype; |
d3c2fee0 | 6819 | tree ptr, mask, var, scale, merge, perm_mask = NULL_TREE, prev_res = NULL_TREE; |
aec7ae7d JJ |
6820 | edge pe = loop_preheader_edge (loop); |
6821 | gimple_seq seq; | |
6822 | basic_block new_bb; | |
6823 | enum { NARROW, NONE, WIDEN } modifier; | |
134c85ca | 6824 | int gather_off_nunits = TYPE_VECTOR_SUBPARTS (gs_info.offset_vectype); |
aec7ae7d JJ |
6825 | |
6826 | if (nunits == gather_off_nunits) | |
6827 | modifier = NONE; | |
6828 | else if (nunits == gather_off_nunits / 2) | |
6829 | { | |
6830 | unsigned char *sel = XALLOCAVEC (unsigned char, gather_off_nunits); | |
6831 | modifier = WIDEN; | |
6832 | ||
6833 | for (i = 0; i < gather_off_nunits; ++i) | |
6834 | sel[i] = i | nunits; | |
6835 | ||
134c85ca | 6836 | perm_mask = vect_gen_perm_mask_checked (gs_info.offset_vectype, sel); |
aec7ae7d JJ |
6837 | } |
6838 | else if (nunits == gather_off_nunits * 2) | |
6839 | { | |
6840 | unsigned char *sel = XALLOCAVEC (unsigned char, nunits); | |
6841 | modifier = NARROW; | |
6842 | ||
6843 | for (i = 0; i < nunits; ++i) | |
6844 | sel[i] = i < gather_off_nunits | |
6845 | ? i : i + nunits - gather_off_nunits; | |
6846 | ||
557be5a8 | 6847 | perm_mask = vect_gen_perm_mask_checked (vectype, sel); |
aec7ae7d JJ |
6848 | ncopies *= 2; |
6849 | } | |
6850 | else | |
6851 | gcc_unreachable (); | |
6852 | ||
134c85ca | 6853 | rettype = TREE_TYPE (TREE_TYPE (gs_info.decl)); |
aec7ae7d JJ |
6854 | srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist); |
6855 | ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist); | |
6856 | idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist); | |
6857 | masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist); | |
6858 | scaletype = TREE_VALUE (arglist); | |
d3c2fee0 | 6859 | gcc_checking_assert (types_compatible_p (srctype, rettype)); |
aec7ae7d JJ |
6860 | |
6861 | vec_dest = vect_create_destination_var (scalar_dest, vectype); | |
6862 | ||
134c85ca | 6863 | ptr = fold_convert (ptrtype, gs_info.base); |
aec7ae7d JJ |
6864 | if (!is_gimple_min_invariant (ptr)) |
6865 | { | |
6866 | ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE); | |
6867 | new_bb = gsi_insert_seq_on_edge_immediate (pe, seq); | |
6868 | gcc_assert (!new_bb); | |
6869 | } | |
6870 | ||
6871 | /* Currently we support only unconditional gather loads, | |
6872 | so mask should be all ones. */ | |
d3c2fee0 AI |
6873 | if (TREE_CODE (masktype) == INTEGER_TYPE) |
6874 | mask = build_int_cst (masktype, -1); | |
6875 | else if (TREE_CODE (TREE_TYPE (masktype)) == INTEGER_TYPE) | |
6876 | { | |
6877 | mask = build_int_cst (TREE_TYPE (masktype), -1); | |
6878 | mask = build_vector_from_val (masktype, mask); | |
03b9e8e4 | 6879 | mask = vect_init_vector (stmt, mask, masktype, NULL); |
d3c2fee0 | 6880 | } |
aec7ae7d JJ |
6881 | else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (masktype))) |
6882 | { | |
6883 | REAL_VALUE_TYPE r; | |
6884 | long tmp[6]; | |
6885 | for (j = 0; j < 6; ++j) | |
6886 | tmp[j] = -1; | |
6887 | real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (masktype))); | |
6888 | mask = build_real (TREE_TYPE (masktype), r); | |
d3c2fee0 | 6889 | mask = build_vector_from_val (masktype, mask); |
03b9e8e4 | 6890 | mask = vect_init_vector (stmt, mask, masktype, NULL); |
aec7ae7d JJ |
6891 | } |
6892 | else | |
6893 | gcc_unreachable (); | |
aec7ae7d | 6894 | |
134c85ca | 6895 | scale = build_int_cst (scaletype, gs_info.scale); |
aec7ae7d | 6896 | |
d3c2fee0 AI |
6897 | if (TREE_CODE (TREE_TYPE (rettype)) == INTEGER_TYPE) |
6898 | merge = build_int_cst (TREE_TYPE (rettype), 0); | |
6899 | else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (rettype))) | |
6900 | { | |
6901 | REAL_VALUE_TYPE r; | |
6902 | long tmp[6]; | |
6903 | for (j = 0; j < 6; ++j) | |
6904 | tmp[j] = 0; | |
6905 | real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (rettype))); | |
6906 | merge = build_real (TREE_TYPE (rettype), r); | |
6907 | } | |
6908 | else | |
6909 | gcc_unreachable (); | |
6910 | merge = build_vector_from_val (rettype, merge); | |
6911 | merge = vect_init_vector (stmt, merge, rettype, NULL); | |
6912 | ||
aec7ae7d JJ |
6913 | prev_stmt_info = NULL; |
6914 | for (j = 0; j < ncopies; ++j) | |
6915 | { | |
6916 | if (modifier == WIDEN && (j & 1)) | |
6917 | op = permute_vec_elements (vec_oprnd0, vec_oprnd0, | |
6918 | perm_mask, stmt, gsi); | |
6919 | else if (j == 0) | |
6920 | op = vec_oprnd0 | |
134c85ca | 6921 | = vect_get_vec_def_for_operand (gs_info.offset, stmt); |
aec7ae7d JJ |
6922 | else |
6923 | op = vec_oprnd0 | |
134c85ca | 6924 | = vect_get_vec_def_for_stmt_copy (gs_info.offset_dt, vec_oprnd0); |
aec7ae7d JJ |
6925 | |
6926 | if (!useless_type_conversion_p (idxtype, TREE_TYPE (op))) | |
6927 | { | |
6928 | gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op)) | |
6929 | == TYPE_VECTOR_SUBPARTS (idxtype)); | |
0e22bb5a | 6930 | var = vect_get_new_ssa_name (idxtype, vect_simple_var); |
aec7ae7d JJ |
6931 | op = build1 (VIEW_CONVERT_EXPR, idxtype, op); |
6932 | new_stmt | |
0d0e4a03 | 6933 | = gimple_build_assign (var, VIEW_CONVERT_EXPR, op); |
aec7ae7d JJ |
6934 | vect_finish_stmt_generation (stmt, new_stmt, gsi); |
6935 | op = var; | |
6936 | } | |
6937 | ||
6938 | new_stmt | |
134c85ca | 6939 | = gimple_build_call (gs_info.decl, 5, merge, ptr, op, mask, scale); |
aec7ae7d JJ |
6940 | |
6941 | if (!useless_type_conversion_p (vectype, rettype)) | |
6942 | { | |
6943 | gcc_assert (TYPE_VECTOR_SUBPARTS (vectype) | |
6944 | == TYPE_VECTOR_SUBPARTS (rettype)); | |
0e22bb5a | 6945 | op = vect_get_new_ssa_name (rettype, vect_simple_var); |
aec7ae7d JJ |
6946 | gimple_call_set_lhs (new_stmt, op); |
6947 | vect_finish_stmt_generation (stmt, new_stmt, gsi); | |
b731b390 | 6948 | var = make_ssa_name (vec_dest); |
aec7ae7d JJ |
6949 | op = build1 (VIEW_CONVERT_EXPR, vectype, op); |
6950 | new_stmt | |
0d0e4a03 | 6951 | = gimple_build_assign (var, VIEW_CONVERT_EXPR, op); |
aec7ae7d JJ |
6952 | } |
6953 | else | |
6954 | { | |
6955 | var = make_ssa_name (vec_dest, new_stmt); | |
6956 | gimple_call_set_lhs (new_stmt, var); | |
6957 | } | |
6958 | ||
6959 | vect_finish_stmt_generation (stmt, new_stmt, gsi); | |
6960 | ||
6961 | if (modifier == NARROW) | |
6962 | { | |
6963 | if ((j & 1) == 0) | |
6964 | { | |
6965 | prev_res = var; | |
6966 | continue; | |
6967 | } | |
6968 | var = permute_vec_elements (prev_res, var, | |
6969 | perm_mask, stmt, gsi); | |
6970 | new_stmt = SSA_NAME_DEF_STMT (var); | |
6971 | } | |
6972 | ||
6973 | if (prev_stmt_info == NULL) | |
6974 | STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt; | |
6975 | else | |
6976 | STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt; | |
6977 | prev_stmt_info = vinfo_for_stmt (new_stmt); | |
6978 | } | |
6979 | return true; | |
6980 | } | |
2de001ee RS |
6981 | |
6982 | if (memory_access_type == VMAT_ELEMENTWISE | |
6983 | || memory_access_type == VMAT_STRIDED_SLP) | |
7d75abc8 MM |
6984 | { |
6985 | gimple_stmt_iterator incr_gsi; | |
6986 | bool insert_after; | |
355fe088 | 6987 | gimple *incr; |
7d75abc8 | 6988 | tree offvar; |
7d75abc8 MM |
6989 | tree ivstep; |
6990 | tree running_off; | |
9771b263 | 6991 | vec<constructor_elt, va_gc> *v = NULL; |
7d75abc8 | 6992 | gimple_seq stmts = NULL; |
14ac6aa2 RB |
6993 | tree stride_base, stride_step, alias_off; |
6994 | ||
6995 | gcc_assert (!nested_in_vect_loop); | |
7d75abc8 | 6996 | |
f502d50e | 6997 | if (slp && grouped_load) |
44fc7854 BE |
6998 | { |
6999 | first_stmt = GROUP_FIRST_ELEMENT (stmt_info); | |
7000 | first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt)); | |
7001 | group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt)); | |
7002 | ref_type = get_group_alias_ptr_type (first_stmt); | |
7003 | } | |
ab313a8c | 7004 | else |
44fc7854 BE |
7005 | { |
7006 | first_stmt = stmt; | |
7007 | first_dr = dr; | |
7008 | group_size = 1; | |
7009 | ref_type = reference_alias_ptr_type (DR_REF (first_dr)); | |
7010 | } | |
ab313a8c | 7011 | |
14ac6aa2 RB |
7012 | stride_base |
7013 | = fold_build_pointer_plus | |
ab313a8c | 7014 | (DR_BASE_ADDRESS (first_dr), |
14ac6aa2 | 7015 | size_binop (PLUS_EXPR, |
ab313a8c RB |
7016 | convert_to_ptrofftype (DR_OFFSET (first_dr)), |
7017 | convert_to_ptrofftype (DR_INIT (first_dr)))); | |
7018 | stride_step = fold_convert (sizetype, DR_STEP (first_dr)); | |
7d75abc8 MM |
7019 | |
7020 | /* For a load with loop-invariant (but other than power-of-2) | |
7021 | stride (i.e. not a grouped access) like so: | |
7022 | ||
7023 | for (i = 0; i < n; i += stride) | |
7024 | ... = array[i]; | |
7025 | ||
7026 | we generate a new induction variable and new accesses to | |
7027 | form a new vector (or vectors, depending on ncopies): | |
7028 | ||
7029 | for (j = 0; ; j += VF*stride) | |
7030 | tmp1 = array[j]; | |
7031 | tmp2 = array[j + stride]; | |
7032 | ... | |
7033 | vectemp = {tmp1, tmp2, ...} | |
7034 | */ | |
7035 | ||
ab313a8c RB |
7036 | ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (stride_step), stride_step, |
7037 | build_int_cst (TREE_TYPE (stride_step), vf)); | |
7d75abc8 MM |
7038 | |
7039 | standard_iv_increment_position (loop, &incr_gsi, &insert_after); | |
7040 | ||
ab313a8c | 7041 | create_iv (unshare_expr (stride_base), unshare_expr (ivstep), NULL, |
7d75abc8 MM |
7042 | loop, &incr_gsi, insert_after, |
7043 | &offvar, NULL); | |
7044 | incr = gsi_stmt (incr_gsi); | |
310213d4 | 7045 | set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo)); |
7d75abc8 | 7046 | |
ab313a8c RB |
7047 | stride_step = force_gimple_operand (unshare_expr (stride_step), |
7048 | &stmts, true, NULL_TREE); | |
7d75abc8 MM |
7049 | if (stmts) |
7050 | gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts); | |
7051 | ||
7052 | prev_stmt_info = NULL; | |
7053 | running_off = offvar; | |
44fc7854 | 7054 | alias_off = build_int_cst (ref_type, 0); |
7b5fc413 | 7055 | int nloads = nunits; |
e09b4c37 | 7056 | int lnel = 1; |
7b5fc413 | 7057 | tree ltype = TREE_TYPE (vectype); |
ea60dd34 | 7058 | tree lvectype = vectype; |
b266b968 | 7059 | auto_vec<tree> dr_chain; |
2de001ee | 7060 | if (memory_access_type == VMAT_STRIDED_SLP) |
7b5fc413 | 7061 | { |
2de001ee | 7062 | if (group_size < nunits) |
e09b4c37 | 7063 | { |
ff03930a JJ |
7064 | /* First check if vec_init optab supports construction from |
7065 | vector elts directly. */ | |
b397965c | 7066 | scalar_mode elmode = SCALAR_TYPE_MODE (TREE_TYPE (vectype)); |
ff03930a | 7067 | machine_mode vmode = mode_for_vector (elmode, group_size); |
ea60dd34 | 7068 | if (VECTOR_MODE_P (vmode) |
ff03930a JJ |
7069 | && (convert_optab_handler (vec_init_optab, |
7070 | TYPE_MODE (vectype), vmode) | |
7071 | != CODE_FOR_nothing)) | |
ea60dd34 RB |
7072 | { |
7073 | nloads = nunits / group_size; | |
7074 | lnel = group_size; | |
ff03930a JJ |
7075 | ltype = build_vector_type (TREE_TYPE (vectype), group_size); |
7076 | } | |
7077 | else | |
7078 | { | |
7079 | /* Otherwise avoid emitting a constructor of vector elements | |
7080 | by performing the loads using an integer type of the same | |
7081 | size, constructing a vector of those and then | |
7082 | re-interpreting it as the original vector type. | |
7083 | This avoids a huge runtime penalty due to the general | |
7084 | inability to perform store forwarding from smaller stores | |
7085 | to a larger load. */ | |
7086 | unsigned lsize | |
7087 | = group_size * TYPE_PRECISION (TREE_TYPE (vectype)); | |
fffbab82 | 7088 | elmode = int_mode_for_size (lsize, 0).require (); |
ff03930a JJ |
7089 | vmode = mode_for_vector (elmode, nunits / group_size); |
7090 | /* If we can't construct such a vector fall back to | |
7091 | element loads of the original vector type. */ | |
7092 | if (VECTOR_MODE_P (vmode) | |
7093 | && (convert_optab_handler (vec_init_optab, vmode, elmode) | |
7094 | != CODE_FOR_nothing)) | |
7095 | { | |
7096 | nloads = nunits / group_size; | |
7097 | lnel = group_size; | |
7098 | ltype = build_nonstandard_integer_type (lsize, 1); | |
7099 | lvectype = build_vector_type (ltype, nloads); | |
7100 | } | |
ea60dd34 | 7101 | } |
e09b4c37 | 7102 | } |
2de001ee | 7103 | else |
e09b4c37 | 7104 | { |
ea60dd34 | 7105 | nloads = 1; |
e09b4c37 RB |
7106 | lnel = nunits; |
7107 | ltype = vectype; | |
e09b4c37 | 7108 | } |
2de001ee RS |
7109 | ltype = build_aligned_type (ltype, TYPE_ALIGN (TREE_TYPE (vectype))); |
7110 | } | |
7111 | if (slp) | |
7112 | { | |
66c16fd9 RB |
7113 | /* For SLP permutation support we need to load the whole group, |
7114 | not only the number of vector stmts the permutation result | |
7115 | fits in. */ | |
b266b968 | 7116 | if (slp_perm) |
66c16fd9 RB |
7117 | { |
7118 | ncopies = (group_size * vf + nunits - 1) / nunits; | |
7119 | dr_chain.create (ncopies); | |
7120 | } | |
7121 | else | |
7122 | ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node); | |
7b5fc413 | 7123 | } |
e09b4c37 RB |
7124 | int group_el = 0; |
7125 | unsigned HOST_WIDE_INT | |
7126 | elsz = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (vectype))); | |
7d75abc8 MM |
7127 | for (j = 0; j < ncopies; j++) |
7128 | { | |
7b5fc413 | 7129 | if (nloads > 1) |
e09b4c37 RB |
7130 | vec_alloc (v, nloads); |
7131 | for (i = 0; i < nloads; i++) | |
7b5fc413 | 7132 | { |
e09b4c37 RB |
7133 | tree this_off = build_int_cst (TREE_TYPE (alias_off), |
7134 | group_el * elsz); | |
7135 | new_stmt = gimple_build_assign (make_ssa_name (ltype), | |
7136 | build2 (MEM_REF, ltype, | |
7137 | running_off, this_off)); | |
7138 | vect_finish_stmt_generation (stmt, new_stmt, gsi); | |
7139 | if (nloads > 1) | |
7140 | CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, | |
7141 | gimple_assign_lhs (new_stmt)); | |
7142 | ||
7143 | group_el += lnel; | |
7144 | if (! slp | |
7145 | || group_el == group_size) | |
7b5fc413 | 7146 | { |
e09b4c37 RB |
7147 | tree newoff = copy_ssa_name (running_off); |
7148 | gimple *incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR, | |
7149 | running_off, stride_step); | |
7b5fc413 RB |
7150 | vect_finish_stmt_generation (stmt, incr, gsi); |
7151 | ||
7152 | running_off = newoff; | |
e09b4c37 | 7153 | group_el = 0; |
7b5fc413 | 7154 | } |
7b5fc413 | 7155 | } |
e09b4c37 | 7156 | if (nloads > 1) |
7d75abc8 | 7157 | { |
ea60dd34 RB |
7158 | tree vec_inv = build_constructor (lvectype, v); |
7159 | new_temp = vect_init_vector (stmt, vec_inv, lvectype, gsi); | |
e09b4c37 | 7160 | new_stmt = SSA_NAME_DEF_STMT (new_temp); |
ea60dd34 RB |
7161 | if (lvectype != vectype) |
7162 | { | |
7163 | new_stmt = gimple_build_assign (make_ssa_name (vectype), | |
7164 | VIEW_CONVERT_EXPR, | |
7165 | build1 (VIEW_CONVERT_EXPR, | |
7166 | vectype, new_temp)); | |
7167 | vect_finish_stmt_generation (stmt, new_stmt, gsi); | |
7168 | } | |
7d75abc8 MM |
7169 | } |
7170 | ||
7b5fc413 | 7171 | if (slp) |
b266b968 | 7172 | { |
b266b968 RB |
7173 | if (slp_perm) |
7174 | dr_chain.quick_push (gimple_assign_lhs (new_stmt)); | |
66c16fd9 RB |
7175 | else |
7176 | SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt); | |
b266b968 | 7177 | } |
7d75abc8 | 7178 | else |
225ce44b RB |
7179 | { |
7180 | if (j == 0) | |
7181 | STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt; | |
7182 | else | |
7183 | STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt; | |
7184 | prev_stmt_info = vinfo_for_stmt (new_stmt); | |
7185 | } | |
7d75abc8 | 7186 | } |
b266b968 | 7187 | if (slp_perm) |
29afecdf RB |
7188 | { |
7189 | unsigned n_perms; | |
7190 | vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf, | |
7191 | slp_node_instance, false, &n_perms); | |
7192 | } | |
7d75abc8 MM |
7193 | return true; |
7194 | } | |
aec7ae7d | 7195 | |
0d0293ac | 7196 | if (grouped_load) |
ebfd146a | 7197 | { |
e14c1050 | 7198 | first_stmt = GROUP_FIRST_ELEMENT (stmt_info); |
44fc7854 | 7199 | group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt)); |
b267968e | 7200 | int group_gap = GROUP_GAP (vinfo_for_stmt (first_stmt)); |
4f0a0218 | 7201 | /* For SLP vectorization we directly vectorize a subchain |
52eab378 RB |
7202 | without permutation. */ |
7203 | if (slp && ! SLP_TREE_LOAD_PERMUTATION (slp_node).exists ()) | |
4f0a0218 RB |
7204 | first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0]; |
7205 | /* For BB vectorization always use the first stmt to base | |
7206 | the data ref pointer on. */ | |
7207 | if (bb_vinfo) | |
7208 | first_stmt_for_drptr = SLP_TREE_SCALAR_STMTS (slp_node)[0]; | |
6aa904c4 | 7209 | |
ebfd146a | 7210 | /* Check if the chain of loads is already vectorized. */ |
01d8bf07 RB |
7211 | if (STMT_VINFO_VEC_STMT (vinfo_for_stmt (first_stmt)) |
7212 | /* For SLP we would need to copy over SLP_TREE_VEC_STMTS. | |
7213 | ??? But we can only do so if there is exactly one | |
7214 | as we have no way to get at the rest. Leave the CSE | |
7215 | opportunity alone. | |
7216 | ??? With the group load eventually participating | |
7217 | in multiple different permutations (having multiple | |
7218 | slp nodes which refer to the same group) the CSE | |
7219 | is even wrong code. See PR56270. */ | |
7220 | && !slp) | |
ebfd146a IR |
7221 | { |
7222 | *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info); | |
7223 | return true; | |
7224 | } | |
7225 | first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt)); | |
9b999e8c | 7226 | group_gap_adj = 0; |
ebfd146a IR |
7227 | |
7228 | /* VEC_NUM is the number of vect stmts to be created for this group. */ | |
7229 | if (slp) | |
7230 | { | |
0d0293ac | 7231 | grouped_load = false; |
91ff1504 RB |
7232 | /* For SLP permutation support we need to load the whole group, |
7233 | not only the number of vector stmts the permutation result | |
7234 | fits in. */ | |
7235 | if (slp_perm) | |
b267968e RB |
7236 | { |
7237 | vec_num = (group_size * vf + nunits - 1) / nunits; | |
7238 | group_gap_adj = vf * group_size - nunits * vec_num; | |
7239 | } | |
91ff1504 | 7240 | else |
b267968e RB |
7241 | { |
7242 | vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node); | |
7243 | group_gap_adj = group_gap; | |
7244 | } | |
a70d6342 | 7245 | } |
ebfd146a | 7246 | else |
9b999e8c | 7247 | vec_num = group_size; |
44fc7854 BE |
7248 | |
7249 | ref_type = get_group_alias_ptr_type (first_stmt); | |
ebfd146a IR |
7250 | } |
7251 | else | |
7252 | { | |
7253 | first_stmt = stmt; | |
7254 | first_dr = dr; | |
7255 | group_size = vec_num = 1; | |
9b999e8c | 7256 | group_gap_adj = 0; |
44fc7854 | 7257 | ref_type = reference_alias_ptr_type (DR_REF (first_dr)); |
ebfd146a IR |
7258 | } |
7259 | ||
720f5239 | 7260 | alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false); |
ebfd146a | 7261 | gcc_assert (alignment_support_scheme); |
272c6793 RS |
7262 | /* Targets with load-lane instructions must not require explicit |
7263 | realignment. */ | |
2de001ee | 7264 | gcc_assert (memory_access_type != VMAT_LOAD_STORE_LANES |
272c6793 RS |
7265 | || alignment_support_scheme == dr_aligned |
7266 | || alignment_support_scheme == dr_unaligned_supported); | |
ebfd146a IR |
7267 | |
7268 | /* In case the vectorization factor (VF) is bigger than the number | |
7269 | of elements that we can fit in a vectype (nunits), we have to generate | |
7270 | more than one vector stmt - i.e - we need to "unroll" the | |
ff802fa1 | 7271 | vector stmt by a factor VF/nunits. In doing so, we record a pointer |
ebfd146a | 7272 | from one copy of the vector stmt to the next, in the field |
ff802fa1 | 7273 | STMT_VINFO_RELATED_STMT. This is necessary in order to allow following |
ebfd146a | 7274 | stages to find the correct vector defs to be used when vectorizing |
ff802fa1 IR |
7275 | stmts that use the defs of the current stmt. The example below |
7276 | illustrates the vectorization process when VF=16 and nunits=4 (i.e., we | |
7277 | need to create 4 vectorized stmts): | |
ebfd146a IR |
7278 | |
7279 | before vectorization: | |
7280 | RELATED_STMT VEC_STMT | |
7281 | S1: x = memref - - | |
7282 | S2: z = x + 1 - - | |
7283 | ||
7284 | step 1: vectorize stmt S1: | |
7285 | We first create the vector stmt VS1_0, and, as usual, record a | |
7286 | pointer to it in the STMT_VINFO_VEC_STMT of the scalar stmt S1. | |
7287 | Next, we create the vector stmt VS1_1, and record a pointer to | |
7288 | it in the STMT_VINFO_RELATED_STMT of the vector stmt VS1_0. | |
ff802fa1 | 7289 | Similarly, for VS1_2 and VS1_3. This is the resulting chain of |
ebfd146a IR |
7290 | stmts and pointers: |
7291 | RELATED_STMT VEC_STMT | |
7292 | VS1_0: vx0 = memref0 VS1_1 - | |
7293 | VS1_1: vx1 = memref1 VS1_2 - | |
7294 | VS1_2: vx2 = memref2 VS1_3 - | |
7295 | VS1_3: vx3 = memref3 - - | |
7296 | S1: x = load - VS1_0 | |
7297 | S2: z = x + 1 - - | |
7298 | ||
b8698a0f L |
7299 | See in documentation in vect_get_vec_def_for_stmt_copy for how the |
7300 | information we recorded in RELATED_STMT field is used to vectorize | |
ebfd146a IR |
7301 | stmt S2. */ |
7302 | ||
0d0293ac | 7303 | /* In case of interleaving (non-unit grouped access): |
ebfd146a IR |
7304 | |
7305 | S1: x2 = &base + 2 | |
7306 | S2: x0 = &base | |
7307 | S3: x1 = &base + 1 | |
7308 | S4: x3 = &base + 3 | |
7309 | ||
b8698a0f | 7310 | Vectorized loads are created in the order of memory accesses |
ebfd146a IR |
7311 | starting from the access of the first stmt of the chain: |
7312 | ||
7313 | VS1: vx0 = &base | |
7314 | VS2: vx1 = &base + vec_size*1 | |
7315 | VS3: vx3 = &base + vec_size*2 | |
7316 | VS4: vx4 = &base + vec_size*3 | |
7317 | ||
7318 | Then permutation statements are generated: | |
7319 | ||
e2c83630 RH |
7320 | VS5: vx5 = VEC_PERM_EXPR < vx0, vx1, { 0, 2, ..., i*2 } > |
7321 | VS6: vx6 = VEC_PERM_EXPR < vx0, vx1, { 1, 3, ..., i*2+1 } > | |
ebfd146a IR |
7322 | ... |
7323 | ||
7324 | And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts | |
7325 | (the order of the data-refs in the output of vect_permute_load_chain | |
7326 | corresponds to the order of scalar stmts in the interleaving chain - see | |
7327 | the documentation of vect_permute_load_chain()). | |
7328 | The generation of permutation stmts and recording them in | |
0d0293ac | 7329 | STMT_VINFO_VEC_STMT is done in vect_transform_grouped_load(). |
ebfd146a | 7330 | |
b8698a0f | 7331 | In case of both multiple types and interleaving, the vector loads and |
ff802fa1 IR |
7332 | permutation stmts above are created for every copy. The result vector |
7333 | stmts are put in STMT_VINFO_VEC_STMT for the first copy and in the | |
7334 | corresponding STMT_VINFO_RELATED_STMT for the next copies. */ | |
ebfd146a IR |
7335 | |
7336 | /* If the data reference is aligned (dr_aligned) or potentially unaligned | |
7337 | on a target that supports unaligned accesses (dr_unaligned_supported) | |
7338 | we generate the following code: | |
7339 | p = initial_addr; | |
7340 | indx = 0; | |
7341 | loop { | |
7342 | p = p + indx * vectype_size; | |
7343 | vec_dest = *(p); | |
7344 | indx = indx + 1; | |
7345 | } | |
7346 | ||
7347 | Otherwise, the data reference is potentially unaligned on a target that | |
b8698a0f | 7348 | does not support unaligned accesses (dr_explicit_realign_optimized) - |
ebfd146a IR |
7349 | then generate the following code, in which the data in each iteration is |
7350 | obtained by two vector loads, one from the previous iteration, and one | |
7351 | from the current iteration: | |
7352 | p1 = initial_addr; | |
7353 | msq_init = *(floor(p1)) | |
7354 | p2 = initial_addr + VS - 1; | |
7355 | realignment_token = call target_builtin; | |
7356 | indx = 0; | |
7357 | loop { | |
7358 | p2 = p2 + indx * vectype_size | |
7359 | lsq = *(floor(p2)) | |
7360 | vec_dest = realign_load (msq, lsq, realignment_token) | |
7361 | indx = indx + 1; | |
7362 | msq = lsq; | |
7363 | } */ | |
7364 | ||
7365 | /* If the misalignment remains the same throughout the execution of the | |
7366 | loop, we can create the init_addr and permutation mask at the loop | |
ff802fa1 | 7367 | preheader. Otherwise, it needs to be created inside the loop. |
ebfd146a IR |
7368 | This can only occur when vectorizing memory accesses in the inner-loop |
7369 | nested within an outer-loop that is being vectorized. */ | |
7370 | ||
d1e4b493 | 7371 | if (nested_in_vect_loop |
832b4117 | 7372 | && (DR_STEP_ALIGNMENT (dr) % GET_MODE_SIZE (TYPE_MODE (vectype))) != 0) |
ebfd146a IR |
7373 | { |
7374 | gcc_assert (alignment_support_scheme != dr_explicit_realign_optimized); | |
7375 | compute_in_loop = true; | |
7376 | } | |
7377 | ||
7378 | if ((alignment_support_scheme == dr_explicit_realign_optimized | |
7379 | || alignment_support_scheme == dr_explicit_realign) | |
59fd17e3 | 7380 | && !compute_in_loop) |
ebfd146a IR |
7381 | { |
7382 | msq = vect_setup_realignment (first_stmt, gsi, &realignment_token, | |
7383 | alignment_support_scheme, NULL_TREE, | |
7384 | &at_loop); | |
7385 | if (alignment_support_scheme == dr_explicit_realign_optimized) | |
7386 | { | |
538dd0b7 | 7387 | phi = as_a <gphi *> (SSA_NAME_DEF_STMT (msq)); |
356bbc4c JJ |
7388 | byte_offset = size_binop (MINUS_EXPR, TYPE_SIZE_UNIT (vectype), |
7389 | size_one_node); | |
ebfd146a IR |
7390 | } |
7391 | } | |
7392 | else | |
7393 | at_loop = loop; | |
7394 | ||
62da9e14 | 7395 | if (memory_access_type == VMAT_CONTIGUOUS_REVERSE) |
a1e53f3f L |
7396 | offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1); |
7397 | ||
2de001ee | 7398 | if (memory_access_type == VMAT_LOAD_STORE_LANES) |
272c6793 RS |
7399 | aggr_type = build_array_type_nelts (elem_type, vec_num * nunits); |
7400 | else | |
7401 | aggr_type = vectype; | |
7402 | ||
ebfd146a | 7403 | prev_stmt_info = NULL; |
b267968e | 7404 | int group_elt = 0; |
ebfd146a | 7405 | for (j = 0; j < ncopies; j++) |
b8698a0f | 7406 | { |
272c6793 | 7407 | /* 1. Create the vector or array pointer update chain. */ |
ebfd146a | 7408 | if (j == 0) |
74bf76ed JJ |
7409 | { |
7410 | bool simd_lane_access_p | |
7411 | = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info); | |
7412 | if (simd_lane_access_p | |
7413 | && TREE_CODE (DR_BASE_ADDRESS (first_dr)) == ADDR_EXPR | |
7414 | && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr), 0)) | |
7415 | && integer_zerop (DR_OFFSET (first_dr)) | |
7416 | && integer_zerop (DR_INIT (first_dr)) | |
7417 | && alias_sets_conflict_p (get_alias_set (aggr_type), | |
44fc7854 | 7418 | get_alias_set (TREE_TYPE (ref_type))) |
74bf76ed JJ |
7419 | && (alignment_support_scheme == dr_aligned |
7420 | || alignment_support_scheme == dr_unaligned_supported)) | |
7421 | { | |
7422 | dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr)); | |
44fc7854 | 7423 | dataref_offset = build_int_cst (ref_type, 0); |
8928eff3 | 7424 | inv_p = false; |
74bf76ed | 7425 | } |
4f0a0218 RB |
7426 | else if (first_stmt_for_drptr |
7427 | && first_stmt != first_stmt_for_drptr) | |
7428 | { | |
7429 | dataref_ptr | |
7430 | = vect_create_data_ref_ptr (first_stmt_for_drptr, aggr_type, | |
7431 | at_loop, offset, &dummy, gsi, | |
7432 | &ptr_incr, simd_lane_access_p, | |
7433 | &inv_p, byte_offset); | |
7434 | /* Adjust the pointer by the difference to first_stmt. */ | |
7435 | data_reference_p ptrdr | |
7436 | = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt_for_drptr)); | |
7437 | tree diff = fold_convert (sizetype, | |
7438 | size_binop (MINUS_EXPR, | |
7439 | DR_INIT (first_dr), | |
7440 | DR_INIT (ptrdr))); | |
7441 | dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, | |
7442 | stmt, diff); | |
7443 | } | |
74bf76ed JJ |
7444 | else |
7445 | dataref_ptr | |
7446 | = vect_create_data_ref_ptr (first_stmt, aggr_type, at_loop, | |
7447 | offset, &dummy, gsi, &ptr_incr, | |
356bbc4c JJ |
7448 | simd_lane_access_p, &inv_p, |
7449 | byte_offset); | |
74bf76ed JJ |
7450 | } |
7451 | else if (dataref_offset) | |
7452 | dataref_offset = int_const_binop (PLUS_EXPR, dataref_offset, | |
7453 | TYPE_SIZE_UNIT (aggr_type)); | |
ebfd146a | 7454 | else |
272c6793 RS |
7455 | dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt, |
7456 | TYPE_SIZE_UNIT (aggr_type)); | |
ebfd146a | 7457 | |
0d0293ac | 7458 | if (grouped_load || slp_perm) |
9771b263 | 7459 | dr_chain.create (vec_num); |
5ce1ee7f | 7460 | |
2de001ee | 7461 | if (memory_access_type == VMAT_LOAD_STORE_LANES) |
ebfd146a | 7462 | { |
272c6793 RS |
7463 | tree vec_array; |
7464 | ||
7465 | vec_array = create_vector_array (vectype, vec_num); | |
7466 | ||
7467 | /* Emit: | |
7468 | VEC_ARRAY = LOAD_LANES (MEM_REF[...all elements...]). */ | |
44fc7854 | 7469 | data_ref = create_array_ref (aggr_type, dataref_ptr, ref_type); |
a844293d RS |
7470 | gcall *call = gimple_build_call_internal (IFN_LOAD_LANES, 1, |
7471 | data_ref); | |
7472 | gimple_call_set_lhs (call, vec_array); | |
7473 | gimple_call_set_nothrow (call, true); | |
7474 | new_stmt = call; | |
272c6793 | 7475 | vect_finish_stmt_generation (stmt, new_stmt, gsi); |
ebfd146a | 7476 | |
272c6793 RS |
7477 | /* Extract each vector into an SSA_NAME. */ |
7478 | for (i = 0; i < vec_num; i++) | |
ebfd146a | 7479 | { |
272c6793 RS |
7480 | new_temp = read_vector_array (stmt, gsi, scalar_dest, |
7481 | vec_array, i); | |
9771b263 | 7482 | dr_chain.quick_push (new_temp); |
272c6793 RS |
7483 | } |
7484 | ||
7485 | /* Record the mapping between SSA_NAMEs and statements. */ | |
0d0293ac | 7486 | vect_record_grouped_load_vectors (stmt, dr_chain); |
272c6793 RS |
7487 | } |
7488 | else | |
7489 | { | |
7490 | for (i = 0; i < vec_num; i++) | |
7491 | { | |
7492 | if (i > 0) | |
7493 | dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, | |
7494 | stmt, NULL_TREE); | |
7495 | ||
7496 | /* 2. Create the vector-load in the loop. */ | |
7497 | switch (alignment_support_scheme) | |
7498 | { | |
7499 | case dr_aligned: | |
7500 | case dr_unaligned_supported: | |
be1ac4ec | 7501 | { |
644ffefd MJ |
7502 | unsigned int align, misalign; |
7503 | ||
272c6793 | 7504 | data_ref |
aed93b23 RB |
7505 | = fold_build2 (MEM_REF, vectype, dataref_ptr, |
7506 | dataref_offset | |
7507 | ? dataref_offset | |
44fc7854 | 7508 | : build_int_cst (ref_type, 0)); |
644ffefd | 7509 | align = TYPE_ALIGN_UNIT (vectype); |
272c6793 RS |
7510 | if (alignment_support_scheme == dr_aligned) |
7511 | { | |
7512 | gcc_assert (aligned_access_p (first_dr)); | |
644ffefd | 7513 | misalign = 0; |
272c6793 RS |
7514 | } |
7515 | else if (DR_MISALIGNMENT (first_dr) == -1) | |
7516 | { | |
25f68d90 | 7517 | align = dr_alignment (vect_dr_behavior (first_dr)); |
52639a61 | 7518 | misalign = 0; |
272c6793 RS |
7519 | TREE_TYPE (data_ref) |
7520 | = build_aligned_type (TREE_TYPE (data_ref), | |
52639a61 | 7521 | align * BITS_PER_UNIT); |
272c6793 RS |
7522 | } |
7523 | else | |
7524 | { | |
7525 | TREE_TYPE (data_ref) | |
7526 | = build_aligned_type (TREE_TYPE (data_ref), | |
7527 | TYPE_ALIGN (elem_type)); | |
644ffefd | 7528 | misalign = DR_MISALIGNMENT (first_dr); |
272c6793 | 7529 | } |
aed93b23 RB |
7530 | if (dataref_offset == NULL_TREE |
7531 | && TREE_CODE (dataref_ptr) == SSA_NAME) | |
74bf76ed JJ |
7532 | set_ptr_info_alignment (get_ptr_info (dataref_ptr), |
7533 | align, misalign); | |
272c6793 | 7534 | break; |
be1ac4ec | 7535 | } |
272c6793 | 7536 | case dr_explicit_realign: |
267d3070 | 7537 | { |
272c6793 | 7538 | tree ptr, bump; |
272c6793 | 7539 | |
d88981fc | 7540 | tree vs = size_int (TYPE_VECTOR_SUBPARTS (vectype)); |
272c6793 RS |
7541 | |
7542 | if (compute_in_loop) | |
7543 | msq = vect_setup_realignment (first_stmt, gsi, | |
7544 | &realignment_token, | |
7545 | dr_explicit_realign, | |
7546 | dataref_ptr, NULL); | |
7547 | ||
aed93b23 RB |
7548 | if (TREE_CODE (dataref_ptr) == SSA_NAME) |
7549 | ptr = copy_ssa_name (dataref_ptr); | |
7550 | else | |
7551 | ptr = make_ssa_name (TREE_TYPE (dataref_ptr)); | |
0d0e4a03 JJ |
7552 | new_stmt = gimple_build_assign |
7553 | (ptr, BIT_AND_EXPR, dataref_ptr, | |
272c6793 RS |
7554 | build_int_cst |
7555 | (TREE_TYPE (dataref_ptr), | |
7556 | -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype))); | |
272c6793 RS |
7557 | vect_finish_stmt_generation (stmt, new_stmt, gsi); |
7558 | data_ref | |
7559 | = build2 (MEM_REF, vectype, ptr, | |
44fc7854 | 7560 | build_int_cst (ref_type, 0)); |
272c6793 RS |
7561 | vec_dest = vect_create_destination_var (scalar_dest, |
7562 | vectype); | |
7563 | new_stmt = gimple_build_assign (vec_dest, data_ref); | |
7564 | new_temp = make_ssa_name (vec_dest, new_stmt); | |
7565 | gimple_assign_set_lhs (new_stmt, new_temp); | |
7566 | gimple_set_vdef (new_stmt, gimple_vdef (stmt)); | |
7567 | gimple_set_vuse (new_stmt, gimple_vuse (stmt)); | |
7568 | vect_finish_stmt_generation (stmt, new_stmt, gsi); | |
7569 | msq = new_temp; | |
7570 | ||
d88981fc | 7571 | bump = size_binop (MULT_EXPR, vs, |
7b7b1813 | 7572 | TYPE_SIZE_UNIT (elem_type)); |
d88981fc | 7573 | bump = size_binop (MINUS_EXPR, bump, size_one_node); |
272c6793 | 7574 | ptr = bump_vector_ptr (dataref_ptr, NULL, gsi, stmt, bump); |
0d0e4a03 JJ |
7575 | new_stmt = gimple_build_assign |
7576 | (NULL_TREE, BIT_AND_EXPR, ptr, | |
272c6793 RS |
7577 | build_int_cst |
7578 | (TREE_TYPE (ptr), | |
7579 | -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype))); | |
aed93b23 | 7580 | ptr = copy_ssa_name (ptr, new_stmt); |
272c6793 RS |
7581 | gimple_assign_set_lhs (new_stmt, ptr); |
7582 | vect_finish_stmt_generation (stmt, new_stmt, gsi); | |
7583 | data_ref | |
7584 | = build2 (MEM_REF, vectype, ptr, | |
44fc7854 | 7585 | build_int_cst (ref_type, 0)); |
272c6793 | 7586 | break; |
267d3070 | 7587 | } |
272c6793 | 7588 | case dr_explicit_realign_optimized: |
aed93b23 RB |
7589 | if (TREE_CODE (dataref_ptr) == SSA_NAME) |
7590 | new_temp = copy_ssa_name (dataref_ptr); | |
7591 | else | |
7592 | new_temp = make_ssa_name (TREE_TYPE (dataref_ptr)); | |
0d0e4a03 JJ |
7593 | new_stmt = gimple_build_assign |
7594 | (new_temp, BIT_AND_EXPR, dataref_ptr, | |
272c6793 RS |
7595 | build_int_cst |
7596 | (TREE_TYPE (dataref_ptr), | |
7597 | -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype))); | |
272c6793 RS |
7598 | vect_finish_stmt_generation (stmt, new_stmt, gsi); |
7599 | data_ref | |
7600 | = build2 (MEM_REF, vectype, new_temp, | |
44fc7854 | 7601 | build_int_cst (ref_type, 0)); |
272c6793 RS |
7602 | break; |
7603 | default: | |
7604 | gcc_unreachable (); | |
7605 | } | |
ebfd146a | 7606 | vec_dest = vect_create_destination_var (scalar_dest, vectype); |
272c6793 | 7607 | new_stmt = gimple_build_assign (vec_dest, data_ref); |
ebfd146a IR |
7608 | new_temp = make_ssa_name (vec_dest, new_stmt); |
7609 | gimple_assign_set_lhs (new_stmt, new_temp); | |
7610 | vect_finish_stmt_generation (stmt, new_stmt, gsi); | |
7611 | ||
272c6793 RS |
7612 | /* 3. Handle explicit realignment if necessary/supported. |
7613 | Create in loop: | |
7614 | vec_dest = realign_load (msq, lsq, realignment_token) */ | |
7615 | if (alignment_support_scheme == dr_explicit_realign_optimized | |
7616 | || alignment_support_scheme == dr_explicit_realign) | |
ebfd146a | 7617 | { |
272c6793 RS |
7618 | lsq = gimple_assign_lhs (new_stmt); |
7619 | if (!realignment_token) | |
7620 | realignment_token = dataref_ptr; | |
7621 | vec_dest = vect_create_destination_var (scalar_dest, vectype); | |
0d0e4a03 JJ |
7622 | new_stmt = gimple_build_assign (vec_dest, REALIGN_LOAD_EXPR, |
7623 | msq, lsq, realignment_token); | |
272c6793 RS |
7624 | new_temp = make_ssa_name (vec_dest, new_stmt); |
7625 | gimple_assign_set_lhs (new_stmt, new_temp); | |
7626 | vect_finish_stmt_generation (stmt, new_stmt, gsi); | |
7627 | ||
7628 | if (alignment_support_scheme == dr_explicit_realign_optimized) | |
7629 | { | |
7630 | gcc_assert (phi); | |
7631 | if (i == vec_num - 1 && j == ncopies - 1) | |
7632 | add_phi_arg (phi, lsq, | |
7633 | loop_latch_edge (containing_loop), | |
9e227d60 | 7634 | UNKNOWN_LOCATION); |
272c6793 RS |
7635 | msq = lsq; |
7636 | } | |
ebfd146a | 7637 | } |
ebfd146a | 7638 | |
59fd17e3 RB |
7639 | /* 4. Handle invariant-load. */ |
7640 | if (inv_p && !bb_vinfo) | |
7641 | { | |
59fd17e3 | 7642 | gcc_assert (!grouped_load); |
d1417442 JJ |
7643 | /* If we have versioned for aliasing or the loop doesn't |
7644 | have any data dependencies that would preclude this, | |
7645 | then we are sure this is a loop invariant load and | |
7646 | thus we can insert it on the preheader edge. */ | |
7647 | if (LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo) | |
7648 | && !nested_in_vect_loop | |
6b916b36 | 7649 | && hoist_defs_of_uses (stmt, loop)) |
a0e35eb0 RB |
7650 | { |
7651 | if (dump_enabled_p ()) | |
7652 | { | |
7653 | dump_printf_loc (MSG_NOTE, vect_location, | |
7654 | "hoisting out of the vectorized " | |
7655 | "loop: "); | |
7656 | dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0); | |
a0e35eb0 | 7657 | } |
b731b390 | 7658 | tree tem = copy_ssa_name (scalar_dest); |
a0e35eb0 RB |
7659 | gsi_insert_on_edge_immediate |
7660 | (loop_preheader_edge (loop), | |
7661 | gimple_build_assign (tem, | |
7662 | unshare_expr | |
7663 | (gimple_assign_rhs1 (stmt)))); | |
7664 | new_temp = vect_init_vector (stmt, tem, vectype, NULL); | |
34cd48e5 RB |
7665 | new_stmt = SSA_NAME_DEF_STMT (new_temp); |
7666 | set_vinfo_for_stmt (new_stmt, | |
7667 | new_stmt_vec_info (new_stmt, vinfo)); | |
a0e35eb0 RB |
7668 | } |
7669 | else | |
7670 | { | |
7671 | gimple_stmt_iterator gsi2 = *gsi; | |
7672 | gsi_next (&gsi2); | |
7673 | new_temp = vect_init_vector (stmt, scalar_dest, | |
7674 | vectype, &gsi2); | |
34cd48e5 | 7675 | new_stmt = SSA_NAME_DEF_STMT (new_temp); |
a0e35eb0 | 7676 | } |
59fd17e3 RB |
7677 | } |
7678 | ||
62da9e14 | 7679 | if (memory_access_type == VMAT_CONTIGUOUS_REVERSE) |
272c6793 | 7680 | { |
aec7ae7d JJ |
7681 | tree perm_mask = perm_mask_for_reverse (vectype); |
7682 | new_temp = permute_vec_elements (new_temp, new_temp, | |
7683 | perm_mask, stmt, gsi); | |
ebfd146a IR |
7684 | new_stmt = SSA_NAME_DEF_STMT (new_temp); |
7685 | } | |
267d3070 | 7686 | |
272c6793 | 7687 | /* Collect vector loads and later create their permutation in |
0d0293ac MM |
7688 | vect_transform_grouped_load (). */ |
7689 | if (grouped_load || slp_perm) | |
9771b263 | 7690 | dr_chain.quick_push (new_temp); |
267d3070 | 7691 | |
272c6793 RS |
7692 | /* Store vector loads in the corresponding SLP_NODE. */ |
7693 | if (slp && !slp_perm) | |
9771b263 | 7694 | SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt); |
b267968e RB |
7695 | |
7696 | /* With SLP permutation we load the gaps as well, without | |
7697 | we need to skip the gaps after we manage to fully load | |
7698 | all elements. group_gap_adj is GROUP_SIZE here. */ | |
7699 | group_elt += nunits; | |
7700 | if (group_gap_adj != 0 && ! slp_perm | |
7701 | && group_elt == group_size - group_gap_adj) | |
7702 | { | |
7703 | bool ovf; | |
7704 | tree bump | |
7705 | = wide_int_to_tree (sizetype, | |
7706 | wi::smul (TYPE_SIZE_UNIT (elem_type), | |
7707 | group_gap_adj, &ovf)); | |
7708 | dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, | |
7709 | stmt, bump); | |
7710 | group_elt = 0; | |
7711 | } | |
272c6793 | 7712 | } |
9b999e8c RB |
7713 | /* Bump the vector pointer to account for a gap or for excess |
7714 | elements loaded for a permuted SLP load. */ | |
b267968e | 7715 | if (group_gap_adj != 0 && slp_perm) |
a64b9c26 | 7716 | { |
9b999e8c RB |
7717 | bool ovf; |
7718 | tree bump | |
7719 | = wide_int_to_tree (sizetype, | |
7720 | wi::smul (TYPE_SIZE_UNIT (elem_type), | |
7721 | group_gap_adj, &ovf)); | |
a64b9c26 RB |
7722 | dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, |
7723 | stmt, bump); | |
7724 | } | |
ebfd146a IR |
7725 | } |
7726 | ||
7727 | if (slp && !slp_perm) | |
7728 | continue; | |
7729 | ||
7730 | if (slp_perm) | |
7731 | { | |
29afecdf | 7732 | unsigned n_perms; |
01d8bf07 | 7733 | if (!vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf, |
29afecdf RB |
7734 | slp_node_instance, false, |
7735 | &n_perms)) | |
ebfd146a | 7736 | { |
9771b263 | 7737 | dr_chain.release (); |
ebfd146a IR |
7738 | return false; |
7739 | } | |
7740 | } | |
7741 | else | |
7742 | { | |
0d0293ac | 7743 | if (grouped_load) |
ebfd146a | 7744 | { |
2de001ee | 7745 | if (memory_access_type != VMAT_LOAD_STORE_LANES) |
0d0293ac | 7746 | vect_transform_grouped_load (stmt, dr_chain, group_size, gsi); |
ebfd146a | 7747 | *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info); |
ebfd146a IR |
7748 | } |
7749 | else | |
7750 | { | |
7751 | if (j == 0) | |
7752 | STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt; | |
7753 | else | |
7754 | STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt; | |
7755 | prev_stmt_info = vinfo_for_stmt (new_stmt); | |
7756 | } | |
7757 | } | |
9771b263 | 7758 | dr_chain.release (); |
ebfd146a IR |
7759 | } |
7760 | ||
ebfd146a IR |
7761 | return true; |
7762 | } | |
7763 | ||
7764 | /* Function vect_is_simple_cond. | |
b8698a0f | 7765 | |
ebfd146a IR |
7766 | Input: |
7767 | LOOP - the loop that is being vectorized. | |
7768 | COND - Condition that is checked for simple use. | |
7769 | ||
e9e1d143 RG |
7770 | Output: |
7771 | *COMP_VECTYPE - the vector type for the comparison. | |
4fc5ebf1 | 7772 | *DTS - The def types for the arguments of the comparison |
e9e1d143 | 7773 | |
ebfd146a IR |
7774 | Returns whether a COND can be vectorized. Checks whether |
7775 | condition operands are supportable using vec_is_simple_use. */ | |
7776 | ||
87aab9b2 | 7777 | static bool |
4fc5ebf1 JG |
7778 | vect_is_simple_cond (tree cond, vec_info *vinfo, |
7779 | tree *comp_vectype, enum vect_def_type *dts) | |
ebfd146a IR |
7780 | { |
7781 | tree lhs, rhs; | |
e9e1d143 | 7782 | tree vectype1 = NULL_TREE, vectype2 = NULL_TREE; |
ebfd146a | 7783 | |
a414c77f IE |
7784 | /* Mask case. */ |
7785 | if (TREE_CODE (cond) == SSA_NAME | |
2568d8a1 | 7786 | && VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (cond))) |
a414c77f IE |
7787 | { |
7788 | gimple *lhs_def_stmt = SSA_NAME_DEF_STMT (cond); | |
7789 | if (!vect_is_simple_use (cond, vinfo, &lhs_def_stmt, | |
4fc5ebf1 | 7790 | &dts[0], comp_vectype) |
a414c77f IE |
7791 | || !*comp_vectype |
7792 | || !VECTOR_BOOLEAN_TYPE_P (*comp_vectype)) | |
7793 | return false; | |
7794 | return true; | |
7795 | } | |
7796 | ||
ebfd146a IR |
7797 | if (!COMPARISON_CLASS_P (cond)) |
7798 | return false; | |
7799 | ||
7800 | lhs = TREE_OPERAND (cond, 0); | |
7801 | rhs = TREE_OPERAND (cond, 1); | |
7802 | ||
7803 | if (TREE_CODE (lhs) == SSA_NAME) | |
7804 | { | |
355fe088 | 7805 | gimple *lhs_def_stmt = SSA_NAME_DEF_STMT (lhs); |
4fc5ebf1 | 7806 | if (!vect_is_simple_use (lhs, vinfo, &lhs_def_stmt, &dts[0], &vectype1)) |
ebfd146a IR |
7807 | return false; |
7808 | } | |
4fc5ebf1 JG |
7809 | else if (TREE_CODE (lhs) == INTEGER_CST || TREE_CODE (lhs) == REAL_CST |
7810 | || TREE_CODE (lhs) == FIXED_CST) | |
7811 | dts[0] = vect_constant_def; | |
7812 | else | |
ebfd146a IR |
7813 | return false; |
7814 | ||
7815 | if (TREE_CODE (rhs) == SSA_NAME) | |
7816 | { | |
355fe088 | 7817 | gimple *rhs_def_stmt = SSA_NAME_DEF_STMT (rhs); |
4fc5ebf1 | 7818 | if (!vect_is_simple_use (rhs, vinfo, &rhs_def_stmt, &dts[1], &vectype2)) |
ebfd146a IR |
7819 | return false; |
7820 | } | |
4fc5ebf1 JG |
7821 | else if (TREE_CODE (rhs) == INTEGER_CST || TREE_CODE (rhs) == REAL_CST |
7822 | || TREE_CODE (rhs) == FIXED_CST) | |
7823 | dts[1] = vect_constant_def; | |
7824 | else | |
ebfd146a IR |
7825 | return false; |
7826 | ||
28b33016 IE |
7827 | if (vectype1 && vectype2 |
7828 | && TYPE_VECTOR_SUBPARTS (vectype1) != TYPE_VECTOR_SUBPARTS (vectype2)) | |
7829 | return false; | |
7830 | ||
e9e1d143 | 7831 | *comp_vectype = vectype1 ? vectype1 : vectype2; |
ebfd146a IR |
7832 | return true; |
7833 | } | |
7834 | ||
7835 | /* vectorizable_condition. | |
7836 | ||
b8698a0f L |
7837 | Check if STMT is conditional modify expression that can be vectorized. |
7838 | If VEC_STMT is also passed, vectorize the STMT: create a vectorized | |
7839 | stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT, and insert it | |
4bbe8262 IR |
7840 | at GSI. |
7841 | ||
7842 | When STMT is vectorized as nested cycle, REDUC_DEF is the vector variable | |
7843 | to be used at REDUC_INDEX (in then clause if REDUC_INDEX is 1, and in | |
0ad23163 | 7844 | else clause if it is 2). |
ebfd146a IR |
7845 | |
7846 | Return FALSE if not a vectorizable STMT, TRUE otherwise. */ | |
7847 | ||
4bbe8262 | 7848 | bool |
355fe088 TS |
7849 | vectorizable_condition (gimple *stmt, gimple_stmt_iterator *gsi, |
7850 | gimple **vec_stmt, tree reduc_def, int reduc_index, | |
f7e531cf | 7851 | slp_tree slp_node) |
ebfd146a IR |
7852 | { |
7853 | tree scalar_dest = NULL_TREE; | |
7854 | tree vec_dest = NULL_TREE; | |
01216d27 JJ |
7855 | tree cond_expr, cond_expr0 = NULL_TREE, cond_expr1 = NULL_TREE; |
7856 | tree then_clause, else_clause; | |
ebfd146a | 7857 | stmt_vec_info stmt_info = vinfo_for_stmt (stmt); |
df11cc78 | 7858 | tree comp_vectype = NULL_TREE; |
ff802fa1 IR |
7859 | tree vec_cond_lhs = NULL_TREE, vec_cond_rhs = NULL_TREE; |
7860 | tree vec_then_clause = NULL_TREE, vec_else_clause = NULL_TREE; | |
5958f9e2 | 7861 | tree vec_compare; |
ebfd146a IR |
7862 | tree new_temp; |
7863 | loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); | |
4fc5ebf1 JG |
7864 | enum vect_def_type dts[4] |
7865 | = {vect_unknown_def_type, vect_unknown_def_type, | |
7866 | vect_unknown_def_type, vect_unknown_def_type}; | |
7867 | int ndts = 4; | |
f7e531cf | 7868 | int ncopies; |
01216d27 | 7869 | enum tree_code code, cond_code, bitop1 = NOP_EXPR, bitop2 = NOP_EXPR; |
a855b1b1 | 7870 | stmt_vec_info prev_stmt_info = NULL; |
f7e531cf IR |
7871 | int i, j; |
7872 | bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info); | |
6e1aa848 DN |
7873 | vec<tree> vec_oprnds0 = vNULL; |
7874 | vec<tree> vec_oprnds1 = vNULL; | |
7875 | vec<tree> vec_oprnds2 = vNULL; | |
7876 | vec<tree> vec_oprnds3 = vNULL; | |
74946978 | 7877 | tree vec_cmp_type; |
a414c77f | 7878 | bool masked = false; |
b8698a0f | 7879 | |
f7e531cf IR |
7880 | if (reduc_index && STMT_SLP_TYPE (stmt_info)) |
7881 | return false; | |
7882 | ||
af29617a AH |
7883 | if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == TREE_CODE_REDUCTION) |
7884 | { | |
7885 | if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo) | |
7886 | return false; | |
ebfd146a | 7887 | |
af29617a AH |
7888 | if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def |
7889 | && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle | |
7890 | && reduc_def)) | |
7891 | return false; | |
ebfd146a | 7892 | |
af29617a AH |
7893 | /* FORNOW: not yet supported. */ |
7894 | if (STMT_VINFO_LIVE_P (stmt_info)) | |
7895 | { | |
7896 | if (dump_enabled_p ()) | |
7897 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
7898 | "value used after loop.\n"); | |
7899 | return false; | |
7900 | } | |
ebfd146a IR |
7901 | } |
7902 | ||
7903 | /* Is vectorizable conditional operation? */ | |
7904 | if (!is_gimple_assign (stmt)) | |
7905 | return false; | |
7906 | ||
7907 | code = gimple_assign_rhs_code (stmt); | |
7908 | ||
7909 | if (code != COND_EXPR) | |
7910 | return false; | |
7911 | ||
465c8c19 JJ |
7912 | tree vectype = STMT_VINFO_VECTYPE (stmt_info); |
7913 | int nunits = TYPE_VECTOR_SUBPARTS (vectype); | |
2947d3b2 | 7914 | tree vectype1 = NULL_TREE, vectype2 = NULL_TREE; |
465c8c19 | 7915 | |
fce57248 | 7916 | if (slp_node) |
465c8c19 JJ |
7917 | ncopies = 1; |
7918 | else | |
7919 | ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits; | |
7920 | ||
7921 | gcc_assert (ncopies >= 1); | |
7922 | if (reduc_index && ncopies > 1) | |
7923 | return false; /* FORNOW */ | |
7924 | ||
4e71066d RG |
7925 | cond_expr = gimple_assign_rhs1 (stmt); |
7926 | then_clause = gimple_assign_rhs2 (stmt); | |
7927 | else_clause = gimple_assign_rhs3 (stmt); | |
ebfd146a | 7928 | |
4fc5ebf1 JG |
7929 | if (!vect_is_simple_cond (cond_expr, stmt_info->vinfo, |
7930 | &comp_vectype, &dts[0]) | |
e9e1d143 | 7931 | || !comp_vectype) |
ebfd146a IR |
7932 | return false; |
7933 | ||
81c40241 | 7934 | gimple *def_stmt; |
4fc5ebf1 | 7935 | if (!vect_is_simple_use (then_clause, stmt_info->vinfo, &def_stmt, &dts[2], |
2947d3b2 IE |
7936 | &vectype1)) |
7937 | return false; | |
4fc5ebf1 | 7938 | if (!vect_is_simple_use (else_clause, stmt_info->vinfo, &def_stmt, &dts[3], |
2947d3b2 | 7939 | &vectype2)) |
ebfd146a | 7940 | return false; |
2947d3b2 IE |
7941 | |
7942 | if (vectype1 && !useless_type_conversion_p (vectype, vectype1)) | |
7943 | return false; | |
7944 | ||
7945 | if (vectype2 && !useless_type_conversion_p (vectype, vectype2)) | |
ebfd146a IR |
7946 | return false; |
7947 | ||
28b33016 IE |
7948 | masked = !COMPARISON_CLASS_P (cond_expr); |
7949 | vec_cmp_type = build_same_sized_truth_vector_type (comp_vectype); | |
7950 | ||
74946978 MP |
7951 | if (vec_cmp_type == NULL_TREE) |
7952 | return false; | |
784fb9b3 | 7953 | |
01216d27 JJ |
7954 | cond_code = TREE_CODE (cond_expr); |
7955 | if (!masked) | |
7956 | { | |
7957 | cond_expr0 = TREE_OPERAND (cond_expr, 0); | |
7958 | cond_expr1 = TREE_OPERAND (cond_expr, 1); | |
7959 | } | |
7960 | ||
7961 | if (!masked && VECTOR_BOOLEAN_TYPE_P (comp_vectype)) | |
7962 | { | |
7963 | /* Boolean values may have another representation in vectors | |
7964 | and therefore we prefer bit operations over comparison for | |
7965 | them (which also works for scalar masks). We store opcodes | |
7966 | to use in bitop1 and bitop2. Statement is vectorized as | |
7967 | BITOP2 (rhs1 BITOP1 rhs2) or rhs1 BITOP2 (BITOP1 rhs2) | |
7968 | depending on bitop1 and bitop2 arity. */ | |
7969 | switch (cond_code) | |
7970 | { | |
7971 | case GT_EXPR: | |
7972 | bitop1 = BIT_NOT_EXPR; | |
7973 | bitop2 = BIT_AND_EXPR; | |
7974 | break; | |
7975 | case GE_EXPR: | |
7976 | bitop1 = BIT_NOT_EXPR; | |
7977 | bitop2 = BIT_IOR_EXPR; | |
7978 | break; | |
7979 | case LT_EXPR: | |
7980 | bitop1 = BIT_NOT_EXPR; | |
7981 | bitop2 = BIT_AND_EXPR; | |
7982 | std::swap (cond_expr0, cond_expr1); | |
7983 | break; | |
7984 | case LE_EXPR: | |
7985 | bitop1 = BIT_NOT_EXPR; | |
7986 | bitop2 = BIT_IOR_EXPR; | |
7987 | std::swap (cond_expr0, cond_expr1); | |
7988 | break; | |
7989 | case NE_EXPR: | |
7990 | bitop1 = BIT_XOR_EXPR; | |
7991 | break; | |
7992 | case EQ_EXPR: | |
7993 | bitop1 = BIT_XOR_EXPR; | |
7994 | bitop2 = BIT_NOT_EXPR; | |
7995 | break; | |
7996 | default: | |
7997 | return false; | |
7998 | } | |
7999 | cond_code = SSA_NAME; | |
8000 | } | |
8001 | ||
b8698a0f | 8002 | if (!vec_stmt) |
ebfd146a IR |
8003 | { |
8004 | STMT_VINFO_TYPE (stmt_info) = condition_vec_info_type; | |
01216d27 JJ |
8005 | if (bitop1 != NOP_EXPR) |
8006 | { | |
8007 | machine_mode mode = TYPE_MODE (comp_vectype); | |
8008 | optab optab; | |
8009 | ||
8010 | optab = optab_for_tree_code (bitop1, comp_vectype, optab_default); | |
8011 | if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing) | |
8012 | return false; | |
8013 | ||
8014 | if (bitop2 != NOP_EXPR) | |
8015 | { | |
8016 | optab = optab_for_tree_code (bitop2, comp_vectype, | |
8017 | optab_default); | |
8018 | if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing) | |
8019 | return false; | |
8020 | } | |
8021 | } | |
4fc5ebf1 JG |
8022 | if (expand_vec_cond_expr_p (vectype, comp_vectype, |
8023 | cond_code)) | |
8024 | { | |
8025 | vect_model_simple_cost (stmt_info, ncopies, dts, ndts, NULL, NULL); | |
8026 | return true; | |
8027 | } | |
8028 | return false; | |
ebfd146a IR |
8029 | } |
8030 | ||
f7e531cf IR |
8031 | /* Transform. */ |
8032 | ||
8033 | if (!slp_node) | |
8034 | { | |
9771b263 DN |
8035 | vec_oprnds0.create (1); |
8036 | vec_oprnds1.create (1); | |
8037 | vec_oprnds2.create (1); | |
8038 | vec_oprnds3.create (1); | |
f7e531cf | 8039 | } |
ebfd146a IR |
8040 | |
8041 | /* Handle def. */ | |
8042 | scalar_dest = gimple_assign_lhs (stmt); | |
8043 | vec_dest = vect_create_destination_var (scalar_dest, vectype); | |
8044 | ||
8045 | /* Handle cond expr. */ | |
a855b1b1 MM |
8046 | for (j = 0; j < ncopies; j++) |
8047 | { | |
538dd0b7 | 8048 | gassign *new_stmt = NULL; |
a855b1b1 MM |
8049 | if (j == 0) |
8050 | { | |
f7e531cf IR |
8051 | if (slp_node) |
8052 | { | |
00f96dc9 TS |
8053 | auto_vec<tree, 4> ops; |
8054 | auto_vec<vec<tree>, 4> vec_defs; | |
9771b263 | 8055 | |
a414c77f | 8056 | if (masked) |
01216d27 | 8057 | ops.safe_push (cond_expr); |
a414c77f IE |
8058 | else |
8059 | { | |
01216d27 JJ |
8060 | ops.safe_push (cond_expr0); |
8061 | ops.safe_push (cond_expr1); | |
a414c77f | 8062 | } |
9771b263 DN |
8063 | ops.safe_push (then_clause); |
8064 | ops.safe_push (else_clause); | |
306b0c92 | 8065 | vect_get_slp_defs (ops, slp_node, &vec_defs); |
37b5ec8f JJ |
8066 | vec_oprnds3 = vec_defs.pop (); |
8067 | vec_oprnds2 = vec_defs.pop (); | |
a414c77f IE |
8068 | if (!masked) |
8069 | vec_oprnds1 = vec_defs.pop (); | |
37b5ec8f | 8070 | vec_oprnds0 = vec_defs.pop (); |
f7e531cf IR |
8071 | } |
8072 | else | |
8073 | { | |
355fe088 | 8074 | gimple *gtemp; |
a414c77f IE |
8075 | if (masked) |
8076 | { | |
8077 | vec_cond_lhs | |
8078 | = vect_get_vec_def_for_operand (cond_expr, stmt, | |
8079 | comp_vectype); | |
8080 | vect_is_simple_use (cond_expr, stmt_info->vinfo, | |
8081 | >emp, &dts[0]); | |
8082 | } | |
8083 | else | |
8084 | { | |
01216d27 JJ |
8085 | vec_cond_lhs |
8086 | = vect_get_vec_def_for_operand (cond_expr0, | |
8087 | stmt, comp_vectype); | |
8088 | vect_is_simple_use (cond_expr0, loop_vinfo, >emp, &dts[0]); | |
8089 | ||
8090 | vec_cond_rhs | |
8091 | = vect_get_vec_def_for_operand (cond_expr1, | |
8092 | stmt, comp_vectype); | |
8093 | vect_is_simple_use (cond_expr1, loop_vinfo, >emp, &dts[1]); | |
a414c77f | 8094 | } |
f7e531cf IR |
8095 | if (reduc_index == 1) |
8096 | vec_then_clause = reduc_def; | |
8097 | else | |
8098 | { | |
8099 | vec_then_clause = vect_get_vec_def_for_operand (then_clause, | |
81c40241 RB |
8100 | stmt); |
8101 | vect_is_simple_use (then_clause, loop_vinfo, | |
8102 | >emp, &dts[2]); | |
f7e531cf IR |
8103 | } |
8104 | if (reduc_index == 2) | |
8105 | vec_else_clause = reduc_def; | |
8106 | else | |
8107 | { | |
8108 | vec_else_clause = vect_get_vec_def_for_operand (else_clause, | |
81c40241 RB |
8109 | stmt); |
8110 | vect_is_simple_use (else_clause, loop_vinfo, >emp, &dts[3]); | |
f7e531cf | 8111 | } |
a855b1b1 MM |
8112 | } |
8113 | } | |
8114 | else | |
8115 | { | |
a414c77f IE |
8116 | vec_cond_lhs |
8117 | = vect_get_vec_def_for_stmt_copy (dts[0], | |
8118 | vec_oprnds0.pop ()); | |
8119 | if (!masked) | |
8120 | vec_cond_rhs | |
8121 | = vect_get_vec_def_for_stmt_copy (dts[1], | |
8122 | vec_oprnds1.pop ()); | |
8123 | ||
a855b1b1 | 8124 | vec_then_clause = vect_get_vec_def_for_stmt_copy (dts[2], |
9771b263 | 8125 | vec_oprnds2.pop ()); |
a855b1b1 | 8126 | vec_else_clause = vect_get_vec_def_for_stmt_copy (dts[3], |
9771b263 | 8127 | vec_oprnds3.pop ()); |
f7e531cf IR |
8128 | } |
8129 | ||
8130 | if (!slp_node) | |
8131 | { | |
9771b263 | 8132 | vec_oprnds0.quick_push (vec_cond_lhs); |
a414c77f IE |
8133 | if (!masked) |
8134 | vec_oprnds1.quick_push (vec_cond_rhs); | |
9771b263 DN |
8135 | vec_oprnds2.quick_push (vec_then_clause); |
8136 | vec_oprnds3.quick_push (vec_else_clause); | |
a855b1b1 MM |
8137 | } |
8138 | ||
9dc3f7de | 8139 | /* Arguments are ready. Create the new vector stmt. */ |
9771b263 | 8140 | FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_cond_lhs) |
f7e531cf | 8141 | { |
9771b263 DN |
8142 | vec_then_clause = vec_oprnds2[i]; |
8143 | vec_else_clause = vec_oprnds3[i]; | |
a855b1b1 | 8144 | |
a414c77f IE |
8145 | if (masked) |
8146 | vec_compare = vec_cond_lhs; | |
8147 | else | |
8148 | { | |
8149 | vec_cond_rhs = vec_oprnds1[i]; | |
01216d27 JJ |
8150 | if (bitop1 == NOP_EXPR) |
8151 | vec_compare = build2 (cond_code, vec_cmp_type, | |
8152 | vec_cond_lhs, vec_cond_rhs); | |
8153 | else | |
8154 | { | |
8155 | new_temp = make_ssa_name (vec_cmp_type); | |
8156 | if (bitop1 == BIT_NOT_EXPR) | |
8157 | new_stmt = gimple_build_assign (new_temp, bitop1, | |
8158 | vec_cond_rhs); | |
8159 | else | |
8160 | new_stmt | |
8161 | = gimple_build_assign (new_temp, bitop1, vec_cond_lhs, | |
8162 | vec_cond_rhs); | |
8163 | vect_finish_stmt_generation (stmt, new_stmt, gsi); | |
8164 | if (bitop2 == NOP_EXPR) | |
8165 | vec_compare = new_temp; | |
8166 | else if (bitop2 == BIT_NOT_EXPR) | |
8167 | { | |
8168 | /* Instead of doing ~x ? y : z do x ? z : y. */ | |
8169 | vec_compare = new_temp; | |
8170 | std::swap (vec_then_clause, vec_else_clause); | |
8171 | } | |
8172 | else | |
8173 | { | |
8174 | vec_compare = make_ssa_name (vec_cmp_type); | |
8175 | new_stmt | |
8176 | = gimple_build_assign (vec_compare, bitop2, | |
8177 | vec_cond_lhs, new_temp); | |
8178 | vect_finish_stmt_generation (stmt, new_stmt, gsi); | |
8179 | } | |
8180 | } | |
a414c77f | 8181 | } |
5958f9e2 JJ |
8182 | new_temp = make_ssa_name (vec_dest); |
8183 | new_stmt = gimple_build_assign (new_temp, VEC_COND_EXPR, | |
8184 | vec_compare, vec_then_clause, | |
8185 | vec_else_clause); | |
f7e531cf IR |
8186 | vect_finish_stmt_generation (stmt, new_stmt, gsi); |
8187 | if (slp_node) | |
9771b263 | 8188 | SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt); |
f7e531cf IR |
8189 | } |
8190 | ||
8191 | if (slp_node) | |
8192 | continue; | |
8193 | ||
8194 | if (j == 0) | |
8195 | STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt; | |
8196 | else | |
8197 | STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt; | |
8198 | ||
8199 | prev_stmt_info = vinfo_for_stmt (new_stmt); | |
a855b1b1 | 8200 | } |
b8698a0f | 8201 | |
9771b263 DN |
8202 | vec_oprnds0.release (); |
8203 | vec_oprnds1.release (); | |
8204 | vec_oprnds2.release (); | |
8205 | vec_oprnds3.release (); | |
f7e531cf | 8206 | |
ebfd146a IR |
8207 | return true; |
8208 | } | |
8209 | ||
42fd8198 IE |
8210 | /* vectorizable_comparison. |
8211 | ||
8212 | Check if STMT is comparison expression that can be vectorized. | |
8213 | If VEC_STMT is also passed, vectorize the STMT: create a vectorized | |
8214 | comparison, put it in VEC_STMT, and insert it at GSI. | |
8215 | ||
8216 | Return FALSE if not a vectorizable STMT, TRUE otherwise. */ | |
8217 | ||
fce57248 | 8218 | static bool |
42fd8198 IE |
8219 | vectorizable_comparison (gimple *stmt, gimple_stmt_iterator *gsi, |
8220 | gimple **vec_stmt, tree reduc_def, | |
8221 | slp_tree slp_node) | |
8222 | { | |
8223 | tree lhs, rhs1, rhs2; | |
8224 | stmt_vec_info stmt_info = vinfo_for_stmt (stmt); | |
8225 | tree vectype1 = NULL_TREE, vectype2 = NULL_TREE; | |
8226 | tree vectype = STMT_VINFO_VECTYPE (stmt_info); | |
8227 | tree vec_rhs1 = NULL_TREE, vec_rhs2 = NULL_TREE; | |
8228 | tree new_temp; | |
8229 | loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); | |
8230 | enum vect_def_type dts[2] = {vect_unknown_def_type, vect_unknown_def_type}; | |
4fc5ebf1 | 8231 | int ndts = 2; |
42fd8198 IE |
8232 | unsigned nunits; |
8233 | int ncopies; | |
49e76ff1 | 8234 | enum tree_code code, bitop1 = NOP_EXPR, bitop2 = NOP_EXPR; |
42fd8198 IE |
8235 | stmt_vec_info prev_stmt_info = NULL; |
8236 | int i, j; | |
8237 | bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info); | |
8238 | vec<tree> vec_oprnds0 = vNULL; | |
8239 | vec<tree> vec_oprnds1 = vNULL; | |
8240 | gimple *def_stmt; | |
8241 | tree mask_type; | |
8242 | tree mask; | |
8243 | ||
c245362b IE |
8244 | if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo) |
8245 | return false; | |
8246 | ||
30480bcd | 8247 | if (!vectype || !VECTOR_BOOLEAN_TYPE_P (vectype)) |
42fd8198 IE |
8248 | return false; |
8249 | ||
8250 | mask_type = vectype; | |
8251 | nunits = TYPE_VECTOR_SUBPARTS (vectype); | |
8252 | ||
fce57248 | 8253 | if (slp_node) |
42fd8198 IE |
8254 | ncopies = 1; |
8255 | else | |
8256 | ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits; | |
8257 | ||
8258 | gcc_assert (ncopies >= 1); | |
42fd8198 IE |
8259 | if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def |
8260 | && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle | |
8261 | && reduc_def)) | |
8262 | return false; | |
8263 | ||
8264 | if (STMT_VINFO_LIVE_P (stmt_info)) | |
8265 | { | |
8266 | if (dump_enabled_p ()) | |
8267 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
8268 | "value used after loop.\n"); | |
8269 | return false; | |
8270 | } | |
8271 | ||
8272 | if (!is_gimple_assign (stmt)) | |
8273 | return false; | |
8274 | ||
8275 | code = gimple_assign_rhs_code (stmt); | |
8276 | ||
8277 | if (TREE_CODE_CLASS (code) != tcc_comparison) | |
8278 | return false; | |
8279 | ||
8280 | rhs1 = gimple_assign_rhs1 (stmt); | |
8281 | rhs2 = gimple_assign_rhs2 (stmt); | |
8282 | ||
8283 | if (!vect_is_simple_use (rhs1, stmt_info->vinfo, &def_stmt, | |
8284 | &dts[0], &vectype1)) | |
8285 | return false; | |
8286 | ||
8287 | if (!vect_is_simple_use (rhs2, stmt_info->vinfo, &def_stmt, | |
8288 | &dts[1], &vectype2)) | |
8289 | return false; | |
8290 | ||
8291 | if (vectype1 && vectype2 | |
8292 | && TYPE_VECTOR_SUBPARTS (vectype1) != TYPE_VECTOR_SUBPARTS (vectype2)) | |
8293 | return false; | |
8294 | ||
8295 | vectype = vectype1 ? vectype1 : vectype2; | |
8296 | ||
8297 | /* Invariant comparison. */ | |
8298 | if (!vectype) | |
8299 | { | |
69a9a66f RB |
8300 | vectype = get_vectype_for_scalar_type (TREE_TYPE (rhs1)); |
8301 | if (TYPE_VECTOR_SUBPARTS (vectype) != nunits) | |
42fd8198 IE |
8302 | return false; |
8303 | } | |
8304 | else if (nunits != TYPE_VECTOR_SUBPARTS (vectype)) | |
8305 | return false; | |
8306 | ||
49e76ff1 IE |
8307 | /* Can't compare mask and non-mask types. */ |
8308 | if (vectype1 && vectype2 | |
8309 | && (VECTOR_BOOLEAN_TYPE_P (vectype1) ^ VECTOR_BOOLEAN_TYPE_P (vectype2))) | |
8310 | return false; | |
8311 | ||
8312 | /* Boolean values may have another representation in vectors | |
8313 | and therefore we prefer bit operations over comparison for | |
8314 | them (which also works for scalar masks). We store opcodes | |
8315 | to use in bitop1 and bitop2. Statement is vectorized as | |
8316 | BITOP2 (rhs1 BITOP1 rhs2) or | |
8317 | rhs1 BITOP2 (BITOP1 rhs2) | |
8318 | depending on bitop1 and bitop2 arity. */ | |
8319 | if (VECTOR_BOOLEAN_TYPE_P (vectype)) | |
8320 | { | |
8321 | if (code == GT_EXPR) | |
8322 | { | |
8323 | bitop1 = BIT_NOT_EXPR; | |
8324 | bitop2 = BIT_AND_EXPR; | |
8325 | } | |
8326 | else if (code == GE_EXPR) | |
8327 | { | |
8328 | bitop1 = BIT_NOT_EXPR; | |
8329 | bitop2 = BIT_IOR_EXPR; | |
8330 | } | |
8331 | else if (code == LT_EXPR) | |
8332 | { | |
8333 | bitop1 = BIT_NOT_EXPR; | |
8334 | bitop2 = BIT_AND_EXPR; | |
8335 | std::swap (rhs1, rhs2); | |
264d951a | 8336 | std::swap (dts[0], dts[1]); |
49e76ff1 IE |
8337 | } |
8338 | else if (code == LE_EXPR) | |
8339 | { | |
8340 | bitop1 = BIT_NOT_EXPR; | |
8341 | bitop2 = BIT_IOR_EXPR; | |
8342 | std::swap (rhs1, rhs2); | |
264d951a | 8343 | std::swap (dts[0], dts[1]); |
49e76ff1 IE |
8344 | } |
8345 | else | |
8346 | { | |
8347 | bitop1 = BIT_XOR_EXPR; | |
8348 | if (code == EQ_EXPR) | |
8349 | bitop2 = BIT_NOT_EXPR; | |
8350 | } | |
8351 | } | |
8352 | ||
42fd8198 IE |
8353 | if (!vec_stmt) |
8354 | { | |
8355 | STMT_VINFO_TYPE (stmt_info) = comparison_vec_info_type; | |
49e76ff1 | 8356 | vect_model_simple_cost (stmt_info, ncopies * (1 + (bitop2 != NOP_EXPR)), |
4fc5ebf1 | 8357 | dts, ndts, NULL, NULL); |
49e76ff1 | 8358 | if (bitop1 == NOP_EXPR) |
96592eed | 8359 | return expand_vec_cmp_expr_p (vectype, mask_type, code); |
49e76ff1 IE |
8360 | else |
8361 | { | |
8362 | machine_mode mode = TYPE_MODE (vectype); | |
8363 | optab optab; | |
8364 | ||
8365 | optab = optab_for_tree_code (bitop1, vectype, optab_default); | |
8366 | if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing) | |
8367 | return false; | |
8368 | ||
8369 | if (bitop2 != NOP_EXPR) | |
8370 | { | |
8371 | optab = optab_for_tree_code (bitop2, vectype, optab_default); | |
8372 | if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing) | |
8373 | return false; | |
8374 | } | |
8375 | return true; | |
8376 | } | |
42fd8198 IE |
8377 | } |
8378 | ||
8379 | /* Transform. */ | |
8380 | if (!slp_node) | |
8381 | { | |
8382 | vec_oprnds0.create (1); | |
8383 | vec_oprnds1.create (1); | |
8384 | } | |
8385 | ||
8386 | /* Handle def. */ | |
8387 | lhs = gimple_assign_lhs (stmt); | |
8388 | mask = vect_create_destination_var (lhs, mask_type); | |
8389 | ||
8390 | /* Handle cmp expr. */ | |
8391 | for (j = 0; j < ncopies; j++) | |
8392 | { | |
8393 | gassign *new_stmt = NULL; | |
8394 | if (j == 0) | |
8395 | { | |
8396 | if (slp_node) | |
8397 | { | |
8398 | auto_vec<tree, 2> ops; | |
8399 | auto_vec<vec<tree>, 2> vec_defs; | |
8400 | ||
8401 | ops.safe_push (rhs1); | |
8402 | ops.safe_push (rhs2); | |
306b0c92 | 8403 | vect_get_slp_defs (ops, slp_node, &vec_defs); |
42fd8198 IE |
8404 | vec_oprnds1 = vec_defs.pop (); |
8405 | vec_oprnds0 = vec_defs.pop (); | |
8406 | } | |
8407 | else | |
8408 | { | |
e4af0bc4 IE |
8409 | vec_rhs1 = vect_get_vec_def_for_operand (rhs1, stmt, vectype); |
8410 | vec_rhs2 = vect_get_vec_def_for_operand (rhs2, stmt, vectype); | |
42fd8198 IE |
8411 | } |
8412 | } | |
8413 | else | |
8414 | { | |
8415 | vec_rhs1 = vect_get_vec_def_for_stmt_copy (dts[0], | |
8416 | vec_oprnds0.pop ()); | |
8417 | vec_rhs2 = vect_get_vec_def_for_stmt_copy (dts[1], | |
8418 | vec_oprnds1.pop ()); | |
8419 | } | |
8420 | ||
8421 | if (!slp_node) | |
8422 | { | |
8423 | vec_oprnds0.quick_push (vec_rhs1); | |
8424 | vec_oprnds1.quick_push (vec_rhs2); | |
8425 | } | |
8426 | ||
8427 | /* Arguments are ready. Create the new vector stmt. */ | |
8428 | FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_rhs1) | |
8429 | { | |
8430 | vec_rhs2 = vec_oprnds1[i]; | |
8431 | ||
8432 | new_temp = make_ssa_name (mask); | |
49e76ff1 IE |
8433 | if (bitop1 == NOP_EXPR) |
8434 | { | |
8435 | new_stmt = gimple_build_assign (new_temp, code, | |
8436 | vec_rhs1, vec_rhs2); | |
8437 | vect_finish_stmt_generation (stmt, new_stmt, gsi); | |
8438 | } | |
8439 | else | |
8440 | { | |
8441 | if (bitop1 == BIT_NOT_EXPR) | |
8442 | new_stmt = gimple_build_assign (new_temp, bitop1, vec_rhs2); | |
8443 | else | |
8444 | new_stmt = gimple_build_assign (new_temp, bitop1, vec_rhs1, | |
8445 | vec_rhs2); | |
8446 | vect_finish_stmt_generation (stmt, new_stmt, gsi); | |
8447 | if (bitop2 != NOP_EXPR) | |
8448 | { | |
8449 | tree res = make_ssa_name (mask); | |
8450 | if (bitop2 == BIT_NOT_EXPR) | |
8451 | new_stmt = gimple_build_assign (res, bitop2, new_temp); | |
8452 | else | |
8453 | new_stmt = gimple_build_assign (res, bitop2, vec_rhs1, | |
8454 | new_temp); | |
8455 | vect_finish_stmt_generation (stmt, new_stmt, gsi); | |
8456 | } | |
8457 | } | |
42fd8198 IE |
8458 | if (slp_node) |
8459 | SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt); | |
8460 | } | |
8461 | ||
8462 | if (slp_node) | |
8463 | continue; | |
8464 | ||
8465 | if (j == 0) | |
8466 | STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt; | |
8467 | else | |
8468 | STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt; | |
8469 | ||
8470 | prev_stmt_info = vinfo_for_stmt (new_stmt); | |
8471 | } | |
8472 | ||
8473 | vec_oprnds0.release (); | |
8474 | vec_oprnds1.release (); | |
8475 | ||
8476 | return true; | |
8477 | } | |
ebfd146a | 8478 | |
8644a673 | 8479 | /* Make sure the statement is vectorizable. */ |
ebfd146a IR |
8480 | |
8481 | bool | |
891ad31c RB |
8482 | vect_analyze_stmt (gimple *stmt, bool *need_to_vectorize, slp_tree node, |
8483 | slp_instance node_instance) | |
ebfd146a | 8484 | { |
8644a673 | 8485 | stmt_vec_info stmt_info = vinfo_for_stmt (stmt); |
a70d6342 | 8486 | bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info); |
b8698a0f | 8487 | enum vect_relevant relevance = STMT_VINFO_RELEVANT (stmt_info); |
ebfd146a | 8488 | bool ok; |
355fe088 | 8489 | gimple *pattern_stmt; |
363477c0 | 8490 | gimple_seq pattern_def_seq; |
ebfd146a | 8491 | |
73fbfcad | 8492 | if (dump_enabled_p ()) |
ebfd146a | 8493 | { |
78c60e3d SS |
8494 | dump_printf_loc (MSG_NOTE, vect_location, "==> examining statement: "); |
8495 | dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0); | |
8644a673 | 8496 | } |
ebfd146a | 8497 | |
1825a1f3 | 8498 | if (gimple_has_volatile_ops (stmt)) |
b8698a0f | 8499 | { |
73fbfcad | 8500 | if (dump_enabled_p ()) |
78c60e3d | 8501 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 8502 | "not vectorized: stmt has volatile operands\n"); |
1825a1f3 IR |
8503 | |
8504 | return false; | |
8505 | } | |
b8698a0f L |
8506 | |
8507 | /* Skip stmts that do not need to be vectorized. In loops this is expected | |
8644a673 IR |
8508 | to include: |
8509 | - the COND_EXPR which is the loop exit condition | |
8510 | - any LABEL_EXPRs in the loop | |
b8698a0f | 8511 | - computations that are used only for array indexing or loop control. |
8644a673 | 8512 | In basic blocks we only analyze statements that are a part of some SLP |
83197f37 | 8513 | instance, therefore, all the statements are relevant. |
ebfd146a | 8514 | |
d092494c | 8515 | Pattern statement needs to be analyzed instead of the original statement |
83197f37 | 8516 | if the original statement is not relevant. Otherwise, we analyze both |
079c527f JJ |
8517 | statements. In basic blocks we are called from some SLP instance |
8518 | traversal, don't analyze pattern stmts instead, the pattern stmts | |
8519 | already will be part of SLP instance. */ | |
83197f37 IR |
8520 | |
8521 | pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info); | |
b8698a0f | 8522 | if (!STMT_VINFO_RELEVANT_P (stmt_info) |
8644a673 | 8523 | && !STMT_VINFO_LIVE_P (stmt_info)) |
ebfd146a | 8524 | { |
9d5e7640 | 8525 | if (STMT_VINFO_IN_PATTERN_P (stmt_info) |
83197f37 | 8526 | && pattern_stmt |
9d5e7640 IR |
8527 | && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt)) |
8528 | || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt)))) | |
8529 | { | |
83197f37 | 8530 | /* Analyze PATTERN_STMT instead of the original stmt. */ |
9d5e7640 IR |
8531 | stmt = pattern_stmt; |
8532 | stmt_info = vinfo_for_stmt (pattern_stmt); | |
73fbfcad | 8533 | if (dump_enabled_p ()) |
9d5e7640 | 8534 | { |
78c60e3d SS |
8535 | dump_printf_loc (MSG_NOTE, vect_location, |
8536 | "==> examining pattern statement: "); | |
8537 | dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0); | |
9d5e7640 IR |
8538 | } |
8539 | } | |
8540 | else | |
8541 | { | |
73fbfcad | 8542 | if (dump_enabled_p ()) |
e645e942 | 8543 | dump_printf_loc (MSG_NOTE, vect_location, "irrelevant.\n"); |
ebfd146a | 8544 | |
9d5e7640 IR |
8545 | return true; |
8546 | } | |
8644a673 | 8547 | } |
83197f37 | 8548 | else if (STMT_VINFO_IN_PATTERN_P (stmt_info) |
079c527f | 8549 | && node == NULL |
83197f37 IR |
8550 | && pattern_stmt |
8551 | && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt)) | |
8552 | || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt)))) | |
8553 | { | |
8554 | /* Analyze PATTERN_STMT too. */ | |
73fbfcad | 8555 | if (dump_enabled_p ()) |
83197f37 | 8556 | { |
78c60e3d SS |
8557 | dump_printf_loc (MSG_NOTE, vect_location, |
8558 | "==> examining pattern statement: "); | |
8559 | dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0); | |
83197f37 IR |
8560 | } |
8561 | ||
891ad31c RB |
8562 | if (!vect_analyze_stmt (pattern_stmt, need_to_vectorize, node, |
8563 | node_instance)) | |
83197f37 IR |
8564 | return false; |
8565 | } | |
ebfd146a | 8566 | |
1107f3ae | 8567 | if (is_pattern_stmt_p (stmt_info) |
079c527f | 8568 | && node == NULL |
363477c0 | 8569 | && (pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info))) |
1107f3ae | 8570 | { |
363477c0 | 8571 | gimple_stmt_iterator si; |
1107f3ae | 8572 | |
363477c0 JJ |
8573 | for (si = gsi_start (pattern_def_seq); !gsi_end_p (si); gsi_next (&si)) |
8574 | { | |
355fe088 | 8575 | gimple *pattern_def_stmt = gsi_stmt (si); |
363477c0 JJ |
8576 | if (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_def_stmt)) |
8577 | || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_def_stmt))) | |
8578 | { | |
8579 | /* Analyze def stmt of STMT if it's a pattern stmt. */ | |
73fbfcad | 8580 | if (dump_enabled_p ()) |
363477c0 | 8581 | { |
78c60e3d SS |
8582 | dump_printf_loc (MSG_NOTE, vect_location, |
8583 | "==> examining pattern def statement: "); | |
8584 | dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_def_stmt, 0); | |
363477c0 | 8585 | } |
1107f3ae | 8586 | |
363477c0 | 8587 | if (!vect_analyze_stmt (pattern_def_stmt, |
891ad31c | 8588 | need_to_vectorize, node, node_instance)) |
363477c0 JJ |
8589 | return false; |
8590 | } | |
8591 | } | |
8592 | } | |
1107f3ae | 8593 | |
8644a673 IR |
8594 | switch (STMT_VINFO_DEF_TYPE (stmt_info)) |
8595 | { | |
8596 | case vect_internal_def: | |
8597 | break; | |
ebfd146a | 8598 | |
8644a673 | 8599 | case vect_reduction_def: |
7c5222ff | 8600 | case vect_nested_cycle: |
14a61437 RB |
8601 | gcc_assert (!bb_vinfo |
8602 | && (relevance == vect_used_in_outer | |
8603 | || relevance == vect_used_in_outer_by_reduction | |
8604 | || relevance == vect_used_by_reduction | |
b28ead45 AH |
8605 | || relevance == vect_unused_in_scope |
8606 | || relevance == vect_used_only_live)); | |
8644a673 IR |
8607 | break; |
8608 | ||
8609 | case vect_induction_def: | |
e7baeb39 RB |
8610 | gcc_assert (!bb_vinfo); |
8611 | break; | |
8612 | ||
8644a673 IR |
8613 | case vect_constant_def: |
8614 | case vect_external_def: | |
8615 | case vect_unknown_def_type: | |
8616 | default: | |
8617 | gcc_unreachable (); | |
8618 | } | |
ebfd146a | 8619 | |
8644a673 | 8620 | if (STMT_VINFO_RELEVANT_P (stmt_info)) |
ebfd146a | 8621 | { |
8644a673 | 8622 | gcc_assert (!VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt)))); |
0136f8f0 AH |
8623 | gcc_assert (STMT_VINFO_VECTYPE (stmt_info) |
8624 | || (is_gimple_call (stmt) | |
8625 | && gimple_call_lhs (stmt) == NULL_TREE)); | |
8644a673 | 8626 | *need_to_vectorize = true; |
ebfd146a IR |
8627 | } |
8628 | ||
b1af7da6 RB |
8629 | if (PURE_SLP_STMT (stmt_info) && !node) |
8630 | { | |
8631 | dump_printf_loc (MSG_NOTE, vect_location, | |
8632 | "handled only by SLP analysis\n"); | |
8633 | return true; | |
8634 | } | |
8635 | ||
8636 | ok = true; | |
8637 | if (!bb_vinfo | |
8638 | && (STMT_VINFO_RELEVANT_P (stmt_info) | |
8639 | || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def)) | |
8640 | ok = (vectorizable_simd_clone_call (stmt, NULL, NULL, node) | |
8641 | || vectorizable_conversion (stmt, NULL, NULL, node) | |
8642 | || vectorizable_shift (stmt, NULL, NULL, node) | |
8643 | || vectorizable_operation (stmt, NULL, NULL, node) | |
8644 | || vectorizable_assignment (stmt, NULL, NULL, node) | |
8645 | || vectorizable_load (stmt, NULL, NULL, node, NULL) | |
8646 | || vectorizable_call (stmt, NULL, NULL, node) | |
8647 | || vectorizable_store (stmt, NULL, NULL, node) | |
891ad31c | 8648 | || vectorizable_reduction (stmt, NULL, NULL, node, node_instance) |
e7baeb39 | 8649 | || vectorizable_induction (stmt, NULL, NULL, node) |
42fd8198 IE |
8650 | || vectorizable_condition (stmt, NULL, NULL, NULL, 0, node) |
8651 | || vectorizable_comparison (stmt, NULL, NULL, NULL, node)); | |
b1af7da6 RB |
8652 | else |
8653 | { | |
8654 | if (bb_vinfo) | |
8655 | ok = (vectorizable_simd_clone_call (stmt, NULL, NULL, node) | |
8656 | || vectorizable_conversion (stmt, NULL, NULL, node) | |
8657 | || vectorizable_shift (stmt, NULL, NULL, node) | |
8658 | || vectorizable_operation (stmt, NULL, NULL, node) | |
8659 | || vectorizable_assignment (stmt, NULL, NULL, node) | |
8660 | || vectorizable_load (stmt, NULL, NULL, node, NULL) | |
8661 | || vectorizable_call (stmt, NULL, NULL, node) | |
8662 | || vectorizable_store (stmt, NULL, NULL, node) | |
42fd8198 IE |
8663 | || vectorizable_condition (stmt, NULL, NULL, NULL, 0, node) |
8664 | || vectorizable_comparison (stmt, NULL, NULL, NULL, node)); | |
b1af7da6 | 8665 | } |
8644a673 IR |
8666 | |
8667 | if (!ok) | |
ebfd146a | 8668 | { |
73fbfcad | 8669 | if (dump_enabled_p ()) |
8644a673 | 8670 | { |
78c60e3d SS |
8671 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
8672 | "not vectorized: relevant stmt not "); | |
8673 | dump_printf (MSG_MISSED_OPTIMIZATION, "supported: "); | |
8674 | dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0); | |
8644a673 | 8675 | } |
b8698a0f | 8676 | |
ebfd146a IR |
8677 | return false; |
8678 | } | |
8679 | ||
a70d6342 IR |
8680 | if (bb_vinfo) |
8681 | return true; | |
8682 | ||
8644a673 IR |
8683 | /* Stmts that are (also) "live" (i.e. - that are used out of the loop) |
8684 | need extra handling, except for vectorizable reductions. */ | |
8685 | if (STMT_VINFO_LIVE_P (stmt_info) | |
8686 | && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type) | |
b28ead45 | 8687 | ok = vectorizable_live_operation (stmt, NULL, NULL, -1, NULL); |
ebfd146a | 8688 | |
8644a673 | 8689 | if (!ok) |
ebfd146a | 8690 | { |
73fbfcad | 8691 | if (dump_enabled_p ()) |
8644a673 | 8692 | { |
78c60e3d SS |
8693 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
8694 | "not vectorized: live stmt not "); | |
8695 | dump_printf (MSG_MISSED_OPTIMIZATION, "supported: "); | |
8696 | dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0); | |
8644a673 | 8697 | } |
b8698a0f | 8698 | |
8644a673 | 8699 | return false; |
ebfd146a IR |
8700 | } |
8701 | ||
ebfd146a IR |
8702 | return true; |
8703 | } | |
8704 | ||
8705 | ||
8706 | /* Function vect_transform_stmt. | |
8707 | ||
8708 | Create a vectorized stmt to replace STMT, and insert it at BSI. */ | |
8709 | ||
8710 | bool | |
355fe088 | 8711 | vect_transform_stmt (gimple *stmt, gimple_stmt_iterator *gsi, |
0d0293ac | 8712 | bool *grouped_store, slp_tree slp_node, |
ebfd146a IR |
8713 | slp_instance slp_node_instance) |
8714 | { | |
8715 | bool is_store = false; | |
355fe088 | 8716 | gimple *vec_stmt = NULL; |
ebfd146a | 8717 | stmt_vec_info stmt_info = vinfo_for_stmt (stmt); |
ebfd146a | 8718 | bool done; |
ebfd146a | 8719 | |
fce57248 | 8720 | gcc_assert (slp_node || !PURE_SLP_STMT (stmt_info)); |
355fe088 | 8721 | gimple *old_vec_stmt = STMT_VINFO_VEC_STMT (stmt_info); |
225ce44b | 8722 | |
ebfd146a IR |
8723 | switch (STMT_VINFO_TYPE (stmt_info)) |
8724 | { | |
8725 | case type_demotion_vec_info_type: | |
ebfd146a | 8726 | case type_promotion_vec_info_type: |
ebfd146a IR |
8727 | case type_conversion_vec_info_type: |
8728 | done = vectorizable_conversion (stmt, gsi, &vec_stmt, slp_node); | |
8729 | gcc_assert (done); | |
8730 | break; | |
8731 | ||
8732 | case induc_vec_info_type: | |
e7baeb39 | 8733 | done = vectorizable_induction (stmt, gsi, &vec_stmt, slp_node); |
ebfd146a IR |
8734 | gcc_assert (done); |
8735 | break; | |
8736 | ||
9dc3f7de IR |
8737 | case shift_vec_info_type: |
8738 | done = vectorizable_shift (stmt, gsi, &vec_stmt, slp_node); | |
8739 | gcc_assert (done); | |
8740 | break; | |
8741 | ||
ebfd146a IR |
8742 | case op_vec_info_type: |
8743 | done = vectorizable_operation (stmt, gsi, &vec_stmt, slp_node); | |
8744 | gcc_assert (done); | |
8745 | break; | |
8746 | ||
8747 | case assignment_vec_info_type: | |
8748 | done = vectorizable_assignment (stmt, gsi, &vec_stmt, slp_node); | |
8749 | gcc_assert (done); | |
8750 | break; | |
8751 | ||
8752 | case load_vec_info_type: | |
b8698a0f | 8753 | done = vectorizable_load (stmt, gsi, &vec_stmt, slp_node, |
ebfd146a IR |
8754 | slp_node_instance); |
8755 | gcc_assert (done); | |
8756 | break; | |
8757 | ||
8758 | case store_vec_info_type: | |
8759 | done = vectorizable_store (stmt, gsi, &vec_stmt, slp_node); | |
8760 | gcc_assert (done); | |
0d0293ac | 8761 | if (STMT_VINFO_GROUPED_ACCESS (stmt_info) && !slp_node) |
ebfd146a IR |
8762 | { |
8763 | /* In case of interleaving, the whole chain is vectorized when the | |
ff802fa1 | 8764 | last store in the chain is reached. Store stmts before the last |
ebfd146a IR |
8765 | one are skipped, and there vec_stmt_info shouldn't be freed |
8766 | meanwhile. */ | |
0d0293ac | 8767 | *grouped_store = true; |
ebfd146a IR |
8768 | if (STMT_VINFO_VEC_STMT (stmt_info)) |
8769 | is_store = true; | |
8770 | } | |
8771 | else | |
8772 | is_store = true; | |
8773 | break; | |
8774 | ||
8775 | case condition_vec_info_type: | |
f7e531cf | 8776 | done = vectorizable_condition (stmt, gsi, &vec_stmt, NULL, 0, slp_node); |
ebfd146a IR |
8777 | gcc_assert (done); |
8778 | break; | |
8779 | ||
42fd8198 IE |
8780 | case comparison_vec_info_type: |
8781 | done = vectorizable_comparison (stmt, gsi, &vec_stmt, NULL, slp_node); | |
8782 | gcc_assert (done); | |
8783 | break; | |
8784 | ||
ebfd146a | 8785 | case call_vec_info_type: |
190c2236 | 8786 | done = vectorizable_call (stmt, gsi, &vec_stmt, slp_node); |
039d9ea1 | 8787 | stmt = gsi_stmt (*gsi); |
8e4284d0 | 8788 | if (gimple_call_internal_p (stmt, IFN_MASK_STORE)) |
5ce9450f | 8789 | is_store = true; |
ebfd146a IR |
8790 | break; |
8791 | ||
0136f8f0 AH |
8792 | case call_simd_clone_vec_info_type: |
8793 | done = vectorizable_simd_clone_call (stmt, gsi, &vec_stmt, slp_node); | |
8794 | stmt = gsi_stmt (*gsi); | |
8795 | break; | |
8796 | ||
ebfd146a | 8797 | case reduc_vec_info_type: |
891ad31c RB |
8798 | done = vectorizable_reduction (stmt, gsi, &vec_stmt, slp_node, |
8799 | slp_node_instance); | |
ebfd146a IR |
8800 | gcc_assert (done); |
8801 | break; | |
8802 | ||
8803 | default: | |
8804 | if (!STMT_VINFO_LIVE_P (stmt_info)) | |
8805 | { | |
73fbfcad | 8806 | if (dump_enabled_p ()) |
78c60e3d | 8807 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 8808 | "stmt not supported.\n"); |
ebfd146a IR |
8809 | gcc_unreachable (); |
8810 | } | |
8811 | } | |
8812 | ||
225ce44b RB |
8813 | /* Verify SLP vectorization doesn't mess with STMT_VINFO_VEC_STMT. |
8814 | This would break hybrid SLP vectorization. */ | |
8815 | if (slp_node) | |
d90f8440 RB |
8816 | gcc_assert (!vec_stmt |
8817 | && STMT_VINFO_VEC_STMT (stmt_info) == old_vec_stmt); | |
225ce44b | 8818 | |
ebfd146a IR |
8819 | /* Handle inner-loop stmts whose DEF is used in the loop-nest that |
8820 | is being vectorized, but outside the immediately enclosing loop. */ | |
8821 | if (vec_stmt | |
a70d6342 IR |
8822 | && STMT_VINFO_LOOP_VINFO (stmt_info) |
8823 | && nested_in_vect_loop_p (LOOP_VINFO_LOOP ( | |
8824 | STMT_VINFO_LOOP_VINFO (stmt_info)), stmt) | |
ebfd146a IR |
8825 | && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type |
8826 | && (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_outer | |
b8698a0f | 8827 | || STMT_VINFO_RELEVANT (stmt_info) == |
a70d6342 | 8828 | vect_used_in_outer_by_reduction)) |
ebfd146a | 8829 | { |
a70d6342 IR |
8830 | struct loop *innerloop = LOOP_VINFO_LOOP ( |
8831 | STMT_VINFO_LOOP_VINFO (stmt_info))->inner; | |
ebfd146a IR |
8832 | imm_use_iterator imm_iter; |
8833 | use_operand_p use_p; | |
8834 | tree scalar_dest; | |
355fe088 | 8835 | gimple *exit_phi; |
ebfd146a | 8836 | |
73fbfcad | 8837 | if (dump_enabled_p ()) |
78c60e3d | 8838 | dump_printf_loc (MSG_NOTE, vect_location, |
e645e942 | 8839 | "Record the vdef for outer-loop vectorization.\n"); |
ebfd146a IR |
8840 | |
8841 | /* Find the relevant loop-exit phi-node, and reord the vec_stmt there | |
8842 | (to be used when vectorizing outer-loop stmts that use the DEF of | |
8843 | STMT). */ | |
8844 | if (gimple_code (stmt) == GIMPLE_PHI) | |
8845 | scalar_dest = PHI_RESULT (stmt); | |
8846 | else | |
8847 | scalar_dest = gimple_assign_lhs (stmt); | |
8848 | ||
8849 | FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest) | |
8850 | { | |
8851 | if (!flow_bb_inside_loop_p (innerloop, gimple_bb (USE_STMT (use_p)))) | |
8852 | { | |
8853 | exit_phi = USE_STMT (use_p); | |
8854 | STMT_VINFO_VEC_STMT (vinfo_for_stmt (exit_phi)) = vec_stmt; | |
8855 | } | |
8856 | } | |
8857 | } | |
8858 | ||
8859 | /* Handle stmts whose DEF is used outside the loop-nest that is | |
8860 | being vectorized. */ | |
b28ead45 AH |
8861 | if (slp_node) |
8862 | { | |
8863 | gimple *slp_stmt; | |
8864 | int i; | |
bd2f172f RB |
8865 | if (STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type) |
8866 | FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (slp_node), i, slp_stmt) | |
8867 | { | |
8868 | stmt_vec_info slp_stmt_info = vinfo_for_stmt (slp_stmt); | |
8869 | if (STMT_VINFO_LIVE_P (slp_stmt_info)) | |
8870 | { | |
8871 | done = vectorizable_live_operation (slp_stmt, gsi, slp_node, i, | |
8872 | &vec_stmt); | |
8873 | gcc_assert (done); | |
8874 | } | |
8875 | } | |
b28ead45 AH |
8876 | } |
8877 | else if (STMT_VINFO_LIVE_P (stmt_info) | |
bd2f172f | 8878 | && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type) |
ebfd146a | 8879 | { |
b28ead45 | 8880 | done = vectorizable_live_operation (stmt, gsi, slp_node, -1, &vec_stmt); |
ebfd146a IR |
8881 | gcc_assert (done); |
8882 | } | |
8883 | ||
8884 | if (vec_stmt) | |
83197f37 | 8885 | STMT_VINFO_VEC_STMT (stmt_info) = vec_stmt; |
ebfd146a | 8886 | |
b8698a0f | 8887 | return is_store; |
ebfd146a IR |
8888 | } |
8889 | ||
8890 | ||
b8698a0f | 8891 | /* Remove a group of stores (for SLP or interleaving), free their |
ebfd146a IR |
8892 | stmt_vec_info. */ |
8893 | ||
8894 | void | |
355fe088 | 8895 | vect_remove_stores (gimple *first_stmt) |
ebfd146a | 8896 | { |
355fe088 TS |
8897 | gimple *next = first_stmt; |
8898 | gimple *tmp; | |
ebfd146a IR |
8899 | gimple_stmt_iterator next_si; |
8900 | ||
8901 | while (next) | |
8902 | { | |
78048b1c JJ |
8903 | stmt_vec_info stmt_info = vinfo_for_stmt (next); |
8904 | ||
8905 | tmp = GROUP_NEXT_ELEMENT (stmt_info); | |
8906 | if (is_pattern_stmt_p (stmt_info)) | |
8907 | next = STMT_VINFO_RELATED_STMT (stmt_info); | |
ebfd146a IR |
8908 | /* Free the attached stmt_vec_info and remove the stmt. */ |
8909 | next_si = gsi_for_stmt (next); | |
3d3f2249 | 8910 | unlink_stmt_vdef (next); |
ebfd146a | 8911 | gsi_remove (&next_si, true); |
3d3f2249 | 8912 | release_defs (next); |
ebfd146a IR |
8913 | free_stmt_vec_info (next); |
8914 | next = tmp; | |
8915 | } | |
8916 | } | |
8917 | ||
8918 | ||
8919 | /* Function new_stmt_vec_info. | |
8920 | ||
8921 | Create and initialize a new stmt_vec_info struct for STMT. */ | |
8922 | ||
8923 | stmt_vec_info | |
310213d4 | 8924 | new_stmt_vec_info (gimple *stmt, vec_info *vinfo) |
ebfd146a IR |
8925 | { |
8926 | stmt_vec_info res; | |
8927 | res = (stmt_vec_info) xcalloc (1, sizeof (struct _stmt_vec_info)); | |
8928 | ||
8929 | STMT_VINFO_TYPE (res) = undef_vec_info_type; | |
8930 | STMT_VINFO_STMT (res) = stmt; | |
310213d4 | 8931 | res->vinfo = vinfo; |
8644a673 | 8932 | STMT_VINFO_RELEVANT (res) = vect_unused_in_scope; |
ebfd146a IR |
8933 | STMT_VINFO_LIVE_P (res) = false; |
8934 | STMT_VINFO_VECTYPE (res) = NULL; | |
8935 | STMT_VINFO_VEC_STMT (res) = NULL; | |
4b5caab7 | 8936 | STMT_VINFO_VECTORIZABLE (res) = true; |
ebfd146a IR |
8937 | STMT_VINFO_IN_PATTERN_P (res) = false; |
8938 | STMT_VINFO_RELATED_STMT (res) = NULL; | |
363477c0 | 8939 | STMT_VINFO_PATTERN_DEF_SEQ (res) = NULL; |
ebfd146a | 8940 | STMT_VINFO_DATA_REF (res) = NULL; |
af29617a | 8941 | STMT_VINFO_VEC_REDUCTION_TYPE (res) = TREE_CODE_REDUCTION; |
7e16ce79 | 8942 | STMT_VINFO_VEC_CONST_COND_REDUC_CODE (res) = ERROR_MARK; |
ebfd146a | 8943 | |
ebfd146a IR |
8944 | if (gimple_code (stmt) == GIMPLE_PHI |
8945 | && is_loop_header_bb_p (gimple_bb (stmt))) | |
8946 | STMT_VINFO_DEF_TYPE (res) = vect_unknown_def_type; | |
8947 | else | |
8644a673 IR |
8948 | STMT_VINFO_DEF_TYPE (res) = vect_internal_def; |
8949 | ||
9771b263 | 8950 | STMT_VINFO_SAME_ALIGN_REFS (res).create (0); |
32e8bb8e | 8951 | STMT_SLP_TYPE (res) = loop_vect; |
78810bd3 RB |
8952 | STMT_VINFO_NUM_SLP_USES (res) = 0; |
8953 | ||
e14c1050 IR |
8954 | GROUP_FIRST_ELEMENT (res) = NULL; |
8955 | GROUP_NEXT_ELEMENT (res) = NULL; | |
8956 | GROUP_SIZE (res) = 0; | |
8957 | GROUP_STORE_COUNT (res) = 0; | |
8958 | GROUP_GAP (res) = 0; | |
8959 | GROUP_SAME_DR_STMT (res) = NULL; | |
ebfd146a IR |
8960 | |
8961 | return res; | |
8962 | } | |
8963 | ||
8964 | ||
8965 | /* Create a hash table for stmt_vec_info. */ | |
8966 | ||
8967 | void | |
8968 | init_stmt_vec_info_vec (void) | |
8969 | { | |
9771b263 DN |
8970 | gcc_assert (!stmt_vec_info_vec.exists ()); |
8971 | stmt_vec_info_vec.create (50); | |
ebfd146a IR |
8972 | } |
8973 | ||
8974 | ||
8975 | /* Free hash table for stmt_vec_info. */ | |
8976 | ||
8977 | void | |
8978 | free_stmt_vec_info_vec (void) | |
8979 | { | |
93675444 | 8980 | unsigned int i; |
3161455c | 8981 | stmt_vec_info info; |
93675444 JJ |
8982 | FOR_EACH_VEC_ELT (stmt_vec_info_vec, i, info) |
8983 | if (info != NULL) | |
3161455c | 8984 | free_stmt_vec_info (STMT_VINFO_STMT (info)); |
9771b263 DN |
8985 | gcc_assert (stmt_vec_info_vec.exists ()); |
8986 | stmt_vec_info_vec.release (); | |
ebfd146a IR |
8987 | } |
8988 | ||
8989 | ||
8990 | /* Free stmt vectorization related info. */ | |
8991 | ||
8992 | void | |
355fe088 | 8993 | free_stmt_vec_info (gimple *stmt) |
ebfd146a IR |
8994 | { |
8995 | stmt_vec_info stmt_info = vinfo_for_stmt (stmt); | |
8996 | ||
8997 | if (!stmt_info) | |
8998 | return; | |
8999 | ||
78048b1c JJ |
9000 | /* Check if this statement has a related "pattern stmt" |
9001 | (introduced by the vectorizer during the pattern recognition | |
9002 | pass). Free pattern's stmt_vec_info and def stmt's stmt_vec_info | |
9003 | too. */ | |
9004 | if (STMT_VINFO_IN_PATTERN_P (stmt_info)) | |
9005 | { | |
9006 | stmt_vec_info patt_info | |
9007 | = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info)); | |
9008 | if (patt_info) | |
9009 | { | |
363477c0 | 9010 | gimple_seq seq = STMT_VINFO_PATTERN_DEF_SEQ (patt_info); |
355fe088 | 9011 | gimple *patt_stmt = STMT_VINFO_STMT (patt_info); |
f0281fde RB |
9012 | gimple_set_bb (patt_stmt, NULL); |
9013 | tree lhs = gimple_get_lhs (patt_stmt); | |
e6f5c25d | 9014 | if (lhs && TREE_CODE (lhs) == SSA_NAME) |
f0281fde | 9015 | release_ssa_name (lhs); |
363477c0 JJ |
9016 | if (seq) |
9017 | { | |
9018 | gimple_stmt_iterator si; | |
9019 | for (si = gsi_start (seq); !gsi_end_p (si); gsi_next (&si)) | |
f0281fde | 9020 | { |
355fe088 | 9021 | gimple *seq_stmt = gsi_stmt (si); |
f0281fde | 9022 | gimple_set_bb (seq_stmt, NULL); |
7532abf2 | 9023 | lhs = gimple_get_lhs (seq_stmt); |
e6f5c25d | 9024 | if (lhs && TREE_CODE (lhs) == SSA_NAME) |
f0281fde RB |
9025 | release_ssa_name (lhs); |
9026 | free_stmt_vec_info (seq_stmt); | |
9027 | } | |
363477c0 | 9028 | } |
f0281fde | 9029 | free_stmt_vec_info (patt_stmt); |
78048b1c JJ |
9030 | } |
9031 | } | |
9032 | ||
9771b263 | 9033 | STMT_VINFO_SAME_ALIGN_REFS (stmt_info).release (); |
6c9e85fb | 9034 | STMT_VINFO_SIMD_CLONE_INFO (stmt_info).release (); |
ebfd146a IR |
9035 | set_vinfo_for_stmt (stmt, NULL); |
9036 | free (stmt_info); | |
9037 | } | |
9038 | ||
9039 | ||
bb67d9c7 | 9040 | /* Function get_vectype_for_scalar_type_and_size. |
ebfd146a | 9041 | |
bb67d9c7 | 9042 | Returns the vector type corresponding to SCALAR_TYPE and SIZE as supported |
ebfd146a IR |
9043 | by the target. */ |
9044 | ||
bb67d9c7 RG |
9045 | static tree |
9046 | get_vectype_for_scalar_type_and_size (tree scalar_type, unsigned size) | |
ebfd146a | 9047 | { |
c7d97b28 | 9048 | tree orig_scalar_type = scalar_type; |
ef4bddc2 RS |
9049 | machine_mode inner_mode = TYPE_MODE (scalar_type); |
9050 | machine_mode simd_mode; | |
2f816591 | 9051 | unsigned int nbytes = GET_MODE_SIZE (inner_mode); |
ebfd146a IR |
9052 | int nunits; |
9053 | tree vectype; | |
9054 | ||
cc4b5170 | 9055 | if (nbytes == 0) |
ebfd146a IR |
9056 | return NULL_TREE; |
9057 | ||
48f2e373 RB |
9058 | if (GET_MODE_CLASS (inner_mode) != MODE_INT |
9059 | && GET_MODE_CLASS (inner_mode) != MODE_FLOAT) | |
9060 | return NULL_TREE; | |
9061 | ||
7b7b1813 RG |
9062 | /* For vector types of elements whose mode precision doesn't |
9063 | match their types precision we use a element type of mode | |
9064 | precision. The vectorization routines will have to make sure | |
48f2e373 RB |
9065 | they support the proper result truncation/extension. |
9066 | We also make sure to build vector types with INTEGER_TYPE | |
9067 | component type only. */ | |
6d7971b8 | 9068 | if (INTEGRAL_TYPE_P (scalar_type) |
48f2e373 RB |
9069 | && (GET_MODE_BITSIZE (inner_mode) != TYPE_PRECISION (scalar_type) |
9070 | || TREE_CODE (scalar_type) != INTEGER_TYPE)) | |
7b7b1813 RG |
9071 | scalar_type = build_nonstandard_integer_type (GET_MODE_BITSIZE (inner_mode), |
9072 | TYPE_UNSIGNED (scalar_type)); | |
6d7971b8 | 9073 | |
ccbf5bb4 RG |
9074 | /* We shouldn't end up building VECTOR_TYPEs of non-scalar components. |
9075 | When the component mode passes the above test simply use a type | |
9076 | corresponding to that mode. The theory is that any use that | |
9077 | would cause problems with this will disable vectorization anyway. */ | |
dfc2e2ac | 9078 | else if (!SCALAR_FLOAT_TYPE_P (scalar_type) |
e67f39f7 | 9079 | && !INTEGRAL_TYPE_P (scalar_type)) |
60b95d28 RB |
9080 | scalar_type = lang_hooks.types.type_for_mode (inner_mode, 1); |
9081 | ||
9082 | /* We can't build a vector type of elements with alignment bigger than | |
9083 | their size. */ | |
dfc2e2ac | 9084 | else if (nbytes < TYPE_ALIGN_UNIT (scalar_type)) |
aca43c6c JJ |
9085 | scalar_type = lang_hooks.types.type_for_mode (inner_mode, |
9086 | TYPE_UNSIGNED (scalar_type)); | |
ccbf5bb4 | 9087 | |
dfc2e2ac RB |
9088 | /* If we felt back to using the mode fail if there was |
9089 | no scalar type for it. */ | |
9090 | if (scalar_type == NULL_TREE) | |
9091 | return NULL_TREE; | |
9092 | ||
bb67d9c7 RG |
9093 | /* If no size was supplied use the mode the target prefers. Otherwise |
9094 | lookup a vector mode of the specified size. */ | |
9095 | if (size == 0) | |
9096 | simd_mode = targetm.vectorize.preferred_simd_mode (inner_mode); | |
9097 | else | |
9098 | simd_mode = mode_for_vector (inner_mode, size / nbytes); | |
cc4b5170 RG |
9099 | nunits = GET_MODE_SIZE (simd_mode) / nbytes; |
9100 | if (nunits <= 1) | |
9101 | return NULL_TREE; | |
ebfd146a IR |
9102 | |
9103 | vectype = build_vector_type (scalar_type, nunits); | |
ebfd146a IR |
9104 | |
9105 | if (!VECTOR_MODE_P (TYPE_MODE (vectype)) | |
9106 | && !INTEGRAL_MODE_P (TYPE_MODE (vectype))) | |
451dabda | 9107 | return NULL_TREE; |
ebfd146a | 9108 | |
c7d97b28 RB |
9109 | /* Re-attach the address-space qualifier if we canonicalized the scalar |
9110 | type. */ | |
9111 | if (TYPE_ADDR_SPACE (orig_scalar_type) != TYPE_ADDR_SPACE (vectype)) | |
9112 | return build_qualified_type | |
9113 | (vectype, KEEP_QUAL_ADDR_SPACE (TYPE_QUALS (orig_scalar_type))); | |
9114 | ||
ebfd146a IR |
9115 | return vectype; |
9116 | } | |
9117 | ||
bb67d9c7 RG |
9118 | unsigned int current_vector_size; |
9119 | ||
9120 | /* Function get_vectype_for_scalar_type. | |
9121 | ||
9122 | Returns the vector type corresponding to SCALAR_TYPE as supported | |
9123 | by the target. */ | |
9124 | ||
9125 | tree | |
9126 | get_vectype_for_scalar_type (tree scalar_type) | |
9127 | { | |
9128 | tree vectype; | |
9129 | vectype = get_vectype_for_scalar_type_and_size (scalar_type, | |
9130 | current_vector_size); | |
9131 | if (vectype | |
9132 | && current_vector_size == 0) | |
9133 | current_vector_size = GET_MODE_SIZE (TYPE_MODE (vectype)); | |
9134 | return vectype; | |
9135 | } | |
9136 | ||
42fd8198 IE |
9137 | /* Function get_mask_type_for_scalar_type. |
9138 | ||
9139 | Returns the mask type corresponding to a result of comparison | |
9140 | of vectors of specified SCALAR_TYPE as supported by target. */ | |
9141 | ||
9142 | tree | |
9143 | get_mask_type_for_scalar_type (tree scalar_type) | |
9144 | { | |
9145 | tree vectype = get_vectype_for_scalar_type (scalar_type); | |
9146 | ||
9147 | if (!vectype) | |
9148 | return NULL; | |
9149 | ||
9150 | return build_truth_vector_type (TYPE_VECTOR_SUBPARTS (vectype), | |
9151 | current_vector_size); | |
9152 | } | |
9153 | ||
b690cc0f RG |
9154 | /* Function get_same_sized_vectype |
9155 | ||
9156 | Returns a vector type corresponding to SCALAR_TYPE of size | |
9157 | VECTOR_TYPE if supported by the target. */ | |
9158 | ||
9159 | tree | |
bb67d9c7 | 9160 | get_same_sized_vectype (tree scalar_type, tree vector_type) |
b690cc0f | 9161 | { |
2568d8a1 | 9162 | if (VECT_SCALAR_BOOLEAN_TYPE_P (scalar_type)) |
9f47c7e5 IE |
9163 | return build_same_sized_truth_vector_type (vector_type); |
9164 | ||
bb67d9c7 RG |
9165 | return get_vectype_for_scalar_type_and_size |
9166 | (scalar_type, GET_MODE_SIZE (TYPE_MODE (vector_type))); | |
b690cc0f RG |
9167 | } |
9168 | ||
ebfd146a IR |
9169 | /* Function vect_is_simple_use. |
9170 | ||
9171 | Input: | |
81c40241 RB |
9172 | VINFO - the vect info of the loop or basic block that is being vectorized. |
9173 | OPERAND - operand in the loop or bb. | |
9174 | Output: | |
9175 | DEF_STMT - the defining stmt in case OPERAND is an SSA_NAME. | |
9176 | DT - the type of definition | |
ebfd146a IR |
9177 | |
9178 | Returns whether a stmt with OPERAND can be vectorized. | |
b8698a0f | 9179 | For loops, supportable operands are constants, loop invariants, and operands |
ff802fa1 | 9180 | that are defined by the current iteration of the loop. Unsupportable |
b8698a0f | 9181 | operands are those that are defined by a previous iteration of the loop (as |
a70d6342 IR |
9182 | is the case in reduction/induction computations). |
9183 | For basic blocks, supportable operands are constants and bb invariants. | |
9184 | For now, operands defined outside the basic block are not supported. */ | |
ebfd146a IR |
9185 | |
9186 | bool | |
81c40241 RB |
9187 | vect_is_simple_use (tree operand, vec_info *vinfo, |
9188 | gimple **def_stmt, enum vect_def_type *dt) | |
b8698a0f | 9189 | { |
ebfd146a | 9190 | *def_stmt = NULL; |
3fc356dc | 9191 | *dt = vect_unknown_def_type; |
b8698a0f | 9192 | |
73fbfcad | 9193 | if (dump_enabled_p ()) |
ebfd146a | 9194 | { |
78c60e3d SS |
9195 | dump_printf_loc (MSG_NOTE, vect_location, |
9196 | "vect_is_simple_use: operand "); | |
9197 | dump_generic_expr (MSG_NOTE, TDF_SLIM, operand); | |
e645e942 | 9198 | dump_printf (MSG_NOTE, "\n"); |
ebfd146a | 9199 | } |
b8698a0f | 9200 | |
b758f602 | 9201 | if (CONSTANT_CLASS_P (operand)) |
ebfd146a IR |
9202 | { |
9203 | *dt = vect_constant_def; | |
9204 | return true; | |
9205 | } | |
b8698a0f | 9206 | |
ebfd146a IR |
9207 | if (is_gimple_min_invariant (operand)) |
9208 | { | |
8644a673 | 9209 | *dt = vect_external_def; |
ebfd146a IR |
9210 | return true; |
9211 | } | |
9212 | ||
ebfd146a IR |
9213 | if (TREE_CODE (operand) != SSA_NAME) |
9214 | { | |
73fbfcad | 9215 | if (dump_enabled_p ()) |
af29617a AH |
9216 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
9217 | "not ssa-name.\n"); | |
ebfd146a IR |
9218 | return false; |
9219 | } | |
b8698a0f | 9220 | |
3fc356dc | 9221 | if (SSA_NAME_IS_DEFAULT_DEF (operand)) |
ebfd146a | 9222 | { |
3fc356dc RB |
9223 | *dt = vect_external_def; |
9224 | return true; | |
ebfd146a IR |
9225 | } |
9226 | ||
3fc356dc | 9227 | *def_stmt = SSA_NAME_DEF_STMT (operand); |
73fbfcad | 9228 | if (dump_enabled_p ()) |
ebfd146a | 9229 | { |
78c60e3d SS |
9230 | dump_printf_loc (MSG_NOTE, vect_location, "def_stmt: "); |
9231 | dump_gimple_stmt (MSG_NOTE, TDF_SLIM, *def_stmt, 0); | |
ebfd146a IR |
9232 | } |
9233 | ||
61d371eb | 9234 | if (! vect_stmt_in_region_p (vinfo, *def_stmt)) |
8644a673 | 9235 | *dt = vect_external_def; |
ebfd146a IR |
9236 | else |
9237 | { | |
3fc356dc | 9238 | stmt_vec_info stmt_vinfo = vinfo_for_stmt (*def_stmt); |
603cca93 | 9239 | *dt = STMT_VINFO_DEF_TYPE (stmt_vinfo); |
ebfd146a IR |
9240 | } |
9241 | ||
2e8ab70c RB |
9242 | if (dump_enabled_p ()) |
9243 | { | |
9244 | dump_printf_loc (MSG_NOTE, vect_location, "type of def: "); | |
9245 | switch (*dt) | |
9246 | { | |
9247 | case vect_uninitialized_def: | |
9248 | dump_printf (MSG_NOTE, "uninitialized\n"); | |
9249 | break; | |
9250 | case vect_constant_def: | |
9251 | dump_printf (MSG_NOTE, "constant\n"); | |
9252 | break; | |
9253 | case vect_external_def: | |
9254 | dump_printf (MSG_NOTE, "external\n"); | |
9255 | break; | |
9256 | case vect_internal_def: | |
9257 | dump_printf (MSG_NOTE, "internal\n"); | |
9258 | break; | |
9259 | case vect_induction_def: | |
9260 | dump_printf (MSG_NOTE, "induction\n"); | |
9261 | break; | |
9262 | case vect_reduction_def: | |
9263 | dump_printf (MSG_NOTE, "reduction\n"); | |
9264 | break; | |
9265 | case vect_double_reduction_def: | |
9266 | dump_printf (MSG_NOTE, "double reduction\n"); | |
9267 | break; | |
9268 | case vect_nested_cycle: | |
9269 | dump_printf (MSG_NOTE, "nested cycle\n"); | |
9270 | break; | |
9271 | case vect_unknown_def_type: | |
9272 | dump_printf (MSG_NOTE, "unknown\n"); | |
9273 | break; | |
9274 | } | |
9275 | } | |
9276 | ||
81c40241 | 9277 | if (*dt == vect_unknown_def_type) |
ebfd146a | 9278 | { |
73fbfcad | 9279 | if (dump_enabled_p ()) |
78c60e3d | 9280 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 9281 | "Unsupported pattern.\n"); |
ebfd146a IR |
9282 | return false; |
9283 | } | |
9284 | ||
ebfd146a IR |
9285 | switch (gimple_code (*def_stmt)) |
9286 | { | |
9287 | case GIMPLE_PHI: | |
ebfd146a | 9288 | case GIMPLE_ASSIGN: |
ebfd146a | 9289 | case GIMPLE_CALL: |
81c40241 | 9290 | break; |
ebfd146a | 9291 | default: |
73fbfcad | 9292 | if (dump_enabled_p ()) |
78c60e3d | 9293 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 9294 | "unsupported defining stmt:\n"); |
ebfd146a IR |
9295 | return false; |
9296 | } | |
9297 | ||
9298 | return true; | |
9299 | } | |
9300 | ||
81c40241 | 9301 | /* Function vect_is_simple_use. |
b690cc0f | 9302 | |
81c40241 | 9303 | Same as vect_is_simple_use but also determines the vector operand |
b690cc0f RG |
9304 | type of OPERAND and stores it to *VECTYPE. If the definition of |
9305 | OPERAND is vect_uninitialized_def, vect_constant_def or | |
9306 | vect_external_def *VECTYPE will be set to NULL_TREE and the caller | |
9307 | is responsible to compute the best suited vector type for the | |
9308 | scalar operand. */ | |
9309 | ||
9310 | bool | |
81c40241 RB |
9311 | vect_is_simple_use (tree operand, vec_info *vinfo, |
9312 | gimple **def_stmt, enum vect_def_type *dt, tree *vectype) | |
b690cc0f | 9313 | { |
81c40241 | 9314 | if (!vect_is_simple_use (operand, vinfo, def_stmt, dt)) |
b690cc0f RG |
9315 | return false; |
9316 | ||
9317 | /* Now get a vector type if the def is internal, otherwise supply | |
9318 | NULL_TREE and leave it up to the caller to figure out a proper | |
9319 | type for the use stmt. */ | |
9320 | if (*dt == vect_internal_def | |
9321 | || *dt == vect_induction_def | |
9322 | || *dt == vect_reduction_def | |
9323 | || *dt == vect_double_reduction_def | |
9324 | || *dt == vect_nested_cycle) | |
9325 | { | |
9326 | stmt_vec_info stmt_info = vinfo_for_stmt (*def_stmt); | |
83197f37 IR |
9327 | |
9328 | if (STMT_VINFO_IN_PATTERN_P (stmt_info) | |
9329 | && !STMT_VINFO_RELEVANT (stmt_info) | |
9330 | && !STMT_VINFO_LIVE_P (stmt_info)) | |
b690cc0f | 9331 | stmt_info = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info)); |
83197f37 | 9332 | |
b690cc0f RG |
9333 | *vectype = STMT_VINFO_VECTYPE (stmt_info); |
9334 | gcc_assert (*vectype != NULL_TREE); | |
9335 | } | |
9336 | else if (*dt == vect_uninitialized_def | |
9337 | || *dt == vect_constant_def | |
9338 | || *dt == vect_external_def) | |
9339 | *vectype = NULL_TREE; | |
9340 | else | |
9341 | gcc_unreachable (); | |
9342 | ||
9343 | return true; | |
9344 | } | |
9345 | ||
ebfd146a IR |
9346 | |
9347 | /* Function supportable_widening_operation | |
9348 | ||
b8698a0f L |
9349 | Check whether an operation represented by the code CODE is a |
9350 | widening operation that is supported by the target platform in | |
b690cc0f RG |
9351 | vector form (i.e., when operating on arguments of type VECTYPE_IN |
9352 | producing a result of type VECTYPE_OUT). | |
b8698a0f | 9353 | |
ebfd146a IR |
9354 | Widening operations we currently support are NOP (CONVERT), FLOAT |
9355 | and WIDEN_MULT. This function checks if these operations are supported | |
9356 | by the target platform either directly (via vector tree-codes), or via | |
9357 | target builtins. | |
9358 | ||
9359 | Output: | |
b8698a0f L |
9360 | - CODE1 and CODE2 are codes of vector operations to be used when |
9361 | vectorizing the operation, if available. | |
ebfd146a IR |
9362 | - MULTI_STEP_CVT determines the number of required intermediate steps in |
9363 | case of multi-step conversion (like char->short->int - in that case | |
9364 | MULTI_STEP_CVT will be 1). | |
b8698a0f L |
9365 | - INTERM_TYPES contains the intermediate type required to perform the |
9366 | widening operation (short in the above example). */ | |
ebfd146a IR |
9367 | |
9368 | bool | |
355fe088 | 9369 | supportable_widening_operation (enum tree_code code, gimple *stmt, |
b690cc0f | 9370 | tree vectype_out, tree vectype_in, |
ebfd146a IR |
9371 | enum tree_code *code1, enum tree_code *code2, |
9372 | int *multi_step_cvt, | |
9771b263 | 9373 | vec<tree> *interm_types) |
ebfd146a IR |
9374 | { |
9375 | stmt_vec_info stmt_info = vinfo_for_stmt (stmt); | |
9376 | loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info); | |
4ef69dfc | 9377 | struct loop *vect_loop = NULL; |
ef4bddc2 | 9378 | machine_mode vec_mode; |
81f40b79 | 9379 | enum insn_code icode1, icode2; |
ebfd146a | 9380 | optab optab1, optab2; |
b690cc0f RG |
9381 | tree vectype = vectype_in; |
9382 | tree wide_vectype = vectype_out; | |
ebfd146a | 9383 | enum tree_code c1, c2; |
4a00c761 JJ |
9384 | int i; |
9385 | tree prev_type, intermediate_type; | |
ef4bddc2 | 9386 | machine_mode intermediate_mode, prev_mode; |
4a00c761 | 9387 | optab optab3, optab4; |
ebfd146a | 9388 | |
4a00c761 | 9389 | *multi_step_cvt = 0; |
4ef69dfc IR |
9390 | if (loop_info) |
9391 | vect_loop = LOOP_VINFO_LOOP (loop_info); | |
9392 | ||
ebfd146a IR |
9393 | switch (code) |
9394 | { | |
9395 | case WIDEN_MULT_EXPR: | |
6ae6116f RH |
9396 | /* The result of a vectorized widening operation usually requires |
9397 | two vectors (because the widened results do not fit into one vector). | |
9398 | The generated vector results would normally be expected to be | |
9399 | generated in the same order as in the original scalar computation, | |
9400 | i.e. if 8 results are generated in each vector iteration, they are | |
9401 | to be organized as follows: | |
9402 | vect1: [res1,res2,res3,res4], | |
9403 | vect2: [res5,res6,res7,res8]. | |
9404 | ||
9405 | However, in the special case that the result of the widening | |
9406 | operation is used in a reduction computation only, the order doesn't | |
9407 | matter (because when vectorizing a reduction we change the order of | |
9408 | the computation). Some targets can take advantage of this and | |
9409 | generate more efficient code. For example, targets like Altivec, | |
9410 | that support widen_mult using a sequence of {mult_even,mult_odd} | |
9411 | generate the following vectors: | |
9412 | vect1: [res1,res3,res5,res7], | |
9413 | vect2: [res2,res4,res6,res8]. | |
9414 | ||
9415 | When vectorizing outer-loops, we execute the inner-loop sequentially | |
9416 | (each vectorized inner-loop iteration contributes to VF outer-loop | |
9417 | iterations in parallel). We therefore don't allow to change the | |
9418 | order of the computation in the inner-loop during outer-loop | |
9419 | vectorization. */ | |
9420 | /* TODO: Another case in which order doesn't *really* matter is when we | |
9421 | widen and then contract again, e.g. (short)((int)x * y >> 8). | |
9422 | Normally, pack_trunc performs an even/odd permute, whereas the | |
9423 | repack from an even/odd expansion would be an interleave, which | |
9424 | would be significantly simpler for e.g. AVX2. */ | |
9425 | /* In any case, in order to avoid duplicating the code below, recurse | |
9426 | on VEC_WIDEN_MULT_EVEN_EXPR. If it succeeds, all the return values | |
9427 | are properly set up for the caller. If we fail, we'll continue with | |
9428 | a VEC_WIDEN_MULT_LO/HI_EXPR check. */ | |
9429 | if (vect_loop | |
9430 | && STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction | |
9431 | && !nested_in_vect_loop_p (vect_loop, stmt) | |
9432 | && supportable_widening_operation (VEC_WIDEN_MULT_EVEN_EXPR, | |
9433 | stmt, vectype_out, vectype_in, | |
a86ec597 RH |
9434 | code1, code2, multi_step_cvt, |
9435 | interm_types)) | |
ebc047a2 CH |
9436 | { |
9437 | /* Elements in a vector with vect_used_by_reduction property cannot | |
9438 | be reordered if the use chain with this property does not have the | |
9439 | same operation. One such an example is s += a * b, where elements | |
9440 | in a and b cannot be reordered. Here we check if the vector defined | |
9441 | by STMT is only directly used in the reduction statement. */ | |
9442 | tree lhs = gimple_assign_lhs (stmt); | |
9443 | use_operand_p dummy; | |
355fe088 | 9444 | gimple *use_stmt; |
ebc047a2 CH |
9445 | stmt_vec_info use_stmt_info = NULL; |
9446 | if (single_imm_use (lhs, &dummy, &use_stmt) | |
9447 | && (use_stmt_info = vinfo_for_stmt (use_stmt)) | |
9448 | && STMT_VINFO_DEF_TYPE (use_stmt_info) == vect_reduction_def) | |
9449 | return true; | |
9450 | } | |
4a00c761 JJ |
9451 | c1 = VEC_WIDEN_MULT_LO_EXPR; |
9452 | c2 = VEC_WIDEN_MULT_HI_EXPR; | |
ebfd146a IR |
9453 | break; |
9454 | ||
81c40241 RB |
9455 | case DOT_PROD_EXPR: |
9456 | c1 = DOT_PROD_EXPR; | |
9457 | c2 = DOT_PROD_EXPR; | |
9458 | break; | |
9459 | ||
9460 | case SAD_EXPR: | |
9461 | c1 = SAD_EXPR; | |
9462 | c2 = SAD_EXPR; | |
9463 | break; | |
9464 | ||
6ae6116f RH |
9465 | case VEC_WIDEN_MULT_EVEN_EXPR: |
9466 | /* Support the recursion induced just above. */ | |
9467 | c1 = VEC_WIDEN_MULT_EVEN_EXPR; | |
9468 | c2 = VEC_WIDEN_MULT_ODD_EXPR; | |
9469 | break; | |
9470 | ||
36ba4aae | 9471 | case WIDEN_LSHIFT_EXPR: |
4a00c761 JJ |
9472 | c1 = VEC_WIDEN_LSHIFT_LO_EXPR; |
9473 | c2 = VEC_WIDEN_LSHIFT_HI_EXPR; | |
36ba4aae IR |
9474 | break; |
9475 | ||
ebfd146a | 9476 | CASE_CONVERT: |
4a00c761 JJ |
9477 | c1 = VEC_UNPACK_LO_EXPR; |
9478 | c2 = VEC_UNPACK_HI_EXPR; | |
ebfd146a IR |
9479 | break; |
9480 | ||
9481 | case FLOAT_EXPR: | |
4a00c761 JJ |
9482 | c1 = VEC_UNPACK_FLOAT_LO_EXPR; |
9483 | c2 = VEC_UNPACK_FLOAT_HI_EXPR; | |
ebfd146a IR |
9484 | break; |
9485 | ||
9486 | case FIX_TRUNC_EXPR: | |
9487 | /* ??? Not yet implemented due to missing VEC_UNPACK_FIX_TRUNC_HI_EXPR/ | |
9488 | VEC_UNPACK_FIX_TRUNC_LO_EXPR tree codes and optabs used for | |
9489 | computing the operation. */ | |
9490 | return false; | |
9491 | ||
9492 | default: | |
9493 | gcc_unreachable (); | |
9494 | } | |
9495 | ||
6ae6116f | 9496 | if (BYTES_BIG_ENDIAN && c1 != VEC_WIDEN_MULT_EVEN_EXPR) |
6b4db501 | 9497 | std::swap (c1, c2); |
4a00c761 | 9498 | |
ebfd146a IR |
9499 | if (code == FIX_TRUNC_EXPR) |
9500 | { | |
9501 | /* The signedness is determined from output operand. */ | |
b690cc0f RG |
9502 | optab1 = optab_for_tree_code (c1, vectype_out, optab_default); |
9503 | optab2 = optab_for_tree_code (c2, vectype_out, optab_default); | |
ebfd146a IR |
9504 | } |
9505 | else | |
9506 | { | |
9507 | optab1 = optab_for_tree_code (c1, vectype, optab_default); | |
9508 | optab2 = optab_for_tree_code (c2, vectype, optab_default); | |
9509 | } | |
9510 | ||
9511 | if (!optab1 || !optab2) | |
9512 | return false; | |
9513 | ||
9514 | vec_mode = TYPE_MODE (vectype); | |
947131ba RS |
9515 | if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing |
9516 | || (icode2 = optab_handler (optab2, vec_mode)) == CODE_FOR_nothing) | |
ebfd146a IR |
9517 | return false; |
9518 | ||
4a00c761 JJ |
9519 | *code1 = c1; |
9520 | *code2 = c2; | |
9521 | ||
9522 | if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype) | |
9523 | && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype)) | |
5e8d6dff IE |
9524 | /* For scalar masks we may have different boolean |
9525 | vector types having the same QImode. Thus we | |
9526 | add additional check for elements number. */ | |
9527 | return (!VECTOR_BOOLEAN_TYPE_P (vectype) | |
9528 | || (TYPE_VECTOR_SUBPARTS (vectype) / 2 | |
9529 | == TYPE_VECTOR_SUBPARTS (wide_vectype))); | |
4a00c761 | 9530 | |
b8698a0f | 9531 | /* Check if it's a multi-step conversion that can be done using intermediate |
ebfd146a | 9532 | types. */ |
ebfd146a | 9533 | |
4a00c761 JJ |
9534 | prev_type = vectype; |
9535 | prev_mode = vec_mode; | |
b8698a0f | 9536 | |
4a00c761 JJ |
9537 | if (!CONVERT_EXPR_CODE_P (code)) |
9538 | return false; | |
b8698a0f | 9539 | |
4a00c761 JJ |
9540 | /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS |
9541 | intermediate steps in promotion sequence. We try | |
9542 | MAX_INTERM_CVT_STEPS to get to NARROW_VECTYPE, and fail if we do | |
9543 | not. */ | |
9771b263 | 9544 | interm_types->create (MAX_INTERM_CVT_STEPS); |
4a00c761 JJ |
9545 | for (i = 0; i < MAX_INTERM_CVT_STEPS; i++) |
9546 | { | |
9547 | intermediate_mode = insn_data[icode1].operand[0].mode; | |
3ae0661a IE |
9548 | if (VECTOR_BOOLEAN_TYPE_P (prev_type)) |
9549 | { | |
9550 | intermediate_type | |
9551 | = build_truth_vector_type (TYPE_VECTOR_SUBPARTS (prev_type) / 2, | |
9552 | current_vector_size); | |
9553 | if (intermediate_mode != TYPE_MODE (intermediate_type)) | |
9554 | return false; | |
9555 | } | |
9556 | else | |
9557 | intermediate_type | |
9558 | = lang_hooks.types.type_for_mode (intermediate_mode, | |
9559 | TYPE_UNSIGNED (prev_type)); | |
9560 | ||
4a00c761 JJ |
9561 | optab3 = optab_for_tree_code (c1, intermediate_type, optab_default); |
9562 | optab4 = optab_for_tree_code (c2, intermediate_type, optab_default); | |
9563 | ||
9564 | if (!optab3 || !optab4 | |
9565 | || (icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing | |
9566 | || insn_data[icode1].operand[0].mode != intermediate_mode | |
9567 | || (icode2 = optab_handler (optab2, prev_mode)) == CODE_FOR_nothing | |
9568 | || insn_data[icode2].operand[0].mode != intermediate_mode | |
9569 | || ((icode1 = optab_handler (optab3, intermediate_mode)) | |
9570 | == CODE_FOR_nothing) | |
9571 | || ((icode2 = optab_handler (optab4, intermediate_mode)) | |
9572 | == CODE_FOR_nothing)) | |
9573 | break; | |
ebfd146a | 9574 | |
9771b263 | 9575 | interm_types->quick_push (intermediate_type); |
4a00c761 JJ |
9576 | (*multi_step_cvt)++; |
9577 | ||
9578 | if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype) | |
9579 | && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype)) | |
5e8d6dff IE |
9580 | return (!VECTOR_BOOLEAN_TYPE_P (vectype) |
9581 | || (TYPE_VECTOR_SUBPARTS (intermediate_type) / 2 | |
9582 | == TYPE_VECTOR_SUBPARTS (wide_vectype))); | |
4a00c761 JJ |
9583 | |
9584 | prev_type = intermediate_type; | |
9585 | prev_mode = intermediate_mode; | |
ebfd146a IR |
9586 | } |
9587 | ||
9771b263 | 9588 | interm_types->release (); |
4a00c761 | 9589 | return false; |
ebfd146a IR |
9590 | } |
9591 | ||
9592 | ||
9593 | /* Function supportable_narrowing_operation | |
9594 | ||
b8698a0f L |
9595 | Check whether an operation represented by the code CODE is a |
9596 | narrowing operation that is supported by the target platform in | |
b690cc0f RG |
9597 | vector form (i.e., when operating on arguments of type VECTYPE_IN |
9598 | and producing a result of type VECTYPE_OUT). | |
b8698a0f | 9599 | |
ebfd146a | 9600 | Narrowing operations we currently support are NOP (CONVERT) and |
ff802fa1 | 9601 | FIX_TRUNC. This function checks if these operations are supported by |
ebfd146a IR |
9602 | the target platform directly via vector tree-codes. |
9603 | ||
9604 | Output: | |
b8698a0f L |
9605 | - CODE1 is the code of a vector operation to be used when |
9606 | vectorizing the operation, if available. | |
ebfd146a IR |
9607 | - MULTI_STEP_CVT determines the number of required intermediate steps in |
9608 | case of multi-step conversion (like int->short->char - in that case | |
9609 | MULTI_STEP_CVT will be 1). | |
9610 | - INTERM_TYPES contains the intermediate type required to perform the | |
b8698a0f | 9611 | narrowing operation (short in the above example). */ |
ebfd146a IR |
9612 | |
9613 | bool | |
9614 | supportable_narrowing_operation (enum tree_code code, | |
b690cc0f | 9615 | tree vectype_out, tree vectype_in, |
ebfd146a | 9616 | enum tree_code *code1, int *multi_step_cvt, |
9771b263 | 9617 | vec<tree> *interm_types) |
ebfd146a | 9618 | { |
ef4bddc2 | 9619 | machine_mode vec_mode; |
ebfd146a IR |
9620 | enum insn_code icode1; |
9621 | optab optab1, interm_optab; | |
b690cc0f RG |
9622 | tree vectype = vectype_in; |
9623 | tree narrow_vectype = vectype_out; | |
ebfd146a | 9624 | enum tree_code c1; |
3ae0661a | 9625 | tree intermediate_type, prev_type; |
ef4bddc2 | 9626 | machine_mode intermediate_mode, prev_mode; |
ebfd146a | 9627 | int i; |
4a00c761 | 9628 | bool uns; |
ebfd146a | 9629 | |
4a00c761 | 9630 | *multi_step_cvt = 0; |
ebfd146a IR |
9631 | switch (code) |
9632 | { | |
9633 | CASE_CONVERT: | |
9634 | c1 = VEC_PACK_TRUNC_EXPR; | |
9635 | break; | |
9636 | ||
9637 | case FIX_TRUNC_EXPR: | |
9638 | c1 = VEC_PACK_FIX_TRUNC_EXPR; | |
9639 | break; | |
9640 | ||
9641 | case FLOAT_EXPR: | |
9642 | /* ??? Not yet implemented due to missing VEC_PACK_FLOAT_EXPR | |
9643 | tree code and optabs used for computing the operation. */ | |
9644 | return false; | |
9645 | ||
9646 | default: | |
9647 | gcc_unreachable (); | |
9648 | } | |
9649 | ||
9650 | if (code == FIX_TRUNC_EXPR) | |
9651 | /* The signedness is determined from output operand. */ | |
b690cc0f | 9652 | optab1 = optab_for_tree_code (c1, vectype_out, optab_default); |
ebfd146a IR |
9653 | else |
9654 | optab1 = optab_for_tree_code (c1, vectype, optab_default); | |
9655 | ||
9656 | if (!optab1) | |
9657 | return false; | |
9658 | ||
9659 | vec_mode = TYPE_MODE (vectype); | |
947131ba | 9660 | if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing) |
ebfd146a IR |
9661 | return false; |
9662 | ||
4a00c761 JJ |
9663 | *code1 = c1; |
9664 | ||
9665 | if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype)) | |
5e8d6dff IE |
9666 | /* For scalar masks we may have different boolean |
9667 | vector types having the same QImode. Thus we | |
9668 | add additional check for elements number. */ | |
9669 | return (!VECTOR_BOOLEAN_TYPE_P (vectype) | |
9670 | || (TYPE_VECTOR_SUBPARTS (vectype) * 2 | |
9671 | == TYPE_VECTOR_SUBPARTS (narrow_vectype))); | |
4a00c761 | 9672 | |
ebfd146a IR |
9673 | /* Check if it's a multi-step conversion that can be done using intermediate |
9674 | types. */ | |
4a00c761 | 9675 | prev_mode = vec_mode; |
3ae0661a | 9676 | prev_type = vectype; |
4a00c761 JJ |
9677 | if (code == FIX_TRUNC_EXPR) |
9678 | uns = TYPE_UNSIGNED (vectype_out); | |
9679 | else | |
9680 | uns = TYPE_UNSIGNED (vectype); | |
9681 | ||
9682 | /* For multi-step FIX_TRUNC_EXPR prefer signed floating to integer | |
9683 | conversion over unsigned, as unsigned FIX_TRUNC_EXPR is often more | |
9684 | costly than signed. */ | |
9685 | if (code == FIX_TRUNC_EXPR && uns) | |
9686 | { | |
9687 | enum insn_code icode2; | |
9688 | ||
9689 | intermediate_type | |
9690 | = lang_hooks.types.type_for_mode (TYPE_MODE (vectype_out), 0); | |
9691 | interm_optab | |
9692 | = optab_for_tree_code (c1, intermediate_type, optab_default); | |
2225b9f2 | 9693 | if (interm_optab != unknown_optab |
4a00c761 JJ |
9694 | && (icode2 = optab_handler (optab1, vec_mode)) != CODE_FOR_nothing |
9695 | && insn_data[icode1].operand[0].mode | |
9696 | == insn_data[icode2].operand[0].mode) | |
9697 | { | |
9698 | uns = false; | |
9699 | optab1 = interm_optab; | |
9700 | icode1 = icode2; | |
9701 | } | |
9702 | } | |
ebfd146a | 9703 | |
4a00c761 JJ |
9704 | /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS |
9705 | intermediate steps in promotion sequence. We try | |
9706 | MAX_INTERM_CVT_STEPS to get to NARROW_VECTYPE, and fail if we do not. */ | |
9771b263 | 9707 | interm_types->create (MAX_INTERM_CVT_STEPS); |
4a00c761 JJ |
9708 | for (i = 0; i < MAX_INTERM_CVT_STEPS; i++) |
9709 | { | |
9710 | intermediate_mode = insn_data[icode1].operand[0].mode; | |
3ae0661a IE |
9711 | if (VECTOR_BOOLEAN_TYPE_P (prev_type)) |
9712 | { | |
9713 | intermediate_type | |
9714 | = build_truth_vector_type (TYPE_VECTOR_SUBPARTS (prev_type) * 2, | |
9715 | current_vector_size); | |
9716 | if (intermediate_mode != TYPE_MODE (intermediate_type)) | |
9717 | return false; | |
9718 | } | |
9719 | else | |
9720 | intermediate_type | |
9721 | = lang_hooks.types.type_for_mode (intermediate_mode, uns); | |
4a00c761 JJ |
9722 | interm_optab |
9723 | = optab_for_tree_code (VEC_PACK_TRUNC_EXPR, intermediate_type, | |
9724 | optab_default); | |
9725 | if (!interm_optab | |
9726 | || ((icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing) | |
9727 | || insn_data[icode1].operand[0].mode != intermediate_mode | |
9728 | || ((icode1 = optab_handler (interm_optab, intermediate_mode)) | |
9729 | == CODE_FOR_nothing)) | |
9730 | break; | |
9731 | ||
9771b263 | 9732 | interm_types->quick_push (intermediate_type); |
4a00c761 JJ |
9733 | (*multi_step_cvt)++; |
9734 | ||
9735 | if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype)) | |
5e8d6dff IE |
9736 | return (!VECTOR_BOOLEAN_TYPE_P (vectype) |
9737 | || (TYPE_VECTOR_SUBPARTS (intermediate_type) * 2 | |
9738 | == TYPE_VECTOR_SUBPARTS (narrow_vectype))); | |
4a00c761 JJ |
9739 | |
9740 | prev_mode = intermediate_mode; | |
3ae0661a | 9741 | prev_type = intermediate_type; |
4a00c761 | 9742 | optab1 = interm_optab; |
ebfd146a IR |
9743 | } |
9744 | ||
9771b263 | 9745 | interm_types->release (); |
4a00c761 | 9746 | return false; |
ebfd146a | 9747 | } |